source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
kFloat32 = 1,  // 32-bit IEEE float
kDouble = 2,   // 64-bit IEEE double
kUInt32 = 3,   // 32-bit unsigned integer
kUInt64 = 4    // 64-bit unsigned integer
};
/*!
 * \brief Meta information about dataset, always sit in memory.
 *
 * Holds per-instance side information (labels, weights, margins, bounds) and
 * the group structure used by ranking objectives.  Move operations are
 * defaulted; copy assignment is user-provided so each HostDeviceVector can be
 * copied explicitly (Resize + Copy).
 */
class MetaInfo {
public:
/*! \brief number of data fields in MetaInfo */
// Counts the nine serialized fields below: num_row_, num_col_, num_nonzero_,
// labels_, group_ptr_, weights_, base_margin_, labels_lower_bound_,
// labels_upper_bound_.
static constexpr uint64_t kNumField = 9;
/*! \brief number of rows in the data */
uint64_t num_row_{0}; // NOLINT
/*! \brief number of columns in the data */
uint64_t num_col_{0}; // NOLINT
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0}; // NOLINT
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_; // NOLINT
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_group_t> group_ptr_; // NOLINT
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_; // NOLINT
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_; // NOLINT
/*!
* \brief lower bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT
/*!
* \brief upper bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT
/*! \brief default constructor */
MetaInfo() = default;
MetaInfo(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo&& that) = default;
/*!
* \brief Deep-copy assignment: scalar fields are assigned directly; each
* HostDeviceVector is resized to the source size and then copied.
*/
MetaInfo& operator=(MetaInfo const& that) {
this->num_row_ = that.num_row_;
this->num_col_ = that.num_col_;
this->num_nonzero_ = that.num_nonzero_;
this->labels_.Resize(that.labels_.Size());
this->labels_.Copy(that.labels_);
this->group_ptr_ = that.group_ptr_;
this->weights_.Resize(that.weights_.Size());
this->weights_.Copy(that.weights_);
this->base_margin_.Resize(that.base_margin_.Size());
this->base_margin_.Copy(that.base_margin_);
this->labels_lower_bound_.Resize(that.labels_lower_bound_.Size());
this->labels_lower_bound_.Copy(that.labels_lower_bound_);
this->labels_upper_bound_.Resize(that.labels_upper_bound_.Size());
this->labels_upper_bound_.Copy(that.labels_upper_bound_);
return *this;
}
/*!
* \brief Validate all metainfo.
* \param device Device ordinal used to check the placement of device vectors.
*/
void Validate(int32_t device) const;
/*! \brief Build a new MetaInfo containing only the rows listed in ridxs. */
MetaInfo Slice(common::Span<int32_t const> ridxs) const;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight, or 1.0f for every instance when no weights are set.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
// Cache hit is detected purely by size.  NOTE(review): labels replaced by
// same-sized data would reuse a stale ordering — confirm callers never do so.
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
// Sort indices so that |label| is non-decreasing along the result.
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
/*!
* \brief Set information in the meta info with array interface.
* \param key The key of the information.
* \param interface_str String representation of json format array interface.
*
* [ column_0, column_1, ... column_n ]
*
* Right now only 1 column is permitted.
*/
void SetInfo(const char* key, std::string const& interface_str);
/*
* \brief Extend with other MetaInfo.
*
* \param that The other MetaInfo object.
*
* \param accumulate_rows Whether rows need to be accumulated in this function. If
* client code knows number of rows in advance, set this parameter to false.
*/
void Extend(MetaInfo const& that, bool accumulate_rows);
private:
/*! \brief argsort of labels; mutable so the const accessor can memoize it */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
// Layout must remain trivially copyable: dmlc declares is_pod for this type
// (see the dmlc namespace at the bottom of this header).
struct Entry {
/*! \brief feature index */
bst_feature_t index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief compare entries by feature value, ascending
* (note: despite the historical wording elsewhere, this is NOT reversed) */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
/*! \brief exact equality on both fields (bitwise float comparison) */
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
 * \brief Parameters for constructing batches.
 */
struct BatchParam {
  /*! \brief The GPU device to use; -1 means no GPU. */
  int gpu_id{-1};
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Page size for external memory mode. */
  size_t gpu_page_size{0};
  /*!
   * \brief Default constructor.
   *
   * All members carry in-class initializers: previously `= default` left
   * gpu_id and gpu_page_size uninitialized, so operator!= on a
   * default-constructed object (e.g. the `BatchParam& param = {}` default of
   * DMatrix::GetBatches) read indeterminate values — undefined behavior.
   */
  BatchParam() = default;
  /*!
   * \brief Construct from explicit settings.
   * \param device GPU ordinal.
   * \param max_bin Maximum number of histogram bins per feature.
   * \param gpu_page_size External-memory page size (0 keeps the default).
   */
  BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0)
      : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
  /*! \brief True when any field differs. */
  inline bool operator!=(const BatchParam& other) const {
    return gpu_id != other.gpu_id || max_bin != other.max_bin ||
           gpu_page_size != other.gpu_page_size;
  }
};
/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<bst_row_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
/*! \brief row id of the first instance in this page */
size_t base_rowid{};
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size as zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
// NOTE(review): if the branch above fires with i >= offset_vec.size(),
// offset_vec[i] below is still an out-of-bounds read; callers appear to
// keep i < offset_vec.size() — confirm before relying on this guard.
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return Number of instances in the page. */
inline size_t Size() const {
// offset holds row-count + 1 entries once initialized; an empty vector
// (never Clear()-ed) also means zero rows.
return offset.Size() == 0 ? 0 : offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
// Rough estimate: element storage of the two vectors only (offset elements
// are bst_row_t but sized here as size_t).
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
// keep the leading sentinel 0 so Size() == 0 and segment 0 starts at 0
offset_vec.push_back(0);
data.HostVector().clear();
}
/*! \brief Set the base row id for this page. */
inline void SetBaseRowId(size_t row_id) {
base_rowid = row_id;
}
/*! \brief Build the CSC transpose of this page with num_columns columns. */
SparsePage GetTranspose(int num_columns) const;
/*! \brief Sort every segment in place by feature value (Entry::CmpValue),
* one segment per OpenMP task.
* NOTE(review): default(none) with implicit member access via `this` relies
* on `this` being predetermined shared — verify against the OpenMP version
* targeted by the build. */
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
void Push(const dmlc::RowBlock<uint32_t>& batch);
/**
* \brief Pushes external data batch onto this page
*
* \tparam AdapterBatchT
* \param batch
* \param missing
* \param nthread
*
* \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
*/
template <typename AdapterBatchT>
uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
};
/*! \brief A SparsePage holding data in CSC (column-major) layout. */
class CSCPage : public SparsePage {
 public:
  /*! \brief Construct an empty column page (base ctor initializes storage). */
  CSCPage() = default;
  /*! \brief Take ownership of an existing page's storage. */
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
/*! \brief A CSC-layout SparsePage whose segments are sorted by feature value. */
class SortedCSCPage : public SparsePage {
 public:
  /*! \brief Construct an empty sorted column page. */
  SortedCSCPage() = default;
  /*! \brief Take ownership of an existing page's storage. */
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
 * \brief A page stored in ELLPACK format.
 *
 * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
 * including CUDA-specific implementation details in the header.
 */
class EllpackPage {
public:
/*!
* \brief Default constructor.
*
* This is used in the external memory case. An empty ELLPACK page is constructed with its content
* set later by the reader.
*/
EllpackPage();
/*!
* \brief Constructor from an existing DMatrix.
*
* This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
* in CSR format.
*/
explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
/*! \brief Destructor. */
~EllpackPage();
// Move-only: declaring a move constructor suppresses the implicit copy
// operations, so pages (and the unique_ptr they own) cannot be copied.
EllpackPage(EllpackPage&& that);
/*! \return Number of instances in the page. */
size_t Size() const;
/*! \brief Set the base row id for this page. */
void SetBaseRowId(size_t row_id);
/*! \brief Non-owning access to the implementation object. */
const EllpackPageImpl* Impl() const { return impl_.get(); }
EllpackPageImpl* Impl() { return impl_.get(); }
private:
// Owning pointer to the implementation, defined outside this header.
std::unique_ptr<EllpackPageImpl> impl_;
};
/*!
 * \brief Interface for batch-iterator backends: concrete sources implement
 * dereference, increment and end-detection for a stream of pages of type T.
 */
template<typename T>
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() = default;
/*! \brief Access the current batch. */
virtual T& operator*() = 0;
virtual const T& operator*() const = 0;
/*! \brief Advance to the next batch. */
virtual void operator++() = 0;
/*! \brief True when the stream is exhausted. */
virtual bool AtEnd() const = 0;
};
/*!
 * \brief Forward iterator over batches, wrapping a BatchIteratorImpl backend.
 *
 * Copies share the backend through a shared_ptr, so advancing one copy
 * advances all of them.
 */
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag; // NOLINT
/*! \brief Takes ownership of the raw backend pointer. */
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
T& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
// NOTE: rhs is deliberately ignored — inequality only asks the backend
// whether it is exhausted, so comparison is meaningful only against the
// end() sentinel of a range-based for loop.
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
template<typename T>
class BatchSet {
public:
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
BatchIterator<T> begin() { return begin_iter_; } // NOLINT
BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT
private:
BatchIterator<T> begin_iter_;
};
/*!
 * \brief Internal data structured used by XGBoost during training.
 */
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
// Both SetInfo overloads simply forward to the MetaInfo object; subclasses
// may override to add bookkeeping.
virtual void SetInfo(const char *key, const void *dptr, DataType dtype,
size_t num) {
this->Info().SetInfo(key, dptr, dtype, num);
}
virtual void SetInfo(const char* key, std::string const& interface_str) {
this->Info().SetInfo(key, interface_str);
}
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
* \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
*/
template<typename T>
BatchSet<T> GetBatches(const BatchParam& param = {});
/*! \brief Whether a page of type T has been materialized
* (specialized for SparsePage and EllpackPage below). */
template <typename T>
bool PageExists() const;
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data columns single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
/*! \brief Whether the matrix is dense. */
bool IsDense() const {
// dense <=> every (row, column) cell holds a stored entry
return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
}
/*!
* \brief Load DMatrix from URI.
* \param uri The URI of input.
* \param silent Whether print information during loading.
* \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will be able to load in both local binary file.
* \param page_size Page size for external memory.
* \return The created DMatrix.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
size_t page_size = kPageSize);
/**
* \brief Creates a new DMatrix from an external data adapter.
*
* \tparam AdapterT Type of the adapter.
* \param [in,out] adapter View onto an external data.
* \param missing Values to count as missing.
* \param nthread Number of threads for construction.
* \param cache_prefix (Optional) The cache prefix for external memory.
* \param page_size (Optional) Size of the page.
*
* \return a Created DMatrix.
*/
template <typename AdapterT>
static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix = "",
size_t page_size = kPageSize);
/*! \brief Create a new DMatrix containing only the rows listed in ridxs. */
virtual DMatrix* Slice(common::Span<int32_t const> ridxs) = 0;
/*! \brief page size 32 MB */
static const size_t kPageSize = 32UL << 20UL;
protected:
// Hooks implemented by concrete DMatrix types; the GetBatches<T>
// specializations following this class dispatch to them.
virtual BatchSet<SparsePage> GetRowBatches() = 0;
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
virtual bool EllpackExists() const = 0;
virtual bool SparsePageExists() const = 0;
};
// Explicit specializations mapping the typed GetBatches<T>/PageExists<T>
// calls onto the corresponding protected virtual hooks of DMatrix.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetSortedColumnBatches();
}
// Only the ELLPACK path consumes the BatchParam (bins, device, page size).
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
return GetEllpackBatches(param);
}
} // namespace xgboost
namespace dmlc {
// Mark xgboost::Entry as POD so dmlc containers/serializers may use
// memcpy-style fast paths on it.
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
namespace serializer {
// Stream (de)serializer for Entry: writes index then value, matching the
// declaration order of the struct's fields.
template <>
struct Handler<xgboost::Entry> {
inline static void Write(Stream* strm, const xgboost::Entry& data) {
strm->Write(data.index);
strm->Write(data.fvalue);
}
// Returns false when either field fails to read (short-circuits).
inline static bool Read(Stream* strm, xgboost::Entry* data) {
return strm->Read(&data->index) && strm->Read(&data->fvalue);
}
};
} // namespace serializer
} // namespace dmlc
#endif // XGBOOST_DATA_H_
|
GrB_Vector_wait.c | //------------------------------------------------------------------------------
// GrB_Vector_wait: wait for a vector to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Finishes all work on a vector, followed by an OpenMP flush.
#include "GB.h"
#define GB_FREE_ALL ;
GrB_Info GrB_Vector_wait    // finish all work on a vector
(
    GrB_Vector *v
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    #pragma omp flush
    // Validate the handle pointer *before* dereferencing it: the previous
    // ordering evaluated (*v) inside GB_WHERE ahead of the NULL check, which
    // is undefined behavior when v is NULL.  GB_RETURN_IF_NULL does not
    // depend on anything GB_WHERE declares, so it is safe to hoist.
    GB_RETURN_IF_NULL (v) ;
    GB_WHERE ((*v), "GrB_Vector_wait (&v)") ;
    GB_RETURN_IF_NULL_OR_FAULTY (*v) ;

    //--------------------------------------------------------------------------
    // finish all pending work on the vector
    //--------------------------------------------------------------------------

    // Skip the wait entirely when the vector has no pending work.
    if (GB_ANY_PENDING_WORK (*v))
    {
        GrB_Info info ;
        GB_BURBLE_START ("GrB_Vector_wait") ;
        // A GrB_Vector is stored as a one-column GrB_Matrix, so the matrix
        // wait performs all the work.
        GB_OK (GB_Matrix_wait ((GrB_Matrix) (*v), Context)) ;
        GB_BURBLE_END ;
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
convolution_1x1_pack8to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack8to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack8to4_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
// 1x1 stride-2 int8 convolution (pack8 -> pack4): first gather every second
// pixel of every second row into a compact outw x outh blob, then run the
// stride-1 sgemm path on the shrunken input.  Each pack8 pixel is 8 int8
// bytes, so a horizontal stride of 2 advances the read pointer by 16 bytes.
static void conv1x1s2_sgemm_pack8to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
// Bytes to skip at the end of each output row: (w - 2*outw) leftover input
// pixels on the current row plus one full input row (vertical stride 2),
// times 8 bytes per pack8 pixel.
const int tailstep = (w - 2 * outw + w) * 8;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const signed char* r0 = bottom_blob.channel(p);
signed char* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
// 4 output pixels per iteration: loads at byte offsets 0/16/32/48
// (every second input pixel), stores contiguously.
for (; j + 3 < outw; j += 4)
{
int8x8_t _v0 = vld1_s8(r0);
int8x8_t _v1 = vld1_s8(r0 + 16);
int8x8_t _v2 = vld1_s8(r0 + 32);
int8x8_t _v3 = vld1_s8(r0 + 48);
vst1_s8(outptr, _v0);
vst1_s8(outptr + 8, _v1);
vst1_s8(outptr + 16, _v2);
vst1_s8(outptr + 24, _v3);
r0 += 64;
outptr += 32;
}
// 2-pixel tail.
for (; j + 1 < outw; j += 2)
{
int8x8_t _v0 = vld1_s8(r0);
int8x8_t _v1 = vld1_s8(r0 + 16);
vst1_s8(outptr, _v0);
vst1_s8(outptr + 8, _v1);
r0 += 32;
outptr += 16;
}
// single-pixel tail.
for (; j < outw; j++)
{
int8x8_t _v = vld1_s8(r0);
vst1_s8(outptr, _v);
r0 += 16;
outptr += 8;
}
// jump over the rest of this row and the skipped row below it.
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8to4_int8_neon(bottom_blob_shrinked, top_blob, kernel, opt);
}
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class are used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ``fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ``classified'' and is assigned an unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "magick/studio.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
/* Number of color components considered (red, green, blue). */
#define MaxDimension 3
/* Scale-space step size for tau. */
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
/* NOTE(review): the trailing semicolon inside this macro expansion looks
suspicious — any use inside a larger expression would break; confirm every
call site uses it as a full statement. */
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
/* Initial tau value for the scale-space filter. */
#define Tau 5.2f
/*
Typedef declarations.
*/
/*
Extent of one color channel of a class: the [left, right] histogram interval,
a scan cursor (index), and an accumulator for the channel mean (center) —
Classify() sums pixel values into center and later divides by the count.
*/
typedef struct _ExtentPacket
{
MagickRealType
center;
ssize_t
index,
left,
right;
} ExtentPacket;
/*
One identified class: node of a singly linked list with per-channel extents,
the number of member pixels (count), and a sequential class id.
*/
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,
id;
} Cluster;
/*
Node of the interval tree of zero crossings built during scale-space
analysis; sibling/child pointers link the hierarchy.
*/
typedef struct _IntervalTree
{
MagickRealType
tau;
ssize_t
left,
right;
MagickRealType
mean_stability,
stability;
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
/*
Second-derivative zero crossings of a smoothed 256-bin histogram at one
scale (tau).
*/
typedef struct _ZeroCrossing
{
MagickRealType
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
/* Blue/Green/Red index the extrema array by channel; SafeMargin widens each
cluster extent when matching pixels; TreeLength bounds the interval tree
(presumably its maximum node count — confirm against usage). */
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static MagickRealType
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
FreeNodes(IntervalTree *),
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *),
ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const MagickRealType cluster_threshold,
% const MagickRealType weighting_exponent,
% const MagickBooleanType verbose)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const MagickRealType cluster_threshold,
const MagickRealType weighting_exponent,const MagickBooleanType verbose)
{
#define SegmentImageTag "Segment/Image"
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExceptionInfo
*exception;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
MagickRealType
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register MagickRealType
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) memset(&red,0,sizeof(red));
(void) memset(&green,0,sizeof(green));
(void) memset(&blue,0,sizeof(blue));
exception=(&image->exception);
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
status=MagickTrue;
count=0;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p));
cluster->green.center+=(MagickRealType)
ScaleQuantumToChar(GetPixelGreen(p));
cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p));
cluster->count++;
break;
}
p++;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations.
*/
squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (MagickRealType *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(MagickRealType) i*(MagickRealType) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do coarse-grain classification.
*/
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*cluster;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(indexes+x,0);
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
if (((ssize_t) ScaleQuantumToChar(q->red) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->red) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->green) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->green) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->blue) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(q->blue) <=
(cluster->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(indexes+x,cluster->id);
break;
}
}
if (cluster == (Cluster *) NULL)
{
MagickRealType
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->green)-
(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->blue)-
(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)-
(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->green)-
(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+
squares[(ssize_t) ScaleQuantumToChar(q->blue)-
(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(indexes+x,j);
}
}
}
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  register ssize_t
    i,
    j,
    k,
    l;

  ssize_t
    center,    /* candidate position: same index j at the next-finer scale */
    correct,   /* chosen destination index, or -1 if none is admissible */
    count,     /* crossings of scale i+1 strictly between k and a candidate */
    left,      /* nearest scale-(i+1) crossing strictly left of j */
    right;     /* nearest scale-(i+1) crossing strictly right of j */

  /*
    Consolidate zero crossings: walk the scales from coarsest
    (index number_crossings-1) toward finest.  Each crossing at scale i is
    moved to a position that also carries a crossing at scale i+1, chosen so
    that an even number of scale-(i+1) crossings lies between it and the
    crossing to its left -- this keeps the scale-space fingerprints forming
    lines rather than loops.
  */
  for (i=(ssize_t) number_crossings-1; i >= 0; i--)
    for (j=0; j <= 255; j++)
    {
      if (zero_crossing[i].crossings[j] == 0)
        continue;
      /*
        Find the entry that is closest to j and still preserves the
        property that there are an even number of crossings between
        intervals.
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      left=MagickMax(k,0);
      center=j;
      for (k=j+1; k < 255; k++)
        if (zero_crossing[i+1].crossings[k] != 0)
          break;
      right=MagickMin(k,255);
      /*
        K is the zero crossing just left of j (at the current scale i).
      */
      for (k=j-1; k > 0; k--)
        if (zero_crossing[i].crossings[k] != 0)
          break;
      if (k < 0)
        k=0;  /* defensive only: the loop above terminates with k >= 0 */
      /*
        Check center for an even number of crossings between k and j.
      */
      correct=(-1);
      if (zero_crossing[i+1].crossings[j] != 0)
        {
          count=0;
          for (l=k+1; l < center; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (center != k))
            correct=center;
        }
      /*
        Check left for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < left; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (left != k))
            correct=left;
        }
      /*
        Check right for an even number of crossings between k and j.
      */
      if (correct == -1)
        {
          count=0;
          for (l=k+1; l < right; l++)
            if (zero_crossing[i+1].crossings[l] != 0)
              count++;
          if (((count % 2) == 0) && (right != k))
            correct=right;
        }
      /*
        Move the crossing value from j to the chosen position; if no
        admissible position exists the crossing is dropped.
      */
      l=(ssize_t) zero_crossing[i].crossings[j];
      zero_crossing[i].crossings[j]=0;
      if (correct != -1)
        zero_crossing[i].crossings[correct]=(short) l;
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
%    o extents: This pointer to an ExtentPacket represents the extent
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Locate the next peak region in the extrema array, starting at
    extents->index.  On success the region's bounds are stored in
    extents->left and extents->right and MagickTrue is returned;
    MagickFalse means no further region exists.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Advance to the first positive entry: the left edge (maxima).
  */
  while ((extents->index <= 255) && (extrema[extents->index] <= 0))
    extents->index++;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Advance to the first negative entry: one past the right edge (minima).
  */
  while ((extents->index <= 255) && (extrema[extents->index] >= 0))
    extents->index++;
  extents->right=extents->index-1;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const MagickRealType *histogram,
% MagickRealType *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of MagickRealTypes representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of MagickRealTypes is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    i;

  /*
    Interior bins: central differences, (h[i+1]-h[i-1])/2.
  */
  for (i=1; i < 255; i++)
    derivative[i]=0.5*(histogram[i+1]-histogram[i-1]);
  /*
    Endpoints: one-sided estimates from second-order polynomial
    interpolation through the three nearest bins.
  */
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[255]=(0.5*histogram[253]-2.0*histogram[254]+1.5*histogram[255]);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% MagickPixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This MagickRealType represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  MagickPixelPacket *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  MagickRealType
    threshold;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /*
      Size by the element actually stored (was sizeof(**histogram), which
      over-allocated the extrema buffer).
    */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Release slot i as well as the preceding slots: one of the pair may
          have been acquired even though the other failed.
          RelinquishMagickMemory() accepts NULL.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and locate the extrema of each channel's
    scale-space-smoothed histogram.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one per (red,green,blue) triple of peak regions.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /*
              Release the partially-built cluster list and the histogram
              buffers before returning; previously both leaked here.
            */
            for (cluster=head; cluster != (Cluster *) NULL;
                 cluster=next_cluster)
            {
              next_cluster=cluster->next;
              cluster=(Cluster *) RelinquishMagickMemory(cluster);
            }
            for (i=0; i < MaxDimension; i++)
            {
              extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
              histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
            }
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          /*
            Release histogram buffers before returning; previously leaked.
          */
          for (i=0; i < MaxDimension; i++)
          {
            extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
            histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
          }
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster and accumulate channel sums so the
    cluster centers can be computed below.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
    NOTE(review): count was just reset, so the threshold term below is based
    on the running surviving-cluster index rather than the total pixel count;
    this matches long-standing upstream behavior -- confirm before changing.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: assign its id and finalize its center.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster as the object and the largest as the
    background; with a single cluster both are the head.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      /*
        The dynamic threshold is the midpoint between the background and
        object cluster centers, per channel.
      */
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Zero the 256-bin histogram of each color channel.
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  /*
    Accumulate per-channel counts, one image row at a time.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
      p++;
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Append every childless (leaf) node of the subtree rooted at node to
    list, visiting each node before its sibling chain and the sibling
    chain before the node's children.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    {
      list[*number_nodes]=node;
      (*number_nodes)++;
    }
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}
static void MeanStability(IntervalTree *node)
{
  /*
    Set each node's mean_stability to the arithmetic mean of its
    children's stability values (0.0 for a childless node), recursing
    over the whole tree via sibling and child links.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      register IntervalTree
        *child;

      register MagickRealType
        sum;

      register ssize_t
        count;

      sum=0.0;
      count=0;
      for (child=node->child; child != (IntervalTree *) NULL;
           child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(MagickRealType) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
static void Stability(IntervalTree *node)
{
  /*
    A node's stability is the difference between its tau and its first
    child's tau; leaves get 0.0.  Applied to the whole tree recursively.
  */
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /*
    Walk from the coarsest scale toward the finest (i starts at -1 so that
    zero_crossing[0] is the first scale examined).  At each step every
    current leaf interval is split at the zero crossings of the next scale,
    the pieces becoming that leaf's children.
  */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            /*
              The first piece becomes head's child; later pieces are
              appended to that child's sibling chain.
            */
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                /*
                  Allocation failed: release the list and the whole tree
                  built so far.
                */
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
        }
      /*
        If at least one split happened, append the trailing
        [left, head->right] piece as a final sibling.
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a nodes tau and its child.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  /*
    Collect "active" nodes: a node whose stability is at least the mean
    stability of its children is appended to list and its subtree is not
    descended into; otherwise its children are examined instead.  The
    sibling chain is always traversed.
  */
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[*number_nodes]=node;
  (*number_nodes)++;
  ActiveNodes(list,number_nodes,node->sibling);
}
static void FreeNodes(IntervalTree *node)
{
  /*
    Recursively release an interval tree: sibling chain first, then the
    children, then the node itself.
  */
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  (void) RelinquishMagickMemory(node);
}
static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau,
const double min_tau,const double delta_tau,const double smooth_threshold,
short *extrema)
{
IntervalTree
**list,
*node,
*root;
MagickBooleanType
peak;
MagickRealType
average_tau,
*derivative,
*second_derivative,
tau,
value;
register ssize_t
i,
x;
size_t
count,
number_crossings;
ssize_t
index,
j,
k,
number_nodes;
ZeroCrossing
*zero_crossing;
/*
Allocate interval tree.
*/
list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
sizeof(*list));
if (list == (IntervalTree **) NULL)
return(0.0);
/*
Allocate zero crossing list.
*/
count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
sizeof(*zero_crossing));
if (zero_crossing == (ZeroCrossing *) NULL)
{
list=(IntervalTree **) RelinquishMagickMemory(list);
return(0.0);
}
for (i=0; i < (ssize_t) count; i++)
zero_crossing[i].tau=(-1.0);
/*
Initialize zero crossing list.
*/
derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative));
second_derivative=(MagickRealType *) AcquireCriticalMemory(256*
sizeof(*second_derivative));
i=0;
for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
{
zero_crossing[i].tau=tau;
ScaleSpace(histogram,tau,zero_crossing[i].histogram);
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
i++;
}
/*
Add an entry for the original histogram.
*/
zero_crossing[i].tau=0.0;
for (j=0; j <= 255; j++)
zero_crossing[i].histogram[j]=(MagickRealType) histogram[j];
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
number_crossings=(size_t) i;
derivative=(MagickRealType *) RelinquishMagickMemory(derivative);
second_derivative=(MagickRealType *)
RelinquishMagickMemory(second_derivative);
/*
Ensure the scale-space fingerprints form lines in scale-space, not loops.
*/
ConsolidateCrossings(zero_crossing,number_crossings);
/*
Force endpoints to be included in the interval.
*/
for (i=0; i <= (ssize_t) number_crossings; i++)
{
for (j=0; j < 255; j++)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
for (j=255; j > 0; j--)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
}
/*
Initialize interval tree.
*/
root=InitializeIntervalTree(zero_crossing,number_crossings);
if (root == (IntervalTree *) NULL)
{
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(0.0);
}
/*
Find active nodes: stability is greater (or equal) to the mean stability of
its children.
*/
number_nodes=0;
ActiveNodes(list,&number_nodes,root->child);
/*
Initialize extrema.
*/
for (i=0; i <= 255; i++)
extrema[i]=0;
for (i=0; i < number_nodes; i++)
{
/*
Find this tau in zero crossings list.
*/
k=0;
node=list[i];
for (j=0; j <= (ssize_t) number_crossings; j++)
if (zero_crossing[j].tau == node->tau)
k=j;
/*
Find the value of the peak.
*/
peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
MagickFalse;
index=node->left;
value=zero_crossing[k].histogram[index];
for (x=node->left; x <= node->right; x++)
{
if (peak != MagickFalse)
{
if (zero_crossing[k].histogram[x] > value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
else
if (zero_crossing[k].histogram[x] < value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
for (x=node->left; x <= node->right; x++)
{
if (index == 0)
index=256;
if (peak != MagickFalse)
extrema[x]=(short) index;
else
extrema[x]=(short) (-index);
}
}
/*
Determine the average tau.
*/
average_tau=0.0;
for (i=0; i < number_nodes; i++)
average_tau+=list[i]->tau;
average_tau/=(MagickRealType) number_nodes;
/*
Relinquish resources.
*/
FreeNodes(root);
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
% MagickRealType *scale_histogram)
%
% A description of each parameter follows.
%
%    o histogram: Specifies an array of ssize_t values representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  double
    alpha,
    beta,
    *kernel,
    sum;

  register ssize_t
    u,
    x;

  /*
    Convolve the 256-bin histogram with a Gaussian of standard deviation
    tau, writing the smoothed result to scale_histogram.
  */
  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap");
  alpha=1.0/(tau*sqrt(2.0*MagickPI));
  beta=(-1.0/(2.0*tau*tau));
  /*
    Tabulate the (unnormalized) Gaussian kernel; stop once the tail falls
    below MagickEpsilon, leaving the remainder zero.
  */
  for (x=0; x <= 255; x++)
    kernel[x]=0.0;
  for (x=0; x <= 255; x++)
  {
    kernel[x]=exp((double) beta*x*x);
    if (kernel[x] < MagickEpsilon)
      break;
  }
  for (x=0; x <= 255; x++)
  {
    sum=0.0;
    for (u=0; u <= 255; u++)
      sum+=(double) histogram[u]*kernel[MagickAbsoluteValue(x-u)];
    scale_histogram[x]=(MagickRealType) (alpha*sum);
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Release slot i as well as the preceding slots: histogram[i] may
          have been acquired even though extrema[i] failed (previously it
          leaked).  RelinquishMagickMemory() accepts NULL.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram in the requested colorspace and locate each
    channel's extrema.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace);
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  (void) TransformImageColorspace(image,previous_colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() finds the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(MagickRealType *second_derivative,
% const MagickRealType smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of MagickRealTypes representing the
% second derivative of the histogram of a particular color component.
%
%    o crossings: This array of integers is initialized with
%      -1, 0, or 1 representing the slope of the first derivative of the
%      histogram of a particular color component.
%
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] >= -smooth_threshold) &&
        (second_derivative[i] < smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: 1 for negative-to-positive, -1 for
    positive-to-negative, 0 elsewhere.  parity remembers the sign of the
    last nonzero sample seen.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] > 0.0)
      {
        if (parity < 0)
          crossings[i]=1;
        parity=(-1);
        continue;
      }
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
  }
}
|
SubmanifoldConvolutionRules.h | // Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#ifndef SUBMANIFOLDCONVOLUTIONRULES_H
#define SUBMANIFOLDCONVOLUTIONRULES_H
// Full input region (axis-aligned box of filter-support sites) for one
// output point, with each axis padded by size[d]/2 on the low side.
template <Int dimension>
RectangularRegion<dimension>
InputRegionCalculator_Submanifold(const Point<dimension> &output, long *size) {
  Point<dimension> lower, upper;
  for (Int d = 0; d < dimension; d++) {
    Int offset = size[d] / 2;
    lower[d] = output[d] - offset;
    upper[d] = output[d] + (size[d] - 1) - offset;
  }
  return RectangularRegion<dimension>(lower, upper);
}
// Call for each convolutional / max-pooling layer, once for each batch item.
// For every active output site, scans its input region and appends
// (input index, output index) pairs — offset by grid.ctr — to the rulebook
// entry of the corresponding filter offset.  Returns how many active
// input/output pairings were recorded.
template <Int dimension>
double SubmanifoldConvolution_SgToRules(SparseGrid<dimension> &grid,
                                        RuleBook &rules, long *size) {
  double activeCount = 0;
  for (auto const &out : grid.mp) {
    auto region =
        InputRegionCalculator_Submanifold<dimension>(out.first, size);
    Int offset = 0;  // index of the filter-support site within the region
    for (auto candidate : region) {
      auto in = grid.mp.find(candidate);
      if (in != grid.mp.end()) {
        rules[offset].push_back(in->second + grid.ctr);
        rules[offset].push_back(out.second + grid.ctr);
        activeCount++;
      }
      offset++;
    }
  }
  return activeCount;
}
// Serial driver: builds one shared rulebook (sd = filter volume entries)
// across all batch items.  SubmanifoldConvolution_SgToRules returns a
// double; accumulate in double and convert once at the end instead of
// performing a narrowing double->Int conversion on every iteration
// (consistent with the OMP variant below, which also sums doubles).
template <Int dimension>
Int SubmanifoldConvolution_SgsToRules(SparseGrids<dimension> &SGs,
                                      RuleBook &rules, long *size) {
  Int sd = volume<dimension>(size);
  double countActiveInputs = 0;
  rules.clear();
  rules.resize(sd);
  for (Int i = 0; i < (Int)SGs.size(); i++)
    countActiveInputs +=
        SubmanifoldConvolution_SgToRules<dimension>(SGs[i], rules, size);
  return (Int)countActiveInputs;
}
// Parallel driver: one private rulebook per batch item, filled concurrently,
// then merged per filter offset.  Returns the total active pair count.
template <Int dimension>
Int SubmanifoldConvolution_SgsToRules_OMP(SparseGrids<dimension> &SGs,
                                          RuleBook &rules, long *size) {
  // Per-batch-item rulebooks and counts (each filled by exactly one thread).
  std::vector<RuleBook> rbs(SGs.size());
  std::vector<double> countActiveInputs(SGs.size());
  rules.clear();
  Int sd = volume<dimension>(size);  // number of filter offsets
  rules.resize(sd);
  {
    Int i;
// Each iteration writes only rbs[i] / countActiveInputs[i]: no sharing.
#pragma omp parallel for private(i)
    for (i = 0; i < (Int)SGs.size(); i++) {
      rbs[i].resize(sd);
      countActiveInputs[i] =
          SubmanifoldConvolution_SgToRules<dimension>(SGs[i], rbs[i], size);
    }
  }
  {
    Int i;
// Merge: offset i of every per-item rulebook is appended to rules[i];
// distinct values of i never touch the same vector, so this is race-free.
#pragma omp parallel for private(i)
    for (i = 0; i < sd; i++)
      for (auto const &rb : rbs)
        rules[i].insert(rules[i].end(), rb[i].begin(), rb[i].end());
  }
  Int countActiveInputs_ = 0;
  for (auto &i : countActiveInputs)
    countActiveInputs_ += i;
  return countActiveInputs_;
}
#endif /* SUBMANIFOLDCONVOLUTIONRULES_H */
|
sapB_fmt_plug.c | /*
* this is a SAP-BCODE plugin for john the ripper.
* tested on linux/x86 only, rest is up to you.. at least, someone did the reversing :-)
*
* please note: this code is in a "works for me"-state, feel free to modify/speed up/clean/whatever it...
*
* (c) x7d8 sap loverz, public domain, btw
* cheers: see test-cases.
*
* Heavily modified by magnum 2011-2012 for performance and for SIMD, OMP and
* encodings support. Copyright (c) 2011, 2012 magnum, and it is hereby released
* to the general public under the following terms: Redistribution and use in
* source and binary forms, with or without modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sapB;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sapB);
#else
#include <string.h>
#include <ctype.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "md5.h"
#define FORMAT_LABEL "sapb"
#define FORMAT_NAME "SAP CODVN B (BCODE)"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD5)
#endif
#include "simd-intrinsics.h"
#define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME
#if defined(_OPENMP)
#include <omp.h>
static unsigned int omp_t = 1;
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 512 // tuned on K8-dual HT.
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#endif
#include "memdbg.h"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define SALT_FIELD_LENGTH 40 /* the max listed username length */
#define SALT_LENGTH 12 /* the max used username length */
#define PLAINTEXT_LENGTH 8 /* passwordlength max 8 chars */
#define CIPHERTEXT_LENGTH SALT_FIELD_LENGTH + 1 + 16 /* SALT + $ + 2x8 bytes for BCODE-representation */
#define BINARY_SIZE 8 /* half of md5 */
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define BCODE_ARRAY_LENGTH 3*16
static const unsigned char bcodeArr[BCODE_ARRAY_LENGTH] =
{ 0x14, 0x77, 0xf3, 0xd4, 0xbb, 0x71, 0x23, 0xd0, 0x03, 0xff, 0x47, 0x93, 0x55, 0xaa, 0x66, 0x91,
0xf2, 0x88, 0x6b, 0x99, 0xbf, 0xcb, 0x32, 0x1a, 0x19, 0xd9, 0xa7, 0x82, 0x22, 0x49, 0xa2, 0x51,
0xe2, 0xb7, 0x33, 0x71, 0x8b, 0x9f, 0x5d, 0x01, 0x44, 0x70, 0xae, 0x11, 0xef, 0x28, 0xf0, 0x0d };
/* char transition table for BCODE (from disp+work) */
static const unsigned char transtable[] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x3f, 0x40, 0x41, 0x50, 0x43, 0x44, 0x45, 0x4b, 0x47, 0x48, 0x4d, 0x4e, 0x54, 0x51, 0x53, 0x46,
0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x56, 0x55, 0x5c, 0x49, 0x5d, 0x4a,
0x42, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x58, 0x5b, 0x59, 0xff, 0x52,
//0x4c, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
0x4c, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
//0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x57, 0x5e, 0x5a, 0x4f, 0xff
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x57, 0x5e, 0x5a, 0x4f, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
// For backwards compatibility, we must support salts padded with spaces to a field width of 40
static struct fmt_tests tests[] = {
{"DDIC$C94E2F7DD0178374", "DDIC"},
// While "X" and "U" are not valid SAP passwords, they might still occur
// if passwords longer than 8 characters are allowed, and if the CODVN B
// password is calculated and stored in addition to the CODVN F or
// CODVN H password.
// Although a user picking "X Y" as a password is probably
// not very likely.
{"F $E3A65AAA9676060F", "X"},
// the 9 character password CYBERPUNK will be truncated to CYBERPUN
{"JOHNNY $7F7207932E4DE471", "CYBERPUNK"},
{"VAN $487A2A40A7BA2258", "HAUSER"},
{"ROOT $8366A4E9E6B72CB0", "KID"},
{"MAN $9F48E7CE5B184D2E", "U"},
// "-------" is not a valid SAP password (first 3 characters are
// identical)
// ("^^^^^^^" would be allowed, since "^" also replaces arbitrary
// non-ascii characters, as far as the CODVN B hash algorithm is
// concerned)
// {"------------$2CF190AF13E858A2", "-------"},
{"------------$058DE95926E00F32", "--+----"},
{"SAP*$7016BFF7C5472F1B", "MASTER"},
// password DOLLAR$$$--- will be truncated to DOLLAR$$
{"DOLLAR$$$---$C3413C498C48EB67", "DOLLAR$$$---"},
// Trigger suspected over-run of sum20. We do behave like SAP so it's
// not a problem.
{"12850413$1470EF2F683C956D", "46813230"},
{NULL}
};
#define TEMP_ARRAY_SIZE 4*16
#define DEFAULT_OFFSET 15
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int (*keyLen);
#ifdef SIMD_COEF_32
static unsigned char (*saved_key);
static unsigned char (*interm_key);
static unsigned char (*crypt_key);
static unsigned int (*clean_pos);
#else
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE/sizeof(ARCH_WORD_32)];
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
#endif
static struct saltstruct {
unsigned int l;
unsigned char s[SALT_LENGTH];
} *cur_salt;
/*
 * One-time format initialization: warn about unsuitable UTF-8 target
 * encoding, scale the candidate batch size for OpenMP, and allocate the
 * key/digest buffers released later by done().
 */
static void init(struct fmt_main *self)
{
	static int warned = 0;

	/* SAP-B operates on an 8-bit codepage; UTF-8 input would be mangled. */
	if (options.target_enc == UTF_8 && warned++ == 0)
		fprintf(stderr, "Warning: SAP-B format should never be UTF-8.\nUse --target-encoding=iso-8859-1 or whatever is applicable.\n");

#if defined (_OPENMP)
	/* Scale keys-per-crypt by thread count and the OMP_SCALE tuning factor. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt = (omp_t * MIN_KEYS_PER_CRYPT);
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt = (omp_t * MAX_KEYS_PER_CRYPT);
#endif
#ifdef SIMD_COEF_32
	/* Interleaved SIMD buffers: one 64-byte MD5 block per key lane. */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             64, MEM_ALIGN_SIMD);
	interm_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                              64, MEM_ALIGN_SIMD);
	clean_pos = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*clean_pos));
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             16, MEM_ALIGN_SIMD);
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
#endif
	saved_plain = mem_calloc(self->params.max_keys_per_crypt,
	                         sizeof(*saved_plain) );
	keyLen = mem_calloc(self->params.max_keys_per_crypt,
	                    sizeof(*keyLen));
}
/*
 * Release every buffer allocated in init().
 */
static void done(void)
{
	MEM_FREE(keyLen);
	MEM_FREE(saved_plain);
	MEM_FREE(crypt_key);
#ifdef SIMD_COEF_32
	MEM_FREE(clean_pos);
	MEM_FREE(interm_key);
#endif
	MEM_FREE(saved_key);
}
/*
 * Syntactic validation of a "USERNAME$HEXHASH" ciphertext:
 * user-name field within SALT_FIELD_LENGTH, no lower-case ASCII, only
 * space padding past SALT_LENGTH, not starting with '!' or '?', not
 * empty/all spaces, and exactly 16 upper-case hex digits after '$'.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *hash;
	int i;

	if (!ciphertext)
		return 0;

	hash = strrchr(ciphertext, '$');
	if (!hash)
		return 0;

	if (hash - ciphertext > SALT_FIELD_LENGTH)
		return 0;
	if (strlen(&hash[1]) != BINARY_SIZE * 2)
		return 0;

	for (i = 0; i < hash - ciphertext; i++) {
		// even those lower case non-ascii characters with a
		// corresponding upper case character could be rejected
		if (ciphertext[i] >= 'a' && ciphertext[i] <= 'z')
			return 0;
		// SAP user names cannot be longer than 12 characters
		if (i >= SALT_LENGTH && ciphertext[i] != ' ')
			return 0;
	}

	// SAP user name cannot start with ! or ?
	if (ciphertext[0] == '!' || ciphertext[0] == '?')
		return 0;

	// the user name must not simply be spaces, or empty
	// (scan stops at the terminating '$' at the latest)
	for (i = 0; ciphertext[i] == ' '; i++)
		;
	if (ciphertext[i] == '$')
		return 0;

	hash++;
	// SAP and sap2john.pl always use upper case A-F for hashes,
	// so don't allow a-f
	for (i = 0; i < BINARY_SIZE * 2; i++) {
		int c = hash[i];
		if (!((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F')))
			return 0;
	}

	return 1;
}
/*
 * Select the current salt (binary user name produced by get_salt()) for
 * subsequent crypt_all() calls.
 */
static void set_salt(void *salt)
{
	cur_salt = salt;
}
/*
 * Store candidate password 'key' at slot 'index'.  keyLen = -1 marks the
 * slot dirty so the length/translation is computed lazily in
 * crypt_all()/get_key().  NOTE(review): a fixed PLAINTEXT_LENGTH bytes are
 * copied even when the string is shorter -- presumably the caller's key
 * buffer is always at least that large; confirm against the core.
 */
static void set_key(char *key, int index)
{
	memcpy(saved_plain[index], key, PLAINTEXT_LENGTH);
	keyLen[index] = -1;
}
/*
 * Return the stored candidate in the canonical form SAP applies:
 * ASCII letters upper-cased, any 8-bit character replaced by '^',
 * NUL-terminated at the recorded key length.
 */
static char *get_key(int index)
{
	char *plain = saved_plain[index];
	int pos;

	// Work-around for new self-test: length may not be computed yet.
	if (keyLen[index] == -1)
		keyLen[index] = strlen(plain);

	for (pos = 0; pos < keyLen[index]; pos++) {
		if (plain[pos] >= 'a' && plain[pos] <= 'z')
			plain[pos] ^= 0x20;	/* upper-case ASCII letter */
		else if (plain[pos] & 0x80)
			plain[pos] = '^';	/* replace 8-bit character */
	}
	plain[pos] = 0;

	return plain;
}
/*
 * Return nonzero if ANY candidate in the batch may match 'binary'.
 * SIMD path: cheap filter comparing only word 0 of every digest lane
 * (cmp_one performs the full comparison).  Scalar path: full
 * BINARY_SIZE memcmp per candidate.
 */
static int cmp_all(void *binary, int count) {
#ifdef SIMD_COEF_32
	unsigned int x,y=0;

#ifdef _OPENMP
	for(;y<SIMD_PARA_MD5*omp_t;y++)
#else
	for(;y<SIMD_PARA_MD5;y++)
#endif
		for(x = 0; x < SIMD_COEF_32; x++)
		{
			if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+x] )
				return 1;
		}
	return 0;
#else
	int index;
	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_key[index], BINARY_SIZE))
			return 1;
	return 0;
#endif
}
/*
 * Nothing further to verify: cmp_one already compares the full stored
 * BINARY_SIZE bytes, so an exact match is implied.
 */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
 * Full BINARY_SIZE comparison of candidate 'index' against 'binary'.
 * SIMD path gathers the lane's interleaved 32-bit words; scalar path is
 * a plain memcmp.
 */
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
	unsigned int i,x,y;
	x = index&(SIMD_COEF_32-1);
	y = (unsigned int)index/SIMD_COEF_32;

	for(i=0;i<(BINARY_SIZE/4);i++)
		if ( ((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[y*SIMD_COEF_32*4+i*SIMD_COEF_32+x] )
			return 0;
	return 1;
#else
	return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
/*
 * The proprietary SAP "walld0rf magic" permutation: builds destArray (the
 * input to the second MD5) by interleaving translated password bytes,
 * salt (user name) bytes and constants from bcodeArr, steered by the low
 * bits of the first MD5 digest (temp_key).  The common key>=6 cases are
 * manually unrolled; the trailing while-loop handles the remainder.
 * Returns sum20, the number of bytes written (in 0x20..0x2f).
 */
static unsigned int walld0rf_magic(const int index, const unsigned char *temp_key, unsigned char *destArray)
{
	unsigned int sum20, I1, I2, I3;
	const int len = keyLen[index];

	/* I1 = next key byte, I2 = write position, I3 = next salt byte. */
#ifdef SIMD_COEF_32
#define key(i)	saved_key[GETPOS(i, index)]
#else
#define key(i)	saved_key[index][i]
#endif
	// some magic in between....yes, byte 4 is ignored...
	// sum20 will be between 0x20 and 0x2F
	//sum20 = temp_key[5]%4 + temp_key[3]%4 + temp_key[2]%4 + temp_key[1]%4 + temp_key[0]%4 + 0x20;
	sum20 = *(unsigned int*)temp_key & 0x03030303;
	sum20 = (unsigned char)((sum20 >> 24) + (sum20 >> 16) +
	                        (sum20 >> 8) + sum20);
	sum20 += (temp_key[5] & 3) | 0x20;

	// Some unrolling
	if (temp_key[15] & 0x01) {
		destArray[0] = bcodeArr[47];
		I2 = 1;
	}
	else {
		I2 = 0;
	}
	destArray[I2++] = key(0);
	destArray[I2++] = cur_salt->s[0];
	destArray[I2] = bcodeArr[I2-2];
	destArray[++I2] = 0; I2++;

	if( len >= 6) {
		I1 = 6;
		if( cur_salt->l >= 4 ) {
			// key >= 6 bytes, salt >= 4 bytes
			if (temp_key[14] & 0x01)
				destArray[I2++] = bcodeArr[46];
			destArray[I2++] = key(1);
			destArray[I2++] = cur_salt->s[1];
			destArray[I2] = bcodeArr[I2-4];
			destArray[++I2] = 0; I2++;
			if (temp_key[13] & 0x01)
				destArray[I2++] = bcodeArr[45];
			destArray[I2++] = key(2);
			destArray[I2++] = cur_salt->s[2];
			destArray[I2] = bcodeArr[I2-6];
			destArray[++I2] = 0; I2++;
			if (temp_key[12] & 0x01)
				destArray[I2++] = bcodeArr[44];
			destArray[I2++] = key(3);
			destArray[I2++] = cur_salt->s[3];
			destArray[I2] = bcodeArr[I2-8];
			destArray[++I2] = 0; I2++;
			I3 = 4;
			if (temp_key[DEFAULT_OFFSET - 4] & 0x01)
				destArray[I2++] = bcodeArr[43];
			destArray[I2++] = key(4);
			if (4 < cur_salt->l)
				destArray[I2++] = cur_salt->s[I3++];
			destArray[I2] = bcodeArr[I2 - 5 - I3];
			destArray[++I2] = 0; I2++;
			if (temp_key[DEFAULT_OFFSET - 5] & 0x01)
				destArray[I2++] = bcodeArr[42];
			destArray[I2++] = key(5);
			if (5 < cur_salt->l)
				destArray[I2++] = cur_salt->s[I3++];
			destArray[I2] = bcodeArr[I2 - 6 - I3];
			destArray[++I2] = 0; I2++;
			if (6 < len) {
				if (temp_key[DEFAULT_OFFSET - 6] & 0x01)
					destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 7];
				destArray[I2++] = key(6); I1++;
			}
			if (6 < cur_salt->l)
				destArray[I2++] = cur_salt->s[I3++];
		} else {
			// Key >= 6 bytes, salt < 4 Bytes
			I3 = 1;
			if (temp_key[DEFAULT_OFFSET - 1] & 0x01)
				destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 2];
			destArray[I2++] = key(1);
			if (1 < cur_salt->l)
				destArray[I2++] = cur_salt->s[I3++];
			destArray[I2] = bcodeArr[I2 - 2 - I3];
			destArray[++I2] = 0; I2++;
			if (temp_key[DEFAULT_OFFSET - 2] & 0x01)
				destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 3];
			destArray[I2++] = key(2);
			if (2 < cur_salt->l)
				destArray[I2++] = cur_salt->s[I3++];
			destArray[I2] = bcodeArr[I2 - 3 - I3];
			destArray[++I2] = 0; I2++;
			if (temp_key[DEFAULT_OFFSET - 3] & 0x01)
				destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 4];
			destArray[I2++] = key(3);
			destArray[I2] = bcodeArr[I2 - 4 - I3];
			destArray[++I2] = 0; I2++;
			if (temp_key[DEFAULT_OFFSET - 4] & 0x01)
				destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 5];
			destArray[I2++] = key(4);
			destArray[I2] = bcodeArr[I2 - 5 - I3];
			destArray[++I2] = 0; I2++;
			if (temp_key[DEFAULT_OFFSET - 5] & 0x01)
				destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 6];
			destArray[I2++] = key(5);
			destArray[I2] = bcodeArr[I2 - 6 - I3];
			destArray[++I2] = 0; I2++;
			if (6 < len) {
				if (temp_key[DEFAULT_OFFSET - 6] & 0x01)
					destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - 7];
				destArray[I2++] = key(6); I1++;
			}
		}
		destArray[I2] = bcodeArr[I2 - I1 - I3];
		destArray[++I2] = 0; I2++;
	} else {
		I1 = I3 = 1;
	}

	// End of unrolling. Now the remaining bytes
	while(I2 < sum20) {
		if (I1 < len) {
			if (temp_key[DEFAULT_OFFSET - I1] & 0x01)
				destArray[I2++] = bcodeArr[BCODE_ARRAY_LENGTH - I1 - 1];
			destArray[I2++] = key(I1); I1++;
		}
		if (I3 < cur_salt->l)
			destArray[I2++] = cur_salt->s[I3++];
		destArray[I2] = bcodeArr[I2 - I1 - I3];
		destArray[++I2] = 0; I2++;
	}

#if SIMD_COEF_32
	// This may be unaligned here, but after the aligned vector buffer
	// transfer, we will have no junk left from loop overrun
	*(unsigned int*)&destArray[sum20] = 0x00000080;
#endif
	return sum20;
}
/*
 * Main hashing loop.  For each candidate:
 *   temp  = MD5(translated_key . salt)
 *   dest  = walld0rf_magic(key, salt, temp)      (sum20 bytes)
 *   final = MD5(dest); result = final[0..7] XOR final[8..15]
 * The SIMD path keeps keys and digests in interleaved vector buffers and
 * hashes NBKEYS candidates per SIMDmd5body() call.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;

#if SIMD_COEF_32
#if defined(_OPENMP)
	int t;
#pragma omp parallel for
	for (t = 0; t < omp_t; t++)
#define ti (t*NBKEYS+index)
#else
#define t 0
#define ti index
#endif
	{
		unsigned int index, i;

		for (index = 0; index < NBKEYS; index++) {
			int len;

			/* keyLen < 0 marks a dirty slot: translate the key lazily. */
			if ((len = keyLen[ti]) < 0) {
				unsigned char *key;

				// Load key into vector buffer
				len = 0;
				key = (unsigned char*)saved_plain[ti];
				while (*key)
				{
					saved_key[GETPOS(len, ti)] =
						transtable[*key++];
					len++;
				}
				// Back-out of trailing spaces
				while(len && *--key == ' ')
				{
					len--;
					saved_key[GETPOS(len, ti)] = 0;
				}
				keyLen[ti] = len;
			}
			// Prepend the salt
			for (i = 0; i < cur_salt->l; i++)
				saved_key[GETPOS((len + i), ti)] =
					cur_salt->s[i];
			/* MD5 padding byte and bit-length word for this lane. */
			saved_key[GETPOS((len + i), ti)] = 0x80;
			((unsigned int *)saved_key)[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (unsigned int)ti/SIMD_COEF_32*16*SIMD_COEF_32] = (len + i) << 3;
			// Clean rest of buffer (up to where the previous,
			// possibly longer, key+salt ended)
			for (i = i + len + 1; i <= clean_pos[ti]; i++)
				saved_key[GETPOS(i, ti)] = 0;
			clean_pos[ti] = len + cur_salt->l;
		}

		/* First MD5: digest of key.salt into crypt_key. */
		SIMDmd5body(&saved_key[t*NBKEYS*64],
		            (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);

		/* Zero the upper half of each intermediate block before reuse. */
		for (i = 0; i < SIMD_PARA_MD5; i++)
			memset(&interm_key[t*64*NBKEYS+i*64*SIMD_COEF_32+32*SIMD_COEF_32], 0, 32*SIMD_COEF_32);

		for (index = 0; index < NBKEYS; index++) {
			unsigned int sum20;
			unsigned char temp_key[BINARY_SIZE*2];
			ARCH_WORD_32 destArray[TEMP_ARRAY_SIZE / 4];
			const unsigned int *sw;
			unsigned int *dw;

			// Temporary flat copy of crypt
			sw = (unsigned int*)&crypt_key[GETOUTPOS(0, ti)];
			dw = (unsigned int*)temp_key;
			for (i = 0; i < 4; i++, sw += SIMD_COEF_32)
				*dw++ = *sw;

			//now: walld0rf-magic [tm], (c), <g>
			sum20 = walld0rf_magic(ti, temp_key, (unsigned char*)destArray);

			// Vectorize a word at a time
			dw = (unsigned int*)&interm_key[GETPOS(0, ti)];
			for (i = 0;i <= sum20; i += 4, dw += SIMD_COEF_32)
				*dw = destArray[i >> 2];

			/* Bit length of the sum20-byte intermediate message. */
			((unsigned int *)interm_key)[14*SIMD_COEF_32 + (ti&(SIMD_COEF_32-1)) + (unsigned int)ti/SIMD_COEF_32*16*SIMD_COEF_32] = sum20 << 3;
		}

		/* Second MD5 over the permuted arrays. */
		SIMDmd5body(&interm_key[t*NBKEYS*64],
		            (unsigned int*)&crypt_key[t*NBKEYS*16], NULL, SSEi_MIXED_IN);

		/* Fold the 16-byte digest to 8 bytes: low half XOR high half. */
		for (index = 0; index < NBKEYS; index++) {
			*(ARCH_WORD_32*)&crypt_key[GETOUTPOS(0, ti)] ^= *(ARCH_WORD_32*)&crypt_key[GETOUTPOS(8, ti)];
			*(ARCH_WORD_32*)&crypt_key[GETOUTPOS(4, ti)] ^= *(ARCH_WORD_32*)&crypt_key[GETOUTPOS(12, ti)];
		}
	}
#else
#ifdef _OPENMP
	int t;
#pragma omp parallel for
	for (t = 0; t < count; t++)
#else
#define t 0
#endif
	{
		unsigned char temp_key[BINARY_SIZE*2];
		unsigned char final_key[BINARY_SIZE*2];
		unsigned int i;
		unsigned int sum20;
		unsigned char destArray[TEMP_ARRAY_SIZE];
		MD5_CTX ctx;

		if (keyLen[t] < 0) {
			keyLen[t] = strlen(saved_plain[t]);

			// Back-out of trailing spaces
			// NOTE(review): for an empty key the condition reads
			// saved_plain[t][-1] before the == 0 check fires --
			// presumably harmless in practice, but confirm.
			while ( saved_plain[t][keyLen[t] - 1] == ' ' )
			{
				if (keyLen[t] == 0) break;
				saved_plain[t][--keyLen[t]] = 0;
			}
			for (i = 0; i < keyLen[t]; i++)
				saved_key[t][i] = transtable[ARCH_INDEX(saved_plain[t][i])];
		}

		/* First MD5: translated key followed by the salt. */
		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[t], keyLen[t]);
		MD5_Update(&ctx, cur_salt->s, cur_salt->l);
		MD5_Final(temp_key,&ctx);

		//now: walld0rf-magic [tm], (c), <g>
		sum20 = walld0rf_magic(t, temp_key, destArray);

		/* Second MD5 over the permuted array, then fold to 8 bytes. */
		MD5_Init(&ctx);
		MD5_Update(&ctx, destArray, sum20);
		MD5_Final(final_key, &ctx);

		for (i = 0; i < 8; i++)
			((char*)crypt_key[t])[i] = final_key[i + 8] ^ final_key[i];
	}
#endif
	return count;
#undef t
#undef ti
}
/*
 * Decode the 16 upper-case hex digits after the last '$' into the
 * 8-byte binary hash.  Returns a pointer to a static buffer.
 */
static void *get_binary(char *ciphertext)
{
	static ARCH_WORD_32 binary[BINARY_SIZE / sizeof(ARCH_WORD_32)];
	unsigned char *out = (unsigned char*)binary;
	char *hex = strrchr(ciphertext, '$') + 1;
	int i;

	for (i = 0; i < BINARY_SIZE; i++)
		out[i] = atoi16[ARCH_INDEX(hex[2 * i])] * 16 +
		         atoi16[ARCH_INDEX(hex[2 * i + 1])];

	return (void *)binary;
}
// Salt is already trimmed and 8-bit converted in split()
/*
 * Build the binary salt (the SAP user name) from the part before the
 * last '$', mapping each character through transtable -- the same
 * encoding applied to passwords.  Returns a pointer to a static struct
 * that is overwritten on the next call.
 */
static void *get_salt(char *ciphertext)
{
	int i;
	static struct saltstruct out;

	/* We don't care about trailing garbage, but loader does */
	memset(out.s, 0, sizeof(out.s));

	out.l = (int)(strrchr(ciphertext, '$') - ciphertext);

	for (i = 0; i < out.l; ++i)
		out.s[i] = transtable[ARCH_INDEX(ciphertext[i])];

	return &out;
}
// Here, we remove any salt padding, trim it to 12 bytes
// and finally replace any 8-bit character with '^'
/*
 * Canonicalize a ciphertext into a static buffer: strip trailing spaces
 * of the user-name field (and anything at or past SALT_LENGTH),
 * re-attach the "$HASH" part, then map 8-bit characters in the name
 * to '^'.  NOTE(review): the back-scan over spaces has no lower bound;
 * an all-space user name would underrun -- presumably valid() rejects
 * such input before split() runs, confirm call order in the core.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];
	char *p;
	int i;

	p = strrchr(ciphertext, '$');
	i = (int)(p - ciphertext) - 1;

	/* Back up over padding and over-length user-name characters. */
	while (ciphertext[i] == ' ' || i >= SALT_LENGTH)
		i--;
	i++;

	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, i);
	strnzcpy(&out[i], p, CIPHERTEXT_LENGTH + 1 - i);

	/* Replace 8-bit characters in the user-name part only. */
	p = &out[i];
	while(--p >= out)
		if (*p & 0x80)
			*p = '^';

	return out;
}
/*
 * Hash-table lookup helpers: each returns the low PH_MASK_n bits of the
 * first 32-bit word of a candidate's crypt output.  HASH_OFFSET addresses
 * the interleaved SIMD digest layout.
 */
#ifdef SIMD_COEF_32
#define HASH_OFFSET (index&(SIMD_COEF_32-1))+((unsigned int)index/SIMD_COEF_32)*SIMD_COEF_32*4
static int get_hash_0(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return ((ARCH_WORD_32 *)crypt_key)[HASH_OFFSET] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_0; }
static int get_hash_1(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_1; }
static int get_hash_2(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_2; }
static int get_hash_3(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_3; }
static int get_hash_4(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_4; }
static int get_hash_5(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_5; }
static int get_hash_6(int index) { return *(ARCH_WORD_32*)crypt_key[index] & PH_MASK_6; }
#endif
// Public domain hash function by DJ Bernstein (djb2, XOR variant):
// h = h * 33 ^ byte, reduced to the salt hash-table size.
static int salt_hash(void *salt)
{
	struct saltstruct *ss = (struct saltstruct*)salt;
	unsigned int h = 5381;
	unsigned int pos;

	for (pos = 0; pos < ss->l; pos++)
		h = h * 33 ^ ss->s[pos];

	return h & (SALT_HASH_SIZE - 1);
}
/*
 * Format descriptor registered with the John the Ripper core.
 * First brace: static parameters and self-test vectors; second brace:
 * the method table implemented above.
 */
struct fmt_main fmt_sapB = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_TRUNC | FMT_OMP | FMT_8_BIT,
		{ NULL },	/* no tunable cost parameters */
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
trsm_x_sky_n_lo_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/*
 * Triangular solve with multiple right-hand sides, column-major layout:
 * computes y = alpha * inv(L) * x where L is the lower triangle (non-unit
 * diagonal) of a skyline-format (SKY) matrix A.  ldx/ldy are the leading
 * dimensions of x and y; 'columns' right-hand sides are solved.
 * Forward substitution per column:
 *   y[r] = (alpha * x[r] - sum_{c<r} L[r][c] * y[c]) / L[r][r]
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = A->rows;
    /* Cache of the diagonal entries.  NOTE(review): variable-length array
       of size m on the stack -- may overflow for very large matrices;
       confirm expected problem sizes. */
    ALPHA_Number diag[m];
    memset(diag, '\0', m * sizeof(ALPHA_Number));
    int num_thread = alpha_get_thread_num();

    /* The last value of skyline row r (index A->pointers[r] - 1 with the
       1-based r below) is the diagonal element of row r-1. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT r = 1; r < A->rows + 1; r++)
    {
        const ALPHA_INT indx = A->pointers[r] - 1;
        diag[r - 1] = A->values[indx];
    }

    /* Right-hand-side columns are independent, so they are processed in
       parallel; within a column the row loop stays sequential because
       y[r] depends on all previously solved y[c], c < r. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        for (ALPHA_INT r = 0; r <A->rows; r++)
        {
            ALPHA_Number temp;
            alpha_setzero(temp);
            ALPHA_INT start = A->pointers[r];
            ALPHA_INT end = A->pointers[r + 1];
            ALPHA_INT idx = 1;
            ALPHA_INT eles_num = end - start;
            /* Accumulate L[r][c] * y[c] over the eles_num - 1 sub-diagonal
               entries of skyline row r (columns r-eles_num+1 .. r-1). */
            for (ALPHA_INT ai = start; ai < end - 1; ++ai)
            {
                ALPHA_INT c = r - eles_num + idx;
                alpha_madde(temp, A->values[ai], y[out_y_col * ldy + c]);
                idx ++;
            }
            ALPHA_Number t;
            alpha_mul(t, alpha, x[out_y_col * ldx + r]);
            alpha_sub(t, t, temp);
            alpha_div(y[out_y_col * ldy + r], t, diag[r]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
SumaVectoresOMPsections.c | /* SumaVectoresC.c
Suma de dos vectores: v3 = v1 + v2
Para compilar usar (-lrt: real time library):
gcc -O2 SumaVectores.c -o SumaVectores -lrt
Para ejecutar use: SumaVectoresC longitud
*/
#include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free()
#include <stdio.h> // biblioteca donde se encuentra la función printf()
#include <time.h> // biblioteca donde se encuentra la función clock_gettime()
#ifdef _OPENMP
#include <omp.h> // biblioteca para programas paralelos
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
#define MAX 33554432 //=2^25
#define PRINT_ALL_MIN 24
// Ponemos que los elementos mínimos para que se
// impriman todas las sumas sea 24 por que atcgrid
// tiene 24 hebras
/*
 * main: allocates three vectors of length N (argv[1]), initializes v1/v2
 * in parallel sections, computes v3 = v1 + v2 split into four parallel
 * sections, times the sum, and prints the result.  argv[2] (TIME)
 * selects output verbosity.
 *
 * Bug fixed: the loop index 'i' is declared at function scope, so inside
 * '#pragma omp parallel sections' it was SHARED by default and every
 * concurrent section raced on it, corrupting both the initialization and
 * the sum.  Both parallel constructs now carry a private(i) clause so
 * each thread gets its own copy.
 */
int main(int argc, char* argv[]) {
	int i;
#ifdef _OPENMP
	double cgt1, cgt2;
#else
	struct timespec cgt1, cgt2;
#endif
	double ncgt, *v1, *v2, *v3; // elapsed time and the three vectors
	unsigned int N, TIME;
	// TIME selects verbosity: 1 prints only the elapsed time (easy to
	// copy from the console for plotting); 2 additionally traces each
	// iteration with its executing thread.

	switch (argc){
	case 1:
		printf("Faltan nº componentes del vector\n");
		exit(-1);
		break;
	case 2:
		// Max N = 2^32-1 = 4294967295 (sizeof(unsigned int) = 4 B)
		N = atoi(argv[1]);
		TIME = 0;
		break;
	case 3:
		N = atoi(argv[1]);
		TIME = atoi(argv[2]);
		break;
	default:
		printf("La cantidad de parámetros es incorrecta\n");
		exit(-1);
		break;
	}

	v1 = (double*) malloc(N * sizeof(double)); // malloc takes the size in bytes
	v2 = (double*) malloc(N * sizeof(double)); // returns NULL when out of memory
	v3 = (double*) malloc(N * sizeof(double));
	if ((v1 == NULL) || (v2 == NULL) || (v3 == NULL)) {
		printf("Error en la reserva de espacio para los vectores\n");
		exit(-2);
	}

	// Initialize the two input vectors, one section each.
#ifdef _OPENMP
#pragma omp parallel sections private(i)
#endif
	{
#ifdef _OPENMP
#pragma omp section
#endif
		for (i = 0; i < N; i++){
			if (TIME==2){
				printf("thread %d de %d ejecuta la iteración %d del bucle\n",omp_get_thread_num(),omp_get_num_threads(),i);
				printf("V1[%d] = %d * 0.1 + %d * 0.1\n",i,N,i);
			}
			v1[i] = N * 0.1 + i * 0.1;
		}
#ifdef _OPENMP
#pragma omp section
#endif
		for (i = 0; i < N; i++){
			if (TIME==2){
				printf("thread %d de %d ejecuta la iteración %d del bucle\n",omp_get_thread_num(),omp_get_num_threads(),i);
				printf("V2[%d] = %d * 0.1 - %d * 0.1\n",i,N,i);
			}
			v2[i] = N * 0.1 - i * 0.1; // values depend on N
		}
	}

#ifdef _OPENMP
	cgt1 = omp_get_wtime();
#else
	clock_gettime(CLOCK_REALTIME, &cgt1);
#endif
	//----------------------------------------------------------------------------
	// Sum the vectors in four quarters, one section each.
#ifdef _OPENMP
#pragma omp parallel sections private(i)
#endif
	{
#ifdef _OPENMP
#pragma omp section
#endif
		for (i = 0; i < 1*(N/4); i++)
			v3[i] = v1[i] + v2[i];
#ifdef _OPENMP
#pragma omp section
#endif
		for (i = 1*(N/4); i < 2*(N/4); i++)
			v3[i] = v1[i] + v2[i];
#ifdef _OPENMP
#pragma omp section
#endif
		for (i = 2*(N/4); i < 3*(N/4); i++)
			v3[i] = v1[i] + v2[i];
#ifdef _OPENMP
#pragma omp section
#endif
		for (i = 3*(N/4); i < N; i++)
			v3[i] = v1[i] + v2[i];
	}
	//----------------------------------------------------------------------------
#ifdef _OPENMP
	cgt2 = omp_get_wtime();
#else
	clock_gettime(CLOCK_REALTIME, &cgt2);
#endif
#ifdef _OPENMP
	ncgt = cgt2 - cgt1;
#else
	ncgt = (double) (cgt2.tv_sec - cgt1.tv_sec) + (double) ((cgt2.tv_nsec - cgt1.tv_nsec) / (1.e+9));
#endif

	// Print the result of the sum and the execution time.
	if (N <= PRINT_ALL_MIN){
		if (TIME==1)
			printf("%11.9f\n",ncgt);
		else
			printf("Tiempo(seg.):%11.9f\nTamaño Vectores:%u\n",ncgt,N);
		for(i=0; i<N; i++)
			printf("V1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f)\n",i,i,i,v1[i],v2[i],v3[i]);
	}
	if (TIME==1)
		printf("%11.9f\n",ncgt);
	else
		printf("Tiempo(seg.):%11.9f\nTamaño Vectores:%u\nV1[0]+V2[0]=V3[0](%8.6f+%8.6f=%8.6f)\nV1[%d]+V2[%d]=V3[%d](%8.6f+%8.6f=%8.6f)\n", ncgt,N,v1[0],v2[0],v3[0],N-1,N-1,N-1,v1[N-1],v2[N-1],v3[N-1]);

	free(v1); // release v1
	free(v2); // release v2
	free(v3); // release v3
	return 0;
}
|
GB_unop__abs_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_fp64_fp64
// op(A') function: GB_unop_tran__abs_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = fabs (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = fabs (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = fabs (aij) over all anz entries of Ax into Cx (possibly in
// place, since Cx and Ax may be aliased).  The non-bitmap branch processes
// every entry; the bitmap branch skips entries whose Ab [p] bit is zero.
GrB_Info GB_unop_apply__abs_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op without typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = fabs (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = fabs (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = fabs (A') : transpose A and apply the operator in a single pass.
// The entire loop nest lives in the shared template GB_unop_transpose.c,
// specialized by the GB_* macros defined above for abs/fp64.
GrB_Info GB_unop_tran__abs_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // per-task workspaces (layout defined by the template)
    const int64_t *GB_RESTRICT A_slice, // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__first_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__first_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__first_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__first_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_uint16)
// A*D function (colscale): GB (_AxD__first_uint16)
// D*A function (rowscale): GB (_DxB__first_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__first_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__first_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_uint16)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 1
// BinaryOp: cij = aij
// Macro specializations for z = FIRST (x,y) on uint16_t: z is x, and the
// values of B (the y operand) are never read ("B pattern? 1" above).
//
// Fix: GB_A_IS_PATTERN and GB_B_IS_PATTERN previously ended with a stray
// line-continuation backslash after the macro body. Backslash-newline
// splicing happens before comment removal (C translation phase 2 vs 3), so
// the backslash spliced the *next source line* into the macro body — here it
// silently swallowed the following doc comment, and would corrupt the macro
// outright if a non-comment line ever followed. The dangling backslashes are
// removed; the expansions (0 and 1) are unchanged.

// type of the entries of A
#define GB_ATYPE \
    uint16_t

// type of the entries of B
#define GB_BTYPE \
    uint16_t

// type of the entries of C
#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB] -- a no-op: FIRST never reads B's values
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// GB_CX(p): the pth entry in the value array of C
#define GB_CX(p) Cx [p]

// binary operator: z = FIRST (x,y) = x
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_UINT16 || GxB_NO_FIRST_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for this operator: the dense C += A+B kernel is only generated
// when the op is in the list below, and FIRST is not.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask, no accumulator.
// The loop body comes from the shared template, specialized by the GB_*
// macros defined earlier in this file.
void GB (_Cdense_ewise3_noaccum__first_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense. The subassign template is compiled out (#if 0)
// for this operator, so this kernel only reports GrB_SUCCESS (or
// GrB_NO_VALUE when the operator is disabled at compile time).
GrB_Info GB (_Cdense_accumB__first_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (dense C, scalar b). Like _Cdense_accumB above, the template is
// compiled out (#if 0) for this operator; the function returns GrB_SUCCESS
// without modifying C (or GrB_NO_VALUE when disabled).
GrB_Info GB (_Cdense_accumb__first_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by the diagonal matrix D. Only C's values are
// written here (via Cx); the loop lives in the colscale template.
GrB_Info GB (_AxD__first_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by the diagonal matrix D. Only C's values are written
// here (via Cx); the loop lives in the rowscale template.
GrB_Info GB (_DxB__first_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C=A+B, C<M>=A+B, or C<!M>=A+B. The numerical work
// is done by the shared add template (GB_PHASE_2_OF_2 is defined above).
GrB_Info GB (_AaddB__first_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // the typed alpha/beta scalars are only read in the eWiseUnion case;
        // otherwise they are deliberately left uninitialized and unused
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (and masked variants) where C is sparse/hypersparse.
// Only the numerical phase runs here (GB_PHASE_2_OF_2 is defined above);
// the task list and pattern were computed by the caller.
GrB_Info GB (_AemultB_08__first_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
GrB_Info GB (_AemultB_02__first_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is defined as 0 above, so only this branch compiles.)
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are
// bitmap/full; the loop lives in the shared template.
GrB_Info GB (_AemultB_04__first_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (and masked variants) where C is bitmap; the loop lives
// in the shared bitmap template.
GrB_Info GB (_AemultB_bitmap__first_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
// Disabled: with FIRST and x bound as the first argument, every output is
// just the scalar x (see Cx [p] = x below), so no specialized bind1st
// kernel is generated for this operator.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ; ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
// Disabled: with FIRST and y bound as the second argument, the output is a
// copy of A's values (Cx [p] = aij below), so no specialized bind2nd kernel
// is generated for this operator.
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled: C = op (x, A') for FIRST would set every entry of C to the
// scalar x, so no transpose-bind1st kernel is generated for this operator.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE after the template (generator emits it identically)
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled: C = op (A', y) for FIRST is just a transpose of A's values
// (Cx [pC] = aij below), so no transpose-bind2nd kernel is generated for
// this operator.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
#endif
|
UpdateCombinedNeighboursWorklet.h | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2014 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2014 UT-Battelle, LLC.
// Copyright 2014 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
// Copyright (c) 2018, The Regents of the University of California, through
// Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
// from the U.S. Dept. of Energy). All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// (1) Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// (3) Neither the name of the University of California, Lawrence Berkeley National
// Laboratory, U.S. Dept. of Energy nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
//
//=============================================================================
//
// This code is an extension of the algorithm presented in the paper:
// Parallel Peak Pruning for Scalable SMP Contour Tree Computation.
// Hamish Carr, Gunther Weber, Christopher Sewell, and James Ahrens.
// Proceedings of the IEEE Symposium on Large Data Analysis and Visualization
// (LDAV), October 2016, Baltimore, Maryland.
//
// The PPP2 algorithm and software were jointly developed by
// Hamish Carr (University of Leeds), Gunther H. Weber (LBNL), and
// Oliver Ruebel (LBNL)
//==============================================================================
#ifndef vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_update_combined_neighbours_worklet_h
#define vtkm_worklet_contourtree_augmented_contourtree_mesh_inc_update_combined_neighbours_worklet_h
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/worklet/contourtree_augmented/Types.h>
namespace vtkm
{
namespace worklet
{
namespace contourtree_augmented
{
namespace mesh_dem_contourtree_mesh_inc
{
class UpdateCombinedNeighboursWorklet : public vtkm::worklet::WorkletMapField
{
public:
typedef void ControlSignature(
WholeArrayIn firstNeighbour, // (input) this->firstNerighbour or other.firstNeighbour
WholeArrayIn neighbours, // (input) this->neighbours or other.neighbours array
WholeArrayIn
toCombinedSortOrder, // (input) thisToCombinedSortOrder or otherToCombinedSortOrder array
WholeArrayIn combinedFirstNeighbour, // (input) combinedFirstNeighbour array in both cases
WholeArrayIn
combinedOtherStartIndex, // (input) const 0 array of length combinedOtherStartIndex for this and combinedOtherStartIndex for other loop
WholeArrayOut combinedNeighbours); // (output) combinedNeighbours array in both cases
typedef void ExecutionSignature(_1, InputIndex, _2, _3, _4, _5, _6);
typedef _1 InputDomain;
// Default Constructor
VTKM_EXEC_CONT
UpdateCombinedNeighboursWorklet() {}
template <typename InFieldPortalType, typename InFieldPortalType2, typename OutFieldPortalType>
VTKM_EXEC void operator()(
const InFieldPortalType& firstNeighbourPortal,
const vtkm::Id vtx,
const InFieldPortalType& neighboursPortal,
const InFieldPortalType& toCombinedSortOrderPortal,
const InFieldPortalType& combinedFirstNeighbourPortal,
const InFieldPortalType2&
combinedOtherStartIndexPortal, // We need another InFieldPortalType here to allow us to hand in a smart array handle instead of a VTKM array
const OutFieldPortalType& combinedNeighboursPortal) const
{
vtkm::Id totalNumNeighbours = neighboursPortal.GetNumberOfValues();
vtkm::Id totalNumVertices = firstNeighbourPortal.GetNumberOfValues();
vtkm::Id numNeighbours = (vtx < totalNumVertices - 1)
? firstNeighbourPortal.Get(vtx + 1) - firstNeighbourPortal.Get(vtx)
: totalNumNeighbours - firstNeighbourPortal.Get(vtx);
for (vtkm::Id nbrNo = 0; nbrNo < numNeighbours; ++nbrNo)
{
combinedNeighboursPortal.Set(
combinedFirstNeighbourPortal.Get(toCombinedSortOrderPortal.Get(vtx)) +
combinedOtherStartIndexPortal.Get(toCombinedSortOrderPortal.Get(vtx)) + nbrNo,
toCombinedSortOrderPortal.Get(neighboursPortal.Get(firstNeighbourPortal.Get(vtx) + nbrNo)));
}
/*
This worklet implemnts the following two loops from the original OpenMP code
The two loops are the same but the arrays required are different
#pragma omp parallel for
for (indexVector::size_type vtx = 0; vtx < firstNeighbour.size(); ++vtx)
{
indexType numNeighbours = (vtx < GetNumberOfVertices() - 1) ? firstNeighbour[vtx+1] - firstNeighbour[vtx] : neighbours.size() - firstNeighbour[vtx];
for (indexType nbrNo = 0; nbrNo < numNeighbours; ++nbrNo)
{
combinedNeighbours[combinedFirstNeighbour[thisToCombinedSortOrder[vtx]] + nbrNo] = thisToCombinedSortOrder[neighbours[firstNeighbour[vtx] + nbrNo]];
}
}
#pragma omp parallel for
for (indexVector::size_type vtx = 0; vtx < other.firstNeighbour.size(); ++vtx)
{
indexType numNeighbours = (vtx < other.GetNumberOfVertices() - 1) ? other.firstNeighbour[vtx+1] - other.firstNeighbour[vtx] : other.neighbours.size() - other.firstNeighbour[vtx];
for (indexType nbrNo = 0; nbrNo < numNeighbours; ++nbrNo)
{
combinedNeighbours[combinedFirstNeighbour[otherToCombinedSortOrder[vtx]] + combinedOtherStartIndex[otherToCombinedSortOrder[vtx]] + nbrNo] = otherToCombinedSortOrder[other.neighbours[other.firstNeighbour[vtx] + nbrNo]];
}
}
*/
}
}; // AdditionAssignWorklet
} // namespace mesh_dem_contourtree_mesh_inc
} // namespace contourtree_augmented
} // namespace worklet
} // namespace vtkm
#endif
|
convolution_1x1_pack4to1_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Repack the 1x1 convolution kernel for the pack4-to-1 bf16 sgemm path.
//   src layout: inch-outch (row per output channel)
//   dst layout: 4a-inch/4a-outch, values converted to bf16
// Output channels are grouped 8 at a time (aarch64 only), then 4, then 1;
// within each group a 4 (input) x N (output) tile is interleaved so that
// dst[i*N + j] = src_row_j[i].
static void conv1x1s1_sgemm_transform_kernel_pack4to1_bf16s_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
#if __aarch64__
    kernel_tm_pack4.create(8, inch / 4, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)2u * 4, 4);
#else
    kernel_tm_pack4.create(4, inch / 4, outch / 4 + outch % 4, (size_t)2u * 4, 4);
#endif

    int p = 0;
#if __aarch64__
    // groups of 8 output channels
    for (; p + 7 < outch; p += 8)
    {
        const float* rows[8];
        for (int j = 0; j < 8; j++)
        {
            rows[j] = (const float*)kernel + (p + j) * inch;
        }

        unsigned short* ktmp = kernel_tm_pack4.channel(p / 8);

        for (int q = 0; q + 3 < inch; q += 4)
        {
            // interleave a 4x8 tile: ktmp[i*8 + j] = rows[j][i]
            for (int i = 0; i < 4; i++)
            {
                for (int j = 0; j < 8; j++)
                {
                    ktmp[i * 8 + j] = float32_to_bfloat16(rows[j][i]);
                }
            }
            for (int j = 0; j < 8; j++)
            {
                rows[j] += 4;
            }
            ktmp += 32;
        }
    }
#endif
    // groups of 4 output channels
    for (; p + 3 < outch; p += 4)
    {
        const float* rows[4];
        for (int j = 0; j < 4; j++)
        {
            rows[j] = (const float*)kernel + (p + j) * inch;
        }

#if __aarch64__
        unsigned short* ktmp = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4);
#else
        unsigned short* ktmp = kernel_tm_pack4.channel(p / 4);
#endif

        for (int q = 0; q + 3 < inch; q += 4)
        {
            // interleave a 4x4 tile: ktmp[i*4 + j] = rows[j][i]
            for (int i = 0; i < 4; i++)
            {
                for (int j = 0; j < 4; j++)
                {
                    ktmp[i * 4 + j] = float32_to_bfloat16(rows[j][i]);
                }
            }
            for (int j = 0; j < 4; j++)
            {
                rows[j] += 4;
            }
            ktmp += 16;
        }
    }
    // remaining single output channels: straight copy with bf16 conversion
    for (; p < outch; p++)
    {
        const float* k0 = (const float*)kernel + p * inch;

#if __aarch64__
        unsigned short* ktmp = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
        unsigned short* ktmp = kernel_tm_pack4.channel(p / 4 + p % 4);
#endif

        for (int q = 0; q + 3 < inch; q += 4)
        {
            for (int i = 0; i < 4; i++)
            {
                ktmp[i] = float32_to_bfloat16(k0[i]);
            }
            k0 += 4;
            ktmp += 4;
        }
    }
}
static void conv1x1s1_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp;
#if __aarch64__
if (size >= 12)
tmp.create(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + size % 12 % 4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#else
if (size >= 8)
tmp.create(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
#endif
{
int nn_size;
int remain_size_start;
#if __aarch64__
nn_size = size / 12;
remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i * 4;
unsigned short* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
// transpose 4x12
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
"st1 {v4.4h}, [%1], #8 \n"
"st1 {v1.8h}, [%1], #16 \n"
"st1 {v5.4h}, [%1], #8 \n"
"sub %0, %0, #64 \n"
"st1 {v2.8h}, [%1], #16 \n"
"st1 {v6.4h}, [%1], #8 \n"
"st1 {v3.8h}, [%1], #16 \n"
"st1 {v7.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += bottom_blob.cstep * 4;
}
}
#else
remain_size_start = 0;
#endif
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
#else
unsigned short* tmpptr = tmp.channel(i / 8);
#endif
for (int q = 0; q < inch; q++)
{
// transpose 4x8
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0]! \n"
"pld [%0, #256] \n"
"vld4.u16 {d4-d7}, [%0] \n"
"sub %0, %0, #32 \n"
"vst1.u16 {d0}, [%1 :64]! \n"
"vst1.u16 {d4}, [%1 :64]! \n"
"vst1.u16 {d1}, [%1 :64]! \n"
"vst1.u16 {d5}, [%1 :64]! \n"
"vst1.u16 {d2}, [%1 :64]! \n"
"vst1.u16 {d6}, [%1 :64]! \n"
"vst1.u16 {d3}, [%1 :64]! \n"
"vst1.u16 {d7}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#endif
for (int q = 0; q < inch; q++)
{
// transpose 4x4
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n"
"st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0 :128] \n"
"vst1.u16 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const unsigned short* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.u16 {d0}, [%0 :64] \n"
"vst1.u16 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;

#if __aarch64__
// aarch64 has 32 NEON registers, so process 8 output channels per iteration
nn_outch = outch >> 3;

#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
    int p = pp * 8;

    // one output row pointer per channel in this block of 8
    unsigned short* outptr0 = top_blob.channel(p);
    unsigned short* outptr1 = top_blob.channel(p + 1);
    unsigned short* outptr2 = top_blob.channel(p + 2);
    unsigned short* outptr3 = top_blob.channel(p + 3);
    unsigned short* outptr4 = top_blob.channel(p + 4);
    unsigned short* outptr5 = top_blob.channel(p + 5);
    unsigned short* outptr6 = top_blob.channel(p + 6);
    unsigned short* outptr7 = top_blob.channel(p + 7);

    // fall back to zero bias when none is provided
    const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
    const float* biasptr = bias ? bias + p : zeros;
    int i = 0;
    // 8 output channels x 12 columns per iteration; 24 accumulators v8..v31.
    // u16 values are widened to fp32 with "shll #16" (bf16 -> fp32: the 16-bit
    // payload becomes the high half of the 32-bit float) and narrowed back with
    // "shrn #16" before storing.
    for (; i + 11 < size; i += 12)
    {
        unsigned short* tmpptr = tmp.channel(i / 12);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8);

        int nn = inch; // inch always > 0

        asm volatile(
            // broadcast the 8 bias floats into the 24 accumulators
            "ld1 {v30.4s, v31.4s}, [%22] \n"
            "dup v8.4s, v30.s[0] \n"
            "dup v9.4s, v30.s[0] \n"
            "dup v10.4s, v30.s[0] \n"
            "dup v11.4s, v30.s[1] \n"
            "dup v12.4s, v30.s[1] \n"
            "dup v13.4s, v30.s[1] \n"
            "dup v14.4s, v30.s[2] \n"
            "dup v15.4s, v30.s[2] \n"
            "dup v16.4s, v30.s[2] \n"
            "dup v17.4s, v30.s[3] \n"
            "dup v18.4s, v30.s[3] \n"
            "dup v19.4s, v30.s[3] \n"
            "dup v20.4s, v31.s[0] \n"
            "dup v21.4s, v31.s[0] \n"
            "dup v22.4s, v31.s[0] \n"
            "dup v23.4s, v31.s[1] \n"
            "dup v24.4s, v31.s[1] \n"
            "dup v25.4s, v31.s[1] \n"
            "dup v26.4s, v31.s[2] \n"
            "dup v27.4s, v31.s[2] \n"
            "dup v28.4s, v31.s[2] \n"
            "dup v29.4s, v31.s[3] \n"
            "dup v30.4s, v31.s[3] \n"
            "dup v31.4s, v31.s[3] \n"

            // main loop over input channels: each pass consumes 12 data
            // elements x 4 (elempack) from tmp and 8 weights x 4 from kptr
            "0: \n"
            "prfm pldl1keep, [%9, #256] \n"
            "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
            "prfm pldl1keep, [%10, #256] \n"
            "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
            "shll v0.4s, v0.4h, #16 \n"
            "shll v1.4s, v1.4h, #16 \n"
            "shll v2.4s, v2.4h, #16 \n"
            "shll v3.4s, v3.4h, #16 \n"
            "shll v4.4s, v4.4h, #16 \n"
            "shll v5.4s, v5.4h, #16 \n"
            "shll v6.4s, v6.4h, #16 \n"
            "shll v7.4s, v7.4h, #16 \n"
            "fmla v8.4s, v0.4s, v4.s[0] \n"
            "fmla v11.4s, v0.4s, v4.s[1] \n"
            "fmla v14.4s, v0.4s, v4.s[2] \n"
            "fmla v17.4s, v0.4s, v4.s[3] \n"
            "fmla v20.4s, v0.4s, v5.s[0] \n"
            "fmla v23.4s, v0.4s, v5.s[1] \n"
            "fmla v26.4s, v0.4s, v5.s[2] \n"
            "fmla v29.4s, v0.4s, v5.s[3] \n"
            "fmla v9.4s, v1.4s, v4.s[0] \n"
            "fmla v12.4s, v1.4s, v4.s[1] \n"
            "fmla v15.4s, v1.4s, v4.s[2] \n"
            "fmla v18.4s, v1.4s, v4.s[3] \n"
            "fmla v21.4s, v1.4s, v5.s[0] \n"
            "fmla v24.4s, v1.4s, v5.s[1] \n"
            "fmla v27.4s, v1.4s, v5.s[2] \n"
            "fmla v30.4s, v1.4s, v5.s[3] \n"
            "fmla v10.4s, v2.4s, v4.s[0] \n"
            "fmla v13.4s, v2.4s, v4.s[1] \n"
            "fmla v16.4s, v2.4s, v4.s[2] \n"
            "fmla v19.4s, v2.4s, v4.s[3] \n"
            "fmla v22.4s, v2.4s, v5.s[0] \n"
            "fmla v25.4s, v2.4s, v5.s[1] \n"
            "fmla v28.4s, v2.4s, v5.s[2] \n"
            "fmla v31.4s, v2.4s, v5.s[3] \n"
            "fmla v8.4s, v3.4s, v6.s[0] \n"
            "fmla v11.4s, v3.4s, v6.s[1] \n"
            "fmla v14.4s, v3.4s, v6.s[2] \n"
            "fmla v17.4s, v3.4s, v6.s[3] \n"
            "fmla v20.4s, v3.4s, v7.s[0] \n"
            "fmla v23.4s, v3.4s, v7.s[1] \n"
            "fmla v26.4s, v3.4s, v7.s[2] \n"
            "fmla v29.4s, v3.4s, v7.s[3] \n"
            "prfm pldl1keep, [%9, #256] \n"
            "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
            "shll v0.4s, v0.4h, #16 \n"
            "shll v1.4s, v1.4h, #16 \n"
            "shll v2.4s, v2.4h, #16 \n"
            "shll v3.4s, v3.4h, #16 \n"
            "fmla v9.4s, v0.4s, v6.s[0] \n"
            "fmla v12.4s, v0.4s, v6.s[1] \n"
            "fmla v15.4s, v0.4s, v6.s[2] \n"
            "fmla v18.4s, v0.4s, v6.s[3] \n"
            "fmla v21.4s, v0.4s, v7.s[0] \n"
            "fmla v24.4s, v0.4s, v7.s[1] \n"
            "fmla v27.4s, v0.4s, v7.s[2] \n"
            "fmla v30.4s, v0.4s, v7.s[3] \n"
            "fmla v10.4s, v1.4s, v6.s[0] \n"
            "fmla v13.4s, v1.4s, v6.s[1] \n"
            "fmla v16.4s, v1.4s, v6.s[2] \n"
            "fmla v19.4s, v1.4s, v6.s[3] \n"
            "fmla v22.4s, v1.4s, v7.s[0] \n"
            "fmla v25.4s, v1.4s, v7.s[1] \n"
            "fmla v28.4s, v1.4s, v7.s[2] \n"
            "fmla v31.4s, v1.4s, v7.s[3] \n"
            "prfm pldl1keep, [%10, #256] \n"
            "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
            "shll v4.4s, v4.4h, #16 \n"
            "shll v5.4s, v5.4h, #16 \n"
            "shll v6.4s, v6.4h, #16 \n"
            "shll v7.4s, v7.4h, #16 \n"
            "fmla v8.4s, v2.4s, v4.s[0] \n"
            "fmla v11.4s, v2.4s, v4.s[1] \n"
            "fmla v14.4s, v2.4s, v4.s[2] \n"
            "fmla v17.4s, v2.4s, v4.s[3] \n"
            "fmla v20.4s, v2.4s, v5.s[0] \n"
            "fmla v23.4s, v2.4s, v5.s[1] \n"
            "fmla v26.4s, v2.4s, v5.s[2] \n"
            "fmla v29.4s, v2.4s, v5.s[3] \n"
            "fmla v9.4s, v3.4s, v4.s[0] \n"
            "fmla v12.4s, v3.4s, v4.s[1] \n"
            "fmla v15.4s, v3.4s, v4.s[2] \n"
            "fmla v18.4s, v3.4s, v4.s[3] \n"
            "fmla v21.4s, v3.4s, v5.s[0] \n"
            "fmla v24.4s, v3.4s, v5.s[1] \n"
            "fmla v27.4s, v3.4s, v5.s[2] \n"
            "fmla v30.4s, v3.4s, v5.s[3] \n"
            "prfm pldl1keep, [%9, #256] \n"
            "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
            "shll v0.4s, v0.4h, #16 \n"
            "shll v1.4s, v1.4h, #16 \n"
            "shll v2.4s, v2.4h, #16 \n"
            "shll v3.4s, v3.4h, #16 \n"
            "fmla v10.4s, v0.4s, v4.s[0] \n"
            "fmla v13.4s, v0.4s, v4.s[1] \n"
            "fmla v16.4s, v0.4s, v4.s[2] \n"
            "fmla v19.4s, v0.4s, v4.s[3] \n"
            "fmla v22.4s, v0.4s, v5.s[0] \n"
            "fmla v25.4s, v0.4s, v5.s[1] \n"
            "fmla v28.4s, v0.4s, v5.s[2] \n"
            "fmla v31.4s, v0.4s, v5.s[3] \n"
            "fmla v8.4s, v1.4s, v6.s[0] \n"
            "fmla v11.4s, v1.4s, v6.s[1] \n"
            "fmla v14.4s, v1.4s, v6.s[2] \n"
            "fmla v17.4s, v1.4s, v6.s[3] \n"
            "fmla v20.4s, v1.4s, v7.s[0] \n"
            "fmla v23.4s, v1.4s, v7.s[1] \n"
            "fmla v26.4s, v1.4s, v7.s[2] \n"
            "fmla v29.4s, v1.4s, v7.s[3] \n"
            "fmla v9.4s, v2.4s, v6.s[0] \n"
            "fmla v12.4s, v2.4s, v6.s[1] \n"
            "fmla v15.4s, v2.4s, v6.s[2] \n"
            "fmla v18.4s, v2.4s, v6.s[3] \n"
            "fmla v21.4s, v2.4s, v7.s[0] \n"
            "fmla v24.4s, v2.4s, v7.s[1] \n"
            "fmla v27.4s, v2.4s, v7.s[2] \n"
            "fmla v30.4s, v2.4s, v7.s[3] \n"
            // decrement the channel counter early to hide the branch latency
            "subs %w0, %w0, #1 \n"
            "fmla v10.4s, v3.4s, v6.s[0] \n"
            "fmla v13.4s, v3.4s, v6.s[1] \n"
            "fmla v16.4s, v3.4s, v6.s[2] \n"
            "fmla v19.4s, v3.4s, v6.s[3] \n"
            "fmla v22.4s, v3.4s, v7.s[0] \n"
            "fmla v25.4s, v3.4s, v7.s[1] \n"
            "fmla v28.4s, v3.4s, v7.s[2] \n"
            "fmla v31.4s, v3.4s, v7.s[3] \n"
            "bne 0b \n"

            // narrow fp32 accumulators back to bf16 and store 12 values
            // per output channel
            "shrn v8.4h, v8.4s, #16 \n"
            "shrn v9.4h, v9.4s, #16 \n"
            "shrn v10.4h, v10.4s, #16 \n"
            "shrn v11.4h, v11.4s, #16 \n"
            "shrn v12.4h, v12.4s, #16 \n"
            "shrn v13.4h, v13.4s, #16 \n"
            "shrn v14.4h, v14.4s, #16 \n"
            "shrn v15.4h, v15.4s, #16 \n"
            "shrn v16.4h, v16.4s, #16 \n"
            "shrn v17.4h, v17.4s, #16 \n"
            "shrn v18.4h, v18.4s, #16 \n"
            "shrn v19.4h, v19.4s, #16 \n"
            "shrn v20.4h, v20.4s, #16 \n"
            "shrn v21.4h, v21.4s, #16 \n"
            "shrn v22.4h, v22.4s, #16 \n"
            "shrn v23.4h, v23.4s, #16 \n"
            "shrn v24.4h, v24.4s, #16 \n"
            "shrn v25.4h, v25.4s, #16 \n"
            "shrn v26.4h, v26.4s, #16 \n"
            "shrn v27.4h, v27.4s, #16 \n"
            "shrn v28.4h, v28.4s, #16 \n"
            "shrn v29.4h, v29.4s, #16 \n"
            "shrn v30.4h, v30.4s, #16 \n"
            "shrn v31.4h, v31.4s, #16 \n"
            "st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
            "st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n"
            "st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n"
            "st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n"
            "st1 {v20.4h, v21.4h, v22.4h}, [%5], #24 \n"
            "st1 {v23.4h, v24.4h, v25.4h}, [%6], #24 \n"
            "st1 {v26.4h, v27.4h, v28.4h}, [%7], #24 \n"
            "st1 {v29.4h, v30.4h, v31.4h}, [%8], #24 \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(outptr4),   // %5
            "=r"(outptr5),   // %6
            "=r"(outptr6),   // %7
            "=r"(outptr7),   // %8
            "=r"(tmpptr),    // %9
            "=r"(kptr)       // %10
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(outptr4),
            "6"(outptr5),
            "7"(outptr6),
            "8"(outptr7),
            "9"(tmpptr),
            "10"(kptr),
            "r"(biasptr)     // %22
            : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
    }
    // 8 output channels x 8 columns per iteration; accumulators v16..v31.
    for (; i + 7 < size; i += 8)
    {
        unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8);

        int nn = inch; // inch always > 0

        asm volatile(
            // broadcast the 8 bias floats into the 16 accumulators
            "ld1 {v30.4s, v31.4s}, [%22] \n"
            "dup v16.4s, v30.s[0] \n"
            "dup v17.4s, v30.s[0] \n"
            "dup v18.4s, v30.s[1] \n"
            "dup v19.4s, v30.s[1] \n"
            "dup v20.4s, v30.s[2] \n"
            "dup v21.4s, v30.s[2] \n"
            "dup v22.4s, v30.s[3] \n"
            "dup v23.4s, v30.s[3] \n"
            "dup v24.4s, v31.s[0] \n"
            "dup v25.4s, v31.s[0] \n"
            "dup v26.4s, v31.s[1] \n"
            "dup v27.4s, v31.s[1] \n"
            "dup v28.4s, v31.s[2] \n"
            "dup v29.4s, v31.s[2] \n"
            "dup v30.4s, v31.s[3] \n"
            "dup v31.4s, v31.s[3] \n"

            "0: \n"
            "prfm pldl1keep, [%9, #256] \n"
            "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
            "prfm pldl1keep, [%10, #256] \n"
            "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
            // widen bf16 payloads to fp32 (value becomes the high half)
            "shll v0.4s, v0.4h, #16 \n"
            "shll v1.4s, v1.4h, #16 \n"
            "shll v2.4s, v2.4h, #16 \n"
            "shll v3.4s, v3.4h, #16 \n"
            "shll v4.4s, v4.4h, #16 \n"
            "shll v5.4s, v5.4h, #16 \n"
            "shll v6.4s, v6.4h, #16 \n"
            "shll v7.4s, v7.4h, #16 \n"
            "fmla v16.4s, v0.4s, v4.s[0] \n"
            "fmla v18.4s, v0.4s, v4.s[1] \n"
            "fmla v20.4s, v0.4s, v4.s[2] \n"
            "fmla v22.4s, v0.4s, v4.s[3] \n"
            "fmla v24.4s, v0.4s, v5.s[0] \n"
            "fmla v26.4s, v0.4s, v5.s[1] \n"
            "fmla v28.4s, v0.4s, v5.s[2] \n"
            "fmla v30.4s, v0.4s, v5.s[3] \n"
            "fmla v17.4s, v1.4s, v4.s[0] \n"
            "fmla v19.4s, v1.4s, v4.s[1] \n"
            "fmla v21.4s, v1.4s, v4.s[2] \n"
            "fmla v23.4s, v1.4s, v4.s[3] \n"
            "fmla v25.4s, v1.4s, v5.s[0] \n"
            "fmla v27.4s, v1.4s, v5.s[1] \n"
            "fmla v29.4s, v1.4s, v5.s[2] \n"
            "fmla v31.4s, v1.4s, v5.s[3] \n"
            "fmla v16.4s, v2.4s, v6.s[0] \n"
            "fmla v18.4s, v2.4s, v6.s[1] \n"
            "fmla v20.4s, v2.4s, v6.s[2] \n"
            "fmla v22.4s, v2.4s, v6.s[3] \n"
            "fmla v24.4s, v2.4s, v7.s[0] \n"
            "fmla v26.4s, v2.4s, v7.s[1] \n"
            "fmla v28.4s, v2.4s, v7.s[2] \n"
            "fmla v30.4s, v2.4s, v7.s[3] \n"
            "fmla v17.4s, v3.4s, v6.s[0] \n"
            "fmla v19.4s, v3.4s, v6.s[1] \n"
            "fmla v21.4s, v3.4s, v6.s[2] \n"
            "fmla v23.4s, v3.4s, v6.s[3] \n"
            "fmla v25.4s, v3.4s, v7.s[0] \n"
            "fmla v27.4s, v3.4s, v7.s[1] \n"
            "fmla v29.4s, v3.4s, v7.s[2] \n"
            "fmla v31.4s, v3.4s, v7.s[3] \n"
            "prfm pldl1keep, [%9, #256] \n"
            "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%9], #32 \n"
            "prfm pldl1keep, [%10, #256] \n"
            "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
            "shll v12.4s, v12.4h, #16 \n"
            "shll v13.4s, v13.4h, #16 \n"
            "shll v14.4s, v14.4h, #16 \n"
            "shll v15.4s, v15.4h, #16 \n"
            "shll v8.4s, v8.4h, #16 \n"
            "shll v9.4s, v9.4h, #16 \n"
            "shll v10.4s, v10.4h, #16 \n"
            "shll v11.4s, v11.4h, #16 \n"
            "fmla v16.4s, v12.4s, v8.s[0] \n"
            "fmla v18.4s, v12.4s, v8.s[1] \n"
            "fmla v20.4s, v12.4s, v8.s[2] \n"
            "fmla v22.4s, v12.4s, v8.s[3] \n"
            "fmla v24.4s, v12.4s, v9.s[0] \n"
            "fmla v26.4s, v12.4s, v9.s[1] \n"
            "fmla v28.4s, v12.4s, v9.s[2] \n"
            "fmla v30.4s, v12.4s, v9.s[3] \n"
            "fmla v17.4s, v13.4s, v8.s[0] \n"
            "fmla v19.4s, v13.4s, v8.s[1] \n"
            "fmla v21.4s, v13.4s, v8.s[2] \n"
            "fmla v23.4s, v13.4s, v8.s[3] \n"
            "fmla v25.4s, v13.4s, v9.s[0] \n"
            "fmla v27.4s, v13.4s, v9.s[1] \n"
            "fmla v29.4s, v13.4s, v9.s[2] \n"
            "fmla v31.4s, v13.4s, v9.s[3] \n"
            "subs %w0, %w0, #1 \n"
            "fmla v16.4s, v14.4s, v10.s[0] \n"
            "fmla v18.4s, v14.4s, v10.s[1] \n"
            "fmla v20.4s, v14.4s, v10.s[2] \n"
            "fmla v22.4s, v14.4s, v10.s[3] \n"
            "fmla v24.4s, v14.4s, v11.s[0] \n"
            "fmla v26.4s, v14.4s, v11.s[1] \n"
            "fmla v28.4s, v14.4s, v11.s[2] \n"
            "fmla v30.4s, v14.4s, v11.s[3] \n"
            "fmla v17.4s, v15.4s, v10.s[0] \n"
            "fmla v19.4s, v15.4s, v10.s[1] \n"
            "fmla v21.4s, v15.4s, v10.s[2] \n"
            "fmla v23.4s, v15.4s, v10.s[3] \n"
            "fmla v25.4s, v15.4s, v11.s[0] \n"
            "fmla v27.4s, v15.4s, v11.s[1] \n"
            "fmla v29.4s, v15.4s, v11.s[2] \n"
            "fmla v31.4s, v15.4s, v11.s[3] \n"
            "bne 0b \n"

            // narrow back to bf16 and store 8 values per output channel
            "shrn v16.4h, v16.4s, #16 \n"
            "shrn v17.4h, v17.4s, #16 \n"
            "shrn v18.4h, v18.4s, #16 \n"
            "shrn v19.4h, v19.4s, #16 \n"
            "shrn v20.4h, v20.4s, #16 \n"
            "shrn v21.4h, v21.4s, #16 \n"
            "shrn v22.4h, v22.4s, #16 \n"
            "shrn v23.4h, v23.4s, #16 \n"
            "shrn v24.4h, v24.4s, #16 \n"
            "shrn v25.4h, v25.4s, #16 \n"
            "shrn v26.4h, v26.4s, #16 \n"
            "shrn v27.4h, v27.4s, #16 \n"
            "shrn v28.4h, v28.4s, #16 \n"
            "shrn v29.4h, v29.4s, #16 \n"
            "shrn v30.4h, v30.4s, #16 \n"
            "shrn v31.4h, v31.4s, #16 \n"
            "st1 {v16.4h, v17.4h}, [%1], #16 \n"
            "st1 {v18.4h, v19.4h}, [%2], #16 \n"
            "st1 {v20.4h, v21.4h}, [%3], #16 \n"
            "st1 {v22.4h, v23.4h}, [%4], #16 \n"
            "st1 {v24.4h, v25.4h}, [%5], #16 \n"
            "st1 {v26.4h, v27.4h}, [%6], #16 \n"
            "st1 {v28.4h, v29.4h}, [%7], #16 \n"
            "st1 {v30.4h, v31.4h}, [%8], #16 \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(outptr4),   // %5
            "=r"(outptr5),   // %6
            "=r"(outptr6),   // %7
            "=r"(outptr7),   // %8
            "=r"(tmpptr),    // %9
            "=r"(kptr)       // %10
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(outptr4),
            "6"(outptr5),
            "7"(outptr6),
            "8"(outptr7),
            "9"(tmpptr),
            "10"(kptr),
            "r"(biasptr)     // %22
            : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
    }
    // 8 output channels x 4 columns per iteration; accumulators v16..v23.
    for (; i + 3 < size; i += 4)
    {
        unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8);

        int nn = inch; // inch always > 0

        asm volatile(
            // broadcast 8 bias floats; note v22/v23 are read before being
            // overwritten by the last two dups, so the order is significant
            "ld1 {v22.4s, v23.4s}, [%22] \n"
            "dup v16.4s, v22.s[0] \n"
            "dup v17.4s, v22.s[1] \n"
            "dup v18.4s, v22.s[2] \n"
            "dup v19.4s, v22.s[3] \n"
            "dup v20.4s, v23.s[0] \n"
            "dup v21.4s, v23.s[1] \n"
            "dup v22.4s, v23.s[2] \n"
            "dup v23.4s, v23.s[3] \n"

            "0: \n"
            "prfm pldl1keep, [%9, #256] \n"
            "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
            "prfm pldl1keep, [%10, #256] \n"
            "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
            "shll v0.4s, v0.4h, #16 \n"
            "shll v1.4s, v1.4h, #16 \n"
            "shll v2.4s, v2.4h, #16 \n"
            "shll v3.4s, v3.4h, #16 \n"
            "shll v4.4s, v4.4h, #16 \n"
            "shll v5.4s, v5.4h, #16 \n"
            "shll v6.4s, v6.4h, #16 \n"
            "shll v7.4s, v7.4h, #16 \n"
            "fmla v16.4s, v0.4s, v4.s[0] \n"
            "fmla v17.4s, v0.4s, v4.s[1] \n"
            "fmla v18.4s, v0.4s, v4.s[2] \n"
            "fmla v19.4s, v0.4s, v4.s[3] \n"
            "fmla v20.4s, v0.4s, v5.s[0] \n"
            "fmla v21.4s, v0.4s, v5.s[1] \n"
            "fmla v22.4s, v0.4s, v5.s[2] \n"
            "fmla v23.4s, v0.4s, v5.s[3] \n"
            "prfm pldl1keep, [%10, #256] \n"
            "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
            "shll v8.4s, v8.4h, #16 \n"
            "shll v9.4s, v9.4h, #16 \n"
            "shll v10.4s, v10.4h, #16 \n"
            "shll v11.4s, v11.4h, #16 \n"
            "fmla v16.4s, v1.4s, v6.s[0] \n"
            "fmla v17.4s, v1.4s, v6.s[1] \n"
            "fmla v18.4s, v1.4s, v6.s[2] \n"
            "fmla v19.4s, v1.4s, v6.s[3] \n"
            "fmla v20.4s, v1.4s, v7.s[0] \n"
            "fmla v21.4s, v1.4s, v7.s[1] \n"
            "fmla v22.4s, v1.4s, v7.s[2] \n"
            "fmla v23.4s, v1.4s, v7.s[3] \n"
            "fmla v16.4s, v2.4s, v8.s[0] \n"
            "fmla v17.4s, v2.4s, v8.s[1] \n"
            "fmla v18.4s, v2.4s, v8.s[2] \n"
            "fmla v19.4s, v2.4s, v8.s[3] \n"
            "fmla v20.4s, v2.4s, v9.s[0] \n"
            "fmla v21.4s, v2.4s, v9.s[1] \n"
            "fmla v22.4s, v2.4s, v9.s[2] \n"
            "fmla v23.4s, v2.4s, v9.s[3] \n"
            "subs %w0, %w0, #1 \n"
            "fmla v16.4s, v3.4s, v10.s[0] \n"
            "fmla v17.4s, v3.4s, v10.s[1] \n"
            "fmla v18.4s, v3.4s, v10.s[2] \n"
            "fmla v19.4s, v3.4s, v10.s[3] \n"
            "fmla v20.4s, v3.4s, v11.s[0] \n"
            "fmla v21.4s, v3.4s, v11.s[1] \n"
            "fmla v22.4s, v3.4s, v11.s[2] \n"
            "fmla v23.4s, v3.4s, v11.s[3] \n"
            "bne 0b \n"

            // narrow back to bf16 and store 4 values per output channel
            "shrn v16.4h, v16.4s, #16 \n"
            "shrn v17.4h, v17.4s, #16 \n"
            "shrn v18.4h, v18.4s, #16 \n"
            "shrn v19.4h, v19.4s, #16 \n"
            "shrn v20.4h, v20.4s, #16 \n"
            "shrn v21.4h, v21.4s, #16 \n"
            "shrn v22.4h, v22.4s, #16 \n"
            "shrn v23.4h, v23.4s, #16 \n"
            "st1 {v16.4h}, [%1], #8 \n"
            "st1 {v17.4h}, [%2], #8 \n"
            "st1 {v18.4h}, [%3], #8 \n"
            "st1 {v19.4h}, [%4], #8 \n"
            "st1 {v20.4h}, [%5], #8 \n"
            "st1 {v21.4h}, [%6], #8 \n"
            "st1 {v22.4h}, [%7], #8 \n"
            "st1 {v23.4h}, [%8], #8 \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(outptr4),   // %5
            "=r"(outptr5),   // %6
            "=r"(outptr6),   // %7
            "=r"(outptr7),   // %8
            "=r"(tmpptr),    // %9
            "=r"(kptr)       // %10
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(outptr4),
            "6"(outptr5),
            "7"(outptr6),
            "8"(outptr7),
            "9"(tmpptr),
            "10"(kptr),
            "r"(biasptr)     // %22
            : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
    }
    // 8 output channels x 1 column per iteration (column remainder).
    for (; i < size; i++)
    {
        unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8);

        int nn = inch; // inch always > 0

        asm volatile(
            // v16/v17 start from the 8 bias values; v18/v19 are extra
            // zero-initialized partial sums folded in after the loop
            "ld1 {v16.4s, v17.4s}, [%22] \n"
            "eor v18.16b, v18.16b, v18.16b \n"
            "eor v19.16b, v19.16b, v19.16b \n"

            "0: \n"
            "prfm pldl1keep, [%9, #64] \n"
            "ld1 {v0.4h}, [%9], #8 \n"
            "prfm pldl1keep, [%10, #256] \n"
            "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
            "shll v0.4s, v0.4h, #16 \n"
            "shll v4.4s, v4.4h, #16 \n"
            "shll v5.4s, v5.4h, #16 \n"
            "shll v6.4s, v6.4h, #16 \n"
            "shll v7.4s, v7.4h, #16 \n"
            "fmla v16.4s, v4.4s, v0.s[0] \n"
            "fmla v17.4s, v5.4s, v0.s[0] \n"
            "fmla v18.4s, v6.4s, v0.s[1] \n"
            "fmla v19.4s, v7.4s, v0.s[1] \n"
            "prfm pldl1keep, [%10, #256] \n"
            "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
            "shll v8.4s, v8.4h, #16 \n"
            "shll v9.4s, v9.4h, #16 \n"
            "shll v10.4s, v10.4h, #16 \n"
            "shll v11.4s, v11.4h, #16 \n"
            "fmla v16.4s, v8.4s, v0.s[2] \n"
            "fmla v17.4s, v9.4s, v0.s[2] \n"
            "subs %w0, %w0, #1 \n"
            "fmla v18.4s, v10.4s, v0.s[3] \n"
            "fmla v19.4s, v11.4s, v0.s[3] \n"
            "bne 0b \n"

            // combine partial sums, narrow to bf16 and scatter one value
            // into each of the 8 output channels
            "fadd v16.4s, v16.4s, v18.4s \n"
            "fadd v17.4s, v17.4s, v19.4s \n"
            "shrn v16.4h, v16.4s, #16 \n"
            "shrn v17.4h, v17.4s, #16 \n"
            "st1 {v16.h}[0], [%1], #2 \n"
            "st1 {v16.h}[1], [%2], #2 \n"
            "st1 {v16.h}[2], [%3], #2 \n"
            "st1 {v16.h}[3], [%4], #2 \n"
            "st1 {v17.h}[0], [%5], #2 \n"
            "st1 {v17.h}[1], [%6], #2 \n"
            "st1 {v17.h}[2], [%7], #2 \n"
            "st1 {v17.h}[3], [%8], #2 \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(outptr4),   // %5
            "=r"(outptr5),   // %6
            "=r"(outptr6),   // %7
            "=r"(outptr7),   // %8
            "=r"(tmpptr),    // %9
            "=r"(kptr)       // %10
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(outptr4),
            "6"(outptr5),
            "7"(outptr6),
            "8"(outptr7),
            "9"(tmpptr),
            "10"(kptr),
            "r"(biasptr)     // %22
            : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
    }
}

// remaining output channels are handled 4 at a time below
remain_outch_start += nn_outch << 3;
nn_outch = (outch - remain_outch_start) >> 2;
#else  // __aarch64__
nn_outch = outch >> 2;
#endif // __aarch64__
// Process 4 output channels per iteration (the only wide path on armv7,
// the remainder path after the 8-channel blocks on aarch64).
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
    int p = remain_outch_start + pp * 4;

    unsigned short* outptr0 = top_blob.channel(p);
    unsigned short* outptr1 = top_blob.channel(p + 1);
    unsigned short* outptr2 = top_blob.channel(p + 2);
    unsigned short* outptr3 = top_blob.channel(p + 3);

    // fall back to zero bias when none is provided
    const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
    const float* biasptr = bias ? bias + p : zeros;

    int i = 0;
#if __aarch64__
    // 4 output channels x 12 columns per iteration; accumulators v8..v19.
    for (; i + 11 < size; i += 12)
    {
        unsigned short* tmpptr = tmp.channel(i / 12);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4);

        int nn = inch; // inch always > 0

        asm volatile(
            // broadcast 4 bias floats; v19 is read before the final dup
            // overwrites it, so the order is significant
            "ld1 {v19.4s}, [%14] \n"
            "dup v8.4s, v19.s[0] \n"
            "dup v9.4s, v19.s[0] \n"
            "dup v10.4s, v19.s[0] \n"
            "dup v11.4s, v19.s[1] \n"
            "dup v12.4s, v19.s[1] \n"
            "dup v13.4s, v19.s[1] \n"
            "dup v14.4s, v19.s[2] \n"
            "dup v15.4s, v19.s[2] \n"
            "dup v16.4s, v19.s[2] \n"
            "dup v17.4s, v19.s[3] \n"
            "dup v18.4s, v19.s[3] \n"
            "dup v19.4s, v19.s[3] \n"

            "0: \n"
            "prfm pldl1keep, [%5, #256] \n"
            "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
            "prfm pldl1keep, [%6, #256] \n"
            "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
            "shll v0.4s, v0.4h, #16 \n"
            "shll v1.4s, v1.4h, #16 \n"
            "shll v2.4s, v2.4h, #16 \n"
            "shll v3.4s, v3.4h, #16 \n"
            "shll v4.4s, v4.4h, #16 \n"
            "shll v5.4s, v5.4h, #16 \n"
            "shll v6.4s, v6.4h, #16 \n"
            "shll v7.4s, v7.4h, #16 \n"
            "fmla v8.4s, v0.4s, v4.s[0] \n"
            "fmla v11.4s, v0.4s, v4.s[1] \n"
            "fmla v14.4s, v0.4s, v4.s[2] \n"
            "fmla v17.4s, v0.4s, v4.s[3] \n"
            "fmla v9.4s, v1.4s, v4.s[0] \n"
            "fmla v12.4s, v1.4s, v4.s[1] \n"
            "fmla v15.4s, v1.4s, v4.s[2] \n"
            "fmla v18.4s, v1.4s, v4.s[3] \n"
            "fmla v10.4s, v2.4s, v4.s[0] \n"
            "fmla v13.4s, v2.4s, v4.s[1] \n"
            "fmla v16.4s, v2.4s, v4.s[2] \n"
            "fmla v19.4s, v2.4s, v4.s[3] \n"
            "prfm pldl1keep, [%5, #256] \n"
            "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n"
            "shll v20.4s, v20.4h, #16 \n"
            "shll v21.4s, v21.4h, #16 \n"
            "shll v22.4s, v22.4h, #16 \n"
            "shll v23.4s, v23.4h, #16 \n"
            "fmla v8.4s, v3.4s, v5.s[0] \n"
            "fmla v11.4s, v3.4s, v5.s[1] \n"
            "fmla v14.4s, v3.4s, v5.s[2] \n"
            "fmla v17.4s, v3.4s, v5.s[3] \n"
            "fmla v9.4s, v20.4s, v5.s[0] \n"
            "fmla v12.4s, v20.4s, v5.s[1] \n"
            "fmla v15.4s, v20.4s, v5.s[2] \n"
            "fmla v18.4s, v20.4s, v5.s[3] \n"
            "fmla v10.4s, v21.4s, v5.s[0] \n"
            "fmla v13.4s, v21.4s, v5.s[1] \n"
            "fmla v16.4s, v21.4s, v5.s[2] \n"
            "fmla v19.4s, v21.4s, v5.s[3] \n"
            "prfm pldl1keep, [%5, #256] \n"
            "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5], #32 \n"
            "shll v24.4s, v24.4h, #16 \n"
            "shll v25.4s, v25.4h, #16 \n"
            "shll v26.4s, v26.4h, #16 \n"
            "shll v27.4s, v27.4h, #16 \n"
            "fmla v8.4s, v22.4s, v6.s[0] \n"
            "fmla v11.4s, v22.4s, v6.s[1] \n"
            "fmla v14.4s, v22.4s, v6.s[2] \n"
            "fmla v17.4s, v22.4s, v6.s[3] \n"
            "fmla v9.4s, v23.4s, v6.s[0] \n"
            "fmla v12.4s, v23.4s, v6.s[1] \n"
            "fmla v15.4s, v23.4s, v6.s[2] \n"
            "fmla v18.4s, v23.4s, v6.s[3] \n"
            "fmla v10.4s, v24.4s, v6.s[0] \n"
            "fmla v13.4s, v24.4s, v6.s[1] \n"
            "fmla v16.4s, v24.4s, v6.s[2] \n"
            "fmla v19.4s, v24.4s, v6.s[3] \n"
            "subs %w0, %w0, #1 \n"
            "fmla v8.4s, v25.4s, v7.s[0] \n"
            "fmla v11.4s, v25.4s, v7.s[1] \n"
            "fmla v14.4s, v25.4s, v7.s[2] \n"
            "fmla v17.4s, v25.4s, v7.s[3] \n"
            "fmla v9.4s, v26.4s, v7.s[0] \n"
            "fmla v12.4s, v26.4s, v7.s[1] \n"
            "fmla v15.4s, v26.4s, v7.s[2] \n"
            "fmla v18.4s, v26.4s, v7.s[3] \n"
            "fmla v10.4s, v27.4s, v7.s[0] \n"
            "fmla v13.4s, v27.4s, v7.s[1] \n"
            "fmla v16.4s, v27.4s, v7.s[2] \n"
            "fmla v19.4s, v27.4s, v7.s[3] \n"
            "bne 0b \n"

            // narrow back to bf16 and store 12 values per output channel
            "shrn v8.4h, v8.4s, #16 \n"
            "shrn v9.4h, v9.4s, #16 \n"
            "shrn v10.4h, v10.4s, #16 \n"
            "shrn v11.4h, v11.4s, #16 \n"
            "shrn v12.4h, v12.4s, #16 \n"
            "shrn v13.4h, v13.4s, #16 \n"
            "shrn v14.4h, v14.4s, #16 \n"
            "shrn v15.4h, v15.4s, #16 \n"
            "shrn v16.4h, v16.4s, #16 \n"
            "shrn v17.4h, v17.4s, #16 \n"
            "shrn v18.4h, v18.4s, #16 \n"
            "shrn v19.4h, v19.4s, #16 \n"
            "st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
            "st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n"
            "st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n"
            "st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(tmpptr),    // %5
            "=r"(kptr)       // %6
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(tmpptr),
            "6"(kptr),
            "r"(biasptr)     // %14
            : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
    }
#endif // __aarch64__
    // 4 output channels x 8 columns per iteration (aarch64 and armv7 paths).
    for (; i + 7 < size; i += 8)
    {
#if __aarch64__
        unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4);
#else
        unsigned short* tmpptr = tmp.channel(i / 8);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4);
#endif

        int nn = inch; // inch always > 0

#if __aarch64__
        asm volatile(
            // broadcast 4 bias floats into accumulators v8..v15;
            // v15 is read before the final dup overwrites it
            "ld1 {v15.4s}, [%14] \n"
            "dup v8.4s, v15.s[0] \n"
            "dup v9.4s, v15.s[0] \n"
            "dup v10.4s, v15.s[1] \n"
            "dup v11.4s, v15.s[1] \n"
            "dup v12.4s, v15.s[2] \n"
            "dup v13.4s, v15.s[2] \n"
            "dup v14.4s, v15.s[3] \n"
            "dup v15.4s, v15.s[3] \n"

            "0: \n"
            "prfm pldl1keep, [%5, #256] \n"
            "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
            "prfm pldl1keep, [%6, #256] \n"
            "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
            "shll v0.4s, v0.4h, #16 \n"
            "shll v1.4s, v1.4h, #16 \n"
            "shll v2.4s, v2.4h, #16 \n"
            "shll v3.4s, v3.4h, #16 \n"
            "shll v4.4s, v4.4h, #16 \n"
            "shll v5.4s, v5.4h, #16 \n"
            "shll v6.4s, v6.4h, #16 \n"
            "shll v7.4s, v7.4h, #16 \n"
            "fmla v8.4s, v0.4s, v4.s[0] \n"
            "fmla v10.4s, v0.4s, v4.s[1] \n"
            "fmla v12.4s, v0.4s, v4.s[2] \n"
            "fmla v14.4s, v0.4s, v4.s[3] \n"
            "fmla v9.4s, v1.4s, v4.s[0] \n"
            "fmla v11.4s, v1.4s, v4.s[1] \n"
            "fmla v13.4s, v1.4s, v4.s[2] \n"
            "fmla v15.4s, v1.4s, v4.s[3] \n"
            "fmla v8.4s, v2.4s, v5.s[0] \n"
            "fmla v10.4s, v2.4s, v5.s[1] \n"
            "fmla v12.4s, v2.4s, v5.s[2] \n"
            "fmla v14.4s, v2.4s, v5.s[3] \n"
            "fmla v9.4s, v3.4s, v5.s[0] \n"
            "fmla v11.4s, v3.4s, v5.s[1] \n"
            "fmla v13.4s, v3.4s, v5.s[2] \n"
            "fmla v15.4s, v3.4s, v5.s[3] \n"
            "prfm pldl1keep, [%5, #256] \n"
            "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n"
            "shll v16.4s, v16.4h, #16 \n"
            "shll v17.4s, v17.4h, #16 \n"
            "shll v18.4s, v18.4h, #16 \n"
            "shll v19.4s, v19.4h, #16 \n"
            "fmla v8.4s, v16.4s, v6.s[0] \n"
            "fmla v10.4s, v16.4s, v6.s[1] \n"
            "fmla v12.4s, v16.4s, v6.s[2] \n"
            "fmla v14.4s, v16.4s, v6.s[3] \n"
            "fmla v9.4s, v17.4s, v6.s[0] \n"
            "fmla v11.4s, v17.4s, v6.s[1] \n"
            "fmla v13.4s, v17.4s, v6.s[2] \n"
            "fmla v15.4s, v17.4s, v6.s[3] \n"
            "subs %w0, %w0, #1 \n"
            "fmla v8.4s, v18.4s, v7.s[0] \n"
            "fmla v10.4s, v18.4s, v7.s[1] \n"
            "fmla v12.4s, v18.4s, v7.s[2] \n"
            "fmla v14.4s, v18.4s, v7.s[3] \n"
            "fmla v9.4s, v19.4s, v7.s[0] \n"
            "fmla v11.4s, v19.4s, v7.s[1] \n"
            "fmla v13.4s, v19.4s, v7.s[2] \n"
            "fmla v15.4s, v19.4s, v7.s[3] \n"
            "bne 0b \n"

            // narrow back to bf16 and store 8 values per output channel
            "shrn v8.4h, v8.4s, #16 \n"
            "shrn v9.4h, v9.4s, #16 \n"
            "shrn v10.4h, v10.4s, #16 \n"
            "shrn v11.4h, v11.4s, #16 \n"
            "shrn v12.4h, v12.4s, #16 \n"
            "shrn v13.4h, v13.4s, #16 \n"
            "shrn v14.4h, v14.4s, #16 \n"
            "shrn v15.4h, v15.4s, #16 \n"
            "st1 {v8.4h, v9.4h}, [%1], #16 \n"
            "st1 {v10.4h, v11.4h}, [%2], #16 \n"
            "st1 {v12.4h, v13.4h}, [%3], #16 \n"
            "st1 {v14.4h, v15.4h}, [%4], #16 \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(tmpptr),    // %5
            "=r"(kptr)       // %6
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(tmpptr),
            "6"(kptr),
            "r"(biasptr)     // %14
            : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else  // __aarch64__
        asm volatile(
            // broadcast 4 bias floats into accumulators q8..q15
            "vld1.f32 {d30-d31}, [%14] \n"
            "vdup.f32 q8, d30[0] \n"
            "vdup.f32 q9, d30[0] \n"
            "vdup.f32 q10, d30[1] \n"
            "vdup.f32 q11, d30[1] \n"
            "vdup.f32 q12, d31[0] \n"
            "vdup.f32 q13, d31[0] \n"
            "vdup.f32 q14, d31[1] \n"
            "vdup.f32 q15, d31[1] \n"

            "0: \n"
            "pld [%5, #256] \n"
            "vld1.u16 {d4-d7}, [%5]! \n"
            "pld [%6, #256] \n"
            "vld1.u16 {d12-d15}, [%6]! \n"
            // widen bf16 payloads to fp32 with a left shift of 16
            "vshll.u16 q0, d4, #16 \n"
            "vshll.u16 q1, d5, #16 \n"
            "vshll.u16 q2, d6, #16 \n"
            "vshll.u16 q3, d7, #16 \n"
            "vshll.u16 q4, d12, #16 \n"
            "vshll.u16 q5, d13, #16 \n"
            "vshll.u16 q6, d14, #16 \n"
            "vshll.u16 q7, d15, #16 \n"
            "vmla.f32 q8, q0, d8[0] \n"
            "vmla.f32 q10, q0, d8[1] \n"
            "vmla.f32 q12, q0, d9[0] \n"
            "vmla.f32 q14, q0, d9[1] \n"
            "vmla.f32 q9, q1, d8[0] \n"
            "vmla.f32 q11, q1, d8[1] \n"
            "vmla.f32 q13, q1, d9[0] \n"
            "vmla.f32 q15, q1, d9[1] \n"
            "vmla.f32 q8, q2, d10[0] \n"
            "vmla.f32 q10, q2, d10[1] \n"
            "vmla.f32 q12, q2, d11[0] \n"
            "vmla.f32 q14, q2, d11[1] \n"
            "vmla.f32 q9, q3, d10[0] \n"
            "vmla.f32 q11, q3, d10[1] \n"
            "vmla.f32 q13, q3, d11[0] \n"
            "vmla.f32 q15, q3, d11[1] \n"
            "pld [%5, #256] \n"
            "vld1.u16 {d4-d7}, [%5]! \n"
            "vshll.u16 q0, d4, #16 \n"
            "vshll.u16 q1, d5, #16 \n"
            "vshll.u16 q2, d6, #16 \n"
            "vshll.u16 q3, d7, #16 \n"
            "vmla.f32 q8, q0, d12[0] \n"
            "vmla.f32 q10, q0, d12[1] \n"
            "vmla.f32 q12, q0, d13[0] \n"
            "vmla.f32 q14, q0, d13[1] \n"
            "vmla.f32 q9, q1, d12[0] \n"
            "vmla.f32 q11, q1, d12[1] \n"
            "vmla.f32 q13, q1, d13[0] \n"
            "vmla.f32 q15, q1, d13[1] \n"
            "subs %0, %0, #1 \n"
            "vmla.f32 q8, q2, d14[0] \n"
            "vmla.f32 q10, q2, d14[1] \n"
            "vmla.f32 q12, q2, d15[0] \n"
            "vmla.f32 q14, q2, d15[1] \n"
            "vmla.f32 q9, q3, d14[0] \n"
            "vmla.f32 q11, q3, d14[1] \n"
            "vmla.f32 q13, q3, d15[0] \n"
            "vmla.f32 q15, q3, d15[1] \n"
            "bne 0b \n"

            // narrow back to bf16 and store 8 values per output channel
            "vshrn.u32 d16, q8, #16 \n"
            "vshrn.u32 d17, q9, #16 \n"
            "vshrn.u32 d20, q10, #16 \n"
            "vshrn.u32 d21, q11, #16 \n"
            "vshrn.u32 d24, q12, #16 \n"
            "vshrn.u32 d25, q13, #16 \n"
            "vshrn.u32 d28, q14, #16 \n"
            "vshrn.u32 d29, q15, #16 \n"
            "vst1.u16 {d16-d17}, [%1 :64]! \n"
            "vst1.u16 {d20-d21}, [%2 :64]! \n"
            "vst1.u16 {d24-d25}, [%3 :64]! \n"
            "vst1.u16 {d28-d29}, [%4 :64]! \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(tmpptr),    // %5
            "=r"(kptr)       // %6
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(tmpptr),
            "6"(kptr),
            "r"(biasptr)     // %14
            : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
    }
    // 4 output channels x 4 columns per iteration.
    for (; i + 3 < size; i += 4)
    {
#if __aarch64__
        unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4);
#else
        unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4);
#endif

        int nn = inch; // inch always > 0

#if __aarch64__
        asm volatile(
            // broadcast 4 bias floats; v11 is read before the final dup
            // overwrites it, so the order is significant
            "ld1 {v11.4s}, [%14] \n"
            "dup v8.4s, v11.s[0] \n"
            "dup v9.4s, v11.s[1] \n"
            "dup v10.4s, v11.s[2] \n"
            "dup v11.4s, v11.s[3] \n"

            "0: \n"
            "prfm pldl1keep, [%5, #256] \n"
            "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
            "prfm pldl1keep, [%6, #256] \n"
            "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
            "shll v0.4s, v0.4h, #16 \n"
            "shll v1.4s, v1.4h, #16 \n"
            "shll v2.4s, v2.4h, #16 \n"
            "shll v3.4s, v3.4h, #16 \n"
            "shll v4.4s, v4.4h, #16 \n"
            "shll v5.4s, v5.4h, #16 \n"
            "shll v6.4s, v6.4h, #16 \n"
            "shll v7.4s, v7.4h, #16 \n"
            "fmla v8.4s, v0.4s, v4.s[0] \n"
            "fmla v9.4s, v0.4s, v4.s[1] \n"
            "fmla v10.4s, v0.4s, v4.s[2] \n"
            "fmla v11.4s, v0.4s, v4.s[3] \n"
            "fmla v8.4s, v1.4s, v5.s[0] \n"
            "fmla v9.4s, v1.4s, v5.s[1] \n"
            "fmla v10.4s, v1.4s, v5.s[2] \n"
            "fmla v11.4s, v1.4s, v5.s[3] \n"
            "subs %w0, %w0, #1 \n"
            "fmla v8.4s, v2.4s, v6.s[0] \n"
            "fmla v9.4s, v2.4s, v6.s[1] \n"
            "fmla v10.4s, v2.4s, v6.s[2] \n"
            "fmla v11.4s, v2.4s, v6.s[3] \n"
            "fmla v8.4s, v3.4s, v7.s[0] \n"
            "fmla v9.4s, v3.4s, v7.s[1] \n"
            "fmla v10.4s, v3.4s, v7.s[2] \n"
            "fmla v11.4s, v3.4s, v7.s[3] \n"
            "bne 0b \n"

            // narrow back to bf16 and store 4 values per output channel
            "shrn v8.4h, v8.4s, #16 \n"
            "shrn v9.4h, v9.4s, #16 \n"
            "shrn v10.4h, v10.4s, #16 \n"
            "shrn v11.4h, v11.4s, #16 \n"
            "st1 {v8.4h}, [%1], #8 \n"
            "st1 {v9.4h}, [%2], #8 \n"
            "st1 {v10.4h}, [%3], #8 \n"
            "st1 {v11.4h}, [%4], #8 \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(tmpptr),    // %5
            "=r"(kptr)       // %6
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(tmpptr),
            "6"(kptr),
            "r"(biasptr)     // %14
            : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else  // __aarch64__
        asm volatile(
            // broadcast 4 bias floats into accumulators q8..q11
            "vld1.f32 {d22-d23}, [%14] \n"
            "vdup.f32 q8, d22[0] \n"
            "vdup.f32 q9, d22[1] \n"
            "vdup.f32 q10, d23[0] \n"
            "vdup.f32 q11, d23[1] \n"

            "0: \n"
            "pld [%5, #256] \n"
            "vld1.u16 {d4-d7}, [%5]! \n"
            "pld [%6, #256] \n"
            "vld1.u16 {d12-d15}, [%6]! \n"
            "vshll.u16 q0, d4, #16 \n"
            "vshll.u16 q1, d5, #16 \n"
            "vshll.u16 q2, d6, #16 \n"
            "vshll.u16 q3, d7, #16 \n"
            "vshll.u16 q4, d12, #16 \n"
            "vshll.u16 q5, d13, #16 \n"
            "vshll.u16 q6, d14, #16 \n"
            "vshll.u16 q7, d15, #16 \n"
            "vmla.f32 q8, q0, d8[0] \n"
            "vmla.f32 q9, q0, d8[1] \n"
            "vmla.f32 q10, q0, d9[0] \n"
            "vmla.f32 q11, q0, d9[1] \n"
            "vmla.f32 q8, q1, d10[0] \n"
            "vmla.f32 q9, q1, d10[1] \n"
            "vmla.f32 q10, q1, d11[0] \n"
            "vmla.f32 q11, q1, d11[1] \n"
            "subs %0, %0, #1 \n"
            "vmla.f32 q8, q2, d12[0] \n"
            "vmla.f32 q9, q2, d12[1] \n"
            "vmla.f32 q10, q2, d13[0] \n"
            "vmla.f32 q11, q2, d13[1] \n"
            "vmla.f32 q8, q3, d14[0] \n"
            "vmla.f32 q9, q3, d14[1] \n"
            "vmla.f32 q10, q3, d15[0] \n"
            "vmla.f32 q11, q3, d15[1] \n"
            "bne 0b \n"

            // narrow back to bf16 and store 4 values per output channel
            "vshrn.u32 d16, q8, #16 \n"
            "vshrn.u32 d18, q9, #16 \n"
            "vshrn.u32 d20, q10, #16 \n"
            "vshrn.u32 d22, q11, #16 \n"
            "vst1.u16 {d16}, [%1 :64]! \n"
            "vst1.u16 {d18}, [%2 :64]! \n"
            "vst1.u16 {d20}, [%3 :64]! \n"
            "vst1.u16 {d22}, [%4 :64]! \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(tmpptr),    // %5
            "=r"(kptr)       // %6
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(tmpptr),
            "6"(kptr),
            "r"(biasptr)     // %14
            : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
    }
    // 4 output channels x 1 column per iteration (column remainder).
    for (; i < size; i++)
    {
#if __aarch64__
        unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4);
#else
        unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
        const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4);
#endif

        int nn = inch; // inch always > 0

#if __aarch64__
        asm volatile(
            // v8 starts from the bias; v9..v11 are zeroed partial sums
            // folded together after the loop
            "ld1 {v8.4s}, [%14] \n"
            "eor v9.16b, v9.16b, v9.16b \n"
            "eor v10.16b, v10.16b, v10.16b \n"
            "eor v11.16b, v11.16b, v11.16b \n"

            "0: \n"
            "prfm pldl1keep, [%5, #64] \n"
            "ld1 {v0.4h}, [%5], #8 \n"
            "prfm pldl1keep, [%6, #256] \n"
            "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
            "shll v0.4s, v0.4h, #16 \n"
            "shll v4.4s, v4.4h, #16 \n"
            "shll v5.4s, v5.4h, #16 \n"
            "shll v6.4s, v6.4h, #16 \n"
            "shll v7.4s, v7.4h, #16 \n"
            "fmla v8.4s, v4.4s, v0.s[0] \n"
            "fmla v9.4s, v5.4s, v0.s[1] \n"
            "subs %w0, %w0, #1 \n"
            "fmla v10.4s, v6.4s, v0.s[2] \n"
            "fmla v11.4s, v7.4s, v0.s[3] \n"
            "bne 0b \n"

            // combine partial sums, narrow to bf16 and scatter one value
            // into each of the 4 output channels
            "fadd v8.4s, v8.4s, v9.4s \n"
            "fadd v10.4s, v10.4s, v11.4s \n"
            "fadd v8.4s, v8.4s, v10.4s \n"
            "shrn v8.4h, v8.4s, #16 \n"
            "st1 {v8.h}[0], [%1], #2 \n"
            "st1 {v8.h}[1], [%2], #2 \n"
            "st1 {v8.h}[2], [%3], #2 \n"
            "st1 {v8.h}[3], [%4], #2 \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
            "=r"(tmpptr),    // %5
            "=r"(kptr)       // %6
            : "0"(nn),
            "1"(outptr0),
            "2"(outptr1),
            "3"(outptr2),
            "4"(outptr3),
            "5"(tmpptr),
            "6"(kptr),
            "r"(biasptr)     // %14
            : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else  // __aarch64__
        asm volatile(
            // q8 starts from the bias; q9..q11 are zeroed partial sums
            "vld1.f32 {d16-d17}, [%14] \n"
            "veor q9, q9 \n"
            "veor q10, q10 \n"
            "veor q11, q11 \n"

            "0: \n"
            "pld [%5, #64] \n"
            "vld1.u16 {d1}, [%5]! \n"
            "pld [%6, #256] \n"
            "vld1.u16 {d12-d15}, [%6]! \n"
            "vshll.u16 q0, d1, #16 \n"
            "vshll.u16 q4, d12, #16 \n"
            "vshll.u16 q5, d13, #16 \n"
            "vshll.u16 q6, d14, #16 \n"
            "vshll.u16 q7, d15, #16 \n"
            "vmla.f32 q8, q4, d0[0] \n"
            "vmla.f32 q9, q5, d0[1] \n"
            "subs %0, %0, #1 \n"
            "vmla.f32 q10, q6, d1[0] \n"
            "vmla.f32 q11, q7, d1[1] \n"
            "bne 0b \n"

            // combine partial sums, narrow to bf16 and scatter one value
            // into each of the 4 output channels
            "vadd.f32 q8, q8, q9 \n"
            "vadd.f32 q10, q10, q11 \n"
            "vadd.f32 q8, q8, q10 \n"
            "vshrn.u32 d16, q8, #16 \n"
            "vst1.u16 {d16[0]}, [%1]! \n"
            "vst1.u16 {d16[1]}, [%2]! \n"
            "vst1.u16 {d16[2]}, [%3]! \n"
            "vst1.u16 {d16[3]}, [%4]! \n"
            : "=r"(nn),      // %0
            "=r"(outptr0),   // %1
            "=r"(outptr1),   // %2
            "=r"(outptr2),   // %3
            "=r"(outptr3),   // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
unsigned short* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
int nn = inch; // inch always > 0
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"dup v10.4s, %w8 \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v5.4s, v3.4s, v4.s[1] \n"
"fmla v6.4s, v12.4s, v4.s[1] \n"
"fmla v7.4s, v13.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v8.4s, v14.4s, v4.s[2] \n"
"fmla v9.4s, v15.4s, v4.s[2] \n"
"fmla v10.4s, v16.4s, v4.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v5.4s, v17.4s, v4.s[3] \n"
"fmla v6.4s, v18.4s, v4.s[3] \n"
"fmla v7.4s, v19.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v5.4s \n"
"fadd v9.4s, v9.4s, v6.4s \n"
"fadd v10.4s, v10.4s, v7.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
#endif // __aarch64__
for (; i + 7 < size; i += 8)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4 + p % 4);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[1] \n"
"fmla v11.4s, v3.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v8.4s, v12.4s, v4.s[2] \n"
"fmla v9.4s, v13.4s, v4.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v14.4s, v4.s[3] \n"
"fmla v11.4s, v15.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"vdup.f32 q9, %8 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #64] \n"
"vld1.u16 {d9}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q10, q2, d8[1] \n"
"vmla.f32 q11, q3, d8[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d28-d31}, [%2]! \n"
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q8, q12, d9[0] \n"
"vmla.f32 q9, q13, d9[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q14, d9[1] \n"
"vmla.f32 q11, q15, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q9, q9, q11 \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vst1.u16 {d16-d17}, [%1 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4 + p % 4);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"st1 {v8.4h}, [%1], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #64] \n"
"vld1.u16 {d9}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4 + p % 4);
#endif
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (int q = 0; q < inch; q++)
{
float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(tmpptr));
float32x4_t _k0 = vcvt_f32_bf16(vld1_u16(kptr));
_sum0 = vmlaq_f32(_sum0, _r0, _k0);
kptr += 4;
tmpptr += 4;
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss2 = vpadd_f32(_ss, _ss);
float sum0 = vget_lane_f32(_ss2, 0);
#endif
outptr0[0] = float32_to_bfloat16(bias0 + sum0);
outptr0++;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// unsigned short* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const unsigned short* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const unsigned short* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
// 1x1 convolution with stride 2, bf16 storage, input packed 4 / output packed 1.
// Strategy: gather the stride-2 sampled pixels into a dense "shrinked" blob of
// size outw x outh, then delegate to the stride-1 sgemm kernel, which sees a
// plain 1x1s1 problem.
static void conv1x1s2_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After one output row, 2*outw input pixels of the row were consumed; skip
    // the rest of that row plus the entire next (odd, stride-skipped) row.
    // x4 because each pixel is elempack=4 bf16 lanes (unsigned short each).
    const int tailstep = (w - 2 * outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const unsigned short* r0 = bottom_blob.channel(p);
        unsigned short* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // Copy every second pack4 pixel: source advances 8 u16 per output
            // pixel (2 pixels * 4 lanes), destination advances 4.
            for (; j + 3 < outw; j += 4)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0 + 8);
                uint16x4_t _v2 = vld1_u16(r0 + 16);
                uint16x4_t _v3 = vld1_u16(r0 + 24);
                uint16x8_t _v01 = vcombine_u16(_v0, _v1);
                uint16x8_t _v23 = vcombine_u16(_v2, _v3);
                vst1q_u16(outptr, _v01);
                vst1q_u16(outptr + 8, _v23);
                r0 += 32;
                outptr += 16;
            }
            for (; j + 1 < outw; j += 2)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0 + 8);
                uint16x8_t _v = vcombine_u16(_v0, _v1);
                vst1q_u16(outptr, _v);
                r0 += 16;
                outptr += 8;
            }
            for (; j < outw; j++)
            {
                uint16x4_t _v = vld1_u16(r0);
                vst1_u16(outptr, _v);
                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    // The shrinked blob now has unit stride; reuse the s1 sgemm path.
    conv1x1s1_sgemm_pack4to1_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
atomic-15.c | // { dg-do run }
/* Conformance test for the OpenMP `atomic` construct: exercises the read,
   update, and capture forms with every operand ordering the spec allows
   (expr op x, x op expr, pre/post capture, and plain-write capture).
   The literal arithmetic is deliberate -- each expression folds to a known
   value so `x` can be tracked exactly; do not "simplify" the expressions.  */
extern void abort (void);
int x = 6;
int
main ()
{
  int v, l = 2, s = 1;
  /* x = 6 - 3 = 3 (expr + x form).  */
  #pragma omp atomic
  x = -3 + x;
  #pragma omp atomic read
  v = x;
  if (v != 3)
    abort ();
  /* x = 3 + 6 = 9.  */
  #pragma omp atomic update
  x = 3 * 2 * 1 + x;
  #pragma omp atomic read
  v = x;
  if (v != 9)
    abort ();
  /* Capture the NEW value: v = x = 9 | 16 = 25.  */
  #pragma omp atomic capture
  v = x = x | 16;
  if (v != 25)
    abort ();
  /* v = x = 25 + 7 = 32.  */
  #pragma omp atomic capture
  v = x = x + 14 * 2 / 4;
  if (v != 32)
    abort ();
  /* v = x = 5 | 32 = 37.  */
  #pragma omp atomic capture
  v = x = 5 | x;
  if (v != 37)
    abort ();
  /* v = x = 43 - 37 = 6 (expr - x form).  */
  #pragma omp atomic capture
  v = x = 40 + 12 - 2 - 7 - x;
  if (v != 6)
    abort ();
  #pragma omp atomic read
  v = x;
  if (v != 6)
    abort ();
  /* Structured-block capture of the OLD value: v = 6, then x = 9.  */
  #pragma omp atomic capture
  { v = x; x = 3 + x; }
  if (v != 6)
    abort ();
  /* v = 9, then x = 1 - 9 = -8.  */
  #pragma omp atomic capture
  { v = x; x = -1 * -1 * -1 * -1 - x; }
  if (v != 9)
    abort ();
  #pragma omp atomic read
  v = x;
  if (v != -8)
    abort ();
  /* Structured-block capture of the NEW value: x = 4 - (-8) = 12, v = 12.  */
  #pragma omp atomic capture
  { x = 2 * 2 - x; v = x; }
  if (v != 12)
    abort ();
  /* x = 7 & 12 = 4, v = 4.  */
  #pragma omp atomic capture
  { x = 7 & x; v = x; }
  if (v != 4)
    abort ();
  /* Capture old value with a plain atomic WRITE of a constant.  */
  #pragma omp atomic capture
  { v = x; x = 6; }
  if (v != 4)
    abort ();
  #pragma omp atomic read
  v = x;
  if (v != 6)
    abort ();
  /* Write of a constant expression: x = 79.  */
  #pragma omp atomic capture
  { v = x; x = 7 * 8 + 23; }
  if (v != 6)
    abort ();
  #pragma omp atomic read
  v = x;
  if (v != 79)
    abort ();
  /* x = 23 + 24 = 47.  */
  #pragma omp atomic capture
  { v = x; x = 23 + 6 * 4; }
  if (v != 79)
    abort ();
  #pragma omp atomic read
  v = x;
  if (v != 47)
    abort ();
  /* Written expression may be non-constant (conditional): l is 2 -> x = 17.  */
  #pragma omp atomic capture
  { v = x; x = l ? 17 : 12; }
  if (v != 47)
    abort ();
  /* Side effects in the written expression: s++ -> 2, l = 1 + 3 = 4, x = 4.  */
  #pragma omp atomic capture
  { v = x; x = l = s++ + 3; }
  if (v != 17 || l != 4 || s != 2)
    abort ();
  #pragma omp atomic read
  v = x;
  if (v != 4)
    abort ();
  return 0;
}
|
examen_dynamic.c | #include <omp.h>
#include <stdio.h>
/*
 * OpenMP demo: each thread announces itself, then a dynamically scheduled
 * worksharing loop of 2*num_threads iterations shows how `schedule(dynamic)`
 * hands out iterations on demand. Wall-clock time of the whole run is
 * reported via omp_get_wtime().
 *
 * Returns 0 on success (explicit return added; the function previously fell
 * off the end of a non-void main).
 */
int main ()
{
  int iam=0,np=1,i=0;
  double start = omp_get_wtime();
  /* iam/np/i are private: every thread gets its own (uninitialized) copy.
     Without OpenMP the pragma is ignored and the serial initializers above
     (iam=0, np=1) are the values actually printed. */
  #pragma omp parallel private(iam,np,i)
  {
#if defined (_OPENMP)
    np = omp_get_num_threads();
    iam = omp_get_thread_num();
#endif
    printf("Hello from thread %d out of %d \n",iam,np);
    /* Dynamic scheduling: iterations are claimed one chunk at a time, so the
       thread/iteration pairing printed below varies from run to run. */
    #pragma omp for schedule(dynamic)
    for(i=0;i<(np*2);i++)
    {
      printf("Thread %d,contador %d\n",iam,i);
    }
  }
  double end = omp_get_wtime();
  printf("start time = %f\n",start);
  printf("end time = %f\n",end);
  printf("diff time = %f\n",end - start);
  return 0;
}
|
train_share_states.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TRAIN_SHARE_STATES_H_
#define LIGHTGBM_TRAIN_SHARE_STATES_H_
#include <LightGBM/bin.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/threading.h>
#include <algorithm>
#include <memory>
#include <vector>
namespace LightGBM {
/*!
 * \brief Drives histogram construction over a MultiValBin, handling
 *        column/row subsetting, per-thread block partitioning, and the
 *        merge/move of the per-block histogram buffers back into the
 *        caller-owned histogram array.
 */
class MultiValBinWrapper {
 public:
  MultiValBinWrapper(MultiValBin* bin, data_size_t num_data,
    const std::vector<int>& feature_groups_contained);

  /*! \brief Whether the wrapped bin stores data sparsely (false when no bin is set). */
  bool IsSparse() {
    if (multi_val_bin_ != nullptr) {
      return multi_val_bin_->IsSparse();
    }
    return false;
  }

  /*! \brief Prepare for one training iteration (feature/bagging subsets may have changed). */
  void InitTrain(const std::vector<int>& group_feature_start,
    const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
    const std::vector<int8_t>& is_feature_used,
    const data_size_t* bagging_use_indices,
    data_size_t bagging_indices_cnt);

  /*! \brief Copy merged histogram entries from the buffer into the original histogram array. */
  void HistMove(const std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>& hist_buf);

  /*! \brief Reduce the per-block partial histograms in hist_buf into one result. */
  void HistMerge(std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>* hist_buf);

  /*! \brief Ensure hist_buf is large enough for the current bin/block configuration. */
  void ResizeHistBuf(std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>* hist_buf,
    MultiValBin* sub_multi_val_bin,
    hist_t* origin_hist_data);

  /*!
   * \brief Construct histograms over [0, num_data), split across threads by data blocks.
   * \tparam USE_INDICES data is addressed through data_indices (bagging)
   * \tparam ORDERED     gradients/hessians are already ordered by data_indices
   */
  template <bool USE_INDICES, bool ORDERED>
  void ConstructHistograms(const data_size_t* data_indices,
      data_size_t num_data,
      const score_t* gradients,
      const score_t* hessians,
      std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>* hist_buf,
      hist_t* origin_hist_data) {
    // Use the subset bin when either a column or row subset is active.
    const auto cur_multi_val_bin = (is_use_subcol_ || is_use_subrow_)
        ? multi_val_bin_subset_.get()
        : multi_val_bin_.get();
    if (cur_multi_val_bin != nullptr) {
      global_timer.Start("Dataset::sparse_bin_histogram");
      n_data_block_ = 1;
      data_block_size_ = num_data;
      // Partition the rows into n_data_block_ chunks of data_block_size_ each.
      Threading::BlockInfo<data_size_t>(num_threads_, num_data, min_block_size_,
        &n_data_block_, &data_block_size_);
      ResizeHistBuf(hist_buf, cur_multi_val_bin, origin_hist_data);
      OMP_INIT_EX();
      #pragma omp parallel for schedule(static) num_threads(num_threads_)
      for (int block_id = 0; block_id < n_data_block_; ++block_id) {
        OMP_LOOP_EX_BEGIN();
        data_size_t start = block_id * data_block_size_;
        data_size_t end = std::min<data_size_t>(start + data_block_size_, num_data);
        ConstructHistogramsForBlock<USE_INDICES, ORDERED>(
          cur_multi_val_bin, start, end, data_indices, gradients, hessians,
          block_id, hist_buf);
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
      global_timer.Stop("Dataset::sparse_bin_histogram");

      global_timer.Start("Dataset::sparse_bin_histogram_merge");
      HistMerge(hist_buf);
      global_timer.Stop("Dataset::sparse_bin_histogram_merge");
      global_timer.Start("Dataset::sparse_bin_histogram_move");
      HistMove(*hist_buf);
      global_timer.Stop("Dataset::sparse_bin_histogram_move");
    }
  }

  /*!
   * \brief Build the partial histogram for rows [start, end) of one block.
   * Block 0 writes either into the caller's array or (with a column subset)
   * into a scratch region at the tail of hist_buf; blocks > 0 each get their
   * own 2*num_bin_aligned_ slice of hist_buf.
   */
  template <bool USE_INDICES, bool ORDERED>
  void ConstructHistogramsForBlock(const MultiValBin* sub_multi_val_bin,
    data_size_t start, data_size_t end, const data_size_t* data_indices,
    const score_t* gradients, const score_t* hessians, int block_id,
    std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>>* hist_buf) {
    hist_t* data_ptr = origin_hist_data_;
    if (block_id == 0) {
      if (is_use_subcol_) {
        data_ptr = hist_buf->data() + hist_buf->size() - 2 * static_cast<size_t>(num_bin_aligned_);
      }
    } else {
      data_ptr = hist_buf->data() +
        static_cast<size_t>(num_bin_aligned_) * (block_id - 1) * 2;
    }
    // Zero grad+hess pairs for every bin before accumulating.
    std::memset(reinterpret_cast<void*>(data_ptr), 0, num_bin_ * kHistBufferEntrySize);
    if (USE_INDICES) {
      if (ORDERED) {
        sub_multi_val_bin->ConstructHistogramOrdered(data_indices, start, end,
                                                    gradients, hessians, data_ptr);
      } else {
        sub_multi_val_bin->ConstructHistogram(data_indices, start, end, gradients,
                                              hessians, data_ptr);
      }
    } else {
      sub_multi_val_bin->ConstructHistogram(start, end, gradients, hessians,
                                            data_ptr);
    }
  }

  /*! \brief Rebuild multi_val_bin_subset_ from the active feature/bagging subset. */
  void CopyMultiValBinSubset(const std::vector<int>& group_feature_start,
    const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
    const std::vector<int8_t>& is_feature_used,
    const data_size_t* bagging_use_indices,
    data_size_t bagging_indices_cnt);

  void SetUseSubrow(bool is_use_subrow) {
    is_use_subrow_ = is_use_subrow;
  }

  void SetSubrowCopied(bool is_subrow_copied) {
    is_subrow_copied_ = is_subrow_copied;
  }

 private:
  bool is_use_subcol_ = false;       // a column (feature) subset is active
  bool is_use_subrow_ = false;       // a row (bagging) subset is active
  bool is_subrow_copied_ = false;    // subset bin already holds the bagged rows
  std::unique_ptr<MultiValBin> multi_val_bin_;
  std::unique_ptr<MultiValBin> multi_val_bin_subset_;
  // HistMove copy plan: source offsets, destination offsets, and run lengths.
  std::vector<uint32_t> hist_move_src_;
  std::vector<uint32_t> hist_move_dest_;
  std::vector<uint32_t> hist_move_size_;
  const std::vector<int> feature_groups_contained_;

  int num_threads_;
  int num_bin_;
  int num_bin_aligned_;
  int n_data_block_;
  int data_block_size_;
  int min_block_size_;
  int num_data_;

  hist_t* origin_hist_data_;

  // One histogram entry is a (gradient, hessian) pair.
  const size_t kHistBufferEntrySize = 2 * sizeof(hist_t);
};
/*!
 * \brief State shared across tree learners during training: threading and
 *        layout flags, bagging indices, the histogram buffer, and the
 *        MultiValBinWrapper that builds row-wise histograms. All wrapper
 *        operations are forwarded with a null check so a purely column-wise
 *        setup (no multi-value bin) works transparently.
 */
struct TrainingShareStates {
  int num_threads = 0;
  bool is_col_wise = true;
  bool is_constant_hessian = true;
  const data_size_t* bagging_use_indices;
  data_size_t bagging_indices_cnt;

  // unique_ptr members are already null-initialized; a hand-written
  // constructor that reset(nullptr)s them is redundant.
  TrainingShareStates() = default;

  /*! \brief Total number of histogram bins across all features. */
  int num_hist_total_bin() { return num_hist_total_bin_; }

  /*! \brief Per-feature offsets into the flat histogram array. */
  const std::vector<uint32_t>& feature_hist_offsets() { return feature_hist_offsets_; }

  /*! \brief True when row-wise histogramming is active and its bin is sparse. */
  bool IsSparseRowwise() {
    return (multi_val_bin_wrapper_ != nullptr && multi_val_bin_wrapper_->IsSparse());
  }

  /*! \brief Install (or replace) the multi-value bin used for row-wise histograms. */
  void SetMultiValBin(MultiValBin* bin, data_size_t num_data,
    const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
    bool dense_only, bool sparse_only);

  /*! \brief Compute per-feature bin offsets for the chosen (col/row-wise) layout. */
  void CalcBinOffsets(const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
    std::vector<uint32_t>* offsets, bool is_col_wise);

  /*! \brief Forward per-iteration initialization to the wrapper, if any. */
  void InitTrain(const std::vector<int>& group_feature_start,
        const std::vector<std::unique_ptr<FeatureGroup>>& feature_groups,
        const std::vector<int8_t>& is_feature_used) {
    if (multi_val_bin_wrapper_ != nullptr) {
      multi_val_bin_wrapper_->InitTrain(group_feature_start,
        feature_groups,
        is_feature_used,
        bagging_use_indices,
        bagging_indices_cnt);
    }
  }

  /*! \brief Build histograms into hist_data via the wrapper (no-op without one). */
  template <bool USE_INDICES, bool ORDERED>
  void ConstructHistograms(const data_size_t* data_indices,
                          data_size_t num_data,
                          const score_t* gradients,
                          const score_t* hessians,
                          hist_t* hist_data) {
    if (multi_val_bin_wrapper_ != nullptr) {
      multi_val_bin_wrapper_->ConstructHistograms<USE_INDICES, ORDERED>(
        data_indices, num_data, gradients, hessians, &hist_buf_, hist_data);
    }
  }

  void SetUseSubrow(bool is_use_subrow) {
    if (multi_val_bin_wrapper_ != nullptr) {
      multi_val_bin_wrapper_->SetUseSubrow(is_use_subrow);
    }
  }

  void SetSubrowCopied(bool is_subrow_copied) {
    if (multi_val_bin_wrapper_ != nullptr) {
      multi_val_bin_wrapper_->SetSubrowCopied(is_subrow_copied);
    }
  }

 private:
  std::vector<uint32_t> feature_hist_offsets_;
  int num_hist_total_bin_ = 0;
  std::unique_ptr<MultiValBinWrapper> multi_val_bin_wrapper_;
  std::vector<hist_t, Common::AlignmentAllocator<hist_t, kAlignedSize>> hist_buf_;
  int num_total_bin_ = 0;
  double num_elements_per_row_ = 0.0;  // was 0.0f; the member is double
};
} // namespace LightGBM
#endif  // LIGHTGBM_TRAIN_SHARE_STATES_H_
|
cg.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include "globals.h"
#include "randdp.h"
#include "timers.h"
#include <omp.h>
//---------------------------------------------------------------------
#define CACHE_LINE_SIZE_PAD 128
#define INT_PAD_SIZE CACHE_LINE_SIZE_PAD/sizeof(int)
#define DOUBLE_PAD_SIZE CACHE_LINE_SIZE_PAD/sizeof(double)
/* common / main_int_mem / */
static int colidx[NZ];
static int rowstr[NA+1];
static int iv[NA];
static int arow[NA];
static int acol[NAZ];
/* common / main_flt_mem / */
static double aelt[NAZ];
static double a[NZ];
static double x[NA+2];
static double z[NA+2];
static double p[NA+2];
static double q[NA+2];
static double r[NA+2];
/* common / partit_size / */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /urando/ */
static double amult;
static double tran;
/* common /timers/ */
static logical timeron;
//---------------------------------------------------------------------
//---------------------------------------------------------------------
static void conj_grad(int colidx[],
int rowstr[],
double x[],
double z[],
double a[],
double p[],
double q[],
double r[],
double *rnorm);
static void makea(int n,
int nz,
double a[],
int colidx[],
int rowstr[],
int firstrow,
int lastrow,
int firstcol,
int lastcol,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int iv[]);
static void sparse(double a[],
int colidx[],
int rowstr[],
int n,
int nz,
int nozer,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int firstrow,
int lastrow,
int nzloc[],
double rcond,
double shift);
static void sprnvc(int n, int nz, int nn1, double v[], int iv[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
//---------------------------------------------------------------------
/*
 * NPB CG benchmark driver: builds a random sparse matrix (makea), runs one
 * untimed conjugate-gradient iteration to warm code/data pages, then times
 * NITER inverse-power-method iterations and verifies the final eigenvalue
 * estimate zeta against VALID_RESULT.
 *
 * NOTE(review): most vector initializations below are manually 8-way
 * unrolled with a scalar "residue" prologue; the residue counts must match
 * the unrolled loop bounds exactly -- do not change one without the other.
 */
int main(int argc, char *argv[])
{
  omp_set_num_threads(omp_get_num_procs());
  int i, j, k, it;
  double zeta;
  double rnorm;
  double norm_temp1, norm_temp2;
  double t, mflops, tmax;
  //char Class;
  logical verified;
  double zeta_verify_value, epsilon, err;

  char *t_names[T_last];

  for (i = 0; i < T_last; i++) {
    timer_clear(i);
  }

  timer_start(T_init);

  firstrow = 0;
  lastrow  = NA-1;
  firstcol = 0;
  lastcol  = NA-1;

  zeta_verify_value = VALID_RESULT;

  printf("\nCG start...\n\n");
  printf(" Size: %11d\n", NA);
  printf(" Iterations: %5d\n", NITER);
  printf("\n");

  naa = NA;
  nzz = NZ;

  //---------------------------------------------------------------------
  // Inialize random number generator
  //---------------------------------------------------------------------
  tran    = 314159265.0;
  amult   = 1220703125.0;
  zeta    = randlc(&tran, amult);

  //---------------------------------------------------------------------
  //
  //---------------------------------------------------------------------
  makea(naa, nzz, a, colidx, rowstr,
        firstrow, lastrow, firstcol, lastcol,
        arow,
        (int (*)[NONZER+1])(void*)acol,
        (double (*)[NONZER+1])(void*)aelt,
        iv);

  //---------------------------------------------------------------------
  // Note: as a result of the above call to makea:
  //      values of j used in indexing rowstr go from 0 --> lastrow-firstrow
  //      values of colidx which are col indexes go from firstcol --> lastcol
  //      So:
  //      Shift the col index vals from actual (firstcol --> lastcol )
  //      to local, i.e., (0 --> lastcol-firstcol)
  //---------------------------------------------------------------------
  // Unrolling bookkeeping: "residue" counts are handled by scalar prologue
  // loops, the rest by stride-8 unrolled loops.
  int j0, j1, j2, j3, j4;
  int k0, k1;
  int total_num = lastcol-firstcol+1;
  int residue = total_num%8;
  int total_num_NA = NA+1;
  int residue_NA = total_num_NA%8;
  #pragma omp parallel default(shared) private(i, j, k, j0, j1, j2, j3, j4, k0, k1)
  {
    // Shift column indices to the local range [0, lastcol-firstcol].
    #pragma omp for nowait
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      for (k = rowstr[j]; k < rowstr[j+1]; k++) {
        colidx[k] = colidx[k] - firstcol;
      }
    }

    //---------------------------------------------------------------------
    // set starting vector to (1, 1, .... 1)
    //---------------------------------------------------------------------
    #pragma omp for nowait
    for (k0 = 0; k0 < residue_NA; k0++) {
      x[k0] = 1;
    }
    #pragma omp for nowait
    for (j4 = residue_NA; j4 < total_num_NA; j4=j4+8) {
      x[j4] = 1;
      x[j4+1] = 1;
      x[j4+2] = 1;
      x[j4+3] = 1;
      x[j4+4] = 1;
      x[j4+5] = 1;
      x[j4+6] = 1;
      x[j4+7] = 1;
    }
    // Zero the work vectors q, z, r, p (prologue covers all four at once).
    #pragma omp for nowait
    for (k1 = 0; k1 < residue; k1++) {
      q[k1] = 0.0;
      z[k1] = 0.0;
      r[k1] = 0.0;
      p[k1] = 0.0;
    }
    #pragma omp for nowait
    for (j2 = residue; j2 < total_num; j2=j2+8) {
      r[j2] = 0.0;
      r[j2+1] = 0.0;
      r[j2+2] = 0.0;
      r[j2+3] = 0.0;
      r[j2+4] = 0.0;
      r[j2+5] = 0.0;
      r[j2+6] = 0.0;
      r[j2+7] = 0.0;
    }
    #pragma omp for nowait
    for (j0 = residue; j0 < total_num; j0=j0+8) {
      q[j0] = 0.0;
      q[j0+1] = 0.0;
      q[j0+2] = 0.0;
      q[j0+3] = 0.0;
      q[j0+4] = 0.0;
      q[j0+5] = 0.0;
      q[j0+6] = 0.0;
      q[j0+7] = 0.0;
    }
    #pragma omp for nowait
    for (j1 = residue; j1 < total_num; j1=j1+8) {
      z[j1] = 0.0;
      z[j1+1] = 0.0;
      z[j1+2] = 0.0;
      z[j1+3] = 0.0;
      z[j1+4] = 0.0;
      z[j1+5] = 0.0;
      z[j1+6] = 0.0;
      z[j1+7] = 0.0;
    }
    // Last loop has no nowait: implicit barrier closes the region cleanly.
    #pragma omp for
    for (j3 = residue; j3 < total_num; j3=j3+8) {
      p[j3] = 0.0;
      p[j3+1] = 0.0;
      p[j3+2] = 0.0;
      p[j3+3] = 0.0;
      p[j3+4] = 0.0;
      p[j3+5] = 0.0;
      p[j3+6] = 0.0;
      p[j3+7] = 0.0;
    }
    /*
    #pragma omp for nowait
    for (i = 0; i < NA+1; i++) {
      x[i] = 1.0;
    }
    #pragma omp for nowait
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      q[j] = 0.0;
      z[j] = 0.0;
      r[j] = 0.0;
      p[j] = 0.0;
    }
    */
  }
  /*
  memset(x, 1, (NA+1) * sizeof(double));
  memset(q, 0, (lastcol-firstcol+1) * sizeof(double));
  memset(z, 0, (lastcol-firstcol+1) * sizeof(double));
  memset(r, 0, (lastcol-firstcol+1) * sizeof(double));
  memset(p, 0, (lastcol-firstcol+1) * sizeof(double));
  */
  zeta = 0.0;

  //---------------------------------------------------------------------
  //---->
  // Do one iteration untimed to init all code and data page tables
  //----> (then reinit, start timing, to niter its)
  //---------------------------------------------------------------------
  for (it = 1; it <= 1; it++) {
    //---------------------------------------------------------------------
    // The call to the conjugate gradient routine:
    //---------------------------------------------------------------------
    conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);

    //---------------------------------------------------------------------
    // zeta = shift + 1/(x.z)
    // So, first: (x.z)
    // Also, find norm of z
    // So, first: (z.z)
    //---------------------------------------------------------------------
    norm_temp1 = 0.0;
    norm_temp2 = 0.0;
    #pragma omp parallel for default(shared) private(j) reduction(+:norm_temp1,norm_temp2)
    for (j = 0; j < total_num; j++) {
      norm_temp1 = norm_temp1 + x[j] * z[j];
      norm_temp2 = norm_temp2 + z[j] * z[j];
    }

    norm_temp2 = 1.0 / sqrt(norm_temp2);

    //---------------------------------------------------------------------
    // Normalize z to obtain x
    //---------------------------------------------------------------------
    /*
    #pragma omp parallel default(shared) private(j, k)
    {
      #pragma omp for nowait
      for (k = 0; k < residue; k++) {
        x[k] = norm_temp2 * z[k];
      }
      #pragma omp for
      for (j = residue; j < total_num; j=j+8) {
        x[j] = norm_temp2 * z[j];
      }
    }
    */
    /*
    #pragma omp parallel for default(shared) private(j)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      x[j] = norm_temp2 * z[j];
    }
    */
  } // end of do one iteration untimed

  //---------------------------------------------------------------------
  // set starting vector to (1, 1, .... 1)
  //---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(j, k)
  {
    #pragma omp for nowait
    for (k = 0; k < residue_NA; k++) {
      x[k] = 1;
    }
    #pragma omp for
    for (j = residue_NA; j < total_num_NA; j=j+8) {
      x[j] = 1;
      x[j+1] = 1;
      x[j+2] = 1;
      x[j+3] = 1;
      x[j+4] = 1;
      x[j+5] = 1;
      x[j+6] = 1;
      x[j+7] = 1;
    }
  }
  /*
  #pragma omp parallel for default(shared) private(i)
  for (i = 0; i < NA+1; i++) {
    x[i] = 1.0;
  }
  */
  //memset(x, 1, (NA+1) * sizeof(double));
  zeta = 0.0;

  timer_stop(T_init);

  printf(" Initialization time = %15.3f seconds\n", timer_read(T_init));

  timer_start(T_bench);

  //---------------------------------------------------------------------
  //---->
  // Main Iteration for inverse power method
  //---->
  //---------------------------------------------------------------------
  for (it = 1; it <= NITER; it++) {
    //---------------------------------------------------------------------
    // The call to the conjugate gradient routine:
    //---------------------------------------------------------------------
    if (timeron) timer_start(T_conj_grad);
    conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
    if (timeron) timer_stop(T_conj_grad);

    //---------------------------------------------------------------------
    // zeta = shift + 1/(x.z)
    // So, first: (x.z)
    // Also, find norm of z
    // So, first: (z.z)
    //---------------------------------------------------------------------
    norm_temp1 = 0.0;
    norm_temp2 = 0.0;
    #pragma omp parallel for default(shared) private(j) reduction(+:norm_temp1,norm_temp2)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      norm_temp1 = norm_temp1 + x[j]*z[j];
      norm_temp2 = norm_temp2 + z[j]*z[j];
    }

    norm_temp2 = 1.0 / sqrt(norm_temp2);

    zeta = SHIFT + 1.0 / norm_temp1;
    if (it == 1)
      printf("\n   iteration           ||r||                 zeta\n");
    printf("    %5d       %20.14E%20.13f\n", it, rnorm, zeta);

    //---------------------------------------------------------------------
    // Normalize z to obtain x
    //---------------------------------------------------------------------
    #pragma omp parallel default(shared) private(j, k)
    {
      #pragma omp for nowait
      for (k = 0; k < residue; k++) {
        x[k] = norm_temp2 * z[k];
      }
      #pragma omp for
      for (j = residue; j < total_num; j=j+8) {
        x[j] = norm_temp2 * z[j];
        x[j+1] = norm_temp2 * z[j+1];
        x[j+2] = norm_temp2 * z[j+2];
        x[j+3] = norm_temp2 * z[j+3];
        x[j+4] = norm_temp2 * z[j+4];
        x[j+5] = norm_temp2 * z[j+5];
        x[j+6] = norm_temp2 * z[j+6];
        x[j+7] = norm_temp2 * z[j+7];
      }
    }
    /*
    #pragma omp parallel for default(shared) private(j)
    for (j = 0; j < lastcol - firstcol + 1; j++) {
      x[j] = norm_temp2 * z[j];
    }
    */
  } // end of main iter inv pow meth

  timer_stop(T_bench);

  //---------------------------------------------------------------------
  // End of timed section
  //---------------------------------------------------------------------
  t = timer_read(T_bench);

  printf("\nComplete...\n");

  epsilon = 1.0e-10;
  err = fabs(zeta - zeta_verify_value) / zeta_verify_value;
  if (err <= epsilon) {
    verified = true;
    printf(" VERIFICATION SUCCESSFUL\n");
    printf(" Zeta is    %20.13E\n", zeta);
    printf(" Error is   %20.13E\n", err);
  } else {
    verified = false;
    printf(" VERIFICATION FAILED\n");
    printf(" Zeta                %20.13E\n", zeta);
    printf(" The correct zeta is %20.13E\n", zeta_verify_value);
  }
  printf("\n\nExecution time : %lf seconds\n\n", t);

  return 0;
}
//---------------------------------------------------------------------
// Floating point arrays here are named as in spec discussion of
// CG algorithm
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Approximately solve A.z = x with cgitmax CG iterations and return
// the explicit residual norm ||r|| = ||x - A.z|| through rnorm.
// a/colidx/rowstr hold the matrix in CSR form; p, q and r are scratch
// vectors of length naa+1.  Reads the globals naa, firstrow, lastrow,
// firstcol and lastcol set up by the caller.
//---------------------------------------------------------------------
static void conj_grad(int colidx[],
                      int rowstr[],
                      double x[],
                      double z[],
                      double a[],
                      double p[],
                      double q[],
                      double r[],
                      double *rnorm)
{
  int j, k, j0, j1, j2, j3, j4;
  int cgit, cgitmax = 25;
  double d, sum, rho, rho0, alpha, beta;
  // Loop bounds for the 8-way manually unrolled loops below:
  // 'residue'/'residue2' elements are peeled off up front so the
  // strided loops always process a whole multiple of 8.
  int total_num = naa+1;
  int residue = (total_num)%8;
  int rho_num = lastcol-firstcol+1;
  int residue2 = (rho_num)%8;
  rho = 0.0;
  //---------------------------------------------------------------------
  // Initialize the CG algorithm:
  //   q = 0, z = 0, r = x, p = x, then rho = r.r
  //---------------------------------------------------------------------
  #pragma omp parallel default(shared) private(j, k, j0, j1, j2, j3, j4)
  {
    // Peeled prologue covering the first 'residue' elements.
    #pragma omp for nowait
    for (k = 0; k < residue; k++) {
      q[k] = 0.0;
      z[k] = 0.0;
      r[k] = x[k];
      p[k] = x[k];
    }
    #pragma omp for nowait
    for (j2 = residue; j2 < total_num; j2=j2+8) {
      r[j2] = x[j2];
      r[j2+1] = x[j2+1];
      r[j2+2] = x[j2+2];
      r[j2+3] = x[j2+3];
      r[j2+4] = x[j2+4];
      r[j2+5] = x[j2+5];
      r[j2+6] = x[j2+6];
      r[j2+7] = x[j2+7];
    }
    #pragma omp for nowait
    for (j0 = residue; j0 < total_num; j0=j0+8) {
      q[j0] = 0.0;
      q[j0+1] = 0.0;
      q[j0+2] = 0.0;
      q[j0+3] = 0.0;
      q[j0+4] = 0.0;
      q[j0+5] = 0.0;
      q[j0+6] = 0.0;
      q[j0+7] = 0.0;
    }
    #pragma omp for nowait
    for (j1 = residue; j1 < total_num; j1=j1+8) {
      z[j1] = 0.0;
      z[j1+1] = 0.0;
      z[j1+2] = 0.0;
      z[j1+3] = 0.0;
      z[j1+4] = 0.0;
      z[j1+5] = 0.0;
      z[j1+6] = 0.0;
      z[j1+7] = 0.0;
    }
    // NOTE: no 'nowait' here.  The implied barrier guarantees that every
    // thread has finished writing r (in the loops above) before any
    // thread starts reading r in the rho reduction below; with 'nowait'
    // a fast thread could sum r elements not yet initialized by others.
    #pragma omp for
    for (j3 = residue; j3 < total_num; j3=j3+8) {
      p[j3] = x[j3];
      p[j3+1] = x[j3+1];
      p[j3+2] = x[j3+2];
      p[j3+3] = x[j3+3];
      p[j3+4] = x[j3+4];
      p[j3+5] = x[j3+5];
      p[j3+6] = x[j3+6];
      p[j3+7] = x[j3+7];
    }
    //---------------------------------------------------------------------
    // rho = r.r
    // Now, obtain the norm of r: First, sum squares of r elements locally
    // (peeled prologue plus 8-way unrolled main loop).
    //---------------------------------------------------------------------
    #pragma omp for reduction(+:rho)
    for (j4 = 0; j4 < residue2; j4++) {
      rho = rho + r[j4]*r[j4];
    }
    #pragma omp for reduction(+:rho)
    for (j = residue2; j < rho_num; j += 8) {
      rho = rho + r[j]*r[j]
                + r[j+1]*r[j+1]
                + r[j+2]*r[j+2]
                + r[j+3]*r[j+3]
                + r[j+4]*r[j+4]
                + r[j+5]*r[j+5]
                + r[j+6]*r[j+6]
                + r[j+7]*r[j+7];
    }
  }
  //---------------------------------------------------------------------
  //---->
  // The conj grad iteration loop
  //---->
  //---------------------------------------------------------------------
  for (cgit = 1; cgit <= cgitmax; cgit++) {
    //---------------------------------------------------------------------
    // q = A.p
    // The partition submatrix-vector multiply: use workspace w
    //---------------------------------------------------------------------
    //
    // NOTE: this version of the multiply is actually (slightly: maybe %5)
    //       faster on the sp2 on 16 nodes than is the unrolled-by-2 version
    //       below.   On the Cray t3d, the reverse is true, i.e., the
    //       unrolled-by-two version is some 10% faster.
    //       The unrolled-by-8 version below is significantly faster
    //       on the Cray t3d - overall speed of code is 1.5 times faster.
    rho0 = rho;
    d = 0.0;
    rho = 0.0;
    #pragma omp parallel default(shared)
    {
      #pragma omp for private(sum, j, k)
      for (j = 0; j < lastrow - firstrow + 1; j++) {
        sum = 0.0;
        for (k = rowstr[j]; k < rowstr[j+1]; k++) {
          sum = sum + a[k]*p[colidx[k]];
        }
        q[j] = sum;
      }
      //---------------------------------------------------------------------
      // Obtain p.q
      //---------------------------------------------------------------------
      #pragma omp for private(j) reduction(+:d)
      for (j = 0; j < lastcol - firstcol + 1; j++) {
        d = d + p[j]*q[j];
      }
      //---------------------------------------------------------------------
      // Obtain alpha = rho / (p.q)
      //---------------------------------------------------------------------
      #pragma omp single
      alpha = rho0 / d;
      //---------------------------------------------------------------------
      // Obtain z = z + alpha*p
      // and    r = r - alpha*q
      //---------------------------------------------------------------------
      #pragma omp for private(j)
      for (j = 0; j < lastcol - firstcol + 1; j++) {
        z[j] = z[j] + alpha*p[j];
        r[j] = r[j] - alpha*q[j];
      }
      //---------------------------------------------------------------------
      // rho = r.r
      // Now, obtain the norm of r: First, sum squares of r elements locally
      //---------------------------------------------------------------------
      #pragma omp for private(j) reduction(+:rho)
      for (j = 0; j < lastcol - firstcol + 1; j++) {
        rho = rho + r[j]*r[j];
      }
      //---------------------------------------------------------------------
      // Obtain beta:
      //---------------------------------------------------------------------
      #pragma omp single
      beta = rho / rho0;
      //---------------------------------------------------------------------
      // p = r + beta*p
      //---------------------------------------------------------------------
      #pragma omp for private(j)
      for (j = 0; j < lastcol - firstcol + 1; j++) {
        p[j] = r[j] + beta*p[j];
      }
    }
  } // end of do cgit=1,cgitmax
  //---------------------------------------------------------------------
  // Compute residual norm explicitly:  ||r|| = ||x - A.z||
  // First, form A.z
  // The partition submatrix-vector multiply
  //---------------------------------------------------------------------
  sum = 0.0;
  // k must be private here: every thread walks its own CSR rows in the
  // inner loop below, so a shared k would be a data race.
  #pragma omp parallel default(shared) private(j, k, d) shared(sum)
  {
    #pragma omp for
    for (j = 0; j < lastrow - firstrow + 1; j++) {
      d = 0.0;
      for (k = rowstr[j]; k < rowstr[j+1]; k++) {
        d = d + a[k]*z[colidx[k]];
      }
      r[j] = d;
    }
    //---------------------------------------------------------------------
    // At this point, r contains A.z
    //---------------------------------------------------------------------
    #pragma omp for reduction(+:sum)
    for (j = 0; j < lastcol-firstcol+1; j++) {
      double d_tmp = x[j] - r[j];
      sum = sum + d_tmp*d_tmp;
    }
  }
  *rnorm = sqrt(sum);
}
//---------------------------------------------------------------------
// generate the test problem for benchmark 6
// makea generates a sparse matrix with a
// prescribed sparsity distribution
//
// parameter type usage
//
// input
//
// n i number of cols/rows of matrix
// nz i nonzeros as declared array size
// rcond r*8 condition number
// shift r*8 main diagonal shift
//
// output
//
// a r*8 array for nonzeros
// colidx i col indices
// rowstr i row pointers
//
// workspace
//
// iv, arow, acol i
// aelt r*8
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Build the benchmark's random sparse matrix: for each of the n rows,
// draw NONZER random nonzeros (plus a guaranteed diagonal entry of
// 0.5) and hand the whole element list to sparse() for assembly.
//---------------------------------------------------------------------
static void makea(int n,
                  int nz,
                  double a[],
                  int colidx[],
                  int rowstr[],
                  int firstrow,
                  int lastrow,
                  int firstcol,
                  int lastcol,
                  int arow[],
                  int acol[][NONZER+1],
                  double aelt[][NONZER+1],
                  int iv[])
{
  int row, elt, nzcnt, pow2;
  int ivc[NONZER+1];
  double vc[NONZER+1];
  //---------------------------------------------------------------------
  // nonzer is approximately (int(sqrt(nnza /n)));
  //---------------------------------------------------------------------
  //---------------------------------------------------------------------
  // pow2 is the smallest power of two not less than n (and at least 2)
  //---------------------------------------------------------------------
  pow2 = 2;
  while (pow2 < n) {
    pow2 = 2 * pow2;
  }
  //---------------------------------------------------------------------
  // Generate nonzero positions and save for the use in sparse.
  //---------------------------------------------------------------------
  for (row = 0; row < n; row++) {
    nzcnt = NONZER;
    sprnvc(n, nzcnt, pow2, vc, ivc);
    // Force the diagonal element (1-based index row+1) to 0.5.
    vecset(n, vc, ivc, &nzcnt, row+1, 0.5);
    arow[row] = nzcnt;
    for (elt = 0; elt < nzcnt; elt++) {
      acol[row][elt] = ivc[elt] - 1;   // convert to 0-based column
      aelt[row][elt] = vc[elt];
    }
  }
  //---------------------------------------------------------------------
  // ... make the sparse matrix from list of elements with duplicates
  //     (iv is used as workspace)
  //---------------------------------------------------------------------
  sparse(a, colidx, rowstr, n, nz, NONZER, arow, acol,
         aelt, firstrow, lastrow,
         iv, RCOND, SHIFT);
}
//---------------------------------------------------------------------
// rows range from firstrow to lastrow
// the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
//---------------------------------------------------------------------
static void sparse(double a[],
int colidx[],
int rowstr[],
int n,
int nz,
int nozer,
int arow[],
int acol[][NONZER+1],
double aelt[][NONZER+1],
int firstrow,
int lastrow,
int nzloc[],
double rcond,
double shift)
{
// Assemble the final CSR matrix (a, colidx, rowstr) from the per-row
// element lists (arow, acol, aelt): duplicates are summed, columns are
// kept sorted within each row, and rcond - shift is added on the
// diagonal.  nzloc is workspace counting duplicates per row.
int nrows;
//---------------------------------------------------
// generate a sparse matrix from a list of
// [col, row, element] tri
//---------------------------------------------------
int i, j, j1, j2, nza, k, kk, nzrow, jcol;
double size, scale, ratio, va;
logical cont40;
//---------------------------------------------------------------------
// how many rows of result
//---------------------------------------------------------------------
nrows = lastrow - firstrow + 1;
//---------------------------------------------------------------------
// ...count the number of triples in each row
//---------------------------------------------------------------------
//memset(rowstr, 0, (nrows+1)*sizeof(int));
#pragma omp parallel for default(shared) private(j)
for (j = 0; j < nrows+1; j++) {
rowstr[j] = 0;
}
// Each element of row i generates arow[i] entries (the outer product
// in the value-generation pass below), so reserve that much space.
for (i = 0; i < n; i++) {
for (nza = 0; nza < arow[i]; nza++) {
j = acol[i][nza] + 1;
rowstr[j] = rowstr[j] + arow[i];
}
}
// Prefix-sum the counts into CSR row pointers.
rowstr[0] = 0;
for (j = 1; j < nrows+1; j++) {
rowstr[j] = rowstr[j] + rowstr[j-1];
}
nza = rowstr[nrows] - 1;
//---------------------------------------------------------------------
// ... rowstr(j) now is the location of the first nonzero
//     of row j of a
//---------------------------------------------------------------------
if (nza > nz) {
printf("Space for matrix elements exceeded in sparse\n");
printf("nza, nzmax = %d, %d\n", nza, nz);
exit(EXIT_FAILURE);
}
//---------------------------------------------------------------------
// ... preload data pages
//---------------------------------------------------------------------
// colidx == -1 marks an empty slot for the insertion pass below.
#pragma omp parallel for default(shared) private(j, k)
for (j = 0; j < nrows; j++) {
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
a[k] = 0.0;
colidx[k] = -1;
}
nzloc[j] = 0;
}
//---------------------------------------------------------------------
// ... generate actual values by summing duplicates
//---------------------------------------------------------------------
// Row values decay geometrically by 'ratio' so the matrix condition
// number is bounded by rcond.
size = 1.0;
ratio = pow(rcond, (1.0 / (double)(n)));
for (i = 0; i < n; i++) {
for (nza = 0; nza < arow[i]; nza++) {
j = acol[i][nza];
scale = size * aelt[i][nza];
for (nzrow = 0; nzrow < arow[i]; nzrow++) {
jcol = acol[i][nzrow];
va = aelt[i][nzrow] * scale;
//--------------------------------------------------------------------
// ... add the identity * rcond to the generated matrix to bound
//     the smallest eigenvalue from below by rcond
//--------------------------------------------------------------------
if (jcol == j && j == i) {
va = va + rcond - shift;
}
// cont40 emulates the Fortran "goto 40": it records whether a slot
// for column jcol was found (inserted, appended or matched).
cont40 = false;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
if (colidx[k] > jcol) {
//----------------------------------------------------------------
// ... insert colidx here orderly
//----------------------------------------------------------------
// Shift the tail of the row right by one to keep columns sorted.
for (kk = rowstr[j+1]-2; kk >= k; kk--) {
if (colidx[kk] > -1) {
a[kk+1] = a[kk];
colidx[kk+1] = colidx[kk];
}
}
colidx[k] = jcol;
a[k] = 0.0;
cont40 = true;
break;
} else if (colidx[k] == -1) {
colidx[k] = jcol;
cont40 = true;
break;
} else if (colidx[k] == jcol) {
//--------------------------------------------------------------
// ... mark the duplicated entry
//--------------------------------------------------------------
nzloc[j] = nzloc[j] + 1;
cont40 = true;
break;
}
}
if (cont40 == false) {
printf("internal error in sparse: i=%d\n", i);
exit(EXIT_FAILURE);
}
// k still indexes the slot found above; accumulate the value there.
a[k] = a[k] + va;
}
}
size = size * ratio;
}
//---------------------------------------------------------------------
// ... remove empty entries and generate final results
//---------------------------------------------------------------------
// Turn per-row duplicate counts into a running total, then compact
// each row left by the number of duplicates that preceded it.
for (j = 1; j < nrows; j++) {
nzloc[j] = nzloc[j] + nzloc[j-1];
}
for (j = 0; j < nrows; j++) {
if (j > 0) {
j1 = rowstr[j] - nzloc[j-1];
} else {
j1 = 0;
}
j2 = rowstr[j+1] - nzloc[j];
nza = rowstr[j];
for (k = j1; k < j2; k++) {
a[k] = a[nza];
colidx[k] = colidx[nza];
nza = nza + 1;
}
}
// Adjust the row pointers to the compacted layout.
#pragma omp parallel for default(shared) private(j)
for (j = 1; j < nrows+1; j++) {
rowstr[j] = rowstr[j] - nzloc[j-1];
}
nza = rowstr[nrows] - 1;
}
//---------------------------------------------------------------------
// generate a sparse n-vector (v, iv)
// having nzv nonzeros
//
// mark(i) is set to 1 if position i is nonzero.
// mark is all zero on entry and is reset to all zero before exit
// this corrects a performance bug found by John G. Lewis, caused by
// reinitialization of mark on every one of the n calls to sprnvc
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Generate a sparse n-vector (v, iv) having nz nonzeros at distinct
// random 1-based positions.  nn1 is the smallest power of two >= n,
// used to map a random double to an integer index portably.
//---------------------------------------------------------------------
static void sprnvc(int n, int nz, int nn1, double v[], int iv[])
{
  int count = 0;
  // Keep drawing (value, position) pairs until nz distinct positions
  // have been filled; rejected draws are simply discarded.
  while (count < nz) {
    double vecelt = randlc(&tran, amult);
    //---------------------------------------------------------------------
    // generate an integer between 1 and n in a portable manner
    //---------------------------------------------------------------------
    double vecloc = randlc(&tran, amult);
    int idx = icnvrt(vecloc, nn1) + 1;
    if (idx > n) continue;
    //---------------------------------------------------------------------
    // was this integer generated already?
    //---------------------------------------------------------------------
    int duplicate = 0;
    for (int ii = 0; ii < count; ii++) {
      if (iv[ii] == idx) {
        duplicate = 1;
        break;
      }
    }
    if (duplicate) continue;
    v[count] = vecelt;
    iv[count] = idx;
    count = count + 1;
  }
}
//---------------------------------------------------------------------
// scale a double precision number x in (0,1) by a power of 2 and chop it
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// scale a double precision number x in (0,1) by a power of 2 and chop it
//---------------------------------------------------------------------
static int icnvrt(double x, int ipwr2)
{
  const double scaled = (double)ipwr2 * x;
  return (int)scaled;   // truncate toward zero
}
//---------------------------------------------------------------------
// set ith element of sparse vector (v, iv) with
// nzv nonzeros to val
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Set the entry with 1-based position i of sparse vector (v, iv) to
// val.  If position i is not yet present, append it as a new nonzero
// and bump *nzv; otherwise overwrite every occurrence in place.
//---------------------------------------------------------------------
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val)
{
  int idx;
  int found = 0;
  // Scan the whole vector (no early exit) so duplicates, if any,
  // are all updated.
  for (idx = 0; idx < *nzv; idx++) {
    if (iv[idx] == i) {
      v[idx] = val;
      found = 1;
    }
  }
  if (!found) {
    v[*nzv] = val;
    iv[*nzv] = i;
    *nzv = *nzv + 1;
  }
}
|
ChooserDemandCalculator.h | #pragma once
#include <cassert>
#include <fstream>
#include <iostream>
#include <random>
#include <string>
#include "DataStructures/Graph/Graph.h"
#include "Tools/CommandLine/ProgressBar.h"
#include "Tools/OpenMP.h"
#include "Tools/Timer.h"
// A travel demand calculator based on repeatedly choosing random sources and corresponding targets.
// Each source is chosen with probability proportional to its population. The target is chosen to be
// the closest opportunity with sufficiently high fitness.
template <typename GraphT, template <typename> class OpportunityChooserT>
class ChooserDemandCalculator {
public:
// Constructs a travel demand calculator for the specified network.
// Sums per-vertex opportunity counts once up front; requires at least
// one opportunity and a non-negative seed.
explicit ChooserDemandCalculator(const GraphT& graph, const int seed, const bool verbose) noexcept
: graph(graph), numOpportunities(0), seed(seed), verbose(verbose) {
FORALL_VERTICES(graph, v)
numOpportunities += graph.numOpportunities(v);
assert(numOpportunities > 0);
assert(seed >= 0);
}
// Generates OD pairs and writes them to the specified file.
// Each OpenMP thread writes its share of pairs to its own
// "<fileName>.partN" file; callers are expected to merge the parts.
// lambda tunes the fitness distribution, swapProb is the probability
// of emitting the pair reversed (dst,src).
void calculateDemand(
int numODPairs, double lambda, double swapProb, const std::string& fileName) const {
Timer timer;
if (verbose) std::cout << "Calculating demand: ";
ProgressBar bar(numODPairs, verbose);
// Vertex populations are used as sampling weights for the source.
// NOTE(review): assumes population values are laid out contiguously
// from vertex 0 to numVertices()-1 -- confirm against GraphT.
const auto firstWeight = &graph.population(0);
const auto lastWeight = &graph.population(graph.numVertices() - 1) + 1;
#pragma omp parallel
{
// Per-thread chooser, RNG (seed offset by thread id) and output file.
OpportunityChooserT<GraphT> chooser(graph, seed);
std::minstd_rand rand(seed + omp_get_thread_num() + 1);
std::discrete_distribution<> sourceDist(firstWeight, lastWeight);
std::uniform_int_distribution<> rankDist(1, numOpportunities);
std::bernoulli_distribution swapDist(swapProb);
std::ofstream out(fileName + ".part" + std::to_string(omp_get_thread_num()));
assert(out.good());
#pragma omp for schedule(static, 1) nowait
for (auto i = 0; i < numODPairs; ++i) {
const auto src = sourceDist(rand);
auto dst = src;
// Redraw until the target differs from the source.
while (src == dst) {
auto numFitOpportunities = 0;
// Rejection-sample a strictly positive number of "fit" opportunities.
while (numFitOpportunities == 0) {
const auto rank = rankDist(rand);
numFitOpportunities = std::binomial_distribution<>(rank, 1 - lambda)(rand);
}
dst = chooser.findClosestOpportunityWithHighFitness(src, numFitOpportunities);
}
// Optionally swap direction to symmetrize the demand.
if (swapDist(rand))
out << dst << ',' << src << '\n';
else
out << src << ',' << dst << '\n';
++bar;
}
}
bar.finish();
if (verbose) std::cout << "done (" << timer.elapsed() << "ms)." << std::endl;
}
private:
const GraphT& graph; // The network we work on.
int numOpportunities; // The total number of opportunities in the network.
const int seed; // The seed with which the random number generators will be started.
const bool verbose; // Should we display informative messages?
};
|
pr30494.c | /* PR middle-end/30494 */
/* { dg-do run } */
#include <omp.h>
int errors;
/* Verify that V (length M) and W (length M*3+I) still hold the values
   stored by the calling thread in foo/bar; count each mismatch in the
   global ERRORS under an atomic update.  NOTE(review): declared int
   but falls off the end without returning a value; callers ignore the
   result, so this is harmless in practice.  */
int
check (int m, int i, int *v, int *w)
{
int j;
int n = omp_get_thread_num ();
for (j = 0; j < m; j++)
if (v[j] != j + n)
#pragma omp atomic
errors += 1;
for (j = 0; j < m * 3 + i; j++)
if (w[j] != j + 10 + n)
#pragma omp atomic
errors += 1;
}
/* Exercise variable-length arrays inside an orphaned "#pragma omp for"
   (the enclosing parallel region is created by main).  Each iteration
   fills thread-tagged VLAs and has check() verify them.  */
int
foo (int n, int m)
{
int i;
#pragma omp for
for (i = 0; i < 6; i++)
{
int v[n], w[n * 3 + i], j;
for (j = 0; j < n; j++)
v[j] = j + omp_get_thread_num ();
for (j = 0; j < n * 3 + i; j++)
w[j] = j + 10 + omp_get_thread_num ();
check (m, i, v, w);
}
return 0;
}
/* Same as foo, but with a combined "#pragma omp parallel for" creating
   its own team of 4 threads.  */
int
bar (int n, int m)
{
int i;
#pragma omp parallel for num_threads (4)
for (i = 0; i < 6; i++)
{
int v[n], w[n * 3 + i], j;
for (j = 0; j < n; j++)
v[j] = j + omp_get_thread_num ();
for (j = 0; j < n * 3 + i; j++)
w[j] = j + 10 + omp_get_thread_num ();
check (m, i, v, w);
}
return 0;
}
/* Run foo inside an explicit 3-thread parallel region, then bar (which
   makes its own region).  The test passes if it runs to completion;
   mismatches are only accumulated in ERRORS.  */
int
main (void)
{
#pragma omp parallel num_threads (3)
foo (128, 128);
bar (256, 256);
return 0;
}
|
facedetectcnn.h | /*
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement For libfacedetection
(3-clause BSD License)
Copyright (c) 2018-2019, Shiqi Yu, all rights reserved.
shiqi.yu@gmail.com
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall copyright holders or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#pragma once
#define _ENABLE_AVX2 //Please enable it if X64 CPU
//#define _ENABLE_NEON //Please enable it if ARM CPU
int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!!
unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be BGR (three channels) insteed of RGB image!
//DO NOT EDIT the following code if you don't really understand it.
#if defined(_ENABLE_AVX2)
#include <immintrin.h>
#endif
#if defined(_ENABLE_NEON)
#include "arm_neon.h"
//NEON does not support UINT8*INT8 dot product
//to convert the input data to range [0, 127],
//and then use INT8*INT8 dot product
#define _MAX_UINT8_VALUE 127
#error NEON support is unfinished.
#else
#define _MAX_UINT8_VALUE 255
#endif
#if defined(_ENABLE_AVX2)
#define _MALLOC_ALIGN 256
#else
#define _MALLOC_ALIGN 128
#endif
#if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON)
#error Cannot enable the two of SSE2 AVX and NEON at the same time.
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include <string.h>
#include <vector>
#include <iostream>
using namespace std;
void* myAlloc(size_t size);
void myFree_(void* ptr);
#define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0);
#ifndef MIN
# define MIN(a,b) ((a) > (b) ? (b) : (a))
#endif
#ifndef MAX
# define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif
// One detected face: confidence score plus its bounding box.
typedef struct FaceRect_
{
float score; // detection confidence
int x; // left coordinate of the bounding box
int y; // top coordinate of the bounding box
int w; // bounding box width
int h; // bounding box height
}FaceRect;
// A 3-D (width x height x channels) data blob whose per-pixel channel
// vector is padded so each pixel starts on a _MALLOC_ALIGN-bit
// boundary (channelStep is the padded per-pixel size in bytes).
// NOTE(review): the class owns the raw pointer 'data' but defines no
// copy constructor / assignment operator, so copying a CDataBlob would
// double-free -- confirm instances are never copied.
template <class T>
class CDataBlob
{
public:
T * data; // owned buffer, allocated by myAlloc, freed in setNULL()
int width;
int height;
int channels;
int channelStep; // bytes per pixel including alignment padding
float scale; // element * scale = original value
public:
CDataBlob() {
data = 0;
width = 0;
height = 0;
channels = 0;
channelStep = 0;
scale = 1.0f;
}
CDataBlob(int w, int h, int c)
{
data = 0;
create(w, h, c);
}
~CDataBlob()
{
setNULL();
}
// Free the buffer (if any) and reset all dimensions.
void setNULL()
{
if (data)
myFree(&data);
width = height = channels = channelStep = 0;
scale = 1.0f;
}
// Allocate a w x h x c blob; channelStep is rounded up so each pixel's
// channel vector is _MALLOC_ALIGN-bit aligned.  Only the padding
// channels are zeroed here, not the payload.  Returns false if the
// allocation fails.
bool create(int w, int h, int c)
{
setNULL();
width = w;
height = h;
channels = c;
//alloc space for int8 array
int remBytes = (sizeof(T)* channels) % (_MALLOC_ALIGN / 8);
if (remBytes == 0)
this->channelStep = channels * sizeof(T);
else
this->channelStep = (channels * sizeof(T)) + (_MALLOC_ALIGN / 8) - remBytes;
data = (T*)myAlloc(width * height * this->channelStep);
if (data == NULL)
{
cerr << "Cannot alloc memeory for uint8 data blob: "
<< width << "*"
<< height << "*"
<< channels << endl;
return false;
}
//memset(data, 0, width * height * channelStep);
//the following code is faster than memset
//but not only the padding bytes are set to zero.
//BE CAREFUL!!!
//#if defined(_OPENMP)
//#pragma omp parallel for
//#endif
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
// Zero only the alignment-padding channels [channels, pixel_end).
int pixel_end = this->channelStep / sizeof(T);
T * pI = (this->data + (r * this->width + c) * this->channelStep /sizeof(T));
for (int ch = this->channels; ch < pixel_end; ch++)
pI[ch] = 0;
}
}
return true;
}
// Copy data laid out channel-major (Caffe style: ch, row, col) into
// this blob's pixel-major padded layout.  T must be signed char and
// the dimensions must match exactly.
bool setInt8DataFromCaffeFormat(signed char * pData, int dataWidth, int dataHeight, int dataChannels)
{
if (pData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (typeid(signed char) != typeid(T))
{
cerr << "Data must be signed char, the same with the source data." << endl;
return false;
}
if (dataWidth != this->width ||
dataHeight != this->height ||
dataChannels != this->channels)
{
cerr << "The dim of the data can not match that of the Blob." << endl;
return false;
}
for(int row = 0; row < height; row++)
for (int col = 0; col < width; col++)
{
T * p = (this->data + (width * row + col) * channelStep /sizeof(T));
for (int ch = 0; ch < channels; ch++)
{
p[ch] = pData[ch * height * width + row * width + col];
}
}
return true;
}
// Rewrite a 3x3/stride-2/pad-1 convolution input as a 1x1/stride-1
// one: each output pixel gathers its 3x3 RGB neighborhood into 27
// channels.  T must be unsigned char and the image 3-channel.
bool setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char * imgData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep)
{
if (imgData == NULL)
{
cerr << "The input image data is null." << endl;
return false;
}
if (typeid(unsigned char) != typeid(T))
{
cerr << "Data must be unsigned char, the same with the source data." << endl;
return false;
}
if (imgChannels != 3)
{
cerr << "The input image must be a 3-channel RGB image." << endl;
return false;
}
create((imgWidth+1)/2, (imgHeight+1)/2, 27);
//since the pixel assignment cannot fill all the elements in the blob.
//some elements in the blob should be initialized to 0
memset(data, 0, width * height * channelStep);
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (int r = 0; r < this->height; r++)
{
for (int c = 0; c < this->width; c++)
{
T * pData = (unsigned char*)this->data + (r * this->width + c) * this->channelStep;
// Gather the 3x3 neighborhood centered at (2r, 2c) in the source image.
for (int fy = -1; fy <= 1; fy++)
{
int srcy = r * 2 + fy;
if (srcy < 0 || srcy >= imgHeight) //out of the range of the image
continue;
for (int fx = -1; fx <= 1; fx++)
{
int srcx = c * 2 + fx;
if (srcx < 0 || srcx >= imgWidth) //out of the range of the image
continue;
const unsigned char * pImgData = imgData + imgWidthStep * srcy + imgChannels * srcx;
int output_channel_offset = ((fy + 1) * 3 + fx + 1) * 3; //3x3 filters, 3-channel image
pData[output_channel_offset] = (pImgData[0]);
pData[output_channel_offset+1] = (pImgData[1]);
pData[output_channel_offset+2] = (pImgData[2]);
}
}
}
}
return true;
}
// Bounds-checked element access; returns 0 for any out-of-range index
// or when no buffer is allocated.
T getElement(int x, int y, int channel)
{
if (this->data)
{
if (x >= 0 && x < this->width &&
y >= 0 && y < this->height &&
channel >= 0 && channel < this->channels)
{
T * p = this->data + (y*this->width + x)*this->channelStep/sizeof(T);
return (p[channel]);
}
}
return (T)(0);
}
// Debug dump: prints dimensions followed by every element, channel by
// channel (small integer types are widened so they print numerically).
friend ostream &operator<<(ostream &output, const CDataBlob &dataBlob)
{
output << "DataBlob Size (Width, Height, Channel, scale) = ("
<< dataBlob.width
<< ", " << dataBlob.height
<< ", " << dataBlob.channels
<< ", " << dataBlob.scale
<< ")" << endl;
for (int ch = 0; ch < dataBlob.channels; ch++)
{
output << "Channel " << ch << ": " << endl;
for (int row = 0; row < dataBlob.height; row++)
{
output << "(";
for (int col = 0; col < dataBlob.width; col++)
{
T * p = (dataBlob.data + (dataBlob.width * row + col) * dataBlob.channelStep /sizeof(T) );
if(sizeof(T)<4)
output << (int)(p[ch]);
else
output << p[ch];
if (col != dataBlob.width - 1)
output << ", ";
}
output << ")" << endl;
}
}
return output;
}
};
// A convolution layer's weights: a set of quantized (int8) filter
// blobs plus the layer's padding/stride and dequantization scale.
class Filters {
public:
vector<CDataBlob<signed char> *> filters; // one blob per output channel
int pad; // spatial zero-padding applied on each border
int stride; // convolution stride
float scale; //element * scale = original value
};
bool convertInt2Float(CDataBlob<int> * inputData, CDataBlob<float> * outputData);
bool convolution(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<int> *outputData);
bool convolution_relu(CDataBlob<unsigned char> *inputData, const Filters* filters, CDataBlob<unsigned char> *outputData);
bool maxpooling2x2S2(const CDataBlob<unsigned char> *inputData, CDataBlob<unsigned char> *outputData);
bool normalize(CDataBlob<unsigned char> * inputOutputData, float * pScale);
bool priorbox(const CDataBlob<unsigned char> * featureData, const CDataBlob<unsigned char> * imageData, int num_sizes, float * pWinSizes, CDataBlob<float> * outputData);
template<typename T>
bool concat4(const CDataBlob<T> *inputData1, const CDataBlob<T> *inputData2, const CDataBlob<T> *inputData3, const CDataBlob<T> *inputData4, CDataBlob<T> *outputData);
/* the input data for softmax must be a vector, the data stored in a multi-channel blob with size 1x1 */
template<typename T>
bool blob2vector(const CDataBlob<T> * inputData, CDataBlob<T> * outputData);
bool softmax1vector2class(CDataBlob<float> *inputOutputData);
bool detection_output(const CDataBlob<float> * priorbox, const CDataBlob<float> * loc, const CDataBlob<float> * conf, float overlap_threshold, float confidence_threshold, int top_k, int keep_top_k, CDataBlob<float> * outputData);
vector<FaceRect> objectdetect_cnn(unsigned char * rgbImageData, int with, int height, int step);
|
gimple-pretty-print.c | /* Pretty formatting of GIMPLE statements and expressions.
Copyright (C) 2001-2020 Free Software Foundation, Inc.
Contributed by Aldy Hernandez <aldyh@redhat.com> and
Diego Novillo <dnovillo@google.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "dumpfile.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "gimple-predict.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "internal-fn.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "dumpfile.h" /* for dump_flags */
#include "value-prof.h"
#include "trans-mem.h"
#include "cfganal.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
#include "cfgloop.h"
/* Disable warnings about quoting issues in the pp_xxx calls below
that (intentionally) don't follow GCC diagnostic conventions. */
#if __GNUC__ >= 10
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wformat-diag"
#endif
#define INDENT(SPACE) \
do { int i; for (i = 0; i < SPACE; i++) pp_space (buffer); } while (0)
#define GIMPLE_NIY do_niy (buffer,gs)
/* Try to print on BUFFER a default message for the unrecognized
gimple statement GS. */
static void
do_niy (pretty_printer *buffer, const gimple *gs)
{
/* Emit a placeholder containing the statement's code name.  */
pp_printf (buffer, "<<< Unknown GIMPLE statement: %s >>>\n",
gimple_code_name[(int) gimple_code (gs)]);
}
/* Emit a newline and SPC indentation spaces to BUFFER. */
static void
newline_and_indent (pretty_printer *buffer, int spc)
{
/* INDENT emits SPC spaces on BUFFER.  */
pp_newline (buffer);
INDENT (spc);
}
/* Print the GIMPLE statement GS on stderr. */
DEBUG_FUNCTION void
debug_gimple_stmt (gimple *gs)
{
/* Include virtual operands and memory symbols in the debug dump.  */
print_gimple_stmt (stderr, gs, 0, TDF_VOPS|TDF_MEMSYMS);
}
/* Return a formatted string for the execution COUNT (not a
   probability): "[count: N]" for IPA counts, "[local count: N]"
   otherwise, or "" when the count is uninitialized.  The returned
   string is allocated by xstrdup_for_dump.  */
static const char *
dump_profile (profile_count &count)
{
  char *buf;

  /* An uninitialized count is rendered as the empty string.  */
  if (!count.initialized_p ())
    return "";

  /* COUNT is known to be initialized at this point, so no second
     initialized_p () check is needed before the local-count branch.  */
  if (count.ipa_p ())
    buf = xasprintf ("[count: %" PRId64 "]",
		     count.to_gcov_type ());
  else
    buf = xasprintf ("[local count: %" PRId64 "]",
		     count.to_gcov_type ());

  /* Hand back GC/dump-managed storage and release the xasprintf buffer.  */
  const char *ret = xstrdup_for_dump (buf);
  free (buf);
  return ret;
}
/* Return formatted string of a VALUE probability
(biased by REG_BR_PROB_BASE). Returned string is allocated
by xstrdup_for_dump. */
/* Format PROBABILITY as "[NN.NN%]", or "[INV]" when it is not
   initialized.  The returned string is allocated by
   xstrdup_for_dump.  */

static const char *
dump_probability (profile_probability probability)
{
  char *buf;

  if (!probability.initialized_p ())
    buf = xasprintf ("[INV]");
  else
    {
      /* Clamp tiny nonzero probabilities up to 0.01% so they remain
	 visible in the dump.  */
      float fvalue
	= probability.to_reg_br_prob_base () * 100.0f / REG_BR_PROB_BASE;
      if (fvalue < 0.01f && probability.to_reg_br_prob_base ())
	fvalue = 0.01f;
      buf = xasprintf ("[%.2f%%]", fvalue);
    }

  const char *ret = xstrdup_for_dump (buf);
  free (buf);
  return ret;
}
/* Dump the probability of edge E to BUFFER, preceded by a space.  */

static void
dump_edge_probability (pretty_printer *buffer, edge e)
{
  const char *prob = dump_probability (e->probability);
  pp_scalar (buffer, " %s", prob);
}
/* Print GIMPLE statement G to FILE using SPC indentation spaces and
   FLAGS as in pp_gimple_stmt_1, followed by a newline, and flush.  */

void
print_gimple_stmt (FILE *file, gimple *g, int spc, dump_flags_t flags)
{
  pretty_printer pp;
  pp_needs_newline (&pp) = true;
  pp.buffer->stream = file;
  pp_gimple_stmt_1 (&pp, g, spc, flags);
  pp_newline_and_flush (&pp);
}
/* Print the GIMPLE statement REF on stderr with default flags; callable
   from a debugger session.  */

DEBUG_FUNCTION void
debug (gimple &ref)
{
  print_gimple_stmt (stderr, &ref, 0, TDF_NONE);
}
/* Print the GIMPLE statement pointed to by PTR on stderr, or "<nil>"
   when PTR is null; callable from a debugger session.  */

DEBUG_FUNCTION void
debug (gimple *ptr)
{
  if (!ptr)
    {
      fprintf (stderr, "<nil>\n");
      return;
    }
  debug (*ptr);
}
/* Print GIMPLE statement G to FILE using SPC indentation spaces and
   FLAGS as in pp_gimple_stmt_1.  Print only the right-hand side
   of the statement.  */

void
print_gimple_expr (FILE *file, gimple *g, int spc, dump_flags_t flags)
{
  pretty_printer pp;
  pp_needs_newline (&pp) = true;
  pp.buffer->stream = file;
  /* Restrict the dump to the RHS of the statement.  */
  pp_gimple_stmt_1 (&pp, g, spc, flags | TDF_RHS_ONLY);
  pp_flush (&pp);
}
/* Print the GIMPLE sequence SEQ on BUFFER using SPC indentation
   spaces and FLAGS as in pp_gimple_stmt_1.  No newline follows the
   last statement.  The caller is responsible for calling pp_flush on
   BUFFER to finalize the pretty printer.  */

static void
dump_gimple_seq (pretty_printer *buffer, gimple_seq seq, int spc,
                 dump_flags_t flags)
{
  for (gimple_stmt_iterator i = gsi_start (seq); !gsi_end_p (i);
       gsi_next (&i))
    {
      INDENT (spc);
      pp_gimple_stmt_1 (buffer, gsi_stmt (i), spc, flags);
      /* Separate statements with newlines, but emit none after the
         last one.  */
      if (!gsi_one_before_end_p (i))
        pp_newline (buffer);
    }
}
/* Print GIMPLE sequence SEQ to FILE using SPC indentation spaces and
   FLAGS as in pp_gimple_stmt_1, followed by a newline, and flush.  */

void
print_gimple_seq (FILE *file, gimple_seq seq, int spc, dump_flags_t flags)
{
  pretty_printer pp;
  pp_needs_newline (&pp) = true;
  pp.buffer->stream = file;
  dump_gimple_seq (&pp, seq, spc, flags);
  pp_newline_and_flush (&pp);
}
/* Print the GIMPLE sequence SEQ on stderr, including virtual operands
   and memory symbols in the dump.  Intended to be callable from a
   debugger session.  */

DEBUG_FUNCTION void
debug_gimple_seq (gimple_seq seq)
{
  print_gimple_seq (stderr, seq, 0, TDF_VOPS|TDF_MEMSYMS);
}
/* A simple helper to pretty-print some of the gimple tuples in the printf
   style.  The format modifiers are preceded by '%' and are:
     'G' - outputs a string corresponding to the code of the given gimple,
     'S' - outputs a gimple_seq with indent of spc + 2,
     'T' - outputs the tree t,
     'd' - outputs an int as a decimal,
     's' - outputs a string,
     'n' - outputs a newline,
     'x' - outputs an int as hexadecimal,
     '+' - increases indent by 2 then outputs a newline,
     '-' - decreases indent by 2 then outputs a newline.
   Any character that is not a modifier is copied verbatim to BUFFER.
   Note that '+' and '-' mutate the local SPC, so indentation changes
   persist for the rest of the format string.  */

static void
dump_gimple_fmt (pretty_printer *buffer, int spc, dump_flags_t flags,
                 const char *fmt, ...)
{
  va_list args;
  const char *c;
  const char *tmp;

  va_start (args, fmt);
  for (c = fmt; *c; c++)
    {
      if (*c == '%')
        {
          gimple_seq seq;
          tree t;
          gimple *g;

          /* Consume the modifier character following '%'.  */
          switch (*++c)
            {
            case 'G':
              g = va_arg (args, gimple *);
              tmp = gimple_code_name[gimple_code (g)];
              pp_string (buffer, tmp);
              break;

            case 'S':
              /* Sub-sequences print indented two extra columns, and are
                 bracketed by newlines.  */
              seq = va_arg (args, gimple_seq);
              pp_newline (buffer);
              dump_gimple_seq (buffer, seq, spc + 2, flags);
              newline_and_indent (buffer, spc);
              break;

            case 'T':
              t = va_arg (args, tree);
              if (t == NULL_TREE)
                pp_string (buffer, "NULL");
              else
                dump_generic_node (buffer, t, spc, flags, false);
              break;

            case 'd':
              pp_decimal_int (buffer, va_arg (args, int));
              break;

            case 's':
              pp_string (buffer, va_arg (args, char *));
              break;

            case 'n':
              newline_and_indent (buffer, spc);
              break;

            case 'x':
              pp_scalar (buffer, "%x", va_arg (args, int));
              break;

            case '+':
              spc += 2;
              newline_and_indent (buffer, spc);
              break;

            case '-':
              spc -= 2;
              newline_and_indent (buffer, spc);
              break;

            default:
              gcc_unreachable ();
            }
        }
      else
        pp_character (buffer, *c);
    }
  va_end (args);
}
/* Helper for dump_gimple_assign.  Print the unary RHS of the
   assignment GS.  BUFFER, SPC and FLAGS are as in pp_gimple_stmt_1.  */

static void
dump_unary_rhs (pretty_printer *buffer, const gassign *gs, int spc,
                dump_flags_t flags)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (gs);
  tree lhs = gimple_assign_lhs (gs);
  tree rhs = gimple_assign_rhs1 (gs);

  switch (rhs_code)
    {
    case VIEW_CONVERT_EXPR:
    case ASSERT_EXPR:
      /* These print as their operand alone.  */
      dump_generic_node (buffer, rhs, spc, flags, false);
      break;

    case FIXED_CONVERT_EXPR:
    case ADDR_SPACE_CONVERT_EXPR:
    case FIX_TRUNC_EXPR:
    case FLOAT_EXPR:
    CASE_CONVERT:
      /* Conversions print as a C-style cast to the LHS type, with the
         operand parenthesized when its priority requires it.  */
      pp_left_paren (buffer);
      dump_generic_node (buffer, TREE_TYPE (lhs), spc, flags, false);
      pp_string (buffer, ") ");
      if (op_prio (rhs) < op_code_prio (rhs_code))
        {
          pp_left_paren (buffer);
          dump_generic_node (buffer, rhs, spc, flags, false);
          pp_right_paren (buffer);
        }
      else
        dump_generic_node (buffer, rhs, spc, flags, false);
      break;

    case PAREN_EXPR:
      pp_string (buffer, "((");
      dump_generic_node (buffer, rhs, spc, flags, false);
      pp_string (buffer, "))");
      break;

    case ABS_EXPR:
    case ABSU_EXPR:
      /* GIMPLE frontend syntax uses __ABS/__ABSU; the regular dump uses
         the tree code spelling with angle brackets.  */
      if (flags & TDF_GIMPLE)
        {
          pp_string (buffer,
                     rhs_code == ABS_EXPR ? "__ABS " : "__ABSU ");
          dump_generic_node (buffer, rhs, spc, flags, false);
        }
      else
        {
          pp_string (buffer,
                     rhs_code == ABS_EXPR ? "ABS_EXPR <" : "ABSU_EXPR <");
          dump_generic_node (buffer, rhs, spc, flags, false);
          pp_greater (buffer);
        }
      break;

    default:
      if (TREE_CODE_CLASS (rhs_code) == tcc_declaration
          || TREE_CODE_CLASS (rhs_code) == tcc_constant
          || TREE_CODE_CLASS (rhs_code) == tcc_reference
          || rhs_code == SSA_NAME
          || rhs_code == ADDR_EXPR
          || rhs_code == CONSTRUCTOR)
        {
          /* Plain operands print as themselves, with no operator.  */
          dump_generic_node (buffer, rhs, spc, flags, false);
          break;
        }
      else if (rhs_code == BIT_NOT_EXPR)
        pp_complement (buffer);
      else if (rhs_code == TRUTH_NOT_EXPR)
        pp_exclamation (buffer);
      else if (rhs_code == NEGATE_EXPR)
        pp_minus (buffer);
      else
        {
          /* Fallback: show the raw tree code name in brackets.  */
          pp_left_bracket (buffer);
          pp_string (buffer, get_tree_code_name (rhs_code));
          pp_string (buffer, "] ");
        }

      /* Print the operand itself, parenthesized when needed.  */
      if (op_prio (rhs) < op_code_prio (rhs_code))
        {
          pp_left_paren (buffer);
          dump_generic_node (buffer, rhs, spc, flags, false);
          pp_right_paren (buffer);
        }
      else
        dump_generic_node (buffer, rhs, spc, flags, false);
      break;
    }
}
/* Helper for dump_gimple_assign.  Print the binary RHS of the
   assignment GS.  BUFFER, SPC and FLAGS are as in pp_gimple_stmt_1.  */

static void
dump_binary_rhs (pretty_printer *buffer, const gassign *gs, int spc,
                 dump_flags_t flags)
{
  const char *p;
  enum tree_code code = gimple_assign_rhs_code (gs);
  switch (code)
    {
    case MIN_EXPR:
    case MAX_EXPR:
      /* GIMPLE frontend syntax spells these __MIN/__MAX; otherwise fall
         through to the generic uppercase tree-code form below.  */
      if (flags & TDF_GIMPLE)
        {
          pp_string (buffer, code == MIN_EXPR ? "__MIN (" : "__MAX (");
          dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags,
                             false);
          pp_string (buffer, ", ");
          dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags,
                             false);
          pp_string (buffer, ")");
          break;
        }
      else
        {
          /* Deliberate fallthrough into the tree-code-name cases.  */
          gcc_fallthrough ();
        }
    case COMPLEX_EXPR:
    case VEC_WIDEN_MULT_HI_EXPR:
    case VEC_WIDEN_MULT_LO_EXPR:
    case VEC_WIDEN_MULT_EVEN_EXPR:
    case VEC_WIDEN_MULT_ODD_EXPR:
    case VEC_PACK_TRUNC_EXPR:
    case VEC_PACK_SAT_EXPR:
    case VEC_PACK_FIX_TRUNC_EXPR:
    case VEC_PACK_FLOAT_EXPR:
    case VEC_WIDEN_LSHIFT_HI_EXPR:
    case VEC_WIDEN_LSHIFT_LO_EXPR:
    case VEC_SERIES_EXPR:
      /* Print as "CODE_NAME <op1, op2>" with the code name uppercased.  */
      for (p = get_tree_code_name (code); *p; p++)
        pp_character (buffer, TOUPPER (*p));
      pp_string (buffer, " <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    default:
      /* Infix operators: "op1 <sym> op2", parenthesizing each operand
         when its priority requires it.  */
      if (op_prio (gimple_assign_rhs1 (gs)) <= op_code_prio (code))
        {
          pp_left_paren (buffer);
          dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags,
                             false);
          pp_right_paren (buffer);
        }
      else
        dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_space (buffer);
      pp_string (buffer, op_symbol_code (gimple_assign_rhs_code (gs)));
      pp_space (buffer);
      if (op_prio (gimple_assign_rhs2 (gs)) <= op_code_prio (code))
        {
          pp_left_paren (buffer);
          dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags,
                             false);
          pp_right_paren (buffer);
        }
      else
        dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
    }
}
/* Helper for dump_gimple_assign.  Print the ternary RHS of the
   assignment GS.  BUFFER, SPC and FLAGS are as in pp_gimple_stmt_1.  */

static void
dump_ternary_rhs (pretty_printer *buffer, const gassign *gs, int spc,
                  dump_flags_t flags)
{
  const char *p;
  enum tree_code code = gimple_assign_rhs_code (gs);
  switch (code)
    {
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
      /* Print as "CODE_NAME <op1, op2, op3>" with the name uppercased.  */
      for (p = get_tree_code_name (code); *p; p++)
        pp_character (buffer, TOUPPER (*p));
      pp_string (buffer, " <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case DOT_PROD_EXPR:
      pp_string (buffer, "DOT_PROD_EXPR <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case SAD_EXPR:
      pp_string (buffer, "SAD_EXPR <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case VEC_PERM_EXPR:
      /* GIMPLE frontend syntax uses __VEC_PERM (...); the regular dump
         uses VEC_PERM_EXPR <...>.  */
      if (flags & TDF_GIMPLE)
        pp_string (buffer, "__VEC_PERM (");
      else
        pp_string (buffer, "VEC_PERM_EXPR <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      if (flags & TDF_GIMPLE)
        pp_right_paren (buffer);
      else
        pp_greater (buffer);
      break;

    case REALIGN_LOAD_EXPR:
      pp_string (buffer, "REALIGN_LOAD <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case COND_EXPR:
      /* Conditional expressions print in C ternary form.  */
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, " ? ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, " : ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      break;

    case VEC_COND_EXPR:
      pp_string (buffer, "VEC_COND_EXPR <");
      dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc, flags, false);
      pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc, flags, false);
      pp_greater (buffer);
      break;

    case BIT_INSERT_EXPR:
      if (flags & TDF_GIMPLE)
        {
          /* GIMPLE frontend syntax; operands are dumped slim.  */
          pp_string (buffer, "__BIT_INSERT (");
          dump_generic_node (buffer, gimple_assign_rhs1 (gs), spc,
                             flags | TDF_SLIM, false);
          pp_string (buffer, ", ");
          dump_generic_node (buffer, gimple_assign_rhs2 (gs), spc,
                             flags | TDF_SLIM, false);
          pp_string (buffer, ", ");
          dump_generic_node (buffer, gimple_assign_rhs3 (gs), spc,
                             flags | TDF_SLIM, false);
          pp_right_paren (buffer);
        }
      else
        {
          pp_string (buffer, "BIT_INSERT_EXPR <");
          dump_generic_node (buffer, gimple_assign_rhs1 (gs),
                             spc, flags, false);
          pp_string (buffer, ", ");
          dump_generic_node (buffer, gimple_assign_rhs2 (gs),
                             spc, flags, false);
          pp_string (buffer, ", ");
          dump_generic_node (buffer, gimple_assign_rhs3 (gs),
                             spc, flags, false);
          /* For integral inserted values, also show the bit width.  */
          if (INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_rhs2 (gs))))
            {
              pp_string (buffer, " (");
              pp_decimal_int (buffer, TYPE_PRECISION
                                        (TREE_TYPE (gimple_assign_rhs2 (gs))));
              pp_string (buffer, " bits)");
            }
          pp_greater (buffer);
        }
      break;

    default:
      gcc_unreachable ();
    }
}
/* Dump the gimple assignment GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  */

static void
dump_gimple_assign (pretty_printer *buffer, const gassign *gs, int spc,
                    dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      tree arg1 = NULL;
      tree arg2 = NULL;
      tree arg3 = NULL;

      /* Collect the operands that are present; the cases fall through
         so fewer operands simply leave the later args NULL.  */
      switch (gimple_num_ops (gs))
        {
        case 4:
          arg3 = gimple_assign_rhs3 (gs);
          /* FALLTHRU */
        case 3:
          arg2 = gimple_assign_rhs2 (gs);
          /* FALLTHRU */
        case 2:
          arg1 = gimple_assign_rhs1 (gs);
          break;
        default:
          gcc_unreachable ();
        }

      dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T, %T, %T, %T>", gs,
                       get_tree_code_name (gimple_assign_rhs_code (gs)),
                       gimple_assign_lhs (gs), arg1, arg2, arg3);
    }
  else
    {
      if (!(flags & TDF_RHS_ONLY))
        {
          dump_generic_node (buffer, gimple_assign_lhs (gs), spc, flags, false);
          pp_space (buffer);
          pp_equal (buffer);

          /* Annotate non-temporal moves and volatile operations.  */
          if (gimple_assign_nontemporal_move_p (gs))
            pp_string (buffer, "{nt}");
          if (gimple_has_volatile_ops (gs))
            pp_string (buffer, "{v}");

          pp_space (buffer);
        }

      /* The operand count selects the RHS arity printer.  */
      if (gimple_num_ops (gs) == 2)
        dump_unary_rhs (buffer, gs, spc, flags);
      else if (gimple_num_ops (gs) == 3)
        dump_binary_rhs (buffer, gs, spc, flags);
      else if (gimple_num_ops (gs) == 4)
        dump_ternary_rhs (buffer, gs, spc, flags);
      else
        gcc_unreachable ();
      if (!(flags & TDF_RHS_ONLY))
        pp_semicolon (buffer);
    }
}
/* Dump the return statement GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  */

static void
dump_gimple_return (pretty_printer *buffer, const greturn *gs, int spc,
                    dump_flags_t flags)
{
  tree retval = gimple_return_retval (gs);

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, retval);
      return;
    }

  pp_string (buffer, "return");
  if (retval)
    {
      pp_space (buffer);
      dump_generic_node (buffer, retval, spc, flags, false);
    }
  pp_semicolon (buffer);
}
/* Dump the call arguments for a gimple call GS to BUFFER.  FLAGS are as
   in dump_gimple_call.  For a few internal functions whose first
   argument is a small enum-like constant, that argument is printed as
   its symbolic name instead of a number.  */

static void
dump_gimple_call_args (pretty_printer *buffer, const gcall *gs,
                       dump_flags_t flags)
{
  size_t i = 0;

  /* Pretty print first arg to certain internal fns.  */
  if (gimple_call_internal_p (gs))
    {
      const char *const *enums = NULL;
      unsigned limit = 0;

      /* Each case builds a static table of names by stringizing the
         codes via the temporary DEF macro.  */
      switch (gimple_call_internal_fn (gs))
        {
        case IFN_UNIQUE:
#define DEF(X) #X
          static const char *const unique_args[] = {IFN_UNIQUE_CODES};
#undef DEF
          enums = unique_args;
          limit = ARRAY_SIZE (unique_args);
          break;

        case IFN_GOACC_LOOP:
#define DEF(X) #X
          static const char *const loop_args[] = {IFN_GOACC_LOOP_CODES};
#undef DEF
          enums = loop_args;
          limit = ARRAY_SIZE (loop_args);
          break;

        case IFN_GOACC_REDUCTION:
#define DEF(X) #X
          static const char *const reduction_args[]
            = {IFN_GOACC_REDUCTION_CODES};
#undef DEF
          enums = reduction_args;
          limit = ARRAY_SIZE (reduction_args);
          break;

        case IFN_ASAN_MARK:
#define DEF(X) #X
          static const char *const asan_mark_args[] = {IFN_ASAN_MARK_FLAGS};
#undef DEF
          enums = asan_mark_args;
          limit = ARRAY_SIZE (asan_mark_args);
          break;

        default:
          break;
        }
      if (limit)
        {
          tree arg0 = gimple_call_arg (gs, 0);
          HOST_WIDE_INT v;

          /* Only substitute the symbolic name when the constant is an
             in-range non-negative integer; otherwise fall back to the
             regular numeric dump below.  */
          if (TREE_CODE (arg0) == INTEGER_CST
              && tree_fits_shwi_p (arg0)
              && (v = tree_to_shwi (arg0)) >= 0 && v < limit)
            {
              i++;
              pp_string (buffer, enums[v]);
            }
        }
    }

  /* Dump the remaining arguments, comma-separated.  */
  for (; i < gimple_call_num_args (gs); i++)
    {
      if (i)
        pp_string (buffer, ", ");
      dump_generic_node (buffer, gimple_call_arg (gs, i), 0, flags, false);
    }

  if (gimple_call_va_arg_pack_p (gs))
    {
      if (i)
        pp_string (buffer, ", ");

      pp_string (buffer, "__builtin_va_arg_pack ()");
    }
}
/* Dump the points-to solution *PT to BUFFER.  Prints the solution's
   special flags, then the contained variables as "{ D.1 D.2 ... }",
   followed by a parenthesized summary of the vars_contains_* flags.  */

static void
pp_points_to_solution (pretty_printer *buffer, const pt_solution *pt)
{
  /* "anything" subsumes everything else; print it and stop.  */
  if (pt->anything)
    {
      pp_string (buffer, "anything ");
      return;
    }
  if (pt->nonlocal)
    pp_string (buffer, "nonlocal ");
  if (pt->escaped)
    pp_string (buffer, "escaped ");
  if (pt->ipa_escaped)
    pp_string (buffer, "unit-escaped ");
  if (pt->null)
    pp_string (buffer, "null ");
  if (pt->vars
      && !bitmap_empty_p (pt->vars))
    {
      bitmap_iterator bi;
      unsigned i;

      pp_string (buffer, "{ ");
      EXECUTE_IF_SET_IN_BITMAP (pt->vars, 0, i, bi)
        {
          pp_string (buffer, "D.");
          pp_decimal_int (buffer, i);
          pp_space (buffer);
        }
      pp_right_brace (buffer);

      /* Append a comma-separated summary of the per-set flags; COMMA
         tracks whether a separator is needed before the next item.  */
      if (pt->vars_contains_nonlocal
          || pt->vars_contains_escaped
          || pt->vars_contains_escaped_heap
          || pt->vars_contains_restrict)
        {
          const char *comma = "";
          pp_string (buffer, " (");
          if (pt->vars_contains_nonlocal)
            {
              pp_string (buffer, "nonlocal");
              comma = ", ";
            }
          if (pt->vars_contains_escaped)
            {
              pp_string (buffer, comma);
              pp_string (buffer, "escaped");
              comma = ", ";
            }
          if (pt->vars_contains_escaped_heap)
            {
              pp_string (buffer, comma);
              pp_string (buffer, "escaped heap");
              comma = ", ";
            }
          if (pt->vars_contains_restrict)
            {
              pp_string (buffer, comma);
              pp_string (buffer, "restrict");
              comma = ", ";
            }
          /* NOTE(review): "interposable" only prints when one of the
             four flags above is also set — the guard condition does not
             test vars_contains_interposable; confirm this is intended.  */
          if (pt->vars_contains_interposable)
            {
              pp_string (buffer, comma);
              pp_string (buffer, "interposable");
            }
          pp_string (buffer, ")");
        }
    }
}
/* Dump the call statement GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  */

static void
dump_gimple_call (pretty_printer *buffer, const gcall *gs, int spc,
                  dump_flags_t flags)
{
  tree lhs = gimple_call_lhs (gs);
  tree fn = gimple_call_fn (gs);

  /* With TDF_ALIAS, dump the call's use and clobber points-to sets
     first, each on its own line.  */
  if (flags & TDF_ALIAS)
    {
      const pt_solution *pt;
      pt = gimple_call_use_set (gs);
      if (!pt_solution_empty_p (pt))
        {
          pp_string (buffer, "# USE = ");
          pp_points_to_solution (buffer, pt);
          newline_and_indent (buffer, spc);
        }
      pt = gimple_call_clobber_set (gs);
      if (!pt_solution_empty_p (pt))
        {
          pp_string (buffer, "# CLB = ");
          pp_points_to_solution (buffer, pt);
          newline_and_indent (buffer, spc);
        }
    }

  if (flags & TDF_RAW)
    {
      /* Internal functions have no fndecl; print their name with a
         leading dot instead.  */
      if (gimple_call_internal_p (gs))
        dump_gimple_fmt (buffer, spc, flags, "%G <.%s, %T", gs,
                         internal_fn_name (gimple_call_internal_fn (gs)), lhs);
      else
        dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T", gs, fn, lhs);
      if (gimple_call_num_args (gs) > 0)
        {
          pp_string (buffer, ", ");
          dump_gimple_call_args (buffer, gs, flags);
        }
      pp_greater (buffer);
    }
  else
    {
      if (lhs && !(flags & TDF_RHS_ONLY))
        {
          dump_generic_node (buffer, lhs, spc, flags, false);
          pp_string (buffer, " =");

          if (gimple_has_volatile_ops (gs))
            pp_string (buffer, "{v}");

          pp_space (buffer);
        }
      if (gimple_call_internal_p (gs))
        {
          pp_dot (buffer);
          pp_string (buffer, internal_fn_name (gimple_call_internal_fn (gs)));
        }
      else
        print_call_name (buffer, fn, flags);
      pp_string (buffer, " (");
      dump_gimple_call_args (buffer, gs, flags);
      pp_right_paren (buffer);
      if (!(flags & TDF_RHS_ONLY))
        pp_semicolon (buffer);
    }

  /* Annotations common to both raw and regular dumps.  */
  if (gimple_call_chain (gs))
    {
      pp_string (buffer, " [static-chain: ");
      dump_generic_node (buffer, gimple_call_chain (gs), spc, flags, false);
      pp_right_bracket (buffer);
    }

  if (gimple_call_return_slot_opt_p (gs))
    pp_string (buffer, " [return slot optimization]");
  if (gimple_call_tail_p (gs))
    pp_string (buffer, " [tail call]");
  if (gimple_call_must_tail_p (gs))
    pp_string (buffer, " [must tail call]");

  /* Internal calls have no fndecl; nothing further to annotate.  */
  if (fn == NULL)
    return;

  /* Dump the arguments of _ITM_beginTransaction sanely.  */
  if (TREE_CODE (fn) == ADDR_EXPR)
    fn = TREE_OPERAND (fn, 0);
  if (TREE_CODE (fn) == FUNCTION_DECL && decl_is_tm_clone (fn))
    pp_string (buffer, " [tm-clone]");
  if (TREE_CODE (fn) == FUNCTION_DECL
      && fndecl_built_in_p (fn, BUILT_IN_TM_START)
      && gimple_call_num_args (gs) > 0)
    {
      tree t = gimple_call_arg (gs, 0);
      unsigned HOST_WIDE_INT props;
      gcc_assert (TREE_CODE (t) == INTEGER_CST);

      pp_string (buffer, " [ ");

      /* Get the transaction code properties.  */
      props = TREE_INT_CST_LOW (t);

      if (props & PR_INSTRUMENTEDCODE)
        pp_string (buffer, "instrumentedCode ");
      if (props & PR_UNINSTRUMENTEDCODE)
        pp_string (buffer, "uninstrumentedCode ");
      if (props & PR_HASNOXMMUPDATE)
        pp_string (buffer, "hasNoXMMUpdate ");
      if (props & PR_HASNOABORT)
        pp_string (buffer, "hasNoAbort ");
      if (props & PR_HASNOIRREVOCABLE)
        pp_string (buffer, "hasNoIrrevocable ");
      if (props & PR_DOESGOIRREVOCABLE)
        pp_string (buffer, "doesGoIrrevocable ");
      if (props & PR_HASNOSIMPLEREADS)
        pp_string (buffer, "hasNoSimpleReads ");
      if (props & PR_AWBARRIERSOMITTED)
        pp_string (buffer, "awBarriersOmitted ");
      if (props & PR_RARBARRIERSOMITTED)
        pp_string (buffer, "RaRBarriersOmitted ");
      if (props & PR_UNDOLOGCODE)
        pp_string (buffer, "undoLogCode ");
      if (props & PR_PREFERUNINSTRUMENTED)
        pp_string (buffer, "preferUninstrumented ");
      if (props & PR_EXCEPTIONBLOCK)
        pp_string (buffer, "exceptionBlock ");
      if (props & PR_HASELSE)
        pp_string (buffer, "hasElse ");
      if (props & PR_READONLY)
        pp_string (buffer, "readOnly ");

      pp_right_bracket (buffer);
    }
}
/* Dump the switch statement GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  With TDF_GIMPLE the body uses frontend-parsable
   "{ ...; }" syntax; otherwise "< ..., >" syntax.  */

static void
dump_gimple_switch (pretty_printer *buffer, const gswitch *gs, int spc,
                    dump_flags_t flags)
{
  unsigned int i;

  GIMPLE_CHECK (gs, GIMPLE_SWITCH);
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T, ", gs,
                     gimple_switch_index (gs));
  else
    {
      pp_string (buffer, "switch (");
      dump_generic_node (buffer, gimple_switch_index (gs), spc, flags, true);
      if (flags & TDF_GIMPLE)
        pp_string (buffer, ") {");
      else
        pp_string (buffer, ") <");
    }

  for (i = 0; i < gimple_switch_num_labels (gs); i++)
    {
      tree case_label = gimple_switch_label (gs, i);
      gcc_checking_assert (case_label != NULL_TREE);
      dump_generic_node (buffer, case_label, spc, flags, false);
      pp_space (buffer);
      tree label = CASE_LABEL (case_label);
      dump_generic_node (buffer, label, spc, flags, false);

      /* When the CFG is available, append the probability of the edge
         leading to each case's destination block.  */
      if (cfun && cfun->cfg)
        {
          basic_block dest = label_to_block (cfun, label);
          if (dest)
            {
              edge label_edge = find_edge (gimple_bb (gs), dest);
              if (label_edge && !(flags & TDF_GIMPLE))
                dump_edge_probability (buffer, label_edge);
            }
        }

      /* Separate case entries; TDF_GIMPLE uses ';' so the output can be
         re-parsed by the GIMPLE frontend.  */
      if (i < gimple_switch_num_labels (gs) - 1)
        {
          if (flags & TDF_GIMPLE)
            pp_string (buffer, "; ");
          else
            pp_string (buffer, ", ");
        }
    }
  if (flags & TDF_GIMPLE)
    pp_string (buffer, "; }");
  else
    pp_greater (buffer);
}
/* Dump the gimple conditional GS.  BUFFER, SPC and FLAGS are as in
   pp_gimple_stmt_1.  */

static void
dump_gimple_cond (pretty_printer *buffer, const gcond *gs, int spc,
                  dump_flags_t flags)
{
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T, %T, %T, %T>", gs,
                     get_tree_code_name (gimple_cond_code (gs)),
                     gimple_cond_lhs (gs), gimple_cond_rhs (gs),
                     gimple_cond_true_label (gs), gimple_cond_false_label (gs));
  else
    {
      if (!(flags & TDF_RHS_ONLY))
        pp_string (buffer, "if (");
      /* The condition is always "lhs <op> rhs".  */
      dump_generic_node (buffer, gimple_cond_lhs (gs), spc, flags, false);
      pp_space (buffer);
      pp_string (buffer, op_symbol_code (gimple_cond_code (gs)));
      pp_space (buffer);
      dump_generic_node (buffer, gimple_cond_rhs (gs), spc, flags, false);
      if (!(flags & TDF_RHS_ONLY))
        {
          edge_iterator ei;
          edge e, true_edge = NULL, false_edge = NULL;
          basic_block bb = gimple_bb (gs);

          /* Locate the true/false successor edges, if the statement is
             in a basic block, so their probabilities can be shown.  */
          if (bb)
            {
              FOR_EACH_EDGE (e, ei, bb->succs)
                {
                  if (e->flags & EDGE_TRUE_VALUE)
                    true_edge = e;
                  else if (e->flags & EDGE_FALSE_VALUE)
                    false_edge = e;
                }
            }

          bool has_edge_info = true_edge != NULL && false_edge != NULL;

          pp_right_paren (buffer);

          if (gimple_cond_true_label (gs))
            {
              pp_string (buffer, " goto ");
              dump_generic_node (buffer, gimple_cond_true_label (gs),
                                 spc, flags, false);
              if (has_edge_info && !(flags & TDF_GIMPLE))
                dump_edge_probability (buffer, true_edge);
              pp_semicolon (buffer);
            }
          if (gimple_cond_false_label (gs))
            {
              pp_string (buffer, " else goto ");
              dump_generic_node (buffer, gimple_cond_false_label (gs),
                                 spc, flags, false);
              if (has_edge_info && !(flags & TDF_GIMPLE))
                dump_edge_probability (buffer, false_edge);

              pp_semicolon (buffer);
            }
        }
    }
}
/* Dump a GIMPLE_LABEL tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see
   TDF_* in dumpfile.h).  */

static void
dump_gimple_label (pretty_printer *buffer, const glabel *gs, int spc,
                   dump_flags_t flags)
{
  tree label = gimple_label_label (gs);
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, label);
  else
    {
      dump_generic_node (buffer, label, spc, flags, false);
      pp_colon (buffer);
    }
  /* The annotations below are not valid GIMPLE frontend syntax.  */
  if (flags & TDF_GIMPLE)
    return;
  if (DECL_NONLOCAL (label))
    pp_string (buffer, " [non-local]");
  if ((flags & TDF_EH) && EH_LANDING_PAD_NR (label))
    pp_printf (buffer, " [LP %d]", EH_LANDING_PAD_NR (label));
}
/* Dump a GIMPLE_GOTO tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see
   TDF_* in dumpfile.h).  */

static void
dump_gimple_goto (pretty_printer *buffer, const ggoto *gs, int spc,
                  dump_flags_t flags)
{
  tree dest = gimple_goto_dest (gs);

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, dest);
      return;
    }
  dump_gimple_fmt (buffer, spc, flags, "goto %T;", dest);
}
/* Dump a GIMPLE_BIND tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see
   TDF_* in dumpfile.h).  */

static void
dump_gimple_bind (pretty_printer *buffer, const gbind *gs, int spc,
                  dump_flags_t flags)
{
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <", gs);
  else
    pp_left_brace (buffer);
  if (!(flags & TDF_SLIM))
    {
      tree var;

      /* Dump the declarations of the bind's local variables, one per
         line, before the body.  */
      for (var = gimple_bind_vars (gs); var; var = DECL_CHAIN (var))
        {
          newline_and_indent (buffer, 2);
          print_declaration (buffer, var, spc, flags);
        }
      if (gimple_bind_vars (gs))
        pp_newline (buffer);
    }
  pp_newline (buffer);
  dump_gimple_seq (buffer, gimple_bind_body (gs), spc + 2, flags);
  newline_and_indent (buffer, spc);
  if (flags & TDF_RAW)
    pp_greater (buffer);
  else
    pp_right_brace (buffer);
}
/* Dump a GIMPLE_TRY tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_try (pretty_printer *buffer, const gtry *gs, int spc,
                 dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      const char *type;
      if (gimple_try_kind (gs) == GIMPLE_TRY_CATCH)
        type = "GIMPLE_TRY_CATCH";
      else if (gimple_try_kind (gs) == GIMPLE_TRY_FINALLY)
        type = "GIMPLE_TRY_FINALLY";
      else
        type = "UNKNOWN GIMPLE_TRY";
      dump_gimple_fmt (buffer, spc, flags,
                       "%G <%s,%+EVAL <%S>%nCLEANUP <%S>%->", gs, type,
                       gimple_try_eval (gs), gimple_try_cleanup (gs));
    }
  else
    {
      /* "try { ...eval... }" first, then the cleanup part according to
         the try kind.  */
      pp_string (buffer, "try");
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, gimple_try_eval (gs), spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);

      gimple_seq seq = gimple_try_cleanup (gs);

      if (gimple_try_kind (gs) == GIMPLE_TRY_CATCH)
        {
          newline_and_indent (buffer, spc);
          pp_string (buffer, "catch");
          newline_and_indent (buffer, spc + 2);
          pp_left_brace (buffer);
        }
      else if (gimple_try_kind (gs) == GIMPLE_TRY_FINALLY)
        {
          newline_and_indent (buffer, spc);
          pp_string (buffer, "finally");
          newline_and_indent (buffer, spc + 2);
          pp_left_brace (buffer);

          /* A cleanup that is a lone GIMPLE_EH_ELSE is expanded into a
             "finally {...} else {...}" form: its normal-path body is
             dumped here and SEQ is redirected to the EH-path body.  */
          if (seq && is_a <geh_else *> (gimple_seq_first_stmt (seq))
              && gimple_seq_nondebug_singleton_p (seq))
            {
              geh_else *stmt = as_a <geh_else *> (gimple_seq_first_stmt (seq));
              seq = gimple_eh_else_n_body (stmt);
              pp_newline (buffer);
              dump_gimple_seq (buffer, seq, spc + 4, flags);
              newline_and_indent (buffer, spc + 2);
              pp_right_brace (buffer);
              seq = gimple_eh_else_e_body (stmt);
              newline_and_indent (buffer, spc);
              pp_string (buffer, "else");
              newline_and_indent (buffer, spc + 2);
              pp_left_brace (buffer);
            }
        }
      else
        pp_string (buffer, " <UNKNOWN GIMPLE_TRY> {");

      pp_newline (buffer);
      dump_gimple_seq (buffer, seq, spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
}
/* Dump a GIMPLE_CATCH tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_catch (pretty_printer *buffer, const gcatch *gs, int spc,
                   dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T, %+CATCH <%S>%->", gs,
                       gimple_catch_types (gs), gimple_catch_handler (gs));
      return;
    }
  dump_gimple_fmt (buffer, spc, flags, "catch (%T)%+{%S}",
                   gimple_catch_types (gs), gimple_catch_handler (gs));
}
/* Dump a GIMPLE_EH_FILTER tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_eh_filter (pretty_printer *buffer, const geh_filter *gs, int spc,
                       dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T, %+FAILURE <%S>%->", gs,
                       gimple_eh_filter_types (gs),
                       gimple_eh_filter_failure (gs));
      return;
    }
  dump_gimple_fmt (buffer, spc, flags, "<<<eh_filter (%T)>>>%+{%+%S%-}",
                   gimple_eh_filter_types (gs),
                   gimple_eh_filter_failure (gs));
}
/* Dump a GIMPLE_EH_MUST_NOT_THROW tuple to BUFFER, SPC spaces of
   indent, with FLAGS as in pp_gimple_stmt_1.  */

static void
dump_gimple_eh_must_not_throw (pretty_printer *buffer,
                               const geh_mnt *gs, int spc, dump_flags_t flags)
{
  tree fndecl = gimple_eh_must_not_throw_fndecl (gs);

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs, fndecl);
      return;
    }
  dump_gimple_fmt (buffer, spc, flags, "<<<eh_must_not_throw (%T)>>>",
                   fndecl);
}
/* Dump a GIMPLE_EH_ELSE tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_eh_else (pretty_printer *buffer, const geh_else *gs, int spc,
                     dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags,
                       "%G <%+N_BODY <%S>%nE_BODY <%S>%->", gs,
                       gimple_eh_else_n_body (gs), gimple_eh_else_e_body (gs));
      return;
    }
  dump_gimple_fmt (buffer, spc, flags,
                   "<<<if_normal_exit>>>%+{%S}%-<<<else_eh_exit>>>%+{%S}",
                   gimple_eh_else_n_body (gs), gimple_eh_else_e_body (gs));
}
/* Dump a GIMPLE_RESX tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */

static void
dump_gimple_resx (pretty_printer *buffer, const gresx *gs, int spc,
                  dump_flags_t flags)
{
  int region = gimple_resx_region (gs);

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%d>", gs, region);
      return;
    }
  dump_gimple_fmt (buffer, spc, flags, "resx %d", region);
}
/* Dump a GIMPLE_EH_DISPATCH tuple on the pretty_printer BUFFER.  */

static void
dump_gimple_eh_dispatch (pretty_printer *buffer, const geh_dispatch *gs,
                         int spc, dump_flags_t flags)
{
  int region = gimple_eh_dispatch_region (gs);

  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%d>", gs, region);
      return;
    }
  dump_gimple_fmt (buffer, spc, flags, "eh_dispatch %d", region);
}
/* Dump a GIMPLE_DEBUG tuple on the pretty_printer BUFFER, SPC spaces
   of indent.  FLAGS specifies details to show in the dump (see TDF_*
   in dumpfile.h).  Dispatches on the debug statement's subcode.  */

static void
dump_gimple_debug (pretty_printer *buffer, const gdebug *gs, int spc,
                   dump_flags_t flags)
{
  switch (gs->subcode)
    {
    case GIMPLE_DEBUG_BIND:
      if (flags & TDF_RAW)
        dump_gimple_fmt (buffer, spc, flags, "%G BIND <%T, %T>", gs,
                         gimple_debug_bind_get_var (gs),
                         gimple_debug_bind_get_value (gs));
      else
        dump_gimple_fmt (buffer, spc, flags, "# DEBUG %T => %T",
                         gimple_debug_bind_get_var (gs),
                         gimple_debug_bind_get_value (gs));
      break;

    case GIMPLE_DEBUG_SOURCE_BIND:
      if (flags & TDF_RAW)
        dump_gimple_fmt (buffer, spc, flags, "%G SRCBIND <%T, %T>", gs,
                         gimple_debug_source_bind_get_var (gs),
                         gimple_debug_source_bind_get_value (gs));
      else
        dump_gimple_fmt (buffer, spc, flags, "# DEBUG %T s=> %T",
                         gimple_debug_source_bind_get_var (gs),
                         gimple_debug_source_bind_get_value (gs));
      break;

    case GIMPLE_DEBUG_BEGIN_STMT:
      if (flags & TDF_RAW)
        dump_gimple_fmt (buffer, spc, flags, "%G BEGIN_STMT", gs);
      else
        dump_gimple_fmt (buffer, spc, flags, "# DEBUG BEGIN_STMT");
      break;

    case GIMPLE_DEBUG_INLINE_ENTRY:
      /* The inlined function is identified by the ultimate origin of
         the statement's block, when a block is present.  */
      if (flags & TDF_RAW)
        dump_gimple_fmt (buffer, spc, flags, "%G INLINE_ENTRY %T", gs,
                         gimple_block (gs)
                         ? block_ultimate_origin (gimple_block (gs))
                         : NULL_TREE);
      else
        dump_gimple_fmt (buffer, spc, flags, "# DEBUG INLINE_ENTRY %T",
                         gimple_block (gs)
                         ? block_ultimate_origin (gimple_block (gs))
                         : NULL_TREE);
      break;

    default:
      gcc_unreachable ();
    }
}
/* Dump a GIMPLE_OMP_FOR tuple on the pretty_printer BUFFER.  SPC and
   FLAGS are as in pp_gimple_stmt_1.  Handles the OpenMP
   for/distribute/taskloop/simd kinds as well as OpenACC loops.  */
static void
dump_gimple_omp_for (pretty_printer *buffer, const gomp_for *gs, int spc,
                     dump_flags_t flags)
{
  size_t i;
  if (flags & TDF_RAW)
    {
      /* Raw form: loop kind, body and clauses, then one
         <index, initial, final, cond-code, increment> tuple per
         collapsed dimension, and finally the pre-body sequence.  */
      const char *kind;
      switch (gimple_omp_for_kind (gs))
        {
        case GF_OMP_FOR_KIND_FOR:
          kind = "";
          break;
        case GF_OMP_FOR_KIND_DISTRIBUTE:
          kind = " distribute";
          break;
        case GF_OMP_FOR_KIND_TASKLOOP:
          kind = " taskloop";
          break;
        case GF_OMP_FOR_KIND_OACC_LOOP:
          kind = " oacc_loop";
          break;
        case GF_OMP_FOR_KIND_SIMD:
          kind = " simd";
          break;
        default:
          gcc_unreachable ();
        }
      dump_gimple_fmt (buffer, spc, flags, "%G%s <%+BODY <%S>%nCLAUSES <", gs,
                       kind, gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_for_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >,");
      for (i = 0; i < gimple_omp_for_collapse (gs); i++)
        dump_gimple_fmt (buffer, spc, flags,
                         "%+%T, %T, %T, %s, %T,%n",
                         gimple_omp_for_index (gs, i),
                         gimple_omp_for_initial (gs, i),
                         gimple_omp_for_final (gs, i),
                         get_tree_code_name (gimple_omp_for_cond (gs, i)),
                         gimple_omp_for_incr (gs, i));
      dump_gimple_fmt (buffer, spc, flags, "PRE_BODY <%S>%->",
                       gimple_omp_for_pre_body (gs));
    }
  else
    {
      /* Pretty form: the pragma line, then one C-style "for (...)"
         header per collapsed dimension, then the body in braces.  */
      switch (gimple_omp_for_kind (gs))
        {
        case GF_OMP_FOR_KIND_FOR:
          pp_string (buffer, "#pragma omp for");
          break;
        case GF_OMP_FOR_KIND_DISTRIBUTE:
          pp_string (buffer, "#pragma omp distribute");
          break;
        case GF_OMP_FOR_KIND_TASKLOOP:
          pp_string (buffer, "#pragma omp taskloop");
          break;
        case GF_OMP_FOR_KIND_OACC_LOOP:
          pp_string (buffer, "#pragma acc loop");
          break;
        case GF_OMP_FOR_KIND_SIMD:
          pp_string (buffer, "#pragma omp simd");
          break;
        default:
          gcc_unreachable ();
        }
      dump_omp_clauses (buffer, gimple_omp_for_clauses (gs), spc, flags);
      for (i = 0; i < gimple_omp_for_collapse (gs); i++)
        {
          /* Each inner collapsed loop gets two extra columns of indent;
             note SPC is bumped cumulatively on purpose.  */
          if (i)
            spc += 2;
          newline_and_indent (buffer, spc);
          pp_string (buffer, "for (");
          dump_generic_node (buffer, gimple_omp_for_index (gs, i), spc,
                             flags, false);
          pp_string (buffer, " = ");
          /* A TREE_VEC initial or final value encodes a non-rectangular
             bound and gets the dedicated printer.  */
          tree init = gimple_omp_for_initial (gs, i);
          if (TREE_CODE (init) != TREE_VEC)
            dump_generic_node (buffer, init, spc, flags, false);
          else
            dump_omp_loop_non_rect_expr (buffer, init, spc, flags);
          pp_string (buffer, "; ");
          dump_generic_node (buffer, gimple_omp_for_index (gs, i), spc,
                             flags, false);
          pp_space (buffer);
          switch (gimple_omp_for_cond (gs, i))
            {
            case LT_EXPR:
              pp_less (buffer);
              break;
            case GT_EXPR:
              pp_greater (buffer);
              break;
            case LE_EXPR:
              pp_less_equal (buffer);
              break;
            case GE_EXPR:
              pp_greater_equal (buffer);
              break;
            case NE_EXPR:
              pp_string (buffer, "!=");
              break;
            default:
              gcc_unreachable ();
            }
          pp_space (buffer);
          tree cond = gimple_omp_for_final (gs, i);
          if (TREE_CODE (cond) != TREE_VEC)
            dump_generic_node (buffer, cond, spc, flags, false);
          else
            dump_omp_loop_non_rect_expr (buffer, cond, spc, flags);
          pp_string (buffer, "; ");
          dump_generic_node (buffer, gimple_omp_for_index (gs, i), spc,
                             flags, false);
          pp_string (buffer, " = ");
          dump_generic_node (buffer, gimple_omp_for_incr (gs, i), spc,
                             flags, false);
          pp_right_paren (buffer);
        }
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
        {
          newline_and_indent (buffer, spc + 2);
          pp_left_brace (buffer);
          pp_newline (buffer);
          dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_right_brace (buffer);
        }
    }
}
/* Dump a GIMPLE_OMP_CONTINUE tuple on the pretty_printer BUFFER.  SPC
   and FLAGS are as in pp_gimple_stmt_1.  */
static void
dump_gimple_omp_continue (pretty_printer *buffer, const gomp_continue *gs,
                          int spc, dump_flags_t flags)
{
  tree ctrl_def = gimple_omp_continue_control_def (gs);
  tree ctrl_use = gimple_omp_continue_control_use (gs);
  if (!(flags & TDF_RAW))
    {
      /* Pretty form: "#pragma omp continue (DEF, USE)".  */
      pp_string (buffer, "#pragma omp continue (");
      dump_generic_node (buffer, ctrl_def, spc, flags, false);
      pp_comma (buffer);
      pp_space (buffer);
      dump_generic_node (buffer, ctrl_use, spc, flags, false);
      pp_right_paren (buffer);
    }
  else
    /* Raw form: "%G <DEF, USE>".  */
    dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T>", gs,
                     ctrl_def, ctrl_use);
}
/* Dump a GIMPLE_OMP_SINGLE tuple on the pretty_printer BUFFER.  SPC
   and FLAGS are as in pp_gimple_stmt_1.  */
static void
dump_gimple_omp_single (pretty_printer *buffer, const gomp_single *gs,
                        int spc, dump_flags_t flags)
{
  gimple_seq body = gimple_omp_body (gs);
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       body);
      dump_omp_clauses (buffer, gimple_omp_single_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >");
      return;
    }
  pp_string (buffer, "#pragma omp single");
  dump_omp_clauses (buffer, gimple_omp_single_clauses (gs), spc, flags);
  /* A non-empty body is printed brace-enclosed under extra indent.  */
  if (!gimple_seq_empty_p (body))
    {
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, body, spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
}
/* Dump a GIMPLE_OMP_TASKGROUP tuple on the pretty_printer BUFFER.  SPC
   and FLAGS are as in pp_gimple_stmt_1.  */
static void
dump_gimple_omp_taskgroup (pretty_printer *buffer, const gimple *gs,
                           int spc, dump_flags_t flags)
{
  tree clauses = gimple_omp_taskgroup_clauses (gs);
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       gimple_omp_body (gs));
      dump_omp_clauses (buffer, clauses, spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >");
      return;
    }
  pp_string (buffer, "#pragma omp taskgroup");
  dump_omp_clauses (buffer, clauses, spc, flags);
  /* A non-empty body is printed brace-enclosed under extra indent.  */
  if (!gimple_seq_empty_p (gimple_omp_body (gs)))
    {
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
}
/* Dump a GIMPLE_OMP_TARGET tuple on the pretty_printer BUFFER.  SPC
   and FLAGS are as in pp_gimple_stmt_1.  Handles every OpenMP target
   and OpenACC offload-construct kind.  */
static void
dump_gimple_omp_target (pretty_printer *buffer, const gomp_target *gs,
                        int spc, dump_flags_t flags)
{
  /* Suffix naming the specific construct, appended to "#pragma omp
     target" in the pretty form and to %G in the raw form.  */
  const char *kind;
  switch (gimple_omp_target_kind (gs))
    {
    case GF_OMP_TARGET_KIND_REGION:
      kind = "";
      break;
    case GF_OMP_TARGET_KIND_DATA:
      kind = " data";
      break;
    case GF_OMP_TARGET_KIND_UPDATE:
      kind = " update";
      break;
    case GF_OMP_TARGET_KIND_ENTER_DATA:
      kind = " enter data";
      break;
    case GF_OMP_TARGET_KIND_EXIT_DATA:
      kind = " exit data";
      break;
    case GF_OMP_TARGET_KIND_OACC_KERNELS:
      kind = " oacc_kernels";
      break;
    case GF_OMP_TARGET_KIND_OACC_PARALLEL:
      kind = " oacc_parallel";
      break;
    case GF_OMP_TARGET_KIND_OACC_SERIAL:
      kind = " oacc_serial";
      break;
    case GF_OMP_TARGET_KIND_OACC_DATA:
      kind = " oacc_data";
      break;
    case GF_OMP_TARGET_KIND_OACC_UPDATE:
      kind = " oacc_update";
      break;
    case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
      kind = " oacc_enter_exit_data";
      break;
    case GF_OMP_TARGET_KIND_OACC_DECLARE:
      kind = " oacc_declare";
      break;
    case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
      kind = " oacc_host_data";
      break;
    default:
      gcc_unreachable ();
    }
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G%s <%+BODY <%S>%nCLAUSES <", gs,
                       kind, gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_target_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >, %T, %T%n>",
                       gimple_omp_target_child_fn (gs),
                       gimple_omp_target_data_arg (gs));
    }
  else
    {
      pp_string (buffer, "#pragma omp target");
      pp_string (buffer, kind);
      dump_omp_clauses (buffer, gimple_omp_target_clauses (gs), spc, flags);
      /* If a child function has been recorded for the region, show it
         together with its data argument, "???" when that is absent.  */
      if (gimple_omp_target_child_fn (gs))
        {
          pp_string (buffer, " [child fn: ");
          dump_generic_node (buffer, gimple_omp_target_child_fn (gs),
                             spc, flags, false);
          pp_string (buffer, " (");
          if (gimple_omp_target_data_arg (gs))
            dump_generic_node (buffer, gimple_omp_target_data_arg (gs),
                               spc, flags, false);
          else
            pp_string (buffer, "???");
          pp_string (buffer, ")]");
        }
      gimple_seq body = gimple_omp_body (gs);
      /* A body not starting with a GIMPLE_BIND gets explicit braces;
         presumably a leading bind renders its own — NOTE(review):
         confirm against dump_gimple_bind.  */
      if (body && gimple_code (gimple_seq_first_stmt (body)) != GIMPLE_BIND)
        {
          newline_and_indent (buffer, spc + 2);
          pp_left_brace (buffer);
          pp_newline (buffer);
          dump_gimple_seq (buffer, body, spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_right_brace (buffer);
        }
      else if (body)
        {
          pp_newline (buffer);
          dump_gimple_seq (buffer, body, spc + 2, flags);
        }
    }
}
/* Dump a GIMPLE_OMP_TEAMS tuple on the pretty_printer BUFFER.  SPC and
   FLAGS are as in pp_gimple_stmt_1.  */
static void
dump_gimple_omp_teams (pretty_printer *buffer, const gomp_teams *gs, int spc,
                       dump_flags_t flags)
{
  tree clauses = gimple_omp_teams_clauses (gs);
  gimple_seq body = gimple_omp_body (gs);
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       body);
      dump_omp_clauses (buffer, clauses, spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >");
      return;
    }
  pp_string (buffer, "#pragma omp teams");
  dump_omp_clauses (buffer, clauses, spc, flags);
  /* A non-empty body is printed brace-enclosed under extra indent.  */
  if (!gimple_seq_empty_p (body))
    {
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, body, spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
}
/* Dump a GIMPLE_OMP_SECTIONS tuple on the pretty_printer BUFFER.  SPC
   and FLAGS are as in pp_gimple_stmt_1.  */
static void
dump_gimple_omp_sections (pretty_printer *buffer, const gomp_sections *gs,
                          int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_sections_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >");
      return;
    }
  pp_string (buffer, "#pragma omp sections");
  tree control = gimple_omp_sections_control (gs);
  if (control)
    {
      /* Print the control variable, if any, as "<VAR>".  */
      pp_string (buffer, " <");
      dump_generic_node (buffer, control, spc, flags, false);
      pp_greater (buffer);
    }
  dump_omp_clauses (buffer, gimple_omp_sections_clauses (gs), spc, flags);
  /* A non-empty body is printed brace-enclosed under extra indent.  */
  if (!gimple_seq_empty_p (gimple_omp_body (gs)))
    {
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
}
/* Dump a GIMPLE_OMP_{MASTER,SECTION} tuple on the
   pretty_printer BUFFER.  (GIMPLE_OMP_ORDERED is not handled here —
   it has its own dumper, dump_gimple_omp_ordered, which also prints
   clauses.)  */
static void
dump_gimple_omp_block (pretty_printer *buffer, const gimple *gs, int spc,
                       dump_flags_t flags)
{
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S> >", gs,
                     gimple_omp_body (gs));
  else
    {
      /* Emit the pragma line matching the statement code.  */
      switch (gimple_code (gs))
        {
        case GIMPLE_OMP_MASTER:
          pp_string (buffer, "#pragma omp master");
          break;
        case GIMPLE_OMP_SECTION:
          pp_string (buffer, "#pragma omp section");
          break;
        default:
          /* Being routed here with any other code is a caller bug.  */
          gcc_unreachable ();
        }
      /* A non-empty body is printed brace-enclosed under extra
         indent.  */
      if (!gimple_seq_empty_p (gimple_omp_body (gs)))
        {
          newline_and_indent (buffer, spc + 2);
          pp_left_brace (buffer);
          pp_newline (buffer);
          dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_right_brace (buffer);
        }
    }
}
/* Dump a GIMPLE_OMP_CRITICAL tuple on the pretty_printer BUFFER.  SPC
   and FLAGS are as in pp_gimple_stmt_1.  */
static void
dump_gimple_omp_critical (pretty_printer *buffer, const gomp_critical *gs,
                          int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S> >", gs,
                       gimple_omp_body (gs));
      return;
    }
  pp_string (buffer, "#pragma omp critical");
  tree name = gimple_omp_critical_name (gs);
  if (name)
    {
      /* The optional critical-section name is printed as "(NAME)".  */
      pp_string (buffer, " (");
      dump_generic_node (buffer, name, spc, flags, false);
      pp_right_paren (buffer);
    }
  dump_omp_clauses (buffer, gimple_omp_critical_clauses (gs), spc, flags);
  /* A non-empty body is printed brace-enclosed under extra indent.  */
  if (!gimple_seq_empty_p (gimple_omp_body (gs)))
    {
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
}
/* Dump a GIMPLE_OMP_ORDERED tuple on the pretty_printer BUFFER.  SPC
   and FLAGS are as in pp_gimple_stmt_1.  */
static void
dump_gimple_omp_ordered (pretty_printer *buffer, const gomp_ordered *gs,
                         int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S> >", gs,
                       gimple_omp_body (gs));
      return;
    }
  pp_string (buffer, "#pragma omp ordered");
  dump_omp_clauses (buffer, gimple_omp_ordered_clauses (gs), spc, flags);
  /* A non-empty body is printed brace-enclosed under extra indent.  */
  if (!gimple_seq_empty_p (gimple_omp_body (gs)))
    {
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
}
/* Dump a GIMPLE_OMP_SCAN tuple on the pretty_printer BUFFER.  SPC and
   FLAGS are as in pp_gimple_stmt_1.  */
static void
dump_gimple_omp_scan (pretty_printer *buffer, const gomp_scan *gs,
                      int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S> >", gs,
                       gimple_omp_body (gs));
      return;
    }
  /* Without clauses no pragma line is printed, only the body.  */
  if (gimple_omp_scan_clauses (gs))
    {
      pp_string (buffer, "#pragma omp scan");
      dump_omp_clauses (buffer, gimple_omp_scan_clauses (gs), spc, flags);
    }
  if (!gimple_seq_empty_p (gimple_omp_body (gs)))
    {
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, gimple_omp_body (gs), spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
}
/* Dump a GIMPLE_OMP_RETURN tuple on the pretty_printer BUFFER.  SPC
   and FLAGS are as in pp_gimple_stmt_1.  */
static void
dump_gimple_omp_return (pretty_printer *buffer, const gimple *gs, int spc,
                        dump_flags_t flags)
{
  tree lhs = gimple_omp_return_lhs (gs);
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <nowait=%d", gs,
                       (int) gimple_omp_return_nowait_p (gs));
      if (lhs)
        dump_gimple_fmt (buffer, spc, flags, ", lhs=%T>", lhs);
      else
        dump_gimple_fmt (buffer, spc, flags, ">");
    }
  else
    {
      pp_string (buffer, "#pragma omp return");
      if (gimple_omp_return_nowait_p (gs))
        pp_string (buffer, "(nowait)");
      /* An LHS, when present, is shown as "(set LHS)".  */
      if (lhs)
        {
          pp_string (buffer, " (set ");
          dump_generic_node (buffer, lhs, spc, flags, false);
          pp_right_paren (buffer);
        }
    }
}
/* Dump a GIMPLE_TRANSACTION tuple on the pretty_printer BUFFER.  SPC
   and FLAGS are as in pp_gimple_stmt_1.  In the pretty form, after the
   body has been lowered away, the landing labels and any remaining
   GTMA_* subcode bits are printed in a trailing "//" comment.  */
static void
dump_gimple_transaction (pretty_printer *buffer, const gtransaction *gs,
                         int spc, dump_flags_t flags)
{
  unsigned subcode = gimple_transaction_subcode (gs);
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags,
                       "%G [SUBCODE=%x,NORM=%T,UNINST=%T,OVER=%T] "
                       "<%+BODY <%S> >",
                       gs, subcode, gimple_transaction_label_norm (gs),
                       gimple_transaction_label_uninst (gs),
                       gimple_transaction_label_over (gs),
                       gimple_transaction_body (gs));
    }
  else
    {
      /* Declaration-style bits select the source-level spelling.  */
      if (subcode & GTMA_IS_OUTER)
        pp_string (buffer, "__transaction_atomic [[outer]]");
      else if (subcode & GTMA_IS_RELAXED)
        pp_string (buffer, "__transaction_relaxed");
      else
        pp_string (buffer, "__transaction_atomic");
      /* The declaration bits were just consumed above; only analysis
         bits remain relevant below.  */
      subcode &= ~GTMA_DECLARATION_MASK;
      if (gimple_transaction_body (gs))
        {
          newline_and_indent (buffer, spc + 2);
          pp_left_brace (buffer);
          pp_newline (buffer);
          dump_gimple_seq (buffer, gimple_transaction_body (gs),
                           spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_right_brace (buffer);
        }
      else
        {
          /* No body (post-lowering): print labels and subcode bits as
             a comment instead.  */
          pp_string (buffer, " //");
          if (gimple_transaction_label_norm (gs))
            {
              pp_string (buffer, " NORM=");
              dump_generic_node (buffer, gimple_transaction_label_norm (gs),
                                 spc, flags, false);
            }
          if (gimple_transaction_label_uninst (gs))
            {
              pp_string (buffer, " UNINST=");
              dump_generic_node (buffer, gimple_transaction_label_uninst (gs),
                                 spc, flags, false);
            }
          if (gimple_transaction_label_over (gs))
            {
              pp_string (buffer, " OVER=");
              dump_generic_node (buffer, gimple_transaction_label_over (gs),
                                 spc, flags, false);
            }
          if (subcode)
            {
              /* Name each known bit, clearing it as it is printed, so
                 any unrecognized leftover bits show up as raw hex.  */
              pp_string (buffer, " SUBCODE=[ ");
              if (subcode & GTMA_HAVE_ABORT)
                {
                  pp_string (buffer, "GTMA_HAVE_ABORT ");
                  subcode &= ~GTMA_HAVE_ABORT;
                }
              if (subcode & GTMA_HAVE_LOAD)
                {
                  pp_string (buffer, "GTMA_HAVE_LOAD ");
                  subcode &= ~GTMA_HAVE_LOAD;
                }
              if (subcode & GTMA_HAVE_STORE)
                {
                  pp_string (buffer, "GTMA_HAVE_STORE ");
                  subcode &= ~GTMA_HAVE_STORE;
                }
              if (subcode & GTMA_MAY_ENTER_IRREVOCABLE)
                {
                  pp_string (buffer, "GTMA_MAY_ENTER_IRREVOCABLE ");
                  subcode &= ~GTMA_MAY_ENTER_IRREVOCABLE;
                }
              if (subcode & GTMA_DOES_GO_IRREVOCABLE)
                {
                  pp_string (buffer, "GTMA_DOES_GO_IRREVOCABLE ");
                  subcode &= ~GTMA_DOES_GO_IRREVOCABLE;
                }
              if (subcode & GTMA_HAS_NO_INSTRUMENTATION)
                {
                  pp_string (buffer, "GTMA_HAS_NO_INSTRUMENTATION ");
                  subcode &= ~GTMA_HAS_NO_INSTRUMENTATION;
                }
              if (subcode)
                pp_printf (buffer, "0x%x ", subcode);
              pp_right_bracket (buffer);
            }
        }
    }
}
/* Dump a GIMPLE_ASM tuple on the pretty_printer BUFFER, SPC spaces of
   indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  The pretty form reproduces extended-asm syntax:
   __asm__ [qualifiers] ("template" : outputs : inputs : clobbers
   : labels).  */
static void
dump_gimple_asm (pretty_printer *buffer, const gasm *gs, int spc,
                 dump_flags_t flags)
{
  unsigned int i, n, f, fields;
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+STRING <%n%s%n>", gs,
                       gimple_asm_string (gs));
      /* Each non-empty operand class gets its own labelled,
         comma-separated line.  */
      n = gimple_asm_noutputs (gs);
      if (n)
        {
          newline_and_indent (buffer, spc + 2);
          pp_string (buffer, "OUTPUT: ");
          for (i = 0; i < n; i++)
            {
              dump_generic_node (buffer, gimple_asm_output_op (gs, i),
                                 spc, flags, false);
              if (i < n - 1)
                pp_string (buffer, ", ");
            }
        }
      n = gimple_asm_ninputs (gs);
      if (n)
        {
          newline_and_indent (buffer, spc + 2);
          pp_string (buffer, "INPUT: ");
          for (i = 0; i < n; i++)
            {
              dump_generic_node (buffer, gimple_asm_input_op (gs, i),
                                 spc, flags, false);
              if (i < n - 1)
                pp_string (buffer, ", ");
            }
        }
      n = gimple_asm_nclobbers (gs);
      if (n)
        {
          newline_and_indent (buffer, spc + 2);
          pp_string (buffer, "CLOBBER: ");
          for (i = 0; i < n; i++)
            {
              dump_generic_node (buffer, gimple_asm_clobber_op (gs, i),
                                 spc, flags, false);
              if (i < n - 1)
                pp_string (buffer, ", ");
            }
        }
      n = gimple_asm_nlabels (gs);
      if (n)
        {
          newline_and_indent (buffer, spc + 2);
          pp_string (buffer, "LABEL: ");
          for (i = 0; i < n; i++)
            {
              dump_generic_node (buffer, gimple_asm_label_op (gs, i),
                                 spc, flags, false);
              if (i < n - 1)
                pp_string (buffer, ", ");
            }
        }
      newline_and_indent (buffer, spc);
      pp_greater (buffer);
    }
  else
    {
      pp_string (buffer, "__asm__");
      if (gimple_asm_volatile_p (gs))
        pp_string (buffer, " __volatile__");
      if (gimple_asm_inline_p (gs))
        pp_string (buffer, " __inline__");
      if (gimple_asm_nlabels (gs))
        pp_string (buffer, " goto");
      pp_string (buffer, "(\"");
      pp_string (buffer, gimple_asm_string (gs));
      pp_string (buffer, "\"");
      /* FIELDS is the number of ":"-separated operand sections that
         must be printed; a populated later section forces all earlier
         (possibly empty) sections to be printed too, so take the
         highest populated one.  */
      if (gimple_asm_nlabels (gs))
        fields = 4;
      else if (gimple_asm_nclobbers (gs))
        fields = 3;
      else if (gimple_asm_ninputs (gs))
        fields = 2;
      else if (gimple_asm_noutputs (gs))
        fields = 1;
      else
        fields = 0;
      for (f = 0; f < fields; ++f)
        {
          pp_string (buffer, " : ");
          /* Section order is fixed by the asm grammar:
             outputs, inputs, clobbers, labels.  */
          switch (f)
            {
            case 0:
              n = gimple_asm_noutputs (gs);
              for (i = 0; i < n; i++)
                {
                  dump_generic_node (buffer, gimple_asm_output_op (gs, i),
                                     spc, flags, false);
                  if (i < n - 1)
                    pp_string (buffer, ", ");
                }
              break;
            case 1:
              n = gimple_asm_ninputs (gs);
              for (i = 0; i < n; i++)
                {
                  dump_generic_node (buffer, gimple_asm_input_op (gs, i),
                                     spc, flags, false);
                  if (i < n - 1)
                    pp_string (buffer, ", ");
                }
              break;
            case 2:
              n = gimple_asm_nclobbers (gs);
              for (i = 0; i < n; i++)
                {
                  dump_generic_node (buffer, gimple_asm_clobber_op (gs, i),
                                     spc, flags, false);
                  if (i < n - 1)
                    pp_string (buffer, ", ");
                }
              break;
            case 3:
              n = gimple_asm_nlabels (gs);
              for (i = 0; i < n; i++)
                {
                  dump_generic_node (buffer, gimple_asm_label_op (gs, i),
                                     spc, flags, false);
                  if (i < n - 1)
                    pp_string (buffer, ", ");
                }
              break;
            default:
              gcc_unreachable ();
            }
        }
      pp_string (buffer, ");");
    }
}
/* Dump ptr_info and range_info for NODE on pretty_printer BUFFER with
   SPC spaces of indent.  For pointer SSA names this prints the
   points-to solution and any known alignment; for non-pointer names
   the value range and known nonzero bits.  A no-op for non-SSA_NAME
   trees.  */
static void
dump_ssaname_info (pretty_printer *buffer, tree node, int spc)
{
  if (TREE_CODE (node) != SSA_NAME)
    return;
  if (POINTER_TYPE_P (TREE_TYPE (node))
      && SSA_NAME_PTR_INFO (node))
    {
      unsigned int align, misalign;
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (node);
      pp_string (buffer, "# PT = ");
      pp_points_to_solution (buffer, &pi->pt);
      newline_and_indent (buffer, spc);
      /* Alignment is only printed when actually known.  */
      if (get_ptr_info_alignment (pi, &align, &misalign))
        {
          pp_printf (buffer, "# ALIGN = %u, MISALIGN = %u", align, misalign);
          newline_and_indent (buffer, spc);
        }
    }
  if (!POINTER_TYPE_P (TREE_TYPE (node))
      && SSA_NAME_RANGE_INFO (node))
    {
      wide_int min, max, nonzero_bits;
      value_range_kind range_type = get_range_info (node, &min, &max);
      if (range_type == VR_VARYING)
        pp_printf (buffer, "# RANGE VR_VARYING");
      else if (range_type == VR_RANGE || range_type == VR_ANTI_RANGE)
        {
          /* Anti-ranges are prefixed with "~" ("everything but").  */
          pp_printf (buffer, "# RANGE ");
          pp_printf (buffer, "%s[", range_type == VR_RANGE ? "" : "~");
          pp_wide_int (buffer, min, TYPE_SIGN (TREE_TYPE (node)));
          pp_printf (buffer, ", ");
          pp_wide_int (buffer, max, TYPE_SIGN (TREE_TYPE (node)));
          pp_printf (buffer, "]");
        }
      nonzero_bits = get_nonzero_bits (node);
      /* An all-ones mask carries no information, so skip it.  */
      if (nonzero_bits != -1)
        {
          pp_string (buffer, " NONZERO ");
          pp_wide_int (buffer, nonzero_bits, UNSIGNED);
        }
      newline_and_indent (buffer, spc);
    }
}
/* As dump_ssaname_info, but dump to FILE. */
void
dump_ssaname_info_to_file (FILE *file, tree node, int spc)
{
pretty_printer buffer;
pp_needs_newline (&buffer) = true;
buffer.buffer->stream = file;
dump_ssaname_info (&buffer, node, spc);
pp_flush (&buffer);
}
/* Dump a PHI node PHI.  BUFFER, SPC and FLAGS are as in pp_gimple_stmt_1.
   The caller is responsible for calling pp_flush on BUFFER to finalize
   pretty printer.  If COMMENT is true, print this after #.
   With TDF_GIMPLE the parseable "__PHI (__BBn: arg, ...)" spelling is
   used; otherwise the classic "lhs = PHI <arg(n), ...>" form.  */
static void
dump_gimple_phi (pretty_printer *buffer, const gphi *phi, int spc, bool comment,
                 dump_flags_t flags)
{
  size_t i;
  tree lhs = gimple_phi_result (phi);
  /* With TDF_ALIAS, prefix the result's points-to/range info.  */
  if (flags & TDF_ALIAS)
    dump_ssaname_info (buffer, lhs, spc);
  if (comment)
    pp_string (buffer, "# ");
  if (flags & TDF_RAW)
    dump_gimple_fmt (buffer, spc, flags, "%G <%T, ", phi,
                     gimple_phi_result (phi));
  else
    {
      dump_generic_node (buffer, lhs, spc, flags, false);
      if (flags & TDF_GIMPLE)
        pp_string (buffer, " = __PHI (");
      else
        pp_string (buffer, " = PHI <");
    }
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    {
      if ((flags & TDF_LINENO) && gimple_phi_arg_has_location (phi, i))
        dump_location (buffer, gimple_phi_arg_location (phi, i));
      basic_block src = gimple_phi_arg_edge (phi, i)->src;
      /* TDF_GIMPLE puts the source block before the argument
         ("__BBn: arg"); the classic form appends it as "arg(n)".  */
      if (flags & TDF_GIMPLE)
        {
          pp_string (buffer, "__BB");
          pp_decimal_int (buffer, src->index);
          pp_string (buffer, ": ");
        }
      dump_generic_node (buffer, gimple_phi_arg_def (phi, i), spc, flags,
                         false);
      if (! (flags & TDF_GIMPLE))
        {
          pp_left_paren (buffer);
          pp_decimal_int (buffer, src->index);
          pp_right_paren (buffer);
        }
      if (i < gimple_phi_num_args (phi) - 1)
        pp_string (buffer, ", ");
    }
  if (flags & TDF_GIMPLE)
    pp_string (buffer, ");");
  else
    pp_greater (buffer);
}
/* Dump a GIMPLE_OMP_PARALLEL tuple on the pretty_printer BUFFER, SPC spaces
   of indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */
static void
dump_gimple_omp_parallel (pretty_printer *buffer, const gomp_parallel *gs,
                          int spc, dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_parallel_clauses (gs), spc, flags);
      dump_gimple_fmt (buffer, spc, flags, " >, %T, %T%n>",
                       gimple_omp_parallel_child_fn (gs),
                       gimple_omp_parallel_data_arg (gs));
      return;
    }
  pp_string (buffer, "#pragma omp parallel");
  dump_omp_clauses (buffer, gimple_omp_parallel_clauses (gs), spc, flags);
  tree child_fn = gimple_omp_parallel_child_fn (gs);
  if (child_fn)
    {
      /* If a child function is recorded, show it with its data
         argument ("???" when absent).  */
      pp_string (buffer, " [child fn: ");
      dump_generic_node (buffer, child_fn, spc, flags, false);
      pp_string (buffer, " (");
      if (gimple_omp_parallel_data_arg (gs))
        dump_generic_node (buffer, gimple_omp_parallel_data_arg (gs),
                           spc, flags, false);
      else
        pp_string (buffer, "???");
      pp_string (buffer, ")]");
    }
  gimple_seq par_body = gimple_omp_body (gs);
  if (!par_body)
    return;
  if (gimple_code (gimple_seq_first_stmt (par_body)) != GIMPLE_BIND)
    {
      newline_and_indent (buffer, spc + 2);
      pp_left_brace (buffer);
      pp_newline (buffer);
      dump_gimple_seq (buffer, par_body, spc + 4, flags);
      newline_and_indent (buffer, spc + 2);
      pp_right_brace (buffer);
    }
  else
    {
      pp_newline (buffer);
      dump_gimple_seq (buffer, par_body, spc + 2, flags);
    }
}
/* Dump a GIMPLE_OMP_TASK tuple on the pretty_printer BUFFER, SPC spaces
   of indent.  FLAGS specifies details to show in the dump (see TDF_* in
   dumpfile.h).  */
static void
dump_gimple_omp_task (pretty_printer *buffer, const gomp_task *gs, int spc,
                      dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%+BODY <%S>%nCLAUSES <", gs,
                       gimple_omp_body (gs));
      dump_omp_clauses (buffer, gimple_omp_task_clauses (gs), spc, flags);
      /* Trailing operands: child fn, data arg, copy fn, then the size
         and alignment of the task's argument block.  The last operand
         used to print ARG_SIZE a second time instead of ARG_ALIGN, so
         the alignment never appeared in raw dumps.  */
      dump_gimple_fmt (buffer, spc, flags, " >, %T, %T, %T, %T, %T%n>",
                       gimple_omp_task_child_fn (gs),
                       gimple_omp_task_data_arg (gs),
                       gimple_omp_task_copy_fn (gs),
                       gimple_omp_task_arg_size (gs),
                       gimple_omp_task_arg_align (gs));
    }
  else
    {
      gimple_seq body;
      /* Choose the pragma spelling matching the task flavor.  */
      if (gimple_omp_task_taskloop_p (gs))
        pp_string (buffer, "#pragma omp taskloop");
      else if (gimple_omp_task_taskwait_p (gs))
        pp_string (buffer, "#pragma omp taskwait");
      else
        pp_string (buffer, "#pragma omp task");
      dump_omp_clauses (buffer, gimple_omp_task_clauses (gs), spc, flags);
      /* If a child function is recorded, show it with its data
         argument ("???" when absent).  */
      if (gimple_omp_task_child_fn (gs))
        {
          pp_string (buffer, " [child fn: ");
          dump_generic_node (buffer, gimple_omp_task_child_fn (gs),
                             spc, flags, false);
          pp_string (buffer, " (");
          if (gimple_omp_task_data_arg (gs))
            dump_generic_node (buffer, gimple_omp_task_data_arg (gs),
                               spc, flags, false);
          else
            pp_string (buffer, "???");
          pp_string (buffer, ")]");
        }
      body = gimple_omp_body (gs);
      /* A body not starting with a GIMPLE_BIND gets explicit braces.  */
      if (body && gimple_code (gimple_seq_first_stmt (body)) != GIMPLE_BIND)
        {
          newline_and_indent (buffer, spc + 2);
          pp_left_brace (buffer);
          pp_newline (buffer);
          dump_gimple_seq (buffer, body, spc + 4, flags);
          newline_and_indent (buffer, spc + 2);
          pp_right_brace (buffer);
        }
      else if (body)
        {
          pp_newline (buffer);
          dump_gimple_seq (buffer, body, spc + 2, flags);
        }
    }
}
/* Dump a GIMPLE_OMP_ATOMIC_LOAD tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see TDF_*
   in dumpfile.h).  */
static void
dump_gimple_omp_atomic_load (pretty_printer *buffer, const gomp_atomic_load *gs,
                             int spc, dump_flags_t flags)
{
  tree load_lhs = gimple_omp_atomic_load_lhs (gs);
  tree load_rhs = gimple_omp_atomic_load_rhs (gs);
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T>", gs,
                       load_lhs, load_rhs);
      return;
    }
  pp_string (buffer, "#pragma omp atomic_load");
  dump_omp_atomic_memory_order (buffer,
                                gimple_omp_atomic_memory_order (gs));
  if (gimple_omp_atomic_need_value_p (gs))
    pp_string (buffer, " [needed]");
  /* Second line: "LHS = *RHS".  */
  newline_and_indent (buffer, spc + 2);
  dump_generic_node (buffer, load_lhs, spc, flags, false);
  pp_string (buffer, " = *");
  dump_generic_node (buffer, load_rhs, spc, flags, false);
}
/* Dump a GIMPLE_OMP_ATOMIC_STORE tuple on the pretty_printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see TDF_*
   in dumpfile.h).  */
static void
dump_gimple_omp_atomic_store (pretty_printer *buffer,
                              const gomp_atomic_store *gs, int spc,
                              dump_flags_t flags)
{
  if (flags & TDF_RAW)
    {
      dump_gimple_fmt (buffer, spc, flags, "%G <%T>", gs,
                       gimple_omp_atomic_store_val (gs));
      return;
    }
  pp_string (buffer, "#pragma omp atomic_store");
  dump_omp_atomic_memory_order (buffer,
                                gimple_omp_atomic_memory_order (gs));
  /* Emits " [needed] (VAL)" or just " (VAL)".  */
  if (gimple_omp_atomic_need_value_p (gs))
    pp_string (buffer, " [needed] (");
  else
    pp_string (buffer, " (");
  dump_generic_node (buffer, gimple_omp_atomic_store_val (gs),
                     spc, flags, false);
  pp_right_paren (buffer);
}
/* Dump all the memory operands for statement GS.  BUFFER, SPC and
   FLAGS are as in pp_gimple_stmt_1.  Prints "# VDEF = VDEF <VUSE>" or
   "# VUSE <VUSE>" followed by a newline, or nothing when the statement
   has neither.  */
static void
dump_gimple_mem_ops (pretty_printer *buffer, const gimple *gs, int spc,
                     dump_flags_t flags)
{
  tree vdef = gimple_vdef (gs);
  tree vuse = gimple_vuse (gs);
  if (vdef == NULL_TREE && vuse == NULL_TREE)
    return;
  pp_string (buffer, "# ");
  if (vdef != NULL_TREE)
    {
      dump_generic_node (buffer, vdef, spc + 2, flags, false);
      pp_string (buffer, " = VDEF <");
    }
  else
    pp_string (buffer, "VUSE <");
  dump_generic_node (buffer, vuse, spc + 2, flags, false);
  pp_greater (buffer);
  newline_and_indent (buffer, spc);
}
/* Print the gimple statement GS on the pretty printer BUFFER, SPC
   spaces of indent.  FLAGS specifies details to show in the dump (see
   TDF_* in dumpfile.h).  The caller is responsible for calling
   pp_flush on BUFFER to finalize the pretty printer.
   First emits the flag-controlled prefixes (statement address, source
   location, EH region, virtual operands, alias info), then dispatches
   on the statement code to the matching dump_gimple_* routine.  */
void
pp_gimple_stmt_1 (pretty_printer *buffer, const gimple *gs, int spc,
                  dump_flags_t flags)
{
  if (!gs)
    return;
  if (flags & TDF_STMTADDR)
    pp_printf (buffer, "<&%p> ", (const void *) gs);
  if ((flags & TDF_LINENO) && gimple_has_location (gs))
    dump_location (buffer, gimple_location (gs));
  if (flags & TDF_EH)
    {
      /* Positive numbers are landing pads, negative ones
         must-not-throw regions.  */
      int lp_nr = lookup_stmt_eh_lp (gs);
      if (lp_nr > 0)
        pp_printf (buffer, "[LP %d] ", lp_nr);
      else if (lp_nr < 0)
        pp_printf (buffer, "[MNT %d] ", -lp_nr);
    }
  if ((flags & (TDF_VOPS|TDF_MEMSYMS))
      && gimple_has_mem_ops (gs))
    dump_gimple_mem_ops (buffer, gs, spc, flags);
  if (gimple_has_lhs (gs)
      && (flags & TDF_ALIAS))
    dump_ssaname_info (buffer, gimple_get_lhs (gs), spc);
  /* Dispatch to the per-code dumper; codes without structure of their
     own (NOP, SECTIONS_SWITCH, PREDICT) are printed inline.  */
  switch (gimple_code (gs))
    {
    case GIMPLE_ASM:
      dump_gimple_asm (buffer, as_a <const gasm *> (gs), spc, flags);
      break;
    case GIMPLE_ASSIGN:
      dump_gimple_assign (buffer, as_a <const gassign *> (gs), spc, flags);
      break;
    case GIMPLE_BIND:
      dump_gimple_bind (buffer, as_a <const gbind *> (gs), spc, flags);
      break;
    case GIMPLE_CALL:
      dump_gimple_call (buffer, as_a <const gcall *> (gs), spc, flags);
      break;
    case GIMPLE_COND:
      dump_gimple_cond (buffer, as_a <const gcond *> (gs), spc, flags);
      break;
    case GIMPLE_LABEL:
      dump_gimple_label (buffer, as_a <const glabel *> (gs), spc, flags);
      break;
    case GIMPLE_GOTO:
      dump_gimple_goto (buffer, as_a <const ggoto *> (gs), spc, flags);
      break;
    case GIMPLE_NOP:
      pp_string (buffer, "GIMPLE_NOP");
      break;
    case GIMPLE_RETURN:
      dump_gimple_return (buffer, as_a <const greturn *> (gs), spc, flags);
      break;
    case GIMPLE_SWITCH:
      dump_gimple_switch (buffer, as_a <const gswitch *> (gs), spc, flags);
      break;
    case GIMPLE_TRY:
      dump_gimple_try (buffer, as_a <const gtry *> (gs), spc, flags);
      break;
    case GIMPLE_PHI:
      dump_gimple_phi (buffer, as_a <const gphi *> (gs), spc, false, flags);
      break;
    case GIMPLE_OMP_PARALLEL:
      dump_gimple_omp_parallel (buffer, as_a <const gomp_parallel *> (gs), spc,
                                flags);
      break;
    case GIMPLE_OMP_TASK:
      dump_gimple_omp_task (buffer, as_a <const gomp_task *> (gs), spc, flags);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      dump_gimple_omp_atomic_load (buffer, as_a <const gomp_atomic_load *> (gs),
                                   spc, flags);
      break;
    case GIMPLE_OMP_ATOMIC_STORE:
      dump_gimple_omp_atomic_store (buffer,
                                    as_a <const gomp_atomic_store *> (gs),
                                    spc, flags);
      break;
    case GIMPLE_OMP_FOR:
      dump_gimple_omp_for (buffer, as_a <const gomp_for *> (gs), spc, flags);
      break;
    case GIMPLE_OMP_CONTINUE:
      dump_gimple_omp_continue (buffer, as_a <const gomp_continue *> (gs), spc,
                                flags);
      break;
    case GIMPLE_OMP_SINGLE:
      dump_gimple_omp_single (buffer, as_a <const gomp_single *> (gs), spc,
                              flags);
      break;
    case GIMPLE_OMP_TARGET:
      dump_gimple_omp_target (buffer, as_a <const gomp_target *> (gs), spc,
                              flags);
      break;
    case GIMPLE_OMP_TEAMS:
      dump_gimple_omp_teams (buffer, as_a <const gomp_teams *> (gs), spc,
                             flags);
      break;
    case GIMPLE_OMP_RETURN:
      dump_gimple_omp_return (buffer, gs, spc, flags);
      break;
    case GIMPLE_OMP_SECTIONS:
      dump_gimple_omp_sections (buffer, as_a <const gomp_sections *> (gs),
                                spc, flags);
      break;
    case GIMPLE_OMP_SECTIONS_SWITCH:
      pp_string (buffer, "GIMPLE_SECTIONS_SWITCH");
      break;
    case GIMPLE_OMP_TASKGROUP:
      dump_gimple_omp_taskgroup (buffer, gs, spc, flags);
      break;
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_SECTION:
      dump_gimple_omp_block (buffer, gs, spc, flags);
      break;
    case GIMPLE_OMP_ORDERED:
      dump_gimple_omp_ordered (buffer, as_a <const gomp_ordered *> (gs), spc,
                               flags);
      break;
    case GIMPLE_OMP_SCAN:
      dump_gimple_omp_scan (buffer, as_a <const gomp_scan *> (gs), spc,
                            flags);
      break;
    case GIMPLE_OMP_CRITICAL:
      dump_gimple_omp_critical (buffer, as_a <const gomp_critical *> (gs), spc,
                                flags);
      break;
    case GIMPLE_CATCH:
      dump_gimple_catch (buffer, as_a <const gcatch *> (gs), spc, flags);
      break;
    case GIMPLE_EH_FILTER:
      dump_gimple_eh_filter (buffer, as_a <const geh_filter *> (gs), spc,
                             flags);
      break;
    case GIMPLE_EH_MUST_NOT_THROW:
      dump_gimple_eh_must_not_throw (buffer,
                                     as_a <const geh_mnt *> (gs),
                                     spc, flags);
      break;
    case GIMPLE_EH_ELSE:
      dump_gimple_eh_else (buffer, as_a <const geh_else *> (gs), spc, flags);
      break;
    case GIMPLE_RESX:
      dump_gimple_resx (buffer, as_a <const gresx *> (gs), spc, flags);
      break;
    case GIMPLE_EH_DISPATCH:
      dump_gimple_eh_dispatch (buffer, as_a <const geh_dispatch *> (gs), spc,
                               flags);
      break;
    case GIMPLE_DEBUG:
      dump_gimple_debug (buffer, as_a <const gdebug *> (gs), spc, flags);
      break;
    case GIMPLE_PREDICT:
      pp_string (buffer, "// predicted ");
      if (gimple_predict_outcome (gs))
        pp_string (buffer, "likely by ");
      else
        pp_string (buffer, "unlikely by ");
      pp_string (buffer, predictor_name (gimple_predict_predictor (gs)));
      pp_string (buffer, " predictor.");
      break;
    case GIMPLE_TRANSACTION:
      dump_gimple_transaction (buffer, as_a <const gtransaction *> (gs), spc,
                               flags);
      break;
    default:
      GIMPLE_NIY;
    }
}
/* Dumps header of basic block BB to OUTF indented by INDENT
   spaces and details described by flags.  */
static void
dump_gimple_bb_header (FILE *outf, basic_block bb, int indent,
                       dump_flags_t flags)
{
  if (flags & TDF_BLOCKS)
    {
      if (flags & TDF_LINENO)
        {
          gimple_stmt_iterator gsi;
          fputs (";; ", outf);
          /* Report the source line of the first non-debug statement
             that has a known location.  */
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            if (!is_gimple_debug (gsi_stmt (gsi))
                && get_lineno (gsi_stmt (gsi)) != UNKNOWN_LOCATION)
              {
                fprintf (outf, "%*sstarting at line %d",
                         indent, "", get_lineno (gsi_stmt (gsi)));
                break;
              }
          if (bb->discriminator)
            fprintf (outf, ", discriminator %i", bb->discriminator);
          fputc ('\n', outf);
        }
    }
  else
    {
      if (flags & TDF_GIMPLE)
        {
          /* Parseable form: "__BB(N[,loop_header(L)][,QUALITY(COUNT)]):".  */
          fprintf (outf, "%*s__BB(%d", indent, "", bb->index);
          if (bb->loop_father->header == bb)
            fprintf (outf, ",loop_header(%d)", bb->loop_father->num);
          if (bb->count.initialized_p ())
            /* The profile count is a 64-bit quantity; printing it with
               "%d" was a varargs type mismatch that truncated or
               garbled large counts.  */
            fprintf (outf, ",%s(%llu)",
                     profile_quality_as_string (bb->count.quality ()),
                     (unsigned long long) bb->count.value ());
          fprintf (outf, "):\n");
        }
      else
        fprintf (outf, "%*s<bb %d> %s:\n",
                 indent, "", bb->index, dump_profile (bb->count));
    }
}
/* Dump the trailer of basic block BB to stream OUTF, indented by INDENT
   spaces.  GIMPLE currently has no per-block footer information, so this
   is a no-op kept for symmetry with dump_gimple_bb_header.  */
static void
dump_gimple_bb_footer (FILE *outf ATTRIBUTE_UNUSED,
		       basic_block bb ATTRIBUTE_UNUSED,
		       int indent ATTRIBUTE_UNUSED,
		       dump_flags_t flags ATTRIBUTE_UNUSED)
{
}
/* Dump the PHI nodes of basic block BB to BUFFER with details described
   by FLAGS and indented by INDENT spaces.  Virtual PHIs are skipped
   unless TDF_VOPS was requested.  */
static void
dump_phi_nodes (pretty_printer *buffer, basic_block bb, int indent,
		dump_flags_t flags)
{
  gphi_iterator i;

  for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i))
    {
      gphi *phi = i.phi ();
      if (!virtual_operand_p (gimple_phi_result (phi)) || (flags & TDF_VOPS))
	{
	  INDENT (indent);
	  /* Under TDF_GIMPLE (parseable output) PHIs are printed as
	     statements, not as "# ..." comments.  */
	  dump_gimple_phi (buffer, phi, indent, !(flags & TDF_GIMPLE), flags);
	  pp_newline (buffer);
	}
    }
}
/* Dump the jump to basic block E->dest that is represented implicitly in
   the CFG (no explicit goto statement exists) to BUFFER.  Under
   TDF_GIMPLE the parseable form "goto __BBn(quality(value));" is used;
   otherwise "goto <bb n>;" followed by the edge probability.  */
static void
pp_cfg_jump (pretty_printer *buffer, edge e, dump_flags_t flags)
{
  if (flags & TDF_GIMPLE)
    {
      pp_string (buffer, "goto __BB");
      pp_decimal_int (buffer, e->dest->index);
      /* Append "(quality(value))" only when the probability is known.  */
      if (e->probability.initialized_p ())
	{
	  pp_string (buffer, "(");
	  pp_string (buffer,
		     profile_quality_as_string (e->probability.quality ()));
	  pp_string (buffer, "(");
	  pp_decimal_int (buffer, e->probability.value ());
	  pp_string (buffer, "))");
	}
      pp_semicolon (buffer);
    }
  else
    {
      pp_string (buffer, "goto <bb ");
      pp_decimal_int (buffer, e->dest->index);
      pp_greater (buffer);
      pp_semicolon (buffer);
      dump_edge_probability (buffer, e);
    }
}
/* Dump edges represented implicitly in basic block BB to BUFFER, indented
   by INDENT spaces, with details given by FLAGS.  A trailing GIMPLE_COND
   is rendered as an if/else pair of implicit gotos; otherwise a fallthru
   edge may get an artificial goto.  */
static void
dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent,
		     dump_flags_t flags)
{
  edge e;
  gimple *stmt;

  stmt = last_stmt (bb);

  if (stmt && gimple_code (stmt) == GIMPLE_COND)
    {
      edge true_edge, false_edge;

      /* When we are emitting the code or changing CFG, it is possible that
	 the edges are not yet created.  When we are using debug_bb in such
	 a situation, we do not want it to crash.  */
      if (EDGE_COUNT (bb->succs) != 2)
	return;
      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      INDENT (indent + 2);
      pp_cfg_jump (buffer, true_edge, flags);
      newline_and_indent (buffer, indent);
      pp_string (buffer, "else");
      newline_and_indent (buffer, indent + 2);
      pp_cfg_jump (buffer, false_edge, flags);
      pp_newline (buffer);
      return;
    }

  /* If there is a fallthru edge, we may need to add an artificial
     goto to the dump.  Under TDF_GIMPLE the goto is always emitted so the
     output stays parseable.  */
  e = find_fallthru_edge (bb->succs);
  if (e && (e->dest != bb->next_bb || (flags & TDF_GIMPLE)))
    {
      INDENT (indent);

      if ((flags & TDF_LINENO)
	  && e->goto_locus != UNKNOWN_LOCATION)
	dump_location (buffer, e->goto_locus);

      pp_cfg_jump (buffer, e, flags);
      pp_newline (buffer);
    }
}
/* Dump basic block BB to buffer BUFFER with details described by FLAGS and
   indented by INDENT spaces.  Labels are out-dented two columns relative
   to the other statements.  */
static void
gimple_dump_bb_buff (pretty_printer *buffer, basic_block bb, int indent,
		     dump_flags_t flags)
{
  gimple_stmt_iterator gsi;
  gimple *stmt;
  int label_indent = indent - 2;

  if (label_indent < 0)
    label_indent = 0;

  dump_phi_nodes (buffer, bb, indent, flags);

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      int curr_indent;

      stmt = gsi_stmt (gsi);

      curr_indent = gimple_code (stmt) == GIMPLE_LABEL ? label_indent : indent;

      INDENT (curr_indent);
      pp_gimple_stmt_1 (buffer, stmt, curr_indent, flags);
      /* Flush here so the histogram dump below, which writes to the
	 underlying stream directly, interleaves in the right order.  */
      pp_newline_and_flush (buffer);
      gcc_checking_assert (DECL_STRUCT_FUNCTION (current_function_decl));
      dump_histograms_for_stmt (DECL_STRUCT_FUNCTION (current_function_decl),
				pp_buffer (buffer)->stream, stmt);
    }

  dump_implicit_edges (buffer, bb, indent, flags);
  pp_flush (buffer);
}
/* Dump basic block BB to FILE with details described by FLAGS and
   indented by INDENT spaces.  The header and footer are always printed;
   the block body (PHIs, statements, implicit edges) only for non-fixed
   blocks (index >= NUM_FIXED_BLOCKS).  */
void
gimple_dump_bb (FILE *file, basic_block bb, int indent, dump_flags_t flags)
{
  dump_gimple_bb_header (file, bb, indent, flags);
  if (bb->index >= NUM_FIXED_BLOCKS)
    {
      /* A local pretty-printer is pointed at FILE for the duration of
	 the body dump.  */
      pretty_printer buffer;
      pp_needs_newline (&buffer) = true;
      buffer.buffer->stream = file;
      gimple_dump_bb_buff (&buffer, bb, indent, flags);
    }
  dump_gimple_bb_footer (file, bb, indent, flags);
}
/* Dumps basic block BB to pretty-printer PP with default dump flags and
   no indentation, for use as a label of a DOT graph record-node.
   Each PHI/statement becomes one '|'-separated row of the record.
   ??? Should just use gimple_dump_bb_buff here, except that value profiling
   histogram dumping doesn't know about pretty-printers.  */
void
gimple_dump_bb_for_graph (pretty_printer *pp, basic_block bb)
{
  pp_printf (pp, "<bb %d>:\n", bb->index);
  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
  /* PHIs first, prefixed with "# " like the regular dump.  */
  for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      if (!virtual_operand_p (gimple_phi_result (phi))
	  || (dump_flags & TDF_VOPS))
	{
	  pp_bar (pp);
	  pp_write_text_to_stream (pp);
	  pp_string (pp, "# ");
	  pp_gimple_stmt_1 (pp, phi, 0, dump_flags);
	  pp_newline (pp);
	  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
	}
    }
  /* Then the statements proper.  */
  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      pp_bar (pp);
      pp_write_text_to_stream (pp);
      pp_gimple_stmt_1 (pp, stmt, 0, dump_flags);
      pp_newline (pp);
      pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
    }
  dump_implicit_edges (pp, bb, 0, dump_flags);
  pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
}
/* Handle the %G format for TEXT.  Same as %K in handle_K_format in
   tree-pretty-print.c but with a Gimple statement as an argument.
   Consumes one gimple* from TEXT's variadic argument list.  */
void
percent_G_format (text_info *text)
{
  gimple *stmt = va_arg (*text->args_ptr, gimple*);

  /* Fall back on the rich location if the statement doesn't have one.  */
  location_t loc = gimple_location (stmt);
  if (loc == UNKNOWN_LOCATION)
    loc = text->m_richloc->get_loc ();
  tree block = gimple_block (stmt);
  percent_K_format (text, loc, block);
}
#if __GNUC__ >= 10
# pragma GCC diagnostic pop
#endif
|
rawSHA512_ng_fmt_plug.c | /*
* Copyright 2013, epixoip.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that redistribution of source
* retains the above copyright.
*/
#include "arch.h"
#if defined __SSE2__
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA512_ng;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA512_ng);
#else
#ifdef _OPENMP
#include <omp.h>
#if defined __XOP__
#define OMP_SCALE 768 /* AMD */
#else
#define OMP_SCALE 2048 /* Intel */
#endif
#endif
// These compilers claim to be __GNUC__ but warn on gcc pragmas.
#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) && !defined(__llvm__) && !defined (_MSC_VER)
#pragma GCC optimize 3
#endif
#include "stdint.h"
#include <string.h>
#include <emmintrin.h>
#if defined __XOP__
#include <x86intrin.h>
#elif defined __SSSE3__
#include <tmmintrin.h>
#endif
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "memdbg.h"
#if defined __XOP__
#define SIMD_TYPE "XOP"
#elif defined __SSSE3__
#define SIMD_TYPE "SSSE3"
#else
#define SIMD_TYPE "SSE2"
#endif
#define FORMAT_LABEL "Raw-SHA512-ng"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA512 128/128 " SIMD_TYPE " 2x"
#define FORMAT_TAG "$SHA512$"
#define TAG_LENGTH 8
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
// max length is not 119, but 8 less than this, or 111. 111 actually make sense.
// For SHA512 there are 14 'usable' 8 byte ints, minus 1 byte (for the 0x80).
// 14*8-1 is 111. This comment left for reference for future sha2 hackers within JtR.
//#define MAXLEN 119
#define MAXLEN 111
#define CIPHERTEXT_LENGTH 128
#define FULL_BINARY_SIZE 64
#define BINARY_SIZE 8
#define BINARY_ALIGN 8
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 2
#define MAX_KEYS_PER_CRYPT 2
#if defined (_MSC_VER) && !defined (_M_X64)
// 32 bit VC does NOT define these intrinsics :((((
// Minimal replacements built on MSVC's m128i_u64 union members.
// Per Intel's intrinsic, the SECOND argument lands in the low lane.
_inline __m128i _mm_set_epi64x(uint64_t a, uint64_t b) {
    __m128i x;
    x.m128i_u64[0] = b;
    x.m128i_u64[1] = a;
    return x;
}
// Broadcast A into both 64-bit lanes.
_inline __m128i _mm_set1_epi64x(uint64_t a) {
    __m128i x;
    x.m128i_u64[0] = a;
    x.m128i_u64[1] = a;
    return x;
}
#endif
#ifndef __XOP__
/* Emulate the XOP rotate intrinsic for SSE2: rotate each 64-bit lane of
   x left by n.  n is negative throughout this file, i.e. a right rotate
   by -n; ~(n) + 1 == -(n) in two's complement.  Macro arguments are
   parenthesized so arbitrary expressions can be passed safely. */
#define _mm_roti_epi64(x, n)                                              \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_srli_epi64 ((x), ~(n) + 1),                                   \
        _mm_slli_epi64 ((x), 64 + (n))                                    \
    )                                                                     \
)

/* Bitwise select, z ^ (x & (y ^ z)): for each bit take y where x is set,
   z otherwise. */
#define _mm_cmov_si128(y, z, x)                                           \
(                                                                         \
    _mm_xor_si128 ((z),                                                   \
        _mm_and_si128 ((x),                                               \
            _mm_xor_si128 ((y), (z))                                      \
        )                                                                 \
    )                                                                     \
)
#endif
#ifdef __SSSE3__
/* Byte-swap each 64-bit lane of n with a single pshufb. */
#define SWAP_ENDIAN(n)                                                    \
{                                                                         \
    n = _mm_shuffle_epi8 (n,                                              \
            _mm_set_epi64x (0x08090a0b0c0d0e0f, 0x0001020304050607)       \
        );                                                                \
}
#else
/* SSE2 fallback: swap adjacent 16-bit words, then the two bytes inside
   each word, then the 32-bit halves -- a full byte reversal per lane. */
#define SWAP_ENDIAN(n)                                                    \
{                                                                         \
    n = _mm_shufflehi_epi16 (_mm_shufflelo_epi16 (n, 0xb1), 0xb1);        \
    n = _mm_xor_si128 (_mm_slli_epi16 (n, 8), _mm_srli_epi16 (n, 8));     \
    n = _mm_shuffle_epi32 (n, 0xb1);                                      \
}
#endif
/* Load word z of two consecutive packed keys (rows `index' and
   `index + 1' of y) into the two 64-bit lanes of x.  NOTE: not
   macro-hygienic -- it reads the variable `index' from the caller's
   scope. */
#define GATHER(x,y,z)                                                     \
{                                                                         \
    x = _mm_set_epi64x (y[index + 1][z], y[index][z]);                    \
}
/* SHA-512 Sigma0(x) = ROTR28(x) ^ ROTR34(x) ^ ROTR39(x)  (FIPS 180-4). */
#define S0(x)                                                             \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_roti_epi64 (x, -39),                                          \
        _mm_xor_si128 (                                                   \
            _mm_roti_epi64 (x, -28),                                      \
            _mm_roti_epi64 (x, -34)                                       \
        )                                                                 \
    )                                                                     \
)

/* SHA-512 Sigma1(x) = ROTR14(x) ^ ROTR18(x) ^ ROTR41(x). */
#define S1(x)                                                             \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_roti_epi64 (x, -41),                                          \
        _mm_xor_si128 (                                                   \
            _mm_roti_epi64 (x, -14),                                      \
            _mm_roti_epi64 (x, -18)                                       \
        )                                                                 \
    )                                                                     \
)

/* SHA-512 sigma0(x) = ROTR1(x) ^ ROTR8(x) ^ SHR7(x). */
#define s0(x)                                                             \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_srli_epi64 (x, 7),                                            \
        _mm_xor_si128 (                                                   \
            _mm_roti_epi64 (x, -1),                                       \
            _mm_roti_epi64 (x, -8)                                        \
        )                                                                 \
    )                                                                     \
)

/* SHA-512 sigma1(x) = ROTR19(x) ^ ROTR61(x) ^ SHR6(x). */
#define s1(x)                                                             \
(                                                                         \
    _mm_xor_si128 (                                                       \
        _mm_srli_epi64 (x, 6),                                            \
        _mm_xor_si128 (                                                   \
            _mm_roti_epi64 (x, -19),                                      \
            _mm_roti_epi64 (x, -61)                                       \
        )                                                                 \
    )                                                                     \
)

/* Majority and choice functions, expressed via the bitwise select. */
#define Maj(x,y,z) _mm_cmov_si128 (x, y, _mm_xor_si128 (z, y))

#define Ch(x,y,z) _mm_cmov_si128 (y, z, x)

/* Message schedule: w[t] = s1(w[t-2]) + w[t-7] + s0(w[t-15]) + w[t-16]. */
#define R(t)                                                              \
{                                                                         \
    tmp1 = _mm_add_epi64 (s1(w[t -  2]), w[t - 7]);                       \
    tmp2 = _mm_add_epi64 (s0(w[t - 15]), w[t - 16]);                      \
    w[t] = _mm_add_epi64 (tmp1, tmp2);                                    \
}

/* One SHA-512 round: T1 = h + S1(e) + Ch(e,f,g) + K + w[x];
   T2 = S0(a) + Maj(a,b,c); d += T1; h = T1 + T2. */
#define SHA512_STEP(a,b,c,d,e,f,g,h,x,K)                                  \
{                                                                         \
    tmp1 = _mm_add_epi64 (h,    w[x]);                                    \
    tmp2 = _mm_add_epi64 (S1(e),_mm_set1_epi64x(K));                      \
    tmp1 = _mm_add_epi64 (tmp1, Ch(e,f,g));                               \
    tmp1 = _mm_add_epi64 (tmp1, tmp2);                                    \
    tmp2 = _mm_add_epi64 (S0(a),Maj(a,b,c));                              \
    d    = _mm_add_epi64 (tmp1, d);                                       \
    h    = _mm_add_epi64 (tmp1, tmp2);                                    \
}
static struct fmt_tests tests[] = {
{"f342aae82952db35b8e02c30115e3deed3d80fdfdadacab336f0ba51ac54e297291fa1d6b201d69a2bd77e2535280f17a54fa1e527abc6e2eddba79ad3be11c0", "epixoip"},
{FORMAT_TAG "f342aae82952db35b8e02c30115e3deed3d80fdfdadacab336f0ba51ac54e297291fa1d6b201d69a2bd77e2535280f17a54fa1e527abc6e2eddba79ad3be11c0", "epixoip"},
{"b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86", "password"},
{"2c80f4c2b3db6b677d328775be4d38c8d8cd9a4464c3b6273644fb148f855e3db51bc33b54f3f6fa1f5f52060509f0e4d350bb0c7f51947728303999c6eff446", "john-user"},
{"71ebcb1eccd7ea22bd8cebaec735a43f1f7164d003dacdeb06e0de4a6d9f64d123b00a45227db815081b1008d1a1bbad4c39bde770a2c23308ff1b09418dd7ed", "ALLCAPS"},
{"82244918c2e45fbaa00c7c7d52eb61f309a37e2f33ea1fba78e61b4140efa95731eec849de02ee16aa31c82848b51fb7b7fbae62f50df6e150a8a85e70fa740c", "TestTESTt3st"},
{"fa585d89c851dd338a70dcf535aa2a92fee7836dd6aff1226583e88e0996293f16bc009c652826e0fc5c706695a03cddce372f139eff4d13959da6f1f5d3eabe", "12345678"},
{FORMAT_TAG "fa585d89c851dd338a70dcf535aa2a92fee7836dd6aff1226583e88e0996293f16bc009c652826e0fc5c706695a03cddce372f139eff4d13959da6f1f5d3eabe", "12345678"},
{FORMAT_TAG "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e", ""},
{"c96f1c1260074832bd3068ddd29e733090285dfc65939555dbbcafb27834957d15d9c509481cc7df0e2a7e21429783ba573036b78f5284f9928b5fef02a791ef", "mot\xf6rhead"},
{"aa3b7bdd98ec44af1f395bbd5f7f27a5cd9569d794d032747323bf4b1521fbe7725875a68b440abdf0559de5015baf873bb9c01cae63ecea93ad547a7397416e", "12345678901234567890"},
{"db9981645857e59805132f7699e78bbcf39f69380a41aac8e6fa158a0593f2017ffe48764687aa855dae3023fcceefd51a1551d57730423df18503e80ba381ba", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"},
{"7aba4411846c61b08b0f2282a8a4600232ace4dd96593c755ba9c9a4e7b780b8bdc437b5c55574b3e8409c7b511032f98ef120e25467678f0458643578eb60ff", "123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901"},
// this one DOES NOT work for a 1 limb. Only 111 bytes max can be used, unless we do 2 sha512 limbs.
// {"a5fa73a3c9ca13df56c2cb3ae6f2e57671239a6b461ef5021a65d08f40336bfb458ec52a3003e1004f1a40d0706c27a9f4268fa4e1479382e2053c2b5b47b9b2", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"},
#ifdef DEBUG //Special test cases.
{"12b03226a6d8be9c6e8cd5e55dc6c7920caaa39df14aab92d5e3ea9340d1c8a4d3d0b8e4314f1f6ef131ba4bf1ceb9186ab87c801af0d5c95b1befb8cedae2b9", "1234567890"},
{"eba392e2f2094d7ffe55a23dffc29c412abd47057a0823c6c149c9c759423afde56f0eef73ade8f79bc1d16a99cbc5e4995afd8c14adb49410ecd957aecc8d02", "123456789012345678901234567890"},
{"3a8529d8f0c7b1ad2fa54c944952829b718d5beb4ff9ba8f4a849e02fe9a272daf59ae3bd06dde6f01df863d87c8ba4ab016ac576b59a19078c26d8dbe63f79e", "1234567890123456789012345678901234567890"},
{"49c1faba580a55d6473f427174b62d8aa68f49958d70268eb8c7f258ba5bb089b7515891079451819aa4f8bf75b784dc156e7400ab0a04dfd2b75e46ef0a943e", "12345678901234567890123456789012345678901234567890"},
{"8c5b51368ec88e1b1c4a67aa9de0aa0919447e142a9c245d75db07bbd4d00962b19112adb9f2b52c0a7b29fe2de661a872f095b6a1670098e5c7fde4a3503896", "123456789012345678901234567890123456789012345678901"},
{"35ea7bc1d848db0f7ff49178392bf58acfae94bf74d77ae2d7e978df52aac250ff2560f9b98dc7726f0b8e05b25e5132074b470eb461c4ebb7b4d8bf9ef0d93f", "1234567890123456789012345678901234567890123456789012345"},
#endif
{NULL}
};
static uint64_t (*saved_key)[16];
static uint64_t *crypt_key[ 8];
/* Allocate the packed key blocks and the eight per-word result arrays,
   scaling the advertised key counts for OpenMP when it is enabled. */
static void init(struct fmt_main *self)
{
    int i;

#ifdef _OPENMP
    int omp_t = omp_get_max_threads();

    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);

    for (i = 0; i < 8; i++)
        crypt_key[i] = mem_calloc_tiny(sizeof(uint64_t) * self->params.max_keys_per_crypt, MEM_ALIGN_CACHE);
}
/* Byte-swap SIZE bytes' worth of 64-bit words in place.  SIZE is in
   bytes and is assumed to be a multiple of sizeof(uint64_t). */
static inline void alter_endianity_64 (uint64_t *x, unsigned int size)
{
    /* Unsigned index: the bound involves sizeof (size_t), so a signed
       counter triggers sign-compare warnings. */
    unsigned int i, n = size / sizeof(*x);

    for (i = 0; i < n; i++)
        x[i] = JOHNSWAP64(x[i]);
}
/* Accept an (optionally FORMAT_TAG-prefixed) string consisting of
   exactly CIPHERTEXT_LENGTH hex digits and nothing else. */
static int valid (char *ciphertext, struct fmt_main *self)
{
    char *hex = ciphertext;
    int n = 0;

    if (strncmp (hex, FORMAT_TAG, TAG_LENGTH) == 0)
        hex += TAG_LENGTH;

    /* atoi16[] yields 0x7F for any non-hex character. */
    while (atoi16[ARCH_INDEX(hex[n])] != 0x7F)
        n++;

    return hex[n] == '\0' && n == CIPHERTEXT_LENGTH;
}
/* Canonicalize a ciphertext: prepend FORMAT_TAG unless already present
   and lower-case the hex digits. */
#if FMT_MAIN_VERSION > 9
static char *split (char *ciphertext, int index, struct fmt_main *self)
#else
static char *split (char *ciphertext, int index)
#endif
{
    static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];

    /* Already-tagged input is returned unchanged. */
    if (!strncmp (ciphertext, FORMAT_TAG, TAG_LENGTH))
        return ciphertext;

    memcpy (out, FORMAT_TAG, TAG_LENGTH);
    memcpy (out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
    strlwr (out + TAG_LENGTH);

    return out;
}
/* Decode the hex ciphertext (assumed tagged -- split() guarantees the
   tag) into eight 64-bit words, then subtract the SHA-512 initial hash
   values: crypt_all() stores the raw a..h state without the final
   feed-forward addition, so the stored target must be adjusted to
   match. */
static void *get_binary (char *ciphertext)
{
    static union {
        unsigned char c[FULL_BINARY_SIZE];
        uint64_t w[FULL_BINARY_SIZE / sizeof(uint64_t)];
    } *out;
    int i;

    if (!out)
        out = mem_alloc_tiny (FULL_BINARY_SIZE, BINARY_ALIGN);

    ciphertext += TAG_LENGTH;

    for (i=0; i < FULL_BINARY_SIZE; i++)
        out->c[i] = atoi16[ARCH_INDEX(ciphertext[i*2])] * 16 +
                    atoi16[ARCH_INDEX(ciphertext[i*2 + 1])];

    /* Convert to the host byte order used by the stored result words. */
    alter_endianity_64 (out->w, FULL_BINARY_SIZE);

    /* Subtract the SHA-512 IVs (FIPS 180-4) -- see note above. */
    out->w[0] -= 0x6a09e667f3bcc908ULL;
    out->w[1] -= 0xbb67ae8584caa73bULL;
    out->w[2] -= 0x3c6ef372fe94f82bULL;
    out->w[3] -= 0xa54ff53a5f1d36f1ULL;
    out->w[4] -= 0x510e527fade682d1ULL;
    out->w[5] -= 0x9b05688c2b3e6c1fULL;
    out->w[6] -= 0x1f83d9abfb41bd6bULL;
    out->w[7] -= 0x5be0cd19137e2179ULL;

    return (void *) out;
}
/* Partial hashes over the first stored 64-bit result word, one per
   hash-table size supported by the framework. */
static int get_hash_0 (int index) { return crypt_key[0][index] & 0xf; }
static int get_hash_1 (int index) { return crypt_key[0][index] & 0xff; }
static int get_hash_2 (int index) { return crypt_key[0][index] & 0xfff; }
static int get_hash_3 (int index) { return crypt_key[0][index] & 0xffff; }
static int get_hash_4 (int index) { return crypt_key[0][index] & 0xfffff; }
static int get_hash_5 (int index) { return crypt_key[0][index] & 0xffffff; }
static int get_hash_6 (int index) { return crypt_key[0][index] & 0x7ffffff; }
/* Copy KEY into the packed 128-byte SHA-512 input block for slot INDEX:
   the raw bytes, the 0x80 padding byte, and the bit length in the last
   64-bit word.  The length is stored as a native integer -- crypt_all()
   deliberately skips the byte swap for that word. */
static void set_key (char *key, int index)
{
    uint64_t *buf64 = (uint64_t *) &saved_key[index];
    uint8_t *buf8 = (uint8_t * ) buf64;
    int len = 0;

    while (*key && len < MAXLEN)
        buf8[len++] = *key++;
    buf64[15] = len << 3;
    buf8[len++] = 0x80;

    /* Wipe leftovers from a previous, longer key in this slot; stop at
       the first byte that is already zero. */
    while (buf8[len] && len <= MAXLEN)
        buf8[len++] = 0;
}
/* Reconstruct the plaintext for slot INDEX from the packed input block;
   the byte length is recovered from the stored bit length. */
static char *get_key (int index)
{
    static char plain[MAXLEN + 1];
    uint64_t *block = (uint64_t *) &saved_key[index];
    uint8_t *bytes = (uint8_t *) block;
    int n = (int)(block[15] >> 3);
    int i;

    for (i = 0; i < n; i++)
        plain[i] = bytes[i];
    plain[n] = 0;

    return plain;
}
/* Hash all queued candidates.  Each loop iteration processes TWO keys at
   once -- the two 64-bit lanes of one 128-bit SSE2 vector. */
#if FMT_MAIN_VERSION > 10
static int crypt_all (int *pcount, struct db_salt *salt)
#else
static void crypt_all (int count)
#endif
{
#if FMT_MAIN_VERSION > 10
    int count = *pcount;
#endif
    int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
    for (index = 0; index < count; index += 2)
#endif
    {
        int i;

        __m128i a, b, c, d, e, f, g, h;
        __m128i w[80], tmp1, tmp2;

        /* Load message words 0..13 for both lanes and byte-swap them to
           the big-endian order SHA-512 operates on. */
        for (i = 0; i < 14; i += 2) {
            GATHER (tmp1, saved_key, i);
            GATHER (tmp2, saved_key, i + 1);
            SWAP_ENDIAN (tmp1);
            SWAP_ENDIAN (tmp2);
            w[i] = tmp1;
            w[i + 1] = tmp2;
        }
        GATHER (tmp1, saved_key, 14);
        SWAP_ENDIAN (tmp1);
        w[14] = tmp1;
        /* w[15] is the bit length, stored by set_key() as a native
           integer -- no byte swap needed. */
        GATHER (w[15], saved_key, 15);
        /* Expand the message schedule to 80 words. */
        for (i = 16; i < 80; i++) R(i);

        /* SHA-512 initial hash values (FIPS 180-4). */
        a = _mm_set1_epi64x (0x6a09e667f3bcc908ULL);
        b = _mm_set1_epi64x (0xbb67ae8584caa73bULL);
        c = _mm_set1_epi64x (0x3c6ef372fe94f82bULL);
        d = _mm_set1_epi64x (0xa54ff53a5f1d36f1ULL);
        e = _mm_set1_epi64x (0x510e527fade682d1ULL);
        f = _mm_set1_epi64x (0x9b05688c2b3e6c1fULL);
        g = _mm_set1_epi64x (0x1f83d9abfb41bd6bULL);
        h = _mm_set1_epi64x (0x5be0cd19137e2179ULL);

        /* 80 rounds, registers rotated through the argument list. */
        SHA512_STEP(a, b, c, d, e, f, g, h,  0, 0x428a2f98d728ae22ULL);
        SHA512_STEP(h, a, b, c, d, e, f, g,  1, 0x7137449123ef65cdULL);
        SHA512_STEP(g, h, a, b, c, d, e, f,  2, 0xb5c0fbcfec4d3b2fULL);
        SHA512_STEP(f, g, h, a, b, c, d, e,  3, 0xe9b5dba58189dbbcULL);
        SHA512_STEP(e, f, g, h, a, b, c, d,  4, 0x3956c25bf348b538ULL);
        SHA512_STEP(d, e, f, g, h, a, b, c,  5, 0x59f111f1b605d019ULL);
        SHA512_STEP(c, d, e, f, g, h, a, b,  6, 0x923f82a4af194f9bULL);
        SHA512_STEP(b, c, d, e, f, g, h, a,  7, 0xab1c5ed5da6d8118ULL);
        SHA512_STEP(a, b, c, d, e, f, g, h,  8, 0xd807aa98a3030242ULL);
        SHA512_STEP(h, a, b, c, d, e, f, g,  9, 0x12835b0145706fbeULL);
        SHA512_STEP(g, h, a, b, c, d, e, f, 10, 0x243185be4ee4b28cULL);
        SHA512_STEP(f, g, h, a, b, c, d, e, 11, 0x550c7dc3d5ffb4e2ULL);
        SHA512_STEP(e, f, g, h, a, b, c, d, 12, 0x72be5d74f27b896fULL);
        SHA512_STEP(d, e, f, g, h, a, b, c, 13, 0x80deb1fe3b1696b1ULL);
        SHA512_STEP(c, d, e, f, g, h, a, b, 14, 0x9bdc06a725c71235ULL);
        SHA512_STEP(b, c, d, e, f, g, h, a, 15, 0xc19bf174cf692694ULL);
        SHA512_STEP(a, b, c, d, e, f, g, h, 16, 0xe49b69c19ef14ad2ULL);
        SHA512_STEP(h, a, b, c, d, e, f, g, 17, 0xefbe4786384f25e3ULL);
        SHA512_STEP(g, h, a, b, c, d, e, f, 18, 0x0fc19dc68b8cd5b5ULL);
        SHA512_STEP(f, g, h, a, b, c, d, e, 19, 0x240ca1cc77ac9c65ULL);
        SHA512_STEP(e, f, g, h, a, b, c, d, 20, 0x2de92c6f592b0275ULL);
        SHA512_STEP(d, e, f, g, h, a, b, c, 21, 0x4a7484aa6ea6e483ULL);
        SHA512_STEP(c, d, e, f, g, h, a, b, 22, 0x5cb0a9dcbd41fbd4ULL);
        SHA512_STEP(b, c, d, e, f, g, h, a, 23, 0x76f988da831153b5ULL);
        SHA512_STEP(a, b, c, d, e, f, g, h, 24, 0x983e5152ee66dfabULL);
        SHA512_STEP(h, a, b, c, d, e, f, g, 25, 0xa831c66d2db43210ULL);
        SHA512_STEP(g, h, a, b, c, d, e, f, 26, 0xb00327c898fb213fULL);
        SHA512_STEP(f, g, h, a, b, c, d, e, 27, 0xbf597fc7beef0ee4ULL);
        SHA512_STEP(e, f, g, h, a, b, c, d, 28, 0xc6e00bf33da88fc2ULL);
        SHA512_STEP(d, e, f, g, h, a, b, c, 29, 0xd5a79147930aa725ULL);
        SHA512_STEP(c, d, e, f, g, h, a, b, 30, 0x06ca6351e003826fULL);
        SHA512_STEP(b, c, d, e, f, g, h, a, 31, 0x142929670a0e6e70ULL);
        SHA512_STEP(a, b, c, d, e, f, g, h, 32, 0x27b70a8546d22ffcULL);
        SHA512_STEP(h, a, b, c, d, e, f, g, 33, 0x2e1b21385c26c926ULL);
        SHA512_STEP(g, h, a, b, c, d, e, f, 34, 0x4d2c6dfc5ac42aedULL);
        SHA512_STEP(f, g, h, a, b, c, d, e, 35, 0x53380d139d95b3dfULL);
        SHA512_STEP(e, f, g, h, a, b, c, d, 36, 0x650a73548baf63deULL);
        SHA512_STEP(d, e, f, g, h, a, b, c, 37, 0x766a0abb3c77b2a8ULL);
        SHA512_STEP(c, d, e, f, g, h, a, b, 38, 0x81c2c92e47edaee6ULL);
        SHA512_STEP(b, c, d, e, f, g, h, a, 39, 0x92722c851482353bULL);
        SHA512_STEP(a, b, c, d, e, f, g, h, 40, 0xa2bfe8a14cf10364ULL);
        SHA512_STEP(h, a, b, c, d, e, f, g, 41, 0xa81a664bbc423001ULL);
        SHA512_STEP(g, h, a, b, c, d, e, f, 42, 0xc24b8b70d0f89791ULL);
        SHA512_STEP(f, g, h, a, b, c, d, e, 43, 0xc76c51a30654be30ULL);
        SHA512_STEP(e, f, g, h, a, b, c, d, 44, 0xd192e819d6ef5218ULL);
        SHA512_STEP(d, e, f, g, h, a, b, c, 45, 0xd69906245565a910ULL);
        SHA512_STEP(c, d, e, f, g, h, a, b, 46, 0xf40e35855771202aULL);
        SHA512_STEP(b, c, d, e, f, g, h, a, 47, 0x106aa07032bbd1b8ULL);
        SHA512_STEP(a, b, c, d, e, f, g, h, 48, 0x19a4c116b8d2d0c8ULL);
        SHA512_STEP(h, a, b, c, d, e, f, g, 49, 0x1e376c085141ab53ULL);
        SHA512_STEP(g, h, a, b, c, d, e, f, 50, 0x2748774cdf8eeb99ULL);
        SHA512_STEP(f, g, h, a, b, c, d, e, 51, 0x34b0bcb5e19b48a8ULL);
        SHA512_STEP(e, f, g, h, a, b, c, d, 52, 0x391c0cb3c5c95a63ULL);
        SHA512_STEP(d, e, f, g, h, a, b, c, 53, 0x4ed8aa4ae3418acbULL);
        SHA512_STEP(c, d, e, f, g, h, a, b, 54, 0x5b9cca4f7763e373ULL);
        SHA512_STEP(b, c, d, e, f, g, h, a, 55, 0x682e6ff3d6b2b8a3ULL);
        SHA512_STEP(a, b, c, d, e, f, g, h, 56, 0x748f82ee5defb2fcULL);
        SHA512_STEP(h, a, b, c, d, e, f, g, 57, 0x78a5636f43172f60ULL);
        SHA512_STEP(g, h, a, b, c, d, e, f, 58, 0x84c87814a1f0ab72ULL);
        SHA512_STEP(f, g, h, a, b, c, d, e, 59, 0x8cc702081a6439ecULL);
        SHA512_STEP(e, f, g, h, a, b, c, d, 60, 0x90befffa23631e28ULL);
        SHA512_STEP(d, e, f, g, h, a, b, c, 61, 0xa4506cebde82bde9ULL);
        SHA512_STEP(c, d, e, f, g, h, a, b, 62, 0xbef9a3f7b2c67915ULL);
        SHA512_STEP(b, c, d, e, f, g, h, a, 63, 0xc67178f2e372532bULL);
        SHA512_STEP(a, b, c, d, e, f, g, h, 64, 0xca273eceea26619cULL);
        SHA512_STEP(h, a, b, c, d, e, f, g, 65, 0xd186b8c721c0c207ULL);
        SHA512_STEP(g, h, a, b, c, d, e, f, 66, 0xeada7dd6cde0eb1eULL);
        SHA512_STEP(f, g, h, a, b, c, d, e, 67, 0xf57d4f7fee6ed178ULL);
        SHA512_STEP(e, f, g, h, a, b, c, d, 68, 0x06f067aa72176fbaULL);
        SHA512_STEP(d, e, f, g, h, a, b, c, 69, 0x0a637dc5a2c898a6ULL);
        SHA512_STEP(c, d, e, f, g, h, a, b, 70, 0x113f9804bef90daeULL);
        SHA512_STEP(b, c, d, e, f, g, h, a, 71, 0x1b710b35131c471bULL);
        SHA512_STEP(a, b, c, d, e, f, g, h, 72, 0x28db77f523047d84ULL);
        SHA512_STEP(h, a, b, c, d, e, f, g, 73, 0x32caab7b40c72493ULL);
        SHA512_STEP(g, h, a, b, c, d, e, f, 74, 0x3c9ebe0a15c9bebcULL);
        SHA512_STEP(f, g, h, a, b, c, d, e, 75, 0x431d67c49c100d4cULL);
        SHA512_STEP(e, f, g, h, a, b, c, d, 76, 0x4cc5d4becb3e42b6ULL);
        SHA512_STEP(d, e, f, g, h, a, b, c, 77, 0x597f299cfc657e2aULL);
        SHA512_STEP(c, d, e, f, g, h, a, b, 78, 0x5fcb6fab3ad6faecULL);
        SHA512_STEP(b, c, d, e, f, g, h, a, 79, 0x6c44198c4a475817ULL);

        /* The feed-forward addition of the initial hash values is
           omitted here; get_binary() compensates by subtracting the IVs
           from the target hash. */
        _mm_store_si128 ((__m128i *) &crypt_key[0][index], a);
        _mm_store_si128 ((__m128i *) &crypt_key[1][index], b);
        _mm_store_si128 ((__m128i *) &crypt_key[2][index], c);
        _mm_store_si128 ((__m128i *) &crypt_key[3][index], d);
        _mm_store_si128 ((__m128i *) &crypt_key[4][index], e);
        _mm_store_si128 ((__m128i *) &crypt_key[5][index], f);
        _mm_store_si128 ((__m128i *) &crypt_key[6][index], g);
        _mm_store_si128 ((__m128i *) &crypt_key[7][index], h);
    }

#if FMT_MAIN_VERSION > 10
    return count;
#endif
}
/* Quick reject: compare only the first 64-bit word of the target against
   every computed hash.  Without OpenMP exactly MAX_KEYS_PER_CRYPT (2)
   candidates are produced per crypt_all() call. */
static int cmp_all (void *binary, int count)
{
    int i;

#ifdef _OPENMP
    for (i=0; i < count; i++)
#else
    for (i=0; i < 2; i++)
#endif
        if (((uint64_t *) binary)[0] == crypt_key[0][i])
            return 1;

    return 0;
}
/* Compare the first 64-bit word of the target against one computed
   hash. */
static int cmp_one (void *binary, int index)
{
    uint64_t expect = ((uint64_t *) binary)[0];

    return expect == crypt_key[0][index];
}
/* Full check: re-decode the source hash and compare the remaining seven
   64-bit words (word 0 starts at i=1 since it was already matched by
   cmp_one/cmp_all). */
static int cmp_exact (char *source, int index)
{
    int i;
    uint64_t *bin;

    bin = (uint64_t *) get_binary (source);

    for (i=1; i < 8; i++)
        if (((uint64_t *) bin)[i] != crypt_key[i][index])
            return 0;

    return 1;
}
/* Format registration: parameters block followed by the method table. */
struct fmt_main fmt_rawSHA512_ng = {
    {
        /* Format parameters. */
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        MAXLEN,
        BINARY_SIZE,
#if FMT_MAIN_VERSION > 9
        BINARY_ALIGN,
#endif
        SALT_SIZE,
#if FMT_MAIN_VERSION > 9
        SALT_ALIGN,
#endif
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        tests
    }, {
        /* Method table; fmt_default_* entries are framework no-ops. */
        init,
#if FMT_MAIN_VERSION > 10
        fmt_default_done,
        fmt_default_reset,
#endif
        fmt_default_prepare,
        valid,
        split,
        get_binary,
        fmt_default_salt,
#if FMT_MAIN_VERSION > 9
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        fmt_default_source,
#endif
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        fmt_default_salt_hash,
        fmt_default_set_salt,
        set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            get_hash_0,
            get_hash_1,
            get_hash_2,
            get_hash_3,
            get_hash_4,
            get_hash_5,
            get_hash_6
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};
#endif /* plugin stanza */
#endif /* __SSE2__ */
|
master_taskloop_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 -triple x86_64-unknown-unknown %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 -triple x86_64-unknown-unknown %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 -triple x86_64-unknown-unknown %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 -triple x86_64-unknown-unknown %s -Wuninitialized
// Check that -Wuninitialized still fires inside the body of the combined
// construct's loop.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp master taskloop simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop simd'}}
#pragma omp master taskloop simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp master taskloop simd'}}
#pragma omp master taskloop simd foo
// The directive must be followed by a canonical for loop; any other
// statement is an error.
void test_no_clause() {
  int i;
#pragma omp master taskloop simd
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+2 {{statement after '#pragma omp master taskloop simd' must be a for loop}}
#pragma omp master taskloop simd
  ++i;
}
// Jumps may neither enter nor leave the loop region: labels outside are
// undeclared inside (and vice versa), and returning from the region is
// diagnosed.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp master taskloop simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown clauses produce an extra-tokens warning; a duplicated
// 'nogroup' clause is an error.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp master taskloop simd foo bar
  for (i = 0; i < 16; ++i)
    ;

// expected-error@+1 {{directive '#pragma omp master taskloop simd' cannot contain more than one 'nogroup' clause}}
#pragma omp master taskloop simd nogroup nogroup
  for (i = 0; i < 16; ++i)
    ;
}
// Trailing punctuation after the directive or a clause is warned about
// and ignored.
void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp master taskloop simd;
  for (i = 0; i < 16; ++i)
    ;

// expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp parallel
#pragma omp master taskloop simd linear(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp master taskloop simd private(x);
  for (i = 0; i < 16; ++i)
    ;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
#pragma omp master taskloop simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// Exhaustive 'collapse' clause parsing and semantic checks: malformed
// argument lists, non-constant/non-positive arguments, and the required
// number of perfectly nested loops.
void test_collapse() {
  int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp master taskloop simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp master taskloop simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
#pragma omp master taskloop simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp master taskloop simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp master taskloop simd', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp master taskloop simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp master taskloop simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp master taskloop simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// Diagnostic checks for the 'private' clause on '#pragma omp master taskloop simd':
// parse errors for malformed argument lists, sema errors for non-variable arguments,
// then well-formed uses. The 'expected-*' comments are the test oracle consumed by
// clang -verify; their '@+N' offsets are relative, so do not insert lines between a
// directive and the pragma it annotates.
void test_private() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp master taskloop simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// Diagnostic checks for the 'lastprivate' clause on '#pragma omp master taskloop simd':
// malformed argument lists, non-variable arguments, and valid variable lists.
// The 'expected-*' comments are consumed by clang -verify; '@+N' offsets are relative,
// so no lines may be inserted between a directive and its target pragma.
void test_lastprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// Diagnostic checks for the 'firstprivate' clause on '#pragma omp master taskloop simd':
// malformed argument lists, non-variable arguments, valid lastprivate+firstprivate
// combinations, plus one simdlen/safelen consistency error at the end.
// The 'expected-*' comments are consumed by clang -verify; '@+N' offsets are relative,
// so no lines may be inserted between a directive and its target pragma.
void test_firstprivate() {
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp master taskloop simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp master taskloop simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp master taskloop simd simdlen(64) safelen(8)
for (i = 0; i < 16; ++i)
;
}
// Diagnostic checks for loop iteration variables under '#pragma omp master taskloop simd':
// float/double induction variables are rejected, and an __int128 variable draws a
// narrowing warning. Comments are clang -verify directives with relative offsets.
void test_loop_messages() {
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp master taskloop simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp master taskloop simd
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
// Diagnostic checks for the 'nontemporal' clause on '#pragma omp master taskloop simd'.
// Under -fopenmp-version=45 the clause itself is rejected (omp45-error); under 5.0
// (omp50-*) the argument-list parse/sema errors apply: malformed lists, undeclared
// variables, duplicate listing, and interaction with private/lastprivate.
// Comments are clang -verify directives with relative '@+N' offsets.
void test_nontemporal() {
int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd nontemporal(
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd nontemporal(,
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp master taskloop simd nontemporal(, )
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd nontemporal()
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected expression}}
#pragma omp master taskloop simd nontemporal(int)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp master taskloop simd nontemporal(0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp master taskloop simd nontemporal(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp master taskloop simd nontemporal(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp master taskloop simd nontemporal(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp master taskloop simd nontemporal(x :)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp master taskloop simd nontemporal(x :, )
for (i = 0; i < 16; ++i)
;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp master taskloop simd nontemporal(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}}
#pragma omp master taskloop simd private(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}}
#pragma omp master taskloop simd nontemporal(x) private(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp master taskloop simd nontemporal(x, y : 0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}}
#pragma omp master taskloop simd nontemporal(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp master taskloop simd'}}
#pragma omp master taskloop simd lastprivate(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
}
|
schelude-clause-dynamic.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
// Teaching example for '#pragma omp parallel for' with schedule(dynamic,chunk).
// Each thread starts from a firstprivate copy of 'suma'; lastprivate(suma)
// copies back only the value from the thread that executed the last iteration,
// so the final printed 'suma' is a partial sum -- that surprising result is the
// point of the exercise (contrast with a reduction clause).
//
// Usage: program <chunk>
// Exits with -1 when the chunk argument is missing; returns 0 otherwise.
int main(int argc, char **argv) {   // was implicit-int 'main', invalid in C99/C11/C++
int i, n=16,chunk,a[n],suma=0;
if(argc < 2) {
fprintf(stderr,"\nFalta iteraciones o chunk \n");
exit(-1);
}
//n = atoi(argv[1]);
if (n>200) n=200;   // clamp to the array bound used when n came from argv (no-op while n==16)
chunk = atoi(argv[1]);
for (i=0; i<n; i++)
a[i] = i;
#pragma omp parallel for firstprivate(suma) \
lastprivate(suma) schedule(dynamic,chunk)
for (i=0; i<n; i++)
{
suma = suma + a[i];
printf(" thread %d suma a[%d]=%d suma=%d \n",
omp_get_thread_num(),i,a[i],suma);
}
printf("Fuera de 'parallel for' suma=%d\n",suma);
return 0;   // explicit success status (previously fell off the end of main)
}
|
mixed_tentusscher_myo_epi_2004_S2_15.c | // Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_15.h"
// Populates the cell-model descriptor on demand: the caller's flags select
// whether to report the ODE system size and/or the resting membrane potential.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_neq) {
cell_model->number_of_ode_equations = NEQ;
}
if(get_initial_v) {
cell_model->initial_v = INITIAL_V;
}
}
// Writes the initial state vector for one cell (index sv_id) into sv[0..NEQ-1].
// The tissue mask passed through 'extra_data' selects the parameter set:
// mapping[sv_id] == 0 -> myocardium steady state, otherwise epicardium steady state.
// Aborts via print_to_stderr_and_file_and_exit when no mask is supplied.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
// NOTE(review): 'static bool first_call' is written without synchronization; if this
// function is invoked from multiple threads the banner may print more than once -- confirm.
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
// (same state ordering as the commented defaults above: V, gates, Cai, CaSR, Nai, Ki)
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
// (epicardium variant of the same state vector)
real sv_sst[]={-86.4855897827032,0.00131304048220611,0.777673256476332,0.777530419758781,0.000176915007944578,0.484229873407731,0.00295766051225702,0.999998320538839,1.96031195718503e-08,1.91202485653593e-05,0.999769095072611,1.00710495848039,0.999995509954569,4.49502542744173e-05,0.671374359732192,10.7525810292738,138.733913720923};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
// Advances every requested cell by 'num_steps' explicit time steps of size 'dt',
// dispatching each cell to the myocardium (tag 0) or epicardium right-hand side
// according to the tissue mask supplied through 'extra_data'. Aborts when no
// mask is given. Cells are independent, so the outer loop runs in parallel.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array (one tissue tag per *global* cell id)
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
#pragma omp parallel for
for (int i = 0; i < num_cells_to_solve; i++)
{
// Global cell id: explicit subset if given, identity map otherwise.
uint32_t sv_id = cells_to_solve ? cells_to_solve[i] : (uint32_t)i;
// FIX: index 'mapping' with the global cell id (sv_id), not the subset
// position 'i'. set_model_initial_conditions_cpu reads mapping[sv_id],
// and the two indices only coincide when cells_to_solve is NULL.
// Hoisted out of the step loop since the tag is loop-invariant.
uint32_t tissue = mapping[sv_id];
for (int j = 0; j < num_steps; ++j)
{
if (tissue == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// One explicit time step for a myocardium cell: snapshot the state vector,
// compute the updated states, then write them back into sv in place.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
real state[NEQ];
real updated[NEQ];
for (int k = 0; k < NEQ; k++) {
state[k] = sv[k];
}
RHS_cpu_myo(state, updated, stim_current, dt);
for (int k = 0; k < NEQ; k++) {
sv[k] = updated[k];
}
}
// Single-step update for the ten Tusscher 2004 *myocardium* cell model.
// NOTE: despite the name, rDY_ does not receive time derivatives -- every entry is
// the state value AFTER one step of size 'dt': gating variables use the exponential
// (Rush-Larsen style) form INF-(INF-x)*exp(-dt/TAU), while V, Cai, CaSR, Nai and Ki
// are advanced with forward Euler / algebraic buffer updates below. The caller
// (solve_model_ode_cpu_myo) simply assigns sv[i] = rDY_[i].
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Working variables: membrane currents, concentration increments, gate
// rates/steady states and time constants computed further below.
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Precomputed exponential decay factors for the FCa and G gates (fixed taus).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// SR and cytosolic calcium are advanced through the analytic solution of the
// rapid-buffering quadratic (sqrt form), not plain Euler on the free Ca.
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates may only decrease while the cell is depolarized (> -37 mV):
// if the exponential update would raise them, keep the previous value.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
// One explicit time step for an epicardium cell: snapshot the state vector,
// compute the updated states, then write them back into sv in place.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
real state[NEQ];
real updated[NEQ];
for (int k = 0; k < NEQ; k++) {
state[k] = sv[k];
}
RHS_cpu_epi(state, updated, stim_current, dt);
for (int k = 0; k < NEQ; k++) {
sv[k] = updated[k];
}
}
// Single-step update for the ten Tusscher 2004 *epicardium* cell model.
// NOTE: despite the name, rDY_ does not receive time derivatives -- every entry is
// the state value AFTER one step of size 'dt' (gates via INF-(INF-x)*exp(-dt/TAU),
// V and concentrations via forward Euler / buffer updates); the caller assigns
// sv[i] = rDY_[i]. Differs from RHS_cpu_myo in Gks/Gto defaults and, mainly, in the
// fitted 'parameters' array below that overrides most conductances and adds
// arel/crel/Vleak for the SR release/leak fluxes.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Fitted parameter set (scenario S2_15): overrides the default conductances above
// and supplies the SR release (arel, crel) and leak (Vleak) coefficients.
real parameters []={14.7328911543692,0.000268220573270157,0.000159694102989303,4.95038560493972e-05,0.281222894359262,0.155491530964224,0.222154844407151,4.08947089393252,0.0209965622527636,1.02972284723443,1096.92640050885,0.000622419783707689,0.0929425682382634,0.0199277276192553,0.00362501998690467,4.31336229850729e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
// Working variables: membrane currents, concentration increments, gate
// rates/steady states and time constants computed further below.
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Precomputed exponential decay factors for the FCa and G gates (fixed taus).
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
// SR release and leak use the fitted arel/crel/Vleak instead of the fixed
// constants used in RHS_cpu_myo.
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// SR and cytosolic calcium are advanced through the analytic solution of the
// rapid-buffering quadratic (sqrt form), not plain Euler on the free Ca.
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates may only decrease while the cell is depolarized (> -37 mV):
// if the exponential update would raise them, keep the previous value.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
weno5jp_impl_c_.c | #include <stdlib.h>
static double c1;
static double c2;
static double eps;
static double dx;
static double dx_inv;
static double dx_inv_12;
static int size;
// Records the solver configuration in the file-scope state consumed by
// wenohj_interpolate(): smoothing epsilon, array length, grid spacing, and the
// precomputed reciprocals and stencil weights (1/3 and 1/6).
void wenohj_init(double aeps, int asize, double adx)
{
eps = aeps;
size = asize;
dx = adx;
dx_inv = 1.0 / adx;
dx_inv_12 = 1.0 / (12.0 * adx);
c1 = 1.0 / 3.0;
c2 = 1.0 / 6.0;
}
// Computes, for every grid point i, one-sided fifth-order WENO approximations of
// the first derivative u_x for Hamilton-Jacobi equations: u_x_plus (right-biased,
// from the up1..up3 stencil) and u_x_minus (left-biased, from the um1..um3 stencil).
// um3..up3 are the solution shifted by -3..+3 grid points; u_x_plus/u_x_minus are
// the outputs (marked restrict: must not alias the inputs). Requires wenohj_init()
// to have set eps, size, dx_inv and dx_inv_12 first.
// NOTE(review): the weight/smoothness formulas match the Jiang-Peng WENO scheme
// for HJ equations -- confirm against the reference before altering constants.
void wenohj_interpolate(double *um3, double *um2, double *um1,
double *u0, double *up1, double *up2,
double *up3, double * restrict u_x_plus,
double * restrict u_x_minus)
{
double numer, common;
double dder1, dder2, dder3, dder4, dder5;
double flux_plus, flux_minus;
double is0, is1, is2;
double alpha0, alpha1, alpha2;
double sum_alpha;
double w0, w2;
double a, b, c, d;
int i;
// Constant is used for auto-vectorization in GCC
const int ub = size;
// All scratch scalars are privatized per SIMD lane; keep this clause in sync
// with the declarations above when adding temporaries.
#pragma omp simd \
private(numer, common, dder1, dder2, dder3, dder4, dder5, \
a, b, c, d, is0, is1, is2, alpha0, alpha1, alpha2, \
sum_alpha, w0, w2, flux_plus, flux_minus)
for (i = 0; i < ub; ++i) {
// Central fourth-order part shared by both one-sided approximations.
numer = um2[i] + 8*(up1[i] - um1[i]) - up2[i];
common = numer * dx_inv_12;
// Compute second derivatives
dder1 = (up3[i] - 2*up2[i] + up1[i]) * dx_inv;
dder2 = (up2[i] - 2*up1[i] + u0[i] ) * dx_inv;
dder3 = (up1[i] - 2*u0[i] + um1[i]) * dx_inv;
dder4 = (u0[i] - 2*um1[i] + um2[i]) * dx_inv;
dder5 = (um1[i] - 2*um2[i] + um3[i]) * dx_inv;
// Right-biased branch: smoothness indicators and nonlinear weights
// (eps keeps the denominators away from zero).
a = dder1;
b = dder2;
c = dder3;
d = dder4;
is0 = 13.0*(a - b)*(a - b) + 3.0*(a - 3*b)*(a - 3*b);
is1 = 13.0*(b - c)*(b - c) + 3.0*(b + c)*(b + c);
is2 = 13.0*(c - d)*(c - d) + 3.0*(3*c - d)*(3*c - d);
alpha0 = 1.0 / ((eps + is0)*(eps + is0));
alpha1 = 6.0 / ((eps + is1)*(eps + is1));
alpha2 = 3.0 / ((eps + is2)*(eps + is2));
sum_alpha = alpha0 + alpha1 + alpha2;
w0 = alpha0 / sum_alpha;
w2 = alpha2 / sum_alpha;
flux_plus = c1 * w0 * (a - 2*b + c) + c2 * (w2 - 0.5) * (b - 2*c + d);
// Left-biased branch: same formulas with the stencil mirrored.
a = dder5;
b = dder4;
c = dder3;
d = dder2;
is0 = 13.0*(a - b)*(a - b) + 3.0*(a - 3*b)*(a - 3*b);
is1 = 13.0*(b - c)*(b - c) + 3.0*(b + c)*(b + c);
is2 = 13.0*(c - d)*(c - d) + 3.0*(3*c - d)*(3*c - d);
alpha0 = 1.0 / ((eps + is0)*(eps + is0));
alpha1 = 6.0 / ((eps + is1)*(eps + is1));
alpha2 = 3.0 / ((eps + is2)*(eps + is2));
sum_alpha = alpha0 + alpha1 + alpha2;
w0 = alpha0 / sum_alpha;
w2 = alpha2 / sum_alpha;
flux_minus = c1 * w0 * (a - 2*b + c) + c2 * (w2 - 0.5) * (b - 2*c + d);
u_x_plus[i] = common + flux_plus;
u_x_minus[i] = common - flux_minus;
}
}
|
omp_pragma_example1.c | #if 0
//inputBug342-2.c
// -rose:C
// roseomp: main.C:1685: static SgStatement*
// ASTtools::getNextStatement(SgPragmaDeclaration*): Assertion (*i) != __null failed.
void foo()
{
int i;
double sum = 0.0;
// Extra block inserted around subcollection of statements in original block!
{
for (i = 1; i <= 10; i++) { sum++; }
// a statement between the for loop and pragma will help
// sum++;
#pragma omp single
}
sum++;
}
#endif
// Reduced ROSE/OpenMP test case exercising a "#pragma omp single" that
// immediately follows a compound statement.
// NOTE(review): `i` is read uninitialized by the while condition; this
// looks intentional for the compiler regression test (the point is the
// pragma placement, not the loop) -- confirm before "fixing".
void foo (void)
{
    int i;
    double sum=0.0;
    //  for(i=1;i<=10;i++) sum++;
    //  while (i <= 10) { sum++; }
    //  { int x; }
    while (i <= 10) { sum++; }
    // a statement between the for loop and pragma will help
    //  sum++;
#pragma omp single
    sum++;
}
|
GB_unop__expm1_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__expm1_fp64_fp64)
// op(A') function: GB (_unop_tran__expm1_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = expm1 (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = expm1 (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = expm1 (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXPM1 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = expm1 (Ax [p]) for all anz entries.
GrB_Info GB (_unop_apply__expm1_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // sparse/hyper/full case: every entry of Ax is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = expm1 (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = expm1 (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: C = expm1 (A') -- transpose plus unary apply.
// The actual work lives in the included template, driven by the GB_* macros.
GrB_Info GB (_unop_tran__expm1_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
triplet.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* These codes were originally parts of spglib, but only develped */
/* and used for phono3py. Therefore these were moved from spglib to */
/* phono3py. This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include "triplet.h"
#include "bzgrid.h"
#include "triplet_grid.h"
#include "triplet_iw.h"
/* Thin wrapper: build the BZ triplets for one grid point by delegating to
 * triplet_grid.c.  Returns the number of triplets written. */
long tpl_get_BZ_triplets_at_q(long (*triplets)[3], const long grid_point,
                              const ConstBZGrid *bzgrid,
                              const long *map_triplets) {
    long num_triplets;

    num_triplets =
        tpk_get_BZ_triplets_at_q(triplets, grid_point, bzgrid, map_triplets);
    return num_triplets;
}
/* Find the irreducible q-point triplets on the reciprocal mesh for one grid
 * point, delegating to triplet_grid.c.  Returns the number of irreducible
 * triplets stored in map_triplets / map_q. */
long tpl_get_triplets_reciprocal_mesh_at_q(
    long *map_triplets, long *map_q, const long grid_point, const long mesh[3],
    const long is_time_reversal, const long num_rot,
    const long (*rec_rotations)[3][3], const long swappable) {
    return tpk_get_ir_triplets_at_q(map_triplets, map_q, grid_point, mesh,
                                    is_time_reversal, rec_rotations, num_rot,
                                    swappable);
}
/* Compute tetrahedron-method integration weights for all triplets.
 * Each triplet contributes a block of num_band0*num_band1*num_band2 values
 * in iw / iw_zero; triplets may be processed in parallel when
 * openmp_per_triplets is nonzero. */
void tpl_get_integration_weight(
    double *iw, char *iw_zero, const double *frequency_points,
    const long num_band0, const long relative_grid_address[24][4][3],
    const long (*triplets)[3], const long num_triplets,
    const ConstBZGrid *bzgrid, const double *frequencies1, const long num_band1,
    const double *frequencies2, const long num_band2, const long tp_type,
    const long openmp_per_triplets, const long openmp_per_bands) {
    long t;
    long band_block; /* values produced per triplet */
    long tp_relative_grid_address[2][24][4][3];

    /* Signed tetrahedron vertex offsets depend on the triplet type. */
    tpl_set_relative_grid_address(tp_relative_grid_address,
                                  relative_grid_address, tp_type);
    band_block = num_band0 * num_band1 * num_band2;

#ifdef PHPYOPENMP
#pragma omp parallel for if (openmp_per_triplets)
#endif
    for (t = 0; t < num_triplets; t++) {
        tpi_get_integration_weight(
            iw + t * band_block, iw_zero + t * band_block,
            frequency_points, /* f0 */
            num_band0, tp_relative_grid_address, triplets[t], num_triplets,
            bzgrid, frequencies1, /* f1 */
            num_band1, frequencies2, /* f2 */
            num_band2, tp_type, openmp_per_bands);
    }
}
/* Compute smearing (Gaussian sigma) integration weights for all triplets.
 * Layout mirrors tpl_get_integration_weight: one block of
 * num_band0*num_band*num_band values per triplet in iw / iw_zero. */
void tpl_get_integration_weight_with_sigma(
    double *iw, char *iw_zero, const double sigma, const double sigma_cutoff,
    const double *frequency_points, const long num_band0,
    const long (*triplets)[3], const long num_triplets,
    const double *frequencies, const long num_band, const long tp_type) {
    long t, band_block, adrs_shift;
    const double cutoff = sigma * sigma_cutoff;

    band_block = num_band0 * num_band * num_band;
    /* Offset separating the two weight planes inside iw. */
    adrs_shift = num_triplets * num_band0 * num_band * num_band;

#ifdef PHPYOPENMP
#pragma omp parallel for
#endif
    for (t = 0; t < num_triplets; t++) {
        tpi_get_integration_weight_with_sigma(
            iw + t * band_block, iw_zero + t * band_block, sigma, cutoff,
            frequency_points, num_band0, triplets[t], adrs_shift,
            frequencies, num_band, tp_type, 0);
    }
}
/* Return 1 if the triplet is a Normal process, i.e. the three grid
 * addresses sum to the zero vector on every Cartesian axis; return 0 as
 * soon as any axis has a nonzero (Umklapp) sum. */
long tpl_is_N(const long triplet[3], const long (*bz_grid_addresses)[3]) {
    long axis, t, total;

    for (axis = 0; axis < 3; axis++) {
        total = 0;
        for (t = 0; t < 3; t++) { /* 1st, 2nd, 3rd triplet */
            total += bz_grid_addresses[triplet[t]][axis];
        }
        if (total != 0) {
            return 0;
        }
    }
    return 1;
}
/* Fill tp_relative_grid_address with two copies of relative_grid_address:
 * copy 0 is always unchanged; copy 1 is negated for tp_type 2 or 3
 * (q1+q2+q3=G, so stepping q2 by +1 requires stepping q3 by -1 to keep G).
 * For tp_type 4 (q+k_i-k_f=G) both copies keep the original sign. */
void tpl_set_relative_grid_address(long tp_relative_grid_address[2][24][4][3],
                                   const long relative_grid_address[24][4][3],
                                   const long tp_type) {
    long copy, j, k, l, sign;

    for (copy = 0; copy < 2; copy++) {
        sign = (copy == 1 && (tp_type == 2 || tp_type == 3)) ? -1 : 1;
        for (j = 0; j < 24; j++) {
            for (k = 0; k < 4; k++) {
                for (l = 0; l < 3; l++) {
                    tp_relative_grid_address[copy][j][k][l] =
                        sign * relative_grid_address[j][k][l];
                }
            }
        }
    }
}
|
cross.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <ops/declarable/helpers/helpers.h>
namespace nd4j {
namespace ops {
namespace helpers {
// Computes the 3-D cross product o = a x b for single 3-element vectors.
// Elements are read as double when a holds a real (floating) type, and as
// Nd4jLong otherwise; a and b are each assumed to hold at least 3 elements.
// Fix: removed the unused local `Nd4jLong idx = 0L;` that appeared (dead)
// in both branches of the original.
void FORCEINLINE _cross(NDArray *a, NDArray *b, NDArray *o) {
    if (a->isR()) {
        auto a0 = a->e<double>(0);
        auto a1 = a->e<double>(1);
        auto a2 = a->e<double>(2);

        auto b0 = b->e<double>(0);
        auto b1 = b->e<double>(1);
        auto b2 = b->e<double>(2);

        o->p(Nd4jLong(0L), a1 * b2 - a2 * b1);
        o->p(1L, a2 * b0 - a0 * b2);
        o->p(2L, a0 * b1 - a1 * b0);
    } else {
        auto a0 = a->e<Nd4jLong>(0);
        auto a1 = a->e<Nd4jLong>(1);
        auto a2 = a->e<Nd4jLong>(2);

        auto b0 = b->e<Nd4jLong>(0);
        auto b1 = b->e<Nd4jLong>(1);
        auto b2 = b->e<Nd4jLong>(2);

        o->p(Nd4jLong(0L), a1 * b2 - a2 * b1);
        o->p(1L, a2 * b0 - a0 * b2);
        o->p(2L, a0 * b1 - a1 * b0);
    }
}
// Applies the 3-D cross product row-wise: a, b, o are viewed as [-1, 3]
// and _cross is invoked on each corresponding row triple in parallel.
void FORCEINLINE _crossBatched(NDArray *a, NDArray *b, NDArray *o) {
    auto rowsA = a->reshape(a->ordering(), {-1, 3});
    auto rowsB = b->reshape(b->ordering(), {-1, 3});
    auto rowsO = o->reshape(o->ordering(), {-1, 3});

    auto listA = rowsA->allTensorsAlongDimension({1});
    auto listB = rowsB->allTensorsAlongDimension({1});
    auto listO = rowsO->allTensorsAlongDimension({1});

    int total = listA->size();

#pragma omp parallel for simd schedule(static)
    for (int i = 0; i < total; i++) {
        helpers::_cross(listA->at(i), listB->at(i), listO->at(i));
    }

    delete listA;
    delete listB;
    delete listO;

    delete rowsA;
    delete rowsB;
    delete rowsO;
}
void weightedCrossEntropyWithLogitsFunctor(NDArray const* targets, NDArray const* input, NDArray const* weights, NDArray* output);
}
}
} |
GB_binop__lor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__lor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int8)
// A*D function (colscale): GB (_AxD__lor_int8)
// D*A function (rowscale): GB (_DxB__lor_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int8)
// C=scalar+B GB (_bind1st__lor_int8)
// C=scalar+B' GB (_bind1st_tran__lor_int8)
// C=A+scalar GB (_bind2nd__lor_int8)
// C=A'+scalar GB (_bind2nd_tran__lor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT8 || GxB_NO_LOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated kernel: C = A+B with all three matrices dense, using the
// LOR int8 operator; the loop lives in the included template.
GrB_Info GB (_Cdense_ewise3_noaccum__lor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C += B, accumulating a sparse matrix into a dense
// matrix with the LOR int8 operator (template does the work).
GrB_Info GB (_Cdense_accumB__lor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C += b, accumulating a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__lor_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the inner block returns); harmless generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C = A*D, scaling each column of A by the matching
// diagonal entry of D with the LOR int8 operator.
GrB_Info GB (_AxD__lor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated kernel: C = D*B, scaling each row of B by the matching
// diagonal entry of D with the LOR int8 operator.
GrB_Info GB (_DxB__lor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseAdd C=A+B (optionally masked) with the LOR
// int8 operator; GB_add_template.c provides the sparse/bitmap/full cases.
GrB_Info GB (_AaddB__lor_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Scratch slicings used by the template; freed by GB_FREE_WORK.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseMult C=A.*B where C is sparse/hypersparse,
// with optional mask M (possibly complemented), using the LOR int8 operator.
GrB_Info GB (_AemultB_08__lor_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseMult C<#> = A.*B when A is sparse/hypersparse
// and B is bitmap/full, using the LOR int8 operator.
GrB_Info GB (_AemultB_02__lor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseMult C<M> = A.*B with M sparse/hypersparse
// and A, B bitmap/full, using the LOR int8 operator.
GrB_Info GB (_AemultB_04__lor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Auto-generated kernel: eWiseMult where C is bitmap (masked or unmasked),
// using the LOR int8 operator.
GrB_Info GB (_AemultB_bitmap__lor_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = (x || Bx [p]) with scalar x bound to the
// first operand; Bb (if non-NULL) is the bitmap of present entries.
GrB_Info GB (_bind1st__lor_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = (Ax [p] || y) with scalar y bound to the
// second operand; Ab (if non-NULL) is the bitmap of present entries.
GrB_Info GB (_bind2nd__lor_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// Auto-generated kernel: C = (x || A') -- transpose with scalar bound to
// the first operand; GB_unop_transpose.c does the work via GB_CAST_OP.
GrB_Info GB (_bind1st_tran__lor_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this kernel
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// Auto-generated kernel: C = (A' || y) -- transpose with scalar bound to
// the second operand; GB_unop_transpose.c does the work via GB_CAST_OP.
GrB_Info GB (_bind2nd_tran__lor_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
workspace.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
**/
#include "plasma_workspace.h"
#include "plasma_internal.h"
#include <omp.h>
/******************************************************************************/
/**
 * Allocates a per-thread workspace of lworkspace elements of type dtyp for
 * every OpenMP thread.  Returns PlasmaSuccess, or PlasmaErrorOutOfMemory on
 * any allocation failure (in which case all partial allocations are freed).
 *
 * Fixes vs. original: the failure flag `info` was written concurrently by
 * multiple threads without synchronization (a data race); the write is now
 * serialized.  Also removed a pointless free(NULL) on the calloc-failure
 * path.
 */
int plasma_workspace_create(plasma_workspace_t *workspace, size_t lworkspace,
                            plasma_enum_t dtyp)
{
    // Query the team size that will use this workspace.
    #pragma omp parallel
    #pragma omp master
    {
        workspace->nthread = omp_get_num_threads();
    }
    workspace->lworkspace = lworkspace;
    workspace->dtyp = dtyp;

    // Allocate array of per-thread pointers (zero-initialized so that
    // plasma_workspace_destroy() is safe after a partial failure).
    if ((workspace->spaces = (void**)calloc(workspace->nthread,
                                            sizeof(void*))) == NULL) {
        plasma_error("malloc() failed");
        return PlasmaErrorOutOfMemory;
    }

    // Each thread allocates its workspace.
    size_t size = (size_t)lworkspace * plasma_element_size(workspace->dtyp);
    int info = PlasmaSuccess;
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        if ((workspace->spaces[tid] = (void*)malloc(size)) == NULL) {
            // Serialize the flag update: concurrent unsynchronized writes
            // to info are a data race even when all threads store the
            // same value.
            #pragma omp critical (plasma_workspace_create_info)
            info = PlasmaErrorOutOfMemory;
        }
    }
    if (info != PlasmaSuccess) {
        plasma_workspace_destroy(workspace);
    }
    return info;
}
/******************************************************************************/
/**
 * Frees every per-thread workspace and the pointer array itself, then
 * resets the bookkeeping fields.  Safe to call on an already-destroyed
 * (spaces == NULL) workspace.  Always returns PlasmaSuccess.
 */
int plasma_workspace_destroy(plasma_workspace_t *workspace)
{
    if (workspace->spaces == NULL) {
        return PlasmaSuccess;
    }
    for (int tid = 0; tid < workspace->nthread; ++tid) {
        free(workspace->spaces[tid]);
        workspace->spaces[tid] = NULL;
    }
    free(workspace->spaces);
    workspace->spaces = NULL;
    workspace->nthread = 0;
    workspace->lworkspace = 0;
    return PlasmaSuccess;
}
|
net_md5_fmt_plug.c | /* Cracker for RIPv2 MD5 authentication hashes.
*
* This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Added linkage to dynamic (type dynamic_39) for any salt 230 bytes or less,
* by Jim Fougeron. Any salts > 239 bytes will still be handled by this full
* format. dynamic is limited to 256 bytes, which 'should' get us 240 bytes
* of salt. I think we might be able to get 239 bytes (due to a few issues).
* 240 byte salts fail. So, for peace of mind, I am limiting to 230 byte salts
* within dynamic. This is the FIRST format that is hybrid fat-thin.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_netmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_netmd5);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 2048 // XXX
#endif
#include "arch.h"
#include "formats.h"
#include "dynamic.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "net-md5"
#define FORMAT_NAME "\"Keyed MD5\" RIPv2, OSPF, BGP, SNMPv2"
#define FORMAT_TAG "$netmd5$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
// RIPv2 truncates (or null pads) passwords to length 16
#define PLAINTEXT_LENGTH 16
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_WORD
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define HEXCHARS "0123456789abcdef"
#define MAX_SALT_LEN 1024
static struct fmt_tests tests[] = {
/* RIPv2 MD5 authentication hashes */
{ "02020000ffff0003002c01145267d48d000000000000000000020000ac100100ffffff000000000000000001ffff0001$1e372a8a233c6556253a0909bc3dcce6", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d48f000000000000000000020000ac100100ffffff000000000000000001ffff0001$ed9f940c3276afcc06d15babe8a1b61b", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d490000000000000000000020000ac100100ffffff000000000000000001ffff0001$c9f7763f80fcfcc2bbbca073be1f5df7", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d49a000000000000000000020000ac100200ffffff000000000000000001ffff0001$3f6a72deeda200806230298af0797997", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d49b000000000000000000020000ac100200ffffff000000000000000001ffff0001$b69184bacccc752cadf78cac455bd0de", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267d49d000000000000000000020000ac100100ffffff000000000000000001ffff0001$6442669c577e7662188865a54c105d0e", "quagga"},
{FORMAT_TAG "02020000ffff0003002c01145267e076000000000000000000020000ac100200ffffff000000000000000001ffff0001$4afe22cf1750d9af8775b25bcf9cfb8c", "abcdefghijklmnop"},
{FORMAT_TAG "02020000ffff0003002c01145267e077000000000000000000020000ac100200ffffff000000000000000001ffff0001$326b12f6da03048a655ea4d8f7e3e123", "abcdefghijklmnop"},
{FORMAT_TAG "02020000ffff0003002c01145267e2ab000000000000000000020000ac100100ffffff000000000000000001ffff0001$ad76c40e70383f6993f54b4ba6492a26", "abcdefghijklmnop"},
/* OSPFv2 MD5 authentication hashes */
{"$netmd5$0201002cac1001010000000000000002000001105267ff8fffffff00000a0201000000280000000000000000$445ecbb27272bd791a757a6c85856150", "abcdefghijklmnop"},
{FORMAT_TAG "0201002cac1001010000000000000002000001105267ff98ffffff00000a0201000000280000000000000000$d4c248b417b8cb1490e02c5e99eb0ad1", "abcdefghijklmnop"},
{FORMAT_TAG "0201002cac1001010000000000000002000001105267ffa2ffffff00000a0201000000280000000000000000$528d9bf98be8213482af7295307625bf", "abcdefghijklmnop"},
{NULL}
};
/* Per-candidate plaintext buffers; set_key() zero-pads them to full length. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* MD5 digests produced by crypt_all(), one per candidate key. */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void get_ptr();
static void init(struct fmt_main *self);
/* Marker distinguishing a native salt from a $dynamic_39$ salt blob. */
#define MAGIC 0xfe5dd5ef
static struct custom_salt {
ARCH_WORD_32 magic;  /* == MAGIC for native salts; anything else => dynamic */
int length;          /* number of raw salt bytes stored in salt[] */
unsigned char salt[MAX_SALT_LEN]; // fixed len, but should be OK
} *cur_salt;
/* Set once a $dynamic_39$ salt has been seen; set_key then mirrors keys there. */
static int dyna_salt_seen=0;
static char Conv_Buf[300]; // max salt length we will pass to dyna is 230. 300 is MORE than enough.
/* Thin link to the dynamic_39 format and the clone of this format passed to it. */
static struct fmt_main *pDynamicFmt, *pNetMd5_Dyna;
/* this function converts a 'native' net-md5 signature string into a $dynamic_39$ syntax string */
/* Output shape: $dynamic_39$<md5-hex>$HEX$<salt-hex>. Returns "*" on parse failure,
 * or the input unchanged if it is already in dynamic format.
 * NOTE(review): the bound passed to snprintf is sizeof(Conv_Buf), i.e. this
 * assumes Buf always points at Conv_Buf (true for every caller in this file). */
static char *Convert(char *Buf, char *ciphertext)
{
char *cp, *cp2;
if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
return ciphertext;
/* cp -> '$' terminating the hex salt; cp2 -> '$' preceding the md5 digest */
cp = strchr(&ciphertext[2], '$');
if (!cp)
return "*";
cp2 = strchr(&cp[1], '$');
if (!cp2)
return "*";
/* width/precision = cp2-cp prints the leading '$' plus the salt hex, giving
 * the "$HEX$<hex>" form the dynamic format expects */
snprintf(Buf, sizeof(Conv_Buf), "$dynamic_39$%s$HEX%*.*s", &cp2[1], (int)(cp2-cp), (int)(cp2-cp), cp);
return Buf;
}
/* Accept a hash if it parses natively (salt-hex '$' 32-hex-digit MD5), or
 * defer to the linked dynamic_39 format's valid() otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *q = NULL;
int len;
p = ciphertext;
/* p -> start of the salt hex (skip the optional $netmd5$ tag) */
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
/* q -> character after the last '$', i.e. the digest */
q = strrchr(ciphertext, '$');
if (!q)
return 0;
q = q + 1;
/* salt portion (q - p - 1 chars) must fit MAX_SALT_LEN raw bytes */
if ((q - p - 1) > MAX_SALT_LEN * 2)
return 0;
len = strspn(q, HEXCHARS);
/* digest not exactly 32 pure hex chars: let dynamic_39 judge the string */
if (len != BINARY_SIZE * 2 || len != strlen(q)) {
get_ptr();
return pDynamicFmt->methods.valid(ciphertext, pDynamicFmt);
}
/* salt portion must be entirely hex */
if (strspn(p, HEXCHARS) != q - p - 1)
return 0;
return 1;
}
/* Build a salt blob. Salts short enough for dynamic_39 (< 230 bytes) are
 * converted and copied as a dynamic salt (magic stays != MAGIC); only very
 * long salts are handled natively with magic == MAGIC. */
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *orig_ct = ciphertext;
int i, len;
memset(&cs, 0, sizeof(cs));
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
/* NOTE(review): len is derived before any bound check; for non-native
 * strings it could exceed MAX_SALT_LEN and overrun cs.salt — verify that
 * valid() always rejects such inputs first. */
len = (strrchr(ciphertext, '$') - ciphertext) / 2;
for (i = 0; i < len; i++)
cs.salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) |
atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
if (len < 230) {
// return our memset buffer (putting the dyna salt pointer into it).
// This keeps the 'pre-cleaned salt' warning from hitting this format)
//return pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct));
memcpy((char*)(&cs), pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct)), pDynamicFmt->params.salt_size);
dyna_salt_seen=1;
return &cs;
}
cs.magic = MAGIC;
cs.length = len;
return &cs;
}
/* Decode the trailing 32-hex-digit MD5 into a static, word-aligned buffer.
 * Strings already in $dynamic_39$ form are decoded by the thin format. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	const char *hex;
	int i;

	if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
		/* the dynamic format already returns the proper 16 bytes */
		return pDynamicFmt->methods.binary(ciphertext);

	hex = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; ++i, hex += 2)
		buf.c[i] = (unsigned char)((atoi16[ARCH_INDEX(hex[0])] << 4) |
		                           atoi16[ARCH_INDEX(hex[1])]);
	return buf.c;
}
/* Hash-table probes: delegate to dynamic_39 for dynamic salts, otherwise mask
 * the first word of our locally computed digest at increasing widths. */
static int get_hash_0(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[0](index); return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[1](index); return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[2](index); return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[3](index); return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[4](index); return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[5](index); return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[6](index); return crypt_out[index][0] & 0x7ffffff; }
/* Bind the salt used by subsequent crypt_all()/cmp_*() calls; dynamic salts
 * are forwarded to the linked dynamic_39 format as well. */
static void set_salt(void *salt)
{
	get_ptr();
	cur_salt = (struct custom_salt *)salt;
	if (cur_salt->magic == MAGIC)
		return;
	pDynamicFmt->methods.set_salt(salt);
}
/* Compute MD5(salt || key) for every queued key (RFC 2385-style keyed MD5).
 * The key is hashed at its full PLAINTEXT_LENGTH buffer width; set_key()
 * zero-pads it, which is what the protocol requires. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
/* dynamic salt: whole crypt is delegated to dynamic_39 */
if (cur_salt->magic != MAGIC) {
return pDynamicFmt->methods.crypt_all(pcount, salt);
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
MD5_Update(&ctx, saved_key[index], PLAINTEXT_LENGTH);
MD5_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
/* Quick screen: does any computed digest share its first 32 bits with the
 * target binary? Dynamic salts are screened by dynamic_39 instead. */
static int cmp_all(void *binary, int count)
{
	int i;

	if (cur_salt->magic != MAGIC)
		return pDynamicFmt->methods.cmp_all(binary, count);
	for (i = 0; i < count; ++i) {
		if (crypt_out[i][0] == ((ARCH_WORD_32 *)binary)[0])
			return 1;
	}
	return 0;
}
/* Full 16-byte digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	if (cur_salt->magic == MAGIC)
		return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
	return pDynamicFmt->methods.cmp_one(binary, index);
}
/* cmp_one already compares the full digest, so nothing further to verify. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Store a candidate key. If a dynamic salt has been seen, mirror the key into
 * the dynamic_39 format too, since either engine may end up hashing it. */
static void netmd5_set_key(char *key, int index)
{
if(dyna_salt_seen)
pDynamicFmt->methods.set_key(key, index);
/* strncpy will pad with zeros, which is needed */
/* (crypt_all hashes the full PLAINTEXT_LENGTH buffer, so the NUL padding
 * is part of the hashed message, not just string hygiene) */
strncpy(saved_key[index], key, sizeof(saved_key[0]));
}
static char *get_key(int index)
{
return saved_key[index];
}
static char *prepare(char *fields[10], struct fmt_main *self) {
static char buf[sizeof(cur_salt->salt)*2+TAG_LENGTH+1];
char *hash = fields[1];
if (strncmp(hash, FORMAT_TAG, TAG_LENGTH) && valid(hash, self)) {
get_ptr();
if (text_in_dynamic_format_already(pDynamicFmt, hash))
return hash;
sprintf(buf, "%s%s", FORMAT_TAG, hash);
return buf;
}
return hash;
}
/* Format descriptor registered with the John core: parameters first, then the
 * method table wiring the functions defined above. */
struct fmt_main fmt_netmd5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
set_salt,
netmd5_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
/* Lazily create the thin link to dynamic_39 (idempotent). Also adopts the
 * dynamic format's min/max keys-per-crypt and advertises both algorithm
 * names, since either engine may do the hashing. */
static void get_ptr() {
if (!pDynamicFmt) {
char *Buf;
/* link a private copy of this format so dynamic_39 can patch it freely */
pNetMd5_Dyna = mem_alloc_tiny(sizeof(fmt_netmd5), 16);
memcpy(pNetMd5_Dyna, &fmt_netmd5, sizeof(fmt_netmd5));
pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetMd5_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-md5", 0);
fmt_netmd5.params.min_keys_per_crypt = pDynamicFmt->params.min_keys_per_crypt;
fmt_netmd5.params.max_keys_per_crypt = pDynamicFmt->params.max_keys_per_crypt;
Buf = mem_alloc_tiny(strlen(fmt_netmd5.params.algorithm_name) + 4 + strlen("dynamic_39") + 1, 1);
sprintf(Buf, "%s or %s", fmt_netmd5.params.algorithm_name, "dynamic_39");
fmt_netmd5.params.algorithm_name = Buf;
//pDynamicFmt->methods.init(pDynamicFmt);
}
}
/* Format init: link dynamic_39 first (it may adjust our key counts), then
 * allocate per-candidate key and digest arrays sized to max_keys_per_crypt. */
static void init(struct fmt_main *self)
{
// We have to allocate our dyna_39 object first, because we get 'modified' min/max counts from there.
get_ptr();
if (self->private.initialized == 0) {
/* second link call with init=1 actually initializes the dynamic format */
pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetMd5_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-md5", 1);
self->private.initialized = 1;
}
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
#endif /* plugin stanza */
|
Kernel_3d_GDZ.h | #ifndef KRIPKE_KERNEL_3D_GDZ_H__
#define KRIPKE_KERNEL_3D_GDZ_H__
#include<Kripke/Kernel.h>
#include<Grid.h>
/// Kernel specialization using Group/Direction/Zone (GDZ) loop nesting.
/// The call operator performs one diamond-difference sweep step; see below.
class Kernel_3d_GDZ : public Kernel {
public:
/// Sweep result: the three boundary planes (i, j, k), each a flat vector.
typedef std::vector<std::vector<double>> result_type;
// Grid is needed to access metadata (e.g. gd_sets) stored on it.
Grid_Data* grid_data;
int group_set;       // index into grid_data->gd_sets() (outer dim)
int direction_set;   // index into grid_data->gd_sets() (inner dim)
Kernel_3d_GDZ(Grid_Data*);
virtual ~Kernel_3d_GDZ();
virtual Nesting_Order nestingPsi(void) const;
virtual Nesting_Order nestingPhi(void) const;
virtual void LTimes(Grid_Data *grid_data);
virtual void LPlusTimes(Grid_Data *grid_data);
/// Sweep one zone set; defined below in this header.
template<typename GridView, typename IPlane, typename JPlane, typename KPlane>
result_type
operator()(GridView& grid_view, IPlane const& i_plane, JPlane const& j_plane,
KPlane const& k_plane);
/// STAPL serialization hook: ship the raw members across locations.
void define_type(stapl::typer& t)
{
t.member(grid_data);
t.member(group_set);
t.member(direction_set);
}
};
/* Sweep routine for Diamond-Difference */
/* Macros for offsets with fluxes on cell faces */
#define I_PLANE_INDEX(j, k) (k)*(local_jmax) + (j)
#define J_PLANE_INDEX(i, k) (k)*(local_imax) + (i)
#define K_PLANE_INDEX(i, j) (j)*(local_imax) + (i)
#define Zonal_INDEX(i, j, k) (i) + (local_imax)*(j) \
+ (local_imax)*(local_jmax)*(k)
/// Diamond-difference transport sweep over one spatial block, GDZ nesting:
/// groups outermost (OpenMP-parallel), then directions, then the k/j/i zone
/// loops in the octant's sweep order. Incoming boundary fluxes arrive in the
/// three *_plane_in views; the (possibly updated) planes are returned.
template<typename GridView, typename IPlane, typename JPlane, typename KPlane>
std::vector<std::vector<double>>
Kernel_3d_GDZ::operator()(GridView& grid_view, IPlane const& i_plane_in,
JPlane const& j_plane_in, KPlane const& k_plane_in)
{
typedef std::array<typename GridView::value_type::property_type::
storage_type::index, 2> index_type;
result_type result(3);
// local working copies of the three incoming boundary planes
std::vector<double> i_plane = i_plane_in[0];
std::vector<double> j_plane = j_plane_in[0];
std::vector<double> k_plane = k_plane_in[0];
// grid_data, group_set, and direction_set are data members of the Kernel.
Group_Dir_Set& gd_set = grid_data->gd_sets()[group_set][direction_set];
int num_directions = gd_set.num_directions;
int num_groups = gd_set.num_groups;
Directions *direction = gd_set.directions;
int local_imax = grid_data->nzones()[0];
int local_jmax = grid_data->nzones()[1];
int local_kmax = grid_data->nzones()[2];
auto dx = grid_data->deltas(0);
auto dy = grid_data->deltas(1);
auto dz = grid_data->deltas(2);
// All directions have same id,jd,kd, since these are all one Direction Set
// So pull that information out now
int octant = direction[0].octant;
Grid_Sweep_Block const &extent = grid_data->octant_extent()[octant];
#ifdef KRIPKE_USE_OPENMP
#pragma omp parallel for
#endif
for (int group = 0; group < num_groups; ++group) {
// per-axis precomputed 2*cos/delta factors for the current direction
std::vector<double> xcos_dxi_all(local_imax);
std::vector<double> ycos_dyj_all(local_jmax);
std::vector<double> zcos_dzk_all(local_kmax);
index_type sigt_idx{{gd_set.group0+group, 0}};
for (int d = 0; d < num_directions; ++d) {
index_type psi_idx{{group, d}};
index_type rhs_idx{{group, d}};
int plane_idx = num_directions * num_groups + d * num_groups + group;
double xcos = direction[d].xcos;
double ycos = direction[d].ycos;
double zcos = direction[d].zcos;
for (int i = 0; i < local_imax; ++i) {
double dxi = dx[i + 1];
xcos_dxi_all[i] = 2.0 * xcos / dxi;
}
for (int j = 0; j < local_jmax; ++j) {
double dyj = dy[j + 1];
ycos_dyj_all[j] = 2.0 * ycos / dyj;
}
for (int k = 0; k < local_kmax; ++k) {
double dzk = dz[k + 1];
zcos_dzk_all[k] = 2.0 * zcos / dzk;
}
/* Perform transport sweep of the grid 1 cell at a time. */
for (int k = extent.start_k; k != extent.end_k; k += extent.inc_k) {
double zcos_dzk = zcos_dzk_all[k];
for (int j = extent.start_j; j != extent.end_j; j += extent.inc_j) {
double ycos_dyj = ycos_dyj_all[j];
for (int i = extent.start_i; i != extent.end_i; i += extent.inc_i) {
double xcos_dxi = xcos_dxi_all[i];
/* Calculate new zonal flux */
// get a reference to the vertex being processed.
int z = Zonal_INDEX(i, j, k);
auto v = (*grid_view.find_vertex(z)).property();
// NOTE(review): plane lookups multiply the face index by plane_idx;
// an additive (group,direction) offset would be the usual layout —
// confirm against the reference Kripke sweep before relying on this.
double psi_lf_g_d_z = i_plane[I_PLANE_INDEX(j, k) * plane_idx];
double psi_fr_g_d_z = j_plane[J_PLANE_INDEX(i, k) * plane_idx];
double psi_bo_g_d_z = k_plane[K_PLANE_INDEX(i, j) * plane_idx];
auto psi_z = v.psi()[group_set][direction_set];
auto rhs_z = v.rhs()[group_set][direction_set];
double psi_g_d_z = (rhs_z(rhs_idx)
+ psi_lf_g_d_z * xcos_dxi
+ psi_fr_g_d_z * ycos_dyj
+ psi_bo_g_d_z * zcos_dzk)
/ (xcos_dxi + ycos_dyj + zcos_dzk
+ v.sigt()(sigt_idx));
psi_z(psi_idx) = psi_g_d_z;
/* Apply diamond-difference relationships */
psi_g_d_z *= 2.0;
// NOTE(review): these updated face fluxes are only stored in locals
// and never written back into i_plane/j_plane/k_plane, so the planes
// returned below are identical to the inputs — verify this is the
// intended handoff for the STAPL sweep driver.
psi_lf_g_d_z = psi_g_d_z - psi_lf_g_d_z;
psi_fr_g_d_z = psi_g_d_z - psi_fr_g_d_z;
psi_bo_g_d_z = psi_g_d_z - psi_bo_g_d_z;
}
}
}
}
}
result[0] = std::move(i_plane);
result[1] = std::move(j_plane);
result[2] = std::move(k_plane);
return result;
}
#endif
|
a.27.1.c | /* { dg-do compile } */
/* OpenMP spec example A.27: a private clause variable (a) may be re-listed
 * as private in a nested parallel region. Compile-only dg test — the loop
 * body is intentionally empty. */
void
a27 ()
{
int i, a;
#pragma omp parallel private(a)
{
#pragma omp parallel for private(a)
for (i = 0; i < 10; i++)
{
/* do work here */
}
}
}
|
omp_sections.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * Print the loop counter ten times, tagged with the calling OpenMP thread id.
 * Fixes vs. original: "%li" was used with int arguments (undefined behavior —
 * the correct conversion for int is "%d"), and the function was declared int
 * but fell off the end without returning a value.
 */
int work1()
{
	int j, tid;

	tid = omp_get_thread_num();
	for (j = 0; j < 10; j++)
		printf("The value of j as printed by work 1, thread %d = %d\n", tid, j);
	return 0;
}
/*
 * Print the loop counter ten times, tagged with the calling OpenMP thread id.
 * Fixes vs. original: "%li" with int arguments is undefined behavior ("%d" is
 * correct), and the int function was missing its return statement.
 */
int work2()
{
	int j, tid;

	tid = omp_get_thread_num();
	for (j = 0; j < 10; j++)
		printf("The value of j as printed by work 2, thread %d = %d\n", tid, j);
	return 0;
}
/*
 * Print "Work 3" five times.
 * Fixes vs. original: the int function fell off the end without a return
 * value (UB if the caller ever used it); the five copy-pasted printf calls
 * are collapsed into a loop producing identical output.
 */
int work3()
{
	int i;

	for (i = 0; i < 5; i++)
		printf("Work 3\n");
	return 0;
}
/*
 * Print "Work 4" five times.
 * Fixes vs. original: missing return in an int function; repeated printf
 * calls replaced by a loop with identical output.
 */
int work4()
{
	int i;

	for (i = 0; i < 5; i++)
		printf("Work 4\n");
	return 0;
}
/*
 * Drive the work routines from an OpenMP parallel sections construct:
 * work1() forms the first (implicit) section, work2()+work3() share the
 * second, and work4() runs in the third; the three sections may execute
 * concurrently on different threads.
 * Fixes vs. original: implicit-int "main()" is invalid in C99 and later —
 * declared int main(void) with an explicit return.
 */
int main(void)
{
#pragma omp parallel sections
	{
		work1();
#pragma omp section
		{
			work2();
			work3();
		}
#pragma omp section
		{
			work4();
		}
	}
	return 0;
}
|
__clang_openmp_device_functions.h | /*===- __clang_openmp_device_functions.h - OpenMP device function declares -===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
#define __CLANG_OPENMP_DEVICE_FUNCTIONS_H__
#ifndef _OPENMP
#error "This file is for OpenMP compilation only."
#endif
#ifdef __cplusplus
extern "C" {
#endif
#pragma omp begin declare variant match( \
device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
#define __CUDA__
#define __OPENMP_NVPTX__
/// Include declarations for libdevice functions.
#include <__clang_cuda_libdevice_declares.h>
/// Provide definitions for these functions.
#include <__clang_cuda_device_functions.h>
#undef __OPENMP_NVPTX__
#undef __CUDA__
#pragma omp end declare variant
#ifdef __AMDGCN__
#pragma omp begin declare variant match(device = {arch(amdgcn)})
// Import types which will be used by __clang_hip_libdevice_declares.h
#ifndef __cplusplus
#include <stdbool.h>
#include <stdint.h>
#endif
#define __OPENMP_AMDGCN__
#pragma push_macro("__device__")
#define __device__
/// Include declarations for libdevice functions.
#include <__clang_hip_libdevice_declares.h>
#pragma pop_macro("__device__")
#undef __OPENMP_AMDGCN__
#pragma omp end declare variant
#endif
#ifdef __cplusplus
} // extern "C"
#endif
// Ensure we make `_ZdlPv`, aka. `operator delete(void*)` available without the
// need to `include <new>` in C++ mode.
#ifdef __cplusplus
// We require malloc/free.
#include <cstdlib>
#pragma push_macro("OPENMP_NOEXCEPT")
#if __cplusplus >= 201103L
#define OPENMP_NOEXCEPT noexcept
#else
#define OPENMP_NOEXCEPT
#endif
// Device overrides for non-placement new and delete.
// (Vendored clang offloading header: these route device-side new/delete to
// the malloc/free provided by the OpenMP device runtime. Kept byte-identical
// apart from comments.)
inline void *operator new(__SIZE_TYPE__ size) {
if (size == 0)
size = 1; // zero-size new must still return a unique, freeable pointer
return ::malloc(size);
}
inline void *operator new[](__SIZE_TYPE__ size) { return ::operator new(size); }
inline void operator delete(void *ptr)OPENMP_NOEXCEPT { ::free(ptr); }
inline void operator delete[](void *ptr) OPENMP_NOEXCEPT {
::operator delete(ptr);
}
// Sized delete, C++14 only.
#if __cplusplus >= 201402L
// The size hint is ignored; plain delete already knows how to free.
inline void operator delete(void *ptr, __SIZE_TYPE__ size)OPENMP_NOEXCEPT {
::operator delete(ptr);
}
inline void operator delete[](void *ptr, __SIZE_TYPE__ size) OPENMP_NOEXCEPT {
::operator delete(ptr);
}
#endif
#pragma pop_macro("OPENMP_NOEXCEPT")
#endif
#endif
|
comm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Copyright (c) 2015 by Contributors
*/
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication (abstract interface)
 */
class Comm {
public:
Comm() {
pinned_ctx_ = Context::CPU(0);
}
virtual ~Comm() { }
/**
 * \brief init key with the data shape and storage shape
 */
virtual void Init(int key, const NDArrayStorageType stype,
const mxnet::TShape& shape, int dtype = mshadow::kFloat32) = 0;
/**
 * \brief returns src[0] + .. + src[src.size()-1]
 */
virtual const NDArray& Reduce(
int key, const std::vector<NDArray>& src, int priority) = 0;
/**
 * \brief copy from src to dst[i] for every i
 */
virtual void Broadcast(
int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) = 0;
/**
 * \brief broadcast src to dst[i] with target row_ids for every i
 * \param key the identifier key for the stored ndarray
 * \param src the source row_sparse ndarray to broadcast
 * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
 where the row_ids are expected to be unique and sorted in row_id.data()
 * \param priority the priority of the operation
 */
virtual void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) = 0;
/**
 * \brief return a pinned context
 */
Context pinned_ctx() const {
return pinned_ctx_;
}
/**
 * \brief Sets gradient compression parameters to be able to
 * perform reduce with compressed gradients
 */
void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
gc_ = gc;
}
protected:
Context pinned_ctx_;          // CPU context with pinned (page-locked) memory
std::shared_ptr<GradientCompression> gc_;  // null unless compression enabled
};
/**
* \brief an implemention of Comm that first copy data to CPU memeory, and then
* reduce there
*/
class CommCPU : public Comm {
public:
// Read tuning knobs from the environment: reduction thread count, the size
// threshold above which reduction is parallelized, and a serial-push flag.
CommCPU() {
nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
// TODO(junwu) delete the following data member, now for benchmark only
is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
}
virtual ~CommCPU() { }
// Register a key: create its (lazily allocated) dense merge buffer on the
// pinned CPU context.
void Init(int key, const NDArrayStorageType stype, const mxnet::TShape& shape,
int type = mshadow::kFloat32) override {
// Delayed allocation - the dense merged buffer might not be used at all if push()
// only sees sparse arrays
bool delay_alloc = true;
merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type);
}
// Sum src[0..n-1] into this key's merge buffer on the CPU and return it.
// Dense inputs use the threaded ReduceSumCPU path; sparse inputs use either
// the serial row-sparse reducer or ndarray::ElementwiseSum. All real work is
// queued on the engine; only the dependency graph is built here.
const NDArray& Reduce(int key, const std::vector<NDArray>& src,
int priority) override {
auto& buf = merge_buf_[key];
const auto stype = src[0].storage_type();
// avoid extra copy for single device, but it may bring problems for
// abnormal usage of kvstore
if (src.size() == 1) {
if (stype == kDefaultStorage) {
return src[0];
} else {
// With 'local' kvstore, we could store the weight on CPU while compute
// the gradient on GPU when the weight is extremely large.
// To avoiding copying the weight to the same context of the gradient,
// we always copy the gradient to merged buf.
NDArray& merged = buf.merged_buf(stype);
CopyFromTo(src[0], &merged, priority);
return merged;
}
}
NDArray& buf_merged = buf.merged_buf(stype);
// normal dense reduce
if (stype == kDefaultStorage) {
std::vector<Engine::VarHandle> const_vars(src.size() - 1);
std::vector<NDArray> reduce(src.size());
// src[0] lands directly in the merge buffer; the rest go via copy_buf
CopyFromTo(src[0], &buf_merged, priority);
reduce[0] = buf_merged;
if (buf.copy_buf.empty()) {
buf.copy_buf.resize(src.size()-1);
for (size_t j = 0; j < src.size() - 1; ++j) {
// allocate copy buffer
buf.copy_buf[j] = NDArray(
src[0].shape(), pinned_ctx_, false, src[0].dtype());
}
}
CHECK(stype == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << stype << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 1; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
reduce[i] = buf.copy_buf[i-1];
const_vars[i-1] = reduce[i].var();
}
// in-place accumulation into reduce[0] (the merge buffer)
Engine::Get()->PushAsync(
[reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
ReduceSumCPU(reduce);
on_complete();
}, Context::CPU(), const_vars, {reduce[0].var()},
FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
} else {
// sparse reduce
std::vector<Engine::VarHandle> const_vars(src.size());
std::vector<NDArray> reduce(src.size());
if (buf.copy_buf.empty()) {
buf.copy_buf.resize(src.size());
for (size_t j = 0; j < src.size(); ++j) {
buf.copy_buf[j] = NDArray(
src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype());
}
}
CHECK(stype == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << stype << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 0; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
reduce[i] = buf.copy_buf[i];
const_vars[i] = reduce[i].var();
}
// temp workspace for ElementwiseSum on the merge buffer's context
Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(),
ResourceRequest(ResourceRequest::kTempSpace));
Engine::Get()->PushAsync(
[reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
NDArray out = buf_merged;
is_serial_push_?
ReduceSumCPUExSerial(reduce, &out)
: mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
on_complete();
}, Context::CPU(), const_vars, {buf_merged.var(), rsc.var},
FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
}
return buf_merged;
}
// Copy src to every dst. CPU-resident src is copied directly; otherwise it
// is staged through this key's pinned-CPU merge buffer first.
void Broadcast(int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) override {
int mask = src.ctx().dev_mask();
if (mask == Context::kCPU) {
for (auto d : dst) CopyFromTo(src, d, priority);
} else {
// First copy data to pinned_ctx, then broadcast.
// Note that kv.init initializes the data on pinned_ctx.
// This branch indicates push() with ndarrays on gpus were called,
// and the source is copied to gpu ctx.
// Also indicates that buffers are already initialized during push().
auto& buf = merge_buf_[key].merged_buf(src.storage_type());
CopyFromTo(src, &buf, priority);
for (auto d : dst) CopyFromTo(buf, d, priority);
}
}
// For each (out, row_id) pair, retain only the requested rows of the
// row-sparse src into out. Both src and the row indices must live on CPU.
void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) override {
using namespace mshadow;
CHECK_EQ(src.storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row-sparse src NDArray";
CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
<< "BroadcastRowSparse with src on gpu context not supported";
for (const auto& dst_kv : dst) {
NDArray* out = dst_kv.first;
NDArray row_id = dst_kv.second;
CHECK_EQ(out->storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row_sparse dst NDArray";
CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
<< "BroadcastRowSparse with row_indices on gpu context not supported";
// retain according to unique indices
// write directly into out when safe (same ctx, distinct variable);
// otherwise retain into a fresh CPU buffer and copy afterwards
const bool is_same_ctx = out->ctx() == src.ctx();
const bool is_diff_var = out->var() != src.var();
NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out :
NDArray(kRowSparseStorage, src.shape(), src.ctx(), true,
src.dtype(), src.aux_types());
if (!is_diff_var) {
common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
"refers to the same NDArray as the one stored in KVStore."
"Performing row_sparse_pull() with such output is going to change the "
"data stored in KVStore. Incorrect result may be generated "
"next time row_sparse_pull() is called. To avoid such an issue,"
"consider create a new NDArray buffer to store the output.");
}
Engine::Get()->PushAsync(
[=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
const TBlob& indices = row_id.data();
NDArray temp = retained_cpu; // get rid the of const qualifier
op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
src, indices, kWriteTo,
&temp);
on_complete();
}, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()},
FnProperty::kNormal, priority, "KVStoreSparseRetain");
// if retained_cpu == out, CopyFromTo will ignore the copy operation
CopyFromTo(retained_cpu, out, priority);
}
}
private:
// reduce sum into val[0]
// Gathers raw data pointers for all inputs (which must be contiguous and of
// identical dtype/shape) and dispatches to the typed ReduceSumCPUImpl.
inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
std::vector<DType*> dptr(in_data.size());
for (size_t i = 0; i < in_data.size(); ++i) {
TBlob data = in_data[i].data();
CHECK(data.CheckContiguous());
dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
}
size_t total = in_data[0].shape().Size();
ReduceSumCPUImpl(dptr, total);
});
}
// serial implementation of reduce sum for row sparse NDArray.
// Merges the sorted row indices of all inputs, allocates out with the union
// of non-zero rows, then accumulates matching rows via a k-way merge (each
// input keeps a cursor in offsets[]).
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
using namespace rowsparse;
using namespace mshadow;
auto stype = out->storage_type();
CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
size_t total_num_rows = 0;
size_t num_in = in.size();
// skip the ones with empty indices and values
std::vector<bool> skip(num_in, false);
// the values tensor of the inputs
MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
// offset to the values tensor of all inputs
std::vector<size_t> offsets(num_in, 0);
std::vector<size_t> num_rows(num_in, 0);
for (size_t i = 0; i < num_in; i++) {
if (!in[i].storage_initialized()) {
skip[i] = true;
continue;
}
auto size = in[i].aux_shape(kIdx).Size();
num_rows[i] = size;
total_num_rows += size;
in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
}
std::vector<IType> indices;
indices.reserve(total_num_rows);
// gather indices from all inputs
for (size_t i = 0; i < num_in; i++) {
for (size_t j = 0; j < num_rows[i]; j++) {
indices.emplace_back(in_indices[i][j]);
}
}
CHECK_EQ(indices.size(), total_num_rows);
// dedup indices
std::sort(indices.begin(), indices.end());
indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
// the one left are unique non-zero rows
size_t nnr = indices.size();
// allocate memory for output
out->CheckAndAlloc({Shape1(nnr)});
auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
auto val_data = out->data().FlatTo2D<cpu, DType>();
for (size_t i = 0; i < nnr; i++) {
// copy indices back
idx_data[i] = indices[i];
// 'zeros' tracks whether val_data[i] still needs its first write;
// the first contributor is copied, later ones are accumulated
bool zeros = true;
for (size_t j = 0; j < num_in; j++) {
if (skip[j]) continue;
size_t offset = offsets[j];
if (offset < num_rows[j]) {
if (indices[i] == in_indices[j][offset]) {
if (zeros) {
Copy(val_data[i], in_vals[j][offset], nullptr);
zeros = false;
} else {
val_data[i] += in_vals[j][offset];
}
offsets[j] += 1;
}
}
}
}
});
});
}
// Accumulate dptr[1..n-1][offset..offset+size) into dptr[0], four inputs per
// iteration; the switch handles the 1/2/3-input tail of the unrolling.
template<typename DType>
inline static void ReduceSumCPU(
const std::vector<DType*> &dptr, size_t offset, index_t size) {
using namespace mshadow; // NOLINT(*)
Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
for (size_t i = 1; i < dptr.size(); i+=4) {
switch (dptr.size() - i) {
case 1: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
in_0 += in_1;
break;
}
case 2: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
in_0 += in_1 + in_2;
break;
}
case 3: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
in_0 += in_1 + in_2 + in_3;
break;
}
default: {
// four or more remaining: consume exactly four this iteration
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
in_0 += in_1 + in_2 + in_3 + in_4;
break;
}
}
}
}
// Top-level typed reduction: run serially for small arrays, otherwise split
// [0, total) into fixed-size chunks reduced in parallel with OpenMP.
template<typename DType>
inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
long ntask = (total + step - 1) / step; // NOLINT(*)
if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
ReduceSumCPU(dptr, 0, total);
} else {
#pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
for (long j = 0; j < ntask; ++j) { // NOLINT(*)
size_t k = static_cast<size_t>(j);
size_t begin = std::min(k * step, total);
size_t end = std::min((k + 1) * step, total);
if (j == ntask - 1) CHECK_EQ(end, total);
ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
}
}
}
/// \brief temporary space for pushing and pulling, one entry per key
struct BufferEntry {
/// \brief the merged value
NDArray merged;
/// \brief the cpu buffer for gpu data
std::vector<NDArray> copy_buf;
/// \brief the merged buffer for the given storage type
/// (dense requests return `merged`; row-sparse requests lazily create a
/// separate row-sparse buffer with the same shape/ctx/dtype)
inline NDArray& merged_buf(NDArrayStorageType stype) {
if (stype == kDefaultStorage) {
return merged;
}
CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
// check if sparse_merged is initialized
if (sparse_merged.is_none()) {
CHECK(!merged.is_none());
sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
true, merged.dtype());
}
return sparse_merged;
}
private:
/// \brief the sparse merged value
NDArray sparse_merged;
};
std::unordered_map<int, BufferEntry> merge_buf_;  // per-key staging buffers
size_t bigarray_bound_;   // element-count threshold for parallel reduction
int nthread_reduction_;   // OpenMP threads used by ReduceSumCPUImpl
bool is_serial_push_;     // benchmark-only: force serial sparse reduce
};
/**
* \brief an implementation of Comm that performs reduction on device
* directly.
*
* It is faster if the total device-to-device bandwidths is larger than
* device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device
* memory.
*/
class CommDevice : public Comm {
public:
// Buffers and P2P access are set up lazily on the first Reduce (see
// InitBuffersAndComm), so construction only clears the flag.
CommDevice() {
inited_ = false;
}
virtual ~CommDevice() { }
// Record the key's attributes; actual buffer allocation is deferred until
// the devices involved are known (first Reduce call).
void Init(int key, const NDArrayStorageType stype, const mxnet::TShape& shape,
int dtype = mshadow::kFloat32) override {
sorted_key_attrs_.emplace_back(key, shape, dtype);
inited_ = false;
}
// One-time setup on first reduce: allocate merge buffers on the contexts
// actually pushed from, and enable GPU peer-to-peer access unless disabled
// via MXNET_ENABLE_GPU_P2P=0.
void InitBuffersAndComm(const std::vector<NDArray>& src) {
if (!inited_) {
std::vector<Context> devs;
for (const auto& a : src) {
devs.push_back(a.ctx());
}
InitMergeBuffer(devs);
if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
EnableP2P(devs);
}
}
}
// Row-sparse device reduce: stage every input into per-key copy buffers on
// the merge buffer's context, then ElementwiseSum them into the buffer.
const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src,
int priority) {
auto& buf = merge_buf_[key];
std::vector<NDArray> reduce(src.size());
const NDArrayStorageType stype = src[0].storage_type();
NDArray& buf_merged = buf.merged_buf(stype);
if (buf.copy_buf.empty()) {
// initialize buffer for copying during reduce
buf.copy_buf.resize(src.size());
for (size_t j = 0; j < src.size(); ++j) {
buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype());
}
}
CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 0; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
reduce[i] = buf.copy_buf[i];
}
ElementwiseSum(reduce, &buf_merged, priority);
return buf_merged;
}
// Device-side reduce: sum src into this key's on-device merge buffer.
// Routes to the compressed path when gradient compression is configured,
// and to ReduceRowSparse for sparse inputs.
const NDArray& Reduce(int key, const std::vector<NDArray>& src,
int priority) override {
// when this reduce is called from kvstore_dist, gc is not set
// we don't do compression twice in dist_sync_device
if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) {
return ReduceCompressed(key, src, priority);
}
// avoid extra copy for single device, but it may bring problems for
// abnormal usage of kvstore
if (src.size() == 1) {
return src[0];
}
InitBuffersAndComm(src);
auto& buf = merge_buf_[key];
const NDArrayStorageType stype = src[0].storage_type();
NDArray& buf_merged = buf.merged_buf(stype);
// normal dense reduce
if (stype == kDefaultStorage) {
CopyFromTo(src[0], &buf_merged, priority);
std::vector<NDArray> reduce(src.size());
reduce[0] = buf_merged;
if (buf.copy_buf.empty()) {
// TODO(mli) this results in large device memory usage for huge ndarray,
// such as the largest fullc in VGG. consider to do segment reduce with
// NDArray.Slice or gpu direct memory access. for the latter, we need to
// remove some ctx check, and also it reduces 20% perf
buf.copy_buf.resize(src.size()-1);
for (size_t i = 0; i < src.size()-1; ++i) {
buf.copy_buf[i] = NDArray(
buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype());
}
}
for (size_t i = 0; i < src.size()-1; ++i) {
CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
reduce[i+1] = buf.copy_buf[i];
}
ElementwiseSum(reduce, &buf_merged, priority);
} else {
// sparse reduce
buf_merged = ReduceRowSparse(key, src, priority);
}
return buf_merged;
}
// Reduce `src` into this key's dense merge buffer using gradient compression:
// each gradient is quantized on its producing device (with a per-device
// residual), the small compressed tensor is moved to the merge buffer's
// context, dequantized there, and the dequantized copies are summed.
//
// \param key       key whose merge buffer receives the sum
// \param src       per-device gradients to reduce
// \param priority  engine priority propagated to every copy/compute op
// \return the dense merged NDArray for this key
const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src,
                                int priority) {
  InitBuffersAndComm(src);
  auto& buf = merge_buf_[key];
  std::vector<NDArray> reduce(src.size());
  if (buf.copy_buf.empty()) {
    // one buf for each context, allocated once and reused across calls
    buf.copy_buf.resize(src.size());
    buf.compressed_recv_buf.resize(src.size());
    buf.compressed_send_buf.resize(src.size());
    buf.residual.resize(src.size());
    for (size_t i = 0; i < src.size(); ++i) {
      buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(),
                                false, buf.merged.dtype());
      // residual lives on the producer's context and starts at zero
      buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(),
                                false, buf.merged.dtype());
      buf.residual[i] = 0;
      int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size());
      // send buffer on the producer's context, recv buffer on the merge
      // buffer's context; only the compressed tensor crosses devices
      buf.compressed_recv_buf[i] = NDArray(mxnet::TShape{small_size}, buf.merged.ctx(),
                                           false, buf.merged.dtype());
      buf.compressed_send_buf[i] = NDArray(mxnet::TShape{small_size}, src[i].ctx(),
                                           false, buf.merged.dtype());
    }
  }
  for (size_t i = 0; i < src.size(); ++i) {
    // compress before copy
    // this is done even if the data is on same context as copy_buf because
    // we don't want the training to be biased towards data on this GPU
    gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority);
    if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) {
      CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority);
    } else {
      // avoid memory copy when they are on same context
      buf.compressed_recv_buf[i] = buf.compressed_send_buf[i];
    }
    gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority);
    reduce[i] = buf.copy_buf[i];
  }
  // Fix: propagate the caller's priority to the final accumulation; it was
  // previously dropped here, unlike the dense and row-sparse reduce paths.
  ElementwiseSum(reduce, &buf.merged, priority);
  return buf.merged;
}
void Broadcast(int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) override {
if (!inited_) {
// copy to a random device first
int dev_id = key % dst.size();
CopyFromTo(src, dst[dev_id], priority);
for (size_t i = 0; i < dst.size(); ++i) {
if (i != static_cast<size_t>(dev_id)) {
CopyFromTo(*dst[dev_id], dst[i], priority);
}
}
} else {
auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type());
CopyFromTo(src, &buf_merged, priority);
for (auto d : dst) {
CopyFromTo(buf_merged, d, priority);
}
}
}
// Broadcast selected rows of the row-sparse `src` to each destination:
// for every (out, row_id) pair, retain from `src` the rows listed in
// row_id into a staging array on src's context, then copy to `out`.
// The retain kernel is pushed to the engine asynchronously.
void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) override {
CHECK_EQ(src.storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row-sparse src NDArray";
for (const auto& dst_kv : dst) {
NDArray* out = dst_kv.first;
NDArray row_id = dst_kv.second;
CHECK_EQ(out->storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row_sparse dst NDArray";
CHECK_EQ(row_id.ctx(), src.ctx())
<< "row_id and src are expected to be on the same context";
// retain according to indices
// Writing straight into *out is only safe if it is on src's context AND
// is a different variable from src; otherwise stage in a fresh array.
const bool is_same_ctx = out->ctx() == src.ctx();
const bool is_diff_var = out->var() != src.var();
NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out :
NDArray(kRowSparseStorage, out->shape(), src.ctx(), true,
out->dtype(), out->aux_types());
if (!is_diff_var) {
common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
"refers to the same NDArray as the one stored in KVStore."
"Performing row_sparse_pull() with such output is going to change the "
"data stored in KVStore. Incorrect result may be generated "
"next time row_sparse_pull() is called. To avoid such an issue,"
"consider create a new NDArray buffer to store the output.");
}
bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask;
// Capture by value: the lambda runs asynchronously after this scope ends.
// Reads {src, row_id}, writes {retained_gpu}.
Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
const TBlob& indices = row_id.data();
using namespace mxnet::common;
NDArray temp = retained_gpu;
switch (temp.ctx().dev_mask()) {
case cpu::kDevMask: {
SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(),
src, indices, kWriteTo, &temp);
break;
}
#if MXNET_USE_CUDA
case gpu::kDevMask: {
SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(),
src, indices, kWriteTo, &temp);
// wait for GPU operations to complete
rctx.get_stream<gpu>()->Wait();
break;
}
#endif
default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
}
on_complete();
}, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()},
is_gpu ? FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized,
priority, "KVStoreSparseRetain");
// No-op when retained_gpu aliases *out (same var); otherwise moves the
// staged rows to the destination context.
CopyFromTo(retained_gpu, out, priority);
}
}
// (key, shape, dtype) triple registered by Init().
using KeyAttrs = std::tuple<int, mxnet::TShape, int>;
// try to allocate buff on device evenly
// Greedy load balancing: keys are processed largest-first and each is placed
// on the device currently holding the fewest buffered elements.
void InitMergeBuffer(const std::vector<Context>& devs) {
std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
const KeyAttrs& a, const KeyAttrs& b) {
return std::get<1>(a).Size() > std::get<1>(b).Size();
});
// dev_id -> (context, number of elements already assigned to it).
// NOTE(review): keying by dev_id alone assumes devs don't mix device types
// with equal ids — confirm against callers.
std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
for (auto d : devs) {
ctx_info[d.dev_id] = std::make_pair(d, 0);
}
for (auto& sorted_key_attr : sorted_key_attrs_) {
const int key = std::get<0>(sorted_key_attr);
const mxnet::TShape& shape = std::get<1>(sorted_key_attr);
const int type = std::get<2>(sorted_key_attr);
auto& buf = merge_buf_[key];
Context ctx;
size_t min_size = std::numeric_limits<size_t>::max();
// Pick the least-loaded device (<= keeps the last tie, iteration order
// of the unordered_map is unspecified).
for (auto& ctx_info_kv : ctx_info) {
size_t size = ctx_info_kv.second.second;
if (size <= min_size) {
ctx = ctx_info_kv.second.first;
min_size = size;
}
}
// Delayed allocation - as the dense merged buffer might not be used at all if push()
// only sees sparse arrays
if (buf.merged.is_none()) {
bool delay_alloc = true;
buf.merged = NDArray(shape, ctx, delay_alloc, type);
}
ctx_info[ctx.dev_id].second += shape.Size();
}
inited_ = true;
}
private:
// Enable CUDA peer-to-peer access between every ordered pair of GPUs in
// `devs`.  No-op without CUDA.  Prints a warning (with an access matrix,
// 'v' = enabled) if any pair could not be enabled.
void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
std::vector<int> gpus;
for (const auto& d : devs) {
if (d.dev_mask() == gpu::kDevMask) {
gpus.push_back(d.dev_id);
}
}
int n = static_cast<int>(gpus.size());
int enabled = 0;
std::vector<int> p2p(n*n);
for (int i = 0; i < n; ++i) {
// Restores active device to what it was before EnableP2P
mxnet::common::cuda::DeviceStore device_store(gpus[i]);
for (int j = 0; j < n; j++) {
int access;
cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
if (access) {
cudaDeviceEnablePeerAccess(gpus[j], 0);
// "already enabled" and "runtime unloading" are not real failures
cudaError_t e = cudaGetLastError();
if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled ||
e == cudaErrorCudartUnloading) {
++enabled;
p2p[i*n+j] = 1;
} else {
LOG(FATAL) << cudaGetErrorString(e);
}
}
}
}
// A fully connected set has n*(n-1) ordered pairs (self-pairs excluded).
if (enabled != n*(n-1)) {
// print warning info if not fully enabled
LOG(WARNING) << "only " << enabled << " out of "
<< n*(n-1) << " GPU pairs are enabled direct access. "
<< "It may affect the performance. "
<< "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
std::string access(n, '.');
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
access[j] = p2p[i*n+j] ? 'v' : '.';
}
LOG(WARNING) << access;
}
}
#endif
}
/// \brief temporal space for pushing and pulling
struct BufferEntry {
  /// \brief the dense merged value for reduce and broadcast operations
  NDArray merged;
  /// \brief the gpu buffer for copy during reduce operation
  std::vector<NDArray> copy_buf;
  /// \brief the residual buffer for gradient compression
  std::vector<NDArray> residual;
  /// \brief the small buffer for compressed data in sender
  std::vector<NDArray> compressed_send_buf;
  /// \brief the small buffer for compressed data in receiver
  std::vector<NDArray> compressed_recv_buf;
  /// \brief the merged buffer for the given storage type (could be either dense or row_sparse)
  /// Dense requests require `merged` to exist already (set by InitMergeBuffer);
  /// row-sparse requests lazily create a sparse twin of `merged` with the same
  /// shape/dtype on the same context.
  inline NDArray& merged_buf(NDArrayStorageType stype) {
    if (stype == kDefaultStorage) {
      // fixed misspelled diagnostic ("unintialized")
      CHECK(!merged.is_none()) << "uninitialized merge buffer detected";
      return merged;
    }
    CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
    // check if sparse_merged is initialized
    if (sparse_merged.is_none()) {
      CHECK(!merged.is_none());
      sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
                              true, merged.dtype());
    }
    return sparse_merged;
  }
 private:
  /// \brief the sparse merged value for reduce and rowsparse broadcast operations
  NDArray sparse_merged;
};
// per-key staging buffers used by Reduce/Broadcast
std::unordered_map<int, BufferEntry> merge_buf_;
public:
// whether InitMergeBuffer() has run since the last Init() call
bool inited_;
// (key, shape, dtype) of every registered key; sorted largest-first by
// InitMergeBuffer()
std::vector<KeyAttrs> sorted_key_attrs_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_COMM_H_
|
GB_binop__pow_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pow_uint32
// A.*B function (eWiseMult): GB_AemultB__pow_uint32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__pow_uint32
// C+=b function (dense accum): GB_Cdense_accumb__pow_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_uint32
// C=scalar+B GB_bind1st__pow_uint32
// C=scalar+B' GB_bind1st_tran__pow_uint32
// C=A+scalar GB_bind2nd__pow_uint32
// C=A'+scalar GB_bind2nd_tran__pow_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_pow_uint32 (aij, bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_pow_uint32 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_UINT32 || GxB_NO_POW_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled: POW is not in that list, so no dense C += A+B kernel is generated
// for this operator; "(none)" is the code generator's placeholder name.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the numeric loop comes from the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__pow_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* controls
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the slice
// arrays to parallelize over `ntasks` tasks on `nthreads` threads.
GrB_Info GB_Cdense_accumB__pow_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed type-erased via p_bwork) into a dense
// matrix.
GrB_Info GB_Cdense_accumb__pow_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above always returns; harmless
// redundancy produced by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no colscale kernel is generated for POW; "(none)" is the code
// generator's placeholder name.
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled: no rowscale kernel is generated for POW.  Fixed the placeholder
// name "(node)" to the generator's conventional "(none)", matching the
// colscale placeholder above and the file's header comment.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B; pattern is the union of A and B, numeric
// phase supplied by the included template (GB_PHASE_2_OF_2 is defined above).
GrB_Info GB_AaddB__pow_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B; pattern is the intersection of A and B.
GrB_Info GB_AemultB__pow_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = pow (x, Bx [p]) for all p: apply the operator with the scalar
// bound to the first argument, over `anz` entries in parallel.
GrB_Info GB_bind1st__pow_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t bij = Bx [p] ;
Cx [p] = GB_pow_uint32 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = pow (Ax [p], y) for all p: apply the operator with the scalar
// bound to the second argument, over `anz` entries in parallel.
GrB_Info GB_bind2nd__pow_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
Cx [p] = GB_pow_uint32 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = GB_pow_uint32 (x, aij) ; \
}
// C = pow (x, A'): transpose A and apply the operator with the scalar bound
// to the first argument; the transpose loop comes from GB_unop_transpose.c,
// which applies GB_CAST_OP (redefined just above) to each entry.
GrB_Info GB_bind1st_tran__pow_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the translation unit
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = GB_pow_uint32 (aij, y) ; \
}
// C = pow (A', y): transpose A and apply the operator with the scalar bound
// to the second argument (GB_CAST_OP redefined just above supplies the body).
GrB_Info GB_bind2nd_tran__pow_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ast-dump-openmp-target-parallel.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() { // trailing comments only: new lines would shift the source locations the CHECK lines assert
#pragma omp target parallel // directive under test
; // NullStmt serving as the directive's structured block
} // AST dump of this function is matched by the CHECK lines below
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPTargetParallelDirective {{.*}} <line:4:1, col:28>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <line:5:3>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <line:5:3>
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
|
CutPursuit_KL.h | #pragma once
#include "Common.h"
#include "CutPursuit.h"
namespace CP {
template <typename T>
class CutPursuit_KL : public CutPursuit<T>
{
public:
// Nothing KL-specific to release; base-class destructor handles cleanup.
~CutPursuit_KL() {}
// Energy of the current solution as a (fidelity, penalty) pair:
//  - first : weighted, smoothed Kullback-Leibler divergence between each
//            vertex's observation and its current value
//  - second: regularization cost of active real edges (the 0.5 factor halves
//            each edge's contribution — presumably because edges are stored
//            in both orientations; confirm against the graph construction)
virtual std::pair<T,T> compute_energy() override
{
    VertexAttributeMap<T> vert_map
        = boost::get(boost::vertex_bundle, this->main_graph);
    EdgeAttributeMap<T> edge_map
        = boost::get(boost::edge_bundle, this->main_graph);
    T fidelity = 0;
    for (VertexIterator<T> ite = boost::vertices(this->main_graph).first;
         ite != this->lastIterator; ++ite)
    {
        for (uint32_t d = 0; d < this->dim; d++)
        {
            // Mix both distributions with the uniform one so log() stays finite.
            T obs = this->parameter.smoothing / this->dim
                + (1 - this->parameter.smoothing)
                * vert_map(*ite).observation[d];
            T val = this->parameter.smoothing / this->dim
                + (1 - this->parameter.smoothing)
                * vert_map(*ite).value[d];
            fidelity += obs * (log(obs) - log(val)) * vert_map(*ite).weight;
        }
    }
    T penalty = 0;
    EdgeIterator<T> edge_end = boost::edges(this->main_graph).second;
    for (EdgeIterator<T> ite = boost::edges(this->main_graph).first;
         ite != edge_end; ++ite)
    {
        if (edge_map(*ite).realEdge)
        {
            penalty += .5 * edge_map(*ite).isActive * this->parameter.reg_strenth
                * edge_map(*ite).weight;
        }
    }
    return std::pair<T,T>(fidelity, penalty);
}
//=============================================================================================
//============================= SPLIT ===========================================
//=============================================================================================
// Refine the partition: each component is split into B / notB by alternating
// (a) recomputing the two per-component centroids h_1, h_2 and (b) solving a
// max-flow problem whose min-cut reassigns vertices, for flow_steps rounds.
// Returns the saturation count reported by activate_edges().
virtual uint32_t split() override
{ // split the graph by trying to find the best binary partition
// each components is split into B and notB
// for each components we associate the value h_1 and h_2 to vertices in B or notB
// the affectation as well as h_1 and h_2 are computed alternatively
//tic();
//--------loading structures---------------------------------------------------------------
TimeStack ts; ts.tic();
uint32_t nb_comp = this->components.size();
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
uint32_t saturation;
//initialize h_1 and h_2 with kmeans
//stores whether each vertex is B or notB
std::vector<bool> binary_label(this->nVertex);
this->init_labels(binary_label);
VectorOfCentroids<T> centers(nb_comp, this->dim);
//-----main loop----------------------------------------------------------------
// the optimal flow is iteratively approximated
for (uint32_t i_step = 1; i_step <= this->parameter.flow_steps; i_step++)
{
//compute h_1 and h_2 (centroids rebuilt from scratch each round)
centers = VectorOfCentroids<T>(nb_comp, this->dim);
this->compute_centers(centers, binary_label);
// update the capacities of the flow graph
this->set_capacities(centers);
//compute flow
boost::boykov_kolmogorov_max_flow(
this->main_graph,
get(&EdgeAttribute<T>::capacity , this->main_graph),
get(&EdgeAttribute<T>::residualCapacity, this->main_graph),
get(&EdgeAttribute<T>::edge_reverse , this->main_graph),
get(&VertexAttribute<T>::color , this->main_graph),
get(boost::vertex_index , this->main_graph),
this->source,
this->sink);
// read back the min-cut: a vertex joins B iff it ended on the sink side
for (uint32_t i_com = 0; i_com < nb_comp; i_com++)
{
if (this->saturated_components[i_com])
{
continue;
}
for (uint32_t i_ver = 0; i_ver < this->components[i_com].size(); i_ver++)
{
binary_label[vertex_index_map(this->components[i_com][i_ver])]
= (vertex_attribute_map(this->components[i_com][i_ver]).color
== vertex_attribute_map(this->sink).color);
}
}
}
saturation = this->activate_edges();
return saturation;
}
//=============================================================================================
//============================= INIT_KL ===================================================
//=============================================================================================
inline void init_labels(std::vector<bool> & binary_label)
{ //-----initialize the labelling for each components with kmeans------------------------------
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
std::vector< std::vector<T> > kernels(2, std::vector<T>(this->dim));
std::vector< std::vector<T> > smooth_kernels(2, std::vector<T>(this->dim));
T total_weight[2];
uint32_t nb_comp = this->components.size();
T best_energy, current_energy;
//#pragma omp parallel for private(kernels, total_weight, best_energy, current_energy) if (this->parameter.parallel && nb_comp>8) schedule(dynamic)
for (uint32_t i_com = 0; i_com < nb_comp; i_com++)
{
uint32_t comp_size = this->components[i_com].size();
std::vector<bool> potential_label(comp_size);
std::vector<T> energy_array(comp_size);
std::vector<T> constant_part(comp_size);
std::vector< std::vector<T> > smooth_obs(comp_size, std::vector<T>(2,0));
if (this->saturated_components[i_com] || comp_size <= 1)
{
continue;
}
//KL fidelity has a part that depends
//purely on the observation that can be precomputed
//#pragma omp parallel for if (this->parameter.parallel && nb_comp<=8) schedule(dynamic)
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
constant_part[i_ver] = 0;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
smooth_obs[i_ver][i_dim] = 0;
}
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
smooth_obs[i_ver][i_dim] =
this->parameter.smoothing / this->dim
+ (1 - this->parameter.smoothing)
* vertex_attribute_map(this->components[i_com][i_ver]).observation[i_dim];
constant_part[i_ver] += smooth_obs[i_ver][i_dim]
* log(smooth_obs[i_ver][i_dim])
* vertex_attribute_map(this->components[i_com][i_ver]).weight;
}
}
for (uint32_t init_kmeans = 0; init_kmeans < this->parameter.kmeans_resampling; init_kmeans++)
{
//----- initialization with KM++ ------------------
// first kernel chosen randomly
uint32_t first_kernel = std::rand() % comp_size, second_kernel = 0;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{ //fill the first kernel
kernels[0][i_dim] = vertex_attribute_map(this->components[i_com][first_kernel ]).observation[i_dim];
smooth_kernels[0][i_dim] = this->parameter.smoothing
/ this->dim + (1 - this->parameter.smoothing)
* kernels[0][i_dim];
}
//now compute the square distance of each pouint32_t to this kernel
best_energy = 0; //energy total
//#pragma omp parallel for if (this->parameter.parallel && nb_comp<=8) schedule(dynamic)
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
energy_array[i_ver] = constant_part[i_ver];
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
energy_array[i_ver] -=
smooth_obs[i_ver][i_dim]
* log(smooth_kernels[0][i_dim])
* vertex_attribute_map(this->components[i_com][i_ver]).weight;
}
energy_array[i_ver] = pow(energy_array[i_ver],2);
best_energy += energy_array[i_ver];
} // we now generate a random number to determinate which node will be the second kernel
if (best_energy==0)
{ //all the points in this components are identical
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
binary_label[vertex_index_map(this->components[i_com][i_ver])] = false;
}
break;
}
//we now choose the second kernel with a probability
//proportional to the square distance
T random_sample = ((T)(rand())) / ((T)(RAND_MAX));
current_energy = best_energy * random_sample;
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
current_energy -= energy_array[i_ver];
if (current_energy < 0)
{ //we have selected the second kernel
second_kernel = i_ver;
break;
}
}
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{ // now fill the second kernel
kernels[1][i_dim] = vertex_attribute_map(this->components[i_com][second_kernel]).observation[i_dim];
smooth_kernels[1][i_dim] = this->parameter.smoothing
/ this->dim + (1 - this->parameter.smoothing)
* kernels[1][i_dim];
}
//----main kmeans loop-----
for (uint32_t ite_kmeans = 0; ite_kmeans < this->parameter.kmeans_ite; ite_kmeans++)
{
//--affectation step: associate each node with its closest kernel-------------------
//#pragma omp parallel for if (this->parameter.parallel && nb_comp<=8) schedule(dynamic)
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
//the distance to each kernel
std::vector<T> distance_kernels(2, constant_part[i_ver]);
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
distance_kernels[0] -= smooth_obs[i_ver][i_dim]
* log(smooth_kernels[0][i_dim])
* vertex_attribute_map(this->components[i_com][i_ver]).weight;
distance_kernels[1] -= smooth_obs[i_ver][i_dim]
* log(smooth_kernels[1][i_dim])
* vertex_attribute_map(this->components[i_com][i_ver]).weight;
}
potential_label[i_ver] = distance_kernels[0] > distance_kernels[1];
}
//-----computation of the new kernels----------------------------
total_weight[0] = 0.;
total_weight[1] = 0.;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
kernels[0][i_dim] = 0;
kernels[1][i_dim] = 0;
}
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
if (vertex_attribute_map(this->components[i_com][i_ver]).weight==0)
{
continue;
}
if (potential_label[i_ver])
{
total_weight[0] += vertex_attribute_map(this->components[i_com][i_ver]).weight;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
kernels[0][i_dim] +=
vertex_attribute_map(this->components[i_com][i_ver]).observation[i_dim]
* vertex_attribute_map(this->components[i_com][i_ver]).weight ;
}
}
else
{
total_weight[1] += vertex_attribute_map(this->components[i_com][i_ver]).weight;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
kernels[1][i_dim] +=
vertex_attribute_map(this->components[i_com][i_ver]).observation[i_dim]
* vertex_attribute_map(this->components[i_com][i_ver]).weight;
}
}
}
if ((total_weight[0] == 0)||(total_weight[1] == 0))
{
std::cout << "kmeans error" << std::endl;
}
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
kernels[0][i_dim] = kernels[0][i_dim] / total_weight[0];
kernels[1][i_dim] = kernels[1][i_dim] / total_weight[1];
smooth_kernels[0][i_dim] = this->parameter.smoothing
/ this->dim + (1 - this->parameter.smoothing)
* kernels[0][i_dim];
smooth_kernels[1][i_dim] = this->parameter.smoothing
/ this->dim + (1 - this->parameter.smoothing)
* kernels[1][i_dim];
}
}
//----compute the associated energy ------
current_energy = 0;
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
current_energy += constant_part[i_ver];
if (potential_label[i_ver])
{
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
current_energy -= smooth_obs[i_ver][i_dim]
* log(smooth_kernels[0][i_dim])
* vertex_attribute_map(this->components[i_com][i_ver]).weight;
}
}
else
{
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
current_energy -= smooth_obs[i_ver][i_dim]
* log(smooth_kernels[1][i_dim])
* vertex_attribute_map(this->components[i_com][i_ver]).weight;
}
}
}
if (current_energy < best_energy)
{
best_energy = current_energy;
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
binary_label[vertex_index_map(this->components[i_com][i_ver])] = potential_label[i_ver];
}
}
}
}
}
//=============================================================================================
//============================= COMPUTE_CENTERS_KL ==========================================
//=============================================================================================
inline void compute_centers(VectorOfCentroids<T> & centers, const std::vector<bool> & binary_label)
{ // Compute, for every non-saturated component, the two weighted centroids
  // (side 0 = vertices labelled true, side 1 = the others). A side with zero
  // total weight means the component cannot be split: it is saturated and
  // both centroids are set to the component value.
    VertexAttributeMap<T> vertex_attribute_map
            = boost::get(boost::vertex_bundle, this->main_graph);
    VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
    uint32_t nb_comp = this->components.size();
    for (uint32_t i_com = 0; i_com < nb_comp; i_com++)
    {
        if (this->saturated_components[i_com])
        {
            continue; // frozen component: centroids are not recomputed
        }
        T mass[2];
        mass[0] = 0.;
        mass[1] = 0.;
        auto & comp = this->components[i_com];
        for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
        {
            centers.centroids[i_com][0][i_dim] = 0.;
            centers.centroids[i_com][1][i_dim] = 0.;
        }
        for (uint32_t i_ver = 0; i_ver < comp.size(); i_ver++)
        {
            const auto & attributes = vertex_attribute_map(comp[i_ver]);
            if (attributes.weight == 0)
            {
                continue; // zero-weight vertices do not contribute
            }
            // side 0 accumulates vertices labelled true, side 1 the others
            const uint32_t side = binary_label[vertex_index_map(comp[i_ver])] ? 0 : 1;
            mass[side] += attributes.weight;
            for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
            {
                centers.centroids[i_com][side][i_dim] +=
                        attributes.observation[i_dim] * attributes.weight;
            }
        }
        if ((mass[0] == 0) || (mass[1] == 0))
        {
            // one side is empty: the component is saturated
            this->saturateComponent(i_com);
            for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
            {
                centers.centroids[i_com][0][i_dim] = vertex_attribute_map(comp.back()).value[i_dim];
                centers.centroids[i_com][1][i_dim] = vertex_attribute_map(comp.back()).value[i_dim];
            }
        }
        else
        {
            // normalize the accumulated sums into weighted means
            for (uint32_t i_dim = 0; i_dim < this->dim; i_dim++)
            {
                centers.centroids[i_com][0][i_dim] = centers.centroids[i_com][0][i_dim] / mass[0];
                centers.centroids[i_com][1][i_dim] = centers.centroids[i_com][1][i_dim] / mass[1];
            }
        }
    }
    return;
}
//=============================================================================================
//============================= SET_CAPACITIES ==========================================
//=============================================================================================
inline void set_capacities(const VectorOfCentroids<T> & centers)
{
    // Fill the flow-graph capacities for the upcoming max-flow/min-cut:
    // terminal edges (source->v, v->sink) carry each vertex's smoothed-KL
    // cost difference between centroid B and notB; vertex-to-vertex edges
    // carry the regularization strength.
    VertexAttributeMap<T> vertex_attribute_map
            = boost::get(boost::vertex_bundle, this->main_graph);
    EdgeAttributeMap<T> edge_attribute_map
            = boost::get(boost::edge_bundle, this->main_graph);
    VertexDescriptor<T> desc_v;
    EdgeDescriptor desc_source2v, desc_v2sink, desc_v2source;
    uint32_t nb_comp = this->components.size();
    T cost_B, cost_notB, smoothedValueB, smoothedValueNotB, smoothedObservation; //the cost of being in B or not B, local for each component
    //----first compute the capacity in sink/node edges------------------------------------
    //#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
    for (uint32_t i_com = 0; i_com < nb_comp; i_com++)
    {
        if (this->saturated_components[i_com])
        {
            continue; // saturated components are excluded from the cut
        }
        for (uint32_t i_ver = 0; i_ver < this->components[i_com].size(); i_ver++)
        {
            desc_v = this->components[i_com][i_ver];
            // because of the adjacency structure NEVER access edge (source,v) directly!
            desc_v2source = boost::edge(desc_v, this->source,this->main_graph).first;
            desc_source2v = edge_attribute_map(desc_v2source).edge_reverse; //use edge_reverse instead
            desc_v2sink = boost::edge(desc_v, this->sink,this->main_graph).first;
            cost_B = 0;
            cost_notB = 0;
            if (vertex_attribute_map(desc_v).weight==0)
            {
                // zero-weight vertex: no data term, both terminal capacities vanish
                edge_attribute_map(desc_source2v).capacity = 0;
                edge_attribute_map(desc_v2sink).capacity = 0;
                continue;
            }
            for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
            {
                // additive smoothing keeps every argument of log() positive
                smoothedObservation =
                        this->parameter.smoothing / this->dim
                        + (1 - this->parameter.smoothing)
                        * vertex_attribute_map(desc_v).observation[i_dim];
                smoothedValueB =
                        this->parameter.smoothing / this->dim
                        + (1 - this->parameter.smoothing)
                        * centers.centroids[i_com][0][i_dim];
                smoothedValueNotB =
                        this->parameter.smoothing / this->dim
                        + (1 - this->parameter.smoothing)
                        * centers.centroids[i_com][1][i_dim];
                // per-dimension KL-divergence contribution of the observation
                // against each centroid
                cost_B += smoothedObservation
                        * (log(smoothedObservation)
                        - log(smoothedValueB));
                cost_notB += smoothedObservation
                        * (log(smoothedObservation)
                        - log(smoothedValueNotB));
            }
            // only the nonnegative cost difference matters for the min-cut:
            // load it on one terminal edge and zero the other
            if (cost_B>cost_notB)
            {
                edge_attribute_map(desc_source2v).capacity = cost_B - cost_notB;
                edge_attribute_map(desc_v2sink).capacity = 0.;
            }
            else
            {
                edge_attribute_map(desc_source2v).capacity = 0.;
                edge_attribute_map(desc_v2sink).capacity = cost_notB - cost_B;
            }
        }
    }
    //----then set the vertex to vertex edges ---------------------------------------------
    EdgeIterator<T> i_edg, i_edg_end;
    for (boost::tie(i_edg, i_edg_end) = boost::edges(this->main_graph);
         i_edg != i_edg_end; ++i_edg)
    {
        if (!edge_attribute_map(*i_edg).realEdge)
        {
            continue; // not a vertex-to-vertex edge; leave untouched
        }
        if (!edge_attribute_map(*i_edg).isActive)
        {
            edge_attribute_map(*i_edg).capacity
                    = edge_attribute_map(*i_edg).weight * this->parameter.reg_strenth;
        }
        else
        {
            // active edges get zero capacity
            // NOTE(review): presumably these were already cut earlier — confirm
            edge_attribute_map(*i_edg).capacity = 0;
        }
    }
}
//=============================================================================================
//================================= COMPUTE_VALUE =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_value(const uint32_t & i_com) override
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
T total_weight = 0;
std::vector<T> compValue(this->dim);
std::fill((compValue.begin()),(compValue.end()),0);
for (uint32_t ind_ver = 0; ind_ver < this->components[i_com].size(); ++ind_ver)
{
total_weight += vertex_attribute_map(this->components[i_com][ind_ver]).weight;
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
compValue[i_dim] += vertex_attribute_map(this->components[i_com][ind_ver]).observation[i_dim]
* vertex_attribute_map(this->components[i_com][ind_ver]).weight;
}
vertex_attribute_map(this->components[i_com][ind_ver]).in_component = i_com;
}
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
compValue[i_dim] = compValue[i_dim] / total_weight;
}
for (uint32_t ind_ver = 0; ind_ver < this->components[i_com].size(); ++ind_ver)
{
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
vertex_attribute_map(this->components[i_com][ind_ver]).value[i_dim] = compValue[i_dim];
}
}
return std::pair<std::vector<T>, T>(compValue, total_weight);
}
//=============================================================================================
//================================= COMPUTE_MERGE_GAIN =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_merge_gain(const VertexDescriptor<T> & comp1
, const VertexDescriptor<T> & comp2) override
{
VertexAttributeMap<T> reduced_vertex_attribute_map
= boost::get(boost::vertex_bundle, this->reduced_graph);
std::vector<T> merge_value(this->dim);
T gain = 0, smoothedValue1, smoothedValue2, smoothedValueMerged;
// compute the value obtained by mergeing the two connected components
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
merge_value[i_dim] =
(reduced_vertex_attribute_map(comp1).weight *
reduced_vertex_attribute_map(comp1).value[i_dim]
+reduced_vertex_attribute_map(comp2).weight *
reduced_vertex_attribute_map(comp2).value[i_dim])
/(reduced_vertex_attribute_map(comp1).weight
+reduced_vertex_attribute_map(comp2).weight);
smoothedValue1 =
this->parameter.smoothing / this->dim
+ (1 - this->parameter.smoothing)
* reduced_vertex_attribute_map(comp1).value[i_dim];
smoothedValue2 =
this->parameter.smoothing / this->dim
+ (1 - this->parameter.smoothing)
* reduced_vertex_attribute_map(comp2).value[i_dim];
smoothedValueMerged =
this->parameter.smoothing / this->dim
+ (1 - this->parameter.smoothing)
* merge_value[i_dim];
gain -= reduced_vertex_attribute_map(comp1).weight
* smoothedValue1 * (log(smoothedValue1)
- log(smoothedValueMerged))
+ reduced_vertex_attribute_map(comp2).weight
* smoothedValue2 * (log(smoothedValue2)
- log(smoothedValueMerged));
}
return std::pair<std::vector<T>, T>(merge_value, gain);
}
};
}
|
exercise7.c | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise7.c
* @author Alessandro Capotondi
* @date 27 Mar 2020
* @brief Exercise 7
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <stdio.h>
#include <omp.h>
#include "utils.h"
#if !defined(W)
#define W (1000)
#endif
#if !defined(T)
#define T (20)
#endif
/**
* @brief EX 7 - Task Parallelism w/tasks
*
* a) Parallelize with TASK directive.
* b) Parallelize with SINGLE NOWAIT directive.
* c) Compare Results with a for loop.
* @return void
*/
void exercise()
{
    /* NOTE: the loop counter is declared inside each `for` statement. In the
     * task variant this makes it private to the generating thread and hence
     * implicitly firstprivate on the task construct; the previous code kept
     * a single function-scope counter, which is shared inside the parallel
     * region — tasks then read it AFTER it had advanced (data race / wrong
     * iteration numbers), and in variant (b) all threads raced on the same
     * counter. The printf specifiers are also fixed: omp_get_thread_num()
     * returns int (%d) and the counter is unsigned int (%u); "%hu" expects
     * unsigned short and is undefined behavior for these arguments. */
#if 1
    /* a) one thread creates 16384 tasks; the whole team executes them */
#pragma omp parallel
#pragma omp single nowait
    for (unsigned int i = 0; i < 16384; i++)
    {
#pragma omp task
        {
            DEBUG_PRINT("%d: I am executing iteration %u!\n", omp_get_thread_num(), i);
            work(W);
        }
    }
#endif

#if 0
    /* b) every thread walks the whole loop; for each iteration the first
     * thread reaching the SINGLE region executes it, the rest skip past
     * without waiting (nowait) */
#pragma omp parallel
    for (unsigned int i = 0; i < 16384; i++)
    {
#pragma omp single nowait
        {
            DEBUG_PRINT("%d: I am executing iteration %u!\n", omp_get_thread_num(), i);
            work(W);
        }
    }
#endif

#if 0
    /* c) reference version: classic parallel for */
#pragma omp parallel for
    for (unsigned int i = 0; i < 16384; i++)
    {
        DEBUG_PRINT("%d: I am executing iteration %u!\n", omp_get_thread_num(), i);
        work(W);
    }
#endif
}
|
main.c | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
/*
* File: main.c
* Author: Alexandros Ioannidis
*
* Created on January 7, 2016, 1:35 PM
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
// ---------- CONSTANTS -------------------
#define VERSION "v1.1"
#define MASK_SIZE 3
#define IMAGE_GRAYSCALE_HUES 256
// ---------- GLOBAL VARIABLES ------------
// EDGE RECOGNITION MASK for Image Convolution calculation
int EDGE_MASK[][MASK_SIZE] = {
{0, 1, 0},
{1, -4, 1},
{0, 1, 0}
};
int **Image, // the total image
**AugImage, // the augmented image
**ImgConv; // image convolution
// ---------- FUNCTION PROTOTYPES for main() ----------------
void alloc_matrix(int ***data_ptr, int n, int m);
void free_matrix(int ***data_ptr, int n);
void readInputData(char*, int, int, int**);
void writeOutputData(char*, int, int, int**);
void flipHorizontal(int*, int, int, int*);
void flipVertical(int*, int, int, int*);
void print2DArray(int**, int, int);
void augmentImage(int**, int, int, int**);
void calcImgConv(int **AugImage, int **ImgConv, int rows, int cols);
/* Entry point: reads a square raw-binary image, rotates the Laplacian edge
 * mask by 180 degrees, computes the convolution inside an OpenMP parallel
 * region and writes the result, reporting total and parallel-section
 * durations in milliseconds. */
int main(int argc, char** argv) {
    if (argc != 4) {
        fprintf(stderr, "SYNTAX: %s <imagesize> <inputfile> <outputfile>\n", argv[0]);
        exit(1);
    }

    int imagesize = atoi(argv[1]); // read image size
    char *inputfile = argv[2]; // read image filename
    char *outputfile = argv[3]; // read filename for convolution

    // get start time
    double parallelStart_t, parallelEnd_t, end_t, start_t = omp_get_wtime()*1000;
    int threads;
    int FLIPPED_HOR[MASK_SIZE][MASK_SIZE];

    // Rotate EDGE_MASK by 180 degrees (horizontal then vertical flip).
    // The flip helpers take flat int* arguments, so pass the address of the
    // first element instead of the incompatible int(*)[MASK_SIZE] array
    // type that was previously passed (a C constraint violation the
    // compiler only warned about).
    flipHorizontal(&EDGE_MASK[0][0], MASK_SIZE, MASK_SIZE, &FLIPPED_HOR[0][0]);
    flipVertical(&FLIPPED_HOR[0][0], MASK_SIZE, MASK_SIZE, &EDGE_MASK[0][0]);

    // allocate memory for Image
    alloc_matrix(&Image, imagesize, imagesize);
    // read Image from file
    readInputData(inputfile, imagesize, imagesize, Image);
    // allocate memory for augmented image (one-pixel zero border)
    alloc_matrix(&AugImage, imagesize + 2, imagesize + 2);
    // augment Image
    augmentImage(Image, imagesize, imagesize, AugImage);
    // destroy image matrix
    free_matrix(&Image, imagesize);

    // get parallel start time
    parallelStart_t = omp_get_wtime()*1000;
    // allocate memory for ImgConv
    alloc_matrix(&ImgConv, imagesize, imagesize);

#pragma omp parallel
    {
#pragma omp master
        {
            threads = omp_get_num_threads();
            printf("OpenMP Image (%s) %dx%d Convolution - Threads %d\n", VERSION, imagesize, imagesize, threads);
        }
        // calculate convolution (contains an orphaned `omp for`)
        calcImgConv(AugImage, ImgConv, imagesize, imagesize);
    }
    // get parallel end time
    parallelEnd_t = omp_get_wtime()*1000;

    // destroy augmented image
    free_matrix(&AugImage, imagesize + 2);
    // write Image to file
    writeOutputData(outputfile, imagesize, imagesize, ImgConv);
    // destroy ImgConv
    free_matrix(&ImgConv, imagesize);

    // get end time
    end_t = omp_get_wtime()*1000;
    printf("\nTotal duration:\t%0.2f msecs", (end_t-start_t));
    printf("\nConvolution calculation duration:\t%0.2f msecs", (parallelEnd_t-parallelStart_t));
    printf("\n");

    return (EXIT_SUCCESS);
}
/* Allocate an n x m matrix of int and store it through data_ptr.
 * Every element is seeded with i*j so all pages are touched up-front
 * (reduces later paging jitter). Terminates the program on allocation
 * failure instead of dereferencing NULL (the previous code did not check
 * malloc's return value). */
void alloc_matrix(int ***data_ptr, int n, int m) {
    int row, i, j;
    int **data;

    data = (int **) malloc(n * sizeof (int *));
    if (data == NULL) {
        fprintf(stderr, "alloc_matrix: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (row = 0; row < n; row++) {
        data[row] = (int *) malloc(m * sizeof (int));
        if (data[row] == NULL) {
            fprintf(stderr, "alloc_matrix: out of memory\n");
            exit(EXIT_FAILURE);
        }
    }

    // touch every element so the pages are resident
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
            data[i][j] = i * j;

    *data_ptr = data;
}
/* Release a matrix previously created by alloc_matrix: free each of the
 * n row arrays first, then the row-pointer array itself. */
void free_matrix(int ***data_ptr, int n) {
    int **rows = *data_ptr;
    for (int r = 0; r < n; r++) {
        free(rows[r]);
    }
    free(rows);
}
/* Read rows x cols ints (raw binary, row-major) from file into image.
 * Silently returns if the file cannot be opened (matching the original
 * behavior); a short read is now reported and stops the loop instead of
 * being ignored. The unused `col` local was removed. */
void readInputData(char* file, int rows, int cols, int **image) {
    int row;
    FILE *fp;

    // open file for reading
    fp = fopen(file, "rb");
    if (fp == NULL) {
        return;
    }
    for (row = 0; row < rows; row++) {
        if (fread(&image[row][0], sizeof(int)*cols, 1, fp) != 1) {
            fprintf(stderr, "readInputData: short read at row %d of %s\n", row, file);
            break;
        }
    }
    fclose(fp);
}
/* Write rows x cols ints (raw binary, row-major) from image to file.
 * Silently returns if the file cannot be opened (matching the original
 * behavior); a short write is now reported and stops the loop instead of
 * being ignored. The unused `col` local was removed. */
void writeOutputData(char* file, int rows, int cols, int **image) {
    int row;
    FILE *fp;

    // open file for writing
    fp = fopen(file, "wb");
    if (fp == NULL) {
        return;
    }
    for (row = 0; row < rows; row++) {
        if (fwrite(&image[row][0], sizeof(int)*cols, 1, fp) != 1) {
            fprintf(stderr, "writeOutputData: short write at row %d of %s\n", row, file);
            break;
        }
    }
    fflush(fp);
    fclose(fp);
}
/* Mirror arr (rows x cols, flat row-major) left<->right into fliparr.
 * NOTE(review): assumes arr and fliparr do not overlap — the callers in
 * this file pass distinct buffers. */
void flipHorizontal(int *arr, int rows, int cols, int *fliparr) {
    int half = cols / 2;
    for (int r = 0; r < rows; r++) {
        int *src = arr + r * cols;
        int *dst = fliparr + r * cols;
        // swap symmetric columns; the middle column (odd cols) is written
        // twice with the same value
        for (int c = 0; c <= half; c++) {
            dst[c] = src[cols - 1 - c];
            dst[cols - 1 - c] = src[c];
        }
    }
}
/* Mirror arr (rows x cols, flat row-major) top<->bottom into fliparr.
 * NOTE(review): assumes arr and fliparr do not overlap — the callers in
 * this file pass distinct buffers. */
void flipVertical(int *arr, int rows, int cols, int *fliparr) {
    int half = rows / 2;
    for (int c = 0; c < cols; c++) {
        // swap symmetric rows; the middle row (odd rows) is written twice
        // with the same value
        for (int r = 0; r <= half; r++) {
            fliparr[r * cols + c] = arr[(rows - 1 - r) * cols + c];
            fliparr[(rows - 1 - r) * cols + c] = arr[r * cols + c];
        }
    }
}
/* Dump a rows x cols matrix to stderr, one row per line, cells formatted
 * with "%4d ". Rewritten to print cell-by-cell: the previous version built
 * the whole text in a malloc'd buffer sized on the assumption that every
 * formatted cell takes exactly 5 characters, which overflows the heap for
 * values with more than 4 digits (or negatives below -999), and it called
 * strcat without including <string.h>. */
void print2DArray(int **arr, int rows, int cols) {
    int row, col;
    for (row = 0; row < rows; row++) {
        for (col = 0; col < cols; col++) {
            fprintf(stderr, "%4d ", arr[row][col]);
        }
        fprintf(stderr, "\n");
    }
}
/* Copy image (rows x cols) into the centre of augimage and surround it with
 * a one-pixel border of zeros; augimage must be (rows+2) x (cols+2). */
void augmentImage(int **image, int rows, int cols, int **augimage) {
    int last_row = rows + 1; /* index of the bottom border row */
    int last_col = cols + 1; /* index of the right border column */
    int r, c;

    /* zero the left and right border columns */
    for (r = 0; r <= last_row; r++) {
        augimage[r][0] = 0;
        augimage[r][last_col] = 0;
    }
    /* zero the top and bottom border rows */
    for (c = 0; c <= last_col; c++) {
        augimage[0][c] = 0;
        augimage[last_row][c] = 0;
    }
    /* interior: shifted copy of the original image */
    for (r = 0; r < rows; r++)
        for (c = 0; c < cols; c++)
            augimage[r + 1][c + 1] = image[r][c];
}
/* 3x3 convolution of AugImage with the global EDGE_MASK, writing the
 * rows x cols result into ImgConv. Contains an orphaned `omp for`, so it is
 * meant to be called from inside a parallel region (as done in main). */
void calcImgConv(int **AugImage, int **ImgConv, int rows, int cols) {
#pragma omp for collapse(2)
    for (int x = 0; x < rows; x++)
        for (int y = 0; y < cols; y++) {
            int acc = 0; /* accumulate locally, store once */
            for (int k = 0; k < 3; k++)
                for (int j = 0; j < 3; j++)
                    acc += EDGE_MASK[k][j] * AugImage[x + k][y + j];
            ImgConv[x][y] = acc;
        }
}
|
integrator.c | #define _USE_MATH_DEFINES
#include <inttypes.h>
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "io.h"
#include "storage.h"
#include "integrator.h"
#include "cmontecarlo.h"
#include "omp_helper.h"
#define NULEN 0
#define LINELEN 1
#define PLEN 2
#define SHELLEN 3
#define C_INV 3.33564e-11
#define M_PI acos (-1)
#define KB_CGS 1.3806488e-16
#define H_CGS 6.62606957e-27
/**
* Calculate the intensity of a black-body according to the following formula
* .. math::
* I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\frac{1}{e^{h\\nu \\beta_\\textrm{rad}} - 1}
*/
/**
 * Planck black-body specific intensity (CGS constants):
 * .. math::
 *     I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}{e^{h\\nu / (k_B T)} - 1}
 */
double
intensity_black_body (double nu, double T)
{
  double inv_kT = 1 / (KB_CGS * T);
  double two_h_over_c2 = 2 * H_CGS * C_INV * C_INV;
  return two_h_over_c2 * nu * nu * nu / (exp (H_CGS * nu * inv_kT) - 1);
}
/*! @brief Algorithm to integrate an array using the trapezoid integration rule
*
*/
/*! @brief Trapezoid rule over N uniformly spaced samples with step h:
 *         h * (f_0/2 + f_1 + ... + f_{N-2} + f_{N-1}/2)
 */
double
trapezoid_integration (const double* array, const double h, int N)
{
  double acc = 0.5 * (array[0] + array[N - 1]);
  for (int k = 1; k + 1 < N; ++k)
    {
      acc += array[k];
    }
  return h * acc;
}
/*! @brief Calculate distance to p line
*
* Calculate half of the length of the p-line inside a shell
* of radius r in terms of unit length (c * t_exp).
* If shell and p-line do not intersect, return 0.
*
* @param r radius of the shell
* @param p distance of the p-line to the center of the supernova
* @param inv_t inverse time_explosio is needed to norm to unit-length
* @return half the lenght inside the shell or zero
*/
/* Half chord length of the p-line inside a shell of radius r, normalized to
 * unit length c * t_exp (inv_t is 1/t_exp); zero when the line misses the
 * shell (p >= r). */
static inline double
calculate_z(double r, double p, double inv_t)
{
  if (r <= p)
    {
      return 0;
    }
  return sqrt (r * r - p * p) * C_INV * inv_t;
}
/*!
* @brief Calculate p line intersections
*
* This function calculates the intersection points of the p-line with each shell
*
* @param storage (INPUT) A storage model containing the environment
* @param p (INPUT) distance of the integration line to the center
* @param oz (OUTPUT) will be set with z values. The array is truncated by the
* value `1`.
* @param oshell_id (OUTPUT) will be set with the corresponding shell_ids
* @return number of shells intersected by the p-line
*/
int64_t
populate_z(const storage_model_t *storage, const double p, double *oz, int64_t *oshell_id)
{
  // Abbreviations
  double *r = storage->r_outer_i;
  const int64_t N = storage->no_of_shells_i;
  double inv_t = storage->inverse_time_explosion;
  double z = 0;
  // offset starts at N (sentinel: "no shell reached yet"); it ends up as the
  // index of the innermost shell actually intersected by this p-line
  int64_t i = 0, offset = N, i_low, i_up;

  if (p <= storage->r_inner_i[0])
    {
      // Intersect the photosphere: the ray ends on it, so each shell is
      // crossed exactly once (only the near intersection, stored as 1 - z)
      for(i = 0; i < N; ++i)
        { // Loop from inside to outside
          oz[i] = 1 - calculate_z(r[i], p, inv_t);
          oshell_id[i] = i;
        }
      return N;
    }
  else
    {
      // No intersection with the photosphere
      // that means we intersect each shell twice
      for(i = 0; i < N; ++i)
        { // Loop from inside to outside
          z = calculate_z(r[i], p, inv_t);
          if (z == 0)
            continue; // the line misses this shell entirely
          if (offset == N)
            {
              offset = i; // first (innermost) shell reached by this line
            }
          // Calculate the index in the resulting array:
          // far intersections fill the front half in reverse shell order,
          // near intersections fill the back half in shell order
          i_low = N - i - 1;  // the far intersection with the shell
          i_up = N + i - 2 * offset;  // the nearer intersection with the shell

          // Setting the arrays; oz holds 1 +/- z so the values bracket 1
          oz[i_low] = 1 + z;
          oshell_id[i_low] = i;
          oz[i_up] = 1 - z;
          oshell_id[i_up] = i;
        }
      // two intersections for each of the (N - offset) shells that were hit
      return 2 * (N - offset);
    }
}
/*! @brief Calculate integration points
*
*/
/*! @brief Fill opp with N impact parameters uniformly spaced on [0, R_max]
 *         (trapezoid integration points: opp[0] = 0, opp[N-1] = R_max).
 */
void
calculate_p_values(double R_max, int64_t N, double *opp)
{
  const double step = R_max / (N - 1);
  for (int64_t k = 0; k < N; ++k)
    {
      opp[k] = step * k;
    }
}
/*! @brief Caculate a spectrum using the formal integral approach
*
*/
/*! @brief Calculate a spectrum using the formal integral approach
 *
 * Integrates the specific intensity along N impact parameters (p-lines)
 * for every requested frequency and reduces them with the trapezoid rule.
 *
 * @param storage model containing the environment (read-only)
 * @param iT temperature used for the inner-boundary black-body intensity
 * @param inu frequencies at which the spectrum is evaluated
 * @param inu_size number of entries in inu
 * @param att_S_ul attenuated line source functions (per line, per shell)
 * @param Jred_lu / Jblue_lu mean intensities red/blue of each line
 * @param N number of impact parameters
 * @return newly allocated array of inu_size values (caller frees)
 */
double *
_formal_integral(
    const storage_model_t *storage,
    double iT,
    double *inu, int64_t inu_size,
    double *att_S_ul, double *Jred_lu, double *Jblue_lu, int N)
{
  // Initialize the output which is shared among threads
  double *L = calloc(inu_size, sizeof(double));

  // global read-only values
  int64_t size_line = storage->no_of_lines,
          size_shell = storage->no_of_shells_i,
          size_tau = size_line * size_shell,
          finished_nus = 0;

  double R_ph = storage->r_inner_i[0];
  double R_max = storage->r_outer_i[size_shell - 1];
  double pp[N];
  double *exp_tau = calloc(size_tau, sizeof(double));

  // The integration points are identical for every thread: compute them once
  // BEFORE the parallel region. Previously every thread filled the shared pp
  // array concurrently, which is a data race even though the values agree.
  calculate_p_values(R_max, N, pp);

#pragma omp parallel firstprivate(L, exp_tau)
  {
#pragma omp master
    {
      if (omp_get_num_threads() > 1) {
        fprintf(stderr, "Doing the formal integral\nRunning with OpenMP - %d threads\n", omp_get_num_threads());
      } else {
        fprintf(stderr, "Doing the formal integral\nRunning without OpenMP\n");
      }
      print_progress_fi(0, inu_size);
    }

    // Initializing all the thread-local variables
    int64_t offset = 0, i = 0,
            size_z = 0,
            idx_nu_start = 0,
            direction = 0,
            first = 0;

    double I_nu[N],
           z[2 * storage->no_of_shells_i],
           p = 0,
           nu_start,
           nu_end,
           nu,
           zstart,
           zend,
           escat_contrib,
           escat_op,
           Jkkp;
    int64_t shell_id[2 * storage->no_of_shells_i];

    double *pexp_tau, *patt_S_ul, *pline, *pJred_lu, *pJblue_lu;

    // The p-loop below starts at p_idx = 1 because the integrand I(p) * p
    // vanishes at pp[0] == 0; set the first sample explicitly so that
    // trapezoid_integration never reads an uninitialized element.
    I_nu[0] = 0;

    // Prepare exp_tau
#pragma omp for
    for (i = 0; i < size_tau; ++i) {
      exp_tau[i] = exp( -storage->line_lists_tau_sobolevs_i[i]);
    }

    // Done with the initialization
    // Loop over wavelengths in spectrum
#pragma omp for
    for (int nu_idx = 0; nu_idx < inu_size ; ++nu_idx)
    {
      nu = inu[nu_idx];

      // Loop over discrete values along line
      for (int p_idx = 1; p_idx < N; ++p_idx)
      {
        escat_contrib = 0;
        p = pp[p_idx];

        // initialize z intersections for p values
        size_z = populate_z(storage, p, z, shell_id);

        // initialize I_nu: rays hitting the photosphere start with the
        // (Doppler-shifted) black-body intensity, others with zero
        if (p <= R_ph)
          I_nu[p_idx] = intensity_black_body(nu * z[0], iT);
        else
          I_nu[p_idx] = 0;

        // Find first contributing line
        nu_start = nu * z[0];
        nu_end = nu * z[1];
        line_search(
            storage->line_list_nu,
            nu_start,
            size_line,
            &idx_nu_start
            );
        offset = shell_id[0] * size_line;

        // start tracking accumulated e-scattering optical depth
        zstart = storage->time_explosion / C_INV * (1. - z[0]);

        // Initialize pointers
        pline = storage->line_list_nu + idx_nu_start;
        pexp_tau = exp_tau + offset + idx_nu_start;
        patt_S_ul = att_S_ul + offset + idx_nu_start;
        pJred_lu = Jred_lu + offset + idx_nu_start;
        pJblue_lu = Jblue_lu + offset + idx_nu_start;

        // flag for first contribution to integration on current p-ray
        first = 1;

        // TODO: Ugly loop
        // Loop over all intersections
        // TODO: replace by number of intersections and remove break
        for (i = 0; i < size_z - 1; ++i)
        {
          escat_op = storage->electron_densities_i[shell_id[i]] * storage->sigma_thomson;
          nu_end = nu * z[i+1];

          // TODO: e-scattering: in principle we also have to check
          // that dtau is <<1 (as assumed in Lucy 1999); if not, there
          // is the chance that I_nu_b becomes negative
          for (;pline < storage->line_list_nu + size_line;
              // We have to increment all pointers simultaneously
              ++pline,
              ++pexp_tau,
              ++patt_S_ul,
              ++pJblue_lu)
          {
            if (*pline < nu_end)
            {
              // next resonance not in current shell
              break;
            }

            // Calculate e-scattering optical depth to next resonance point
            zend = storage->time_explosion / C_INV * (1. - *pline / nu);

            if (first == 1){
              // First contribution to integration
              // NOTE: this treatment of I_nu_b (given by boundary
              // conditions) is not in Lucy 1999; should be
              // re-examined carefully
              escat_contrib += (zend - zstart) * escat_op * (*pJblue_lu - I_nu[p_idx]) ;
              first = 0;
            }
            else{
              // Account for e-scattering, c.f. Eqs 27, 28 in Lucy 1999
              Jkkp = 0.5 * (*pJred_lu + *pJblue_lu);
              escat_contrib += (zend - zstart) * escat_op * (Jkkp - I_nu[p_idx]) ;
              // this introduces the necessary offset of one element between pJblue_lu and pJred_lu
              pJred_lu += 1;
            }
            I_nu[p_idx] = I_nu[p_idx] + escat_contrib;

            // Lucy 1999, Eq 26
            I_nu[p_idx] = I_nu[p_idx] * (*pexp_tau) + *patt_S_ul;

            // reset e-scattering opacity
            escat_contrib = 0;
            zstart = zend;
          }

          // Calculate e-scattering optical depth to grid cell boundary
          Jkkp = 0.5 * (*pJred_lu + *pJblue_lu);
          zend = storage->time_explosion / C_INV * (1. - nu_end / nu);
          escat_contrib += (zend - zstart) * escat_op * (Jkkp - I_nu[p_idx]);
          zstart = zend;

          // advance pointers to the next shell along the ray (the previous
          // `if (i < size_z-1)` guard was always true inside this loop)
          direction = shell_id[i+1] - shell_id[i];
          pexp_tau += direction * size_line;
          patt_S_ul += direction * size_line;
          pJred_lu += direction * size_line;
          pJblue_lu += direction * size_line;
        }
        I_nu[p_idx] *= p;
      }
      // TODO: change integration to match the calculation of p values
      L[nu_idx] = 8 * M_PI * M_PI * trapezoid_integration(I_nu, R_max/N, N);

      // progress accounting: read the counter with the same atomicity it is
      // written with (the previous plain read after `atomic update` raced)
      int64_t done;
#pragma omp atomic capture
      done = ++finished_nus;
      if (done % 10 == 0){
        print_progress_fi(done, inu_size);
      }
    }
  }
  // These two statements previously ran INSIDE the parallel region, so with
  // more than one thread exp_tau was freed once per thread (double free /
  // undefined behavior) and the newline was printed repeatedly. They must
  // execute exactly once, after the whole team has finished.
  free(exp_tau);
  printf("\n");
  return L;
}
ep.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - EP
This benchmark is an OpenMP C version of the NPB EP code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: P. O. Frederickson
D. H. Bailey
A. C. Woo
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
#include "npbparams.h"
#include "../math/nas_math.h"
/* parameters */
#include<nautilus/nautilus.h>
#include<nautilus/shell.h>
#include<nautilus/libccompat.h>
#define MK 16
#define MM (M - MK)
#define NN (1 << MM)
#define NK (1 << MK)
#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0
#define TIMERS_ENABLED FALSE
/* global variables */
/* common /storage/ */
static double x[2*NK];
#pragma omp threadprivate(x)
static double q[NQ];
/*--------------------------------------------------------------------
program EMBAR
c-------------------------------------------------------------------*/
/*
c This is the serial version of the APP Benchmark 1,
c the "embarassingly parallel" benchmark.
c
c M is the Log_2 of the number of complex pairs of uniform (0, 1) random
c numbers. MK is the Log_2 of the size of each batch of uniform random
c numbers. MK can be set for convenience on a given system, since it does
c not affect the results.
*/
/* Forward declarations: program_EP is the benchmark body, program_EP_profile
 * the shell-facing wrapper that adds optional profiling around it. */
static int program_EP(char *buf, void* priv);
int program_EP_profile(char *_, void *__);

/* Descriptor registering the "nas-ep" command with the Nautilus shell;
 * invoking the command runs program_EP_profile. */
static struct shell_cmd_impl nas_ep_impl = {
  .cmd = "nas-ep",
  .help_str = "NAS parallel benchmark EP",
  .handler = program_EP_profile,
};
nk_register_shell_cmd(nas_ep_impl);
/* Shell-command handler for "nas-ep": runs the EP benchmark, bracketed by
 * Nautilus instrumentation when profiling support is compiled in
 * (NAUT_CONFIG_PROFILE). The clear/start ... end/query ordering around
 * program_EP is the contract; do not reorder. Always returns 0; the
 * benchmark's own return value is discarded. */
int program_EP_profile(char *_, void *__){
#ifdef NAUT_CONFIG_PROFILE
  nk_instrument_clear();
  nk_instrument_start();
#endif
  program_EP(_,__);
#ifdef NAUT_CONFIG_PROFILE
  nk_instrument_end();
  nk_instrument_query();
#endif
  return 0;
}
/*
 * program_EP - NAS "embarrassingly parallel" (EP) benchmark kernel.
 *
 * Generates 2^(M+1) uniform pseudorandom numbers in batches of 2*NK,
 * converts pairs to Gaussian deviates by acceptance-rejection, tallies
 * counts in concentric square annuli, and checks the sums sx/sy against
 * reference values for the known problem classes (M = 24/25/28/30/32).
 *
 * buf/priv: shell-command arguments (unused).
 * Returns 0 on completion; pass/fail is reported via c_print_results().
 *
 * Fixes vs. the original translation:
 *   - the function is declared int but fell off the end without a
 *     return statement; it now returns 0.
 *   - the decimal-point-stripping loop started at size[13], one byte
 *     past what sprintf initialized (indices 0..12), reading an
 *     indeterminate value; it now starts at 12.
 */
int program_EP(char *buf, void* priv) {
    double Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc;
    double dum[3] = { 1.0, 1.0, 1.0 };
    int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode,
        no_large_nodes, np_add, k_offset, j;
    int nthreads = 1;
    boolean verified;
    char size[13+1];            /* character*13 */

    /*
     * Because the size of the problem is too large to store in a 32-bit
     * integer for some classes, we put it into a string (for printing).
     * Have to strip off the decimal point put in there by the floating
     * point print statement (internal file).
     */
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
           " - EP Benchmark\n");
    sprintf(size, "%12.0f", pow(2.0, M+1));
    /* Start at index 12, not 13: sprintf wrote indices 0..12 only, so
     * size[13] is uninitialized. */
    for (j = 12; j >= 1; j--) {
        if (size[j] == '.') size[j] = ' ';
    }
    printf(" Number of random numbers generated: %13s\n", size);
    verified = FALSE;

    /*
     * Compute the number of "batches" of random number pairs generated
     * per processor. Adjust if the number of processors does not evenly
     * divide the total number.
     */
    np = NN;

    /*
     * Call the random number generator functions and initialize
     * the x-array to reduce the effects of paging on the timings.
     * Also, call all mathematical functions that are used. Make
     * sure these initializations cannot be eliminated as dead code.
     */
    vranlc(0, &(dum[0]), dum[1], &(dum[2]));
    dum[0] = randlc(&(dum[1]), dum[2]);
#pragma omp parallel for default(shared) private(i)
    for (i = 0; i < 2*NK; i++) x[i] = -1.0e99;
    Mops = log(sqrt(fabs(max(1.0, 1.0))));

    timer_clear(1);
    timer_clear(2);
    timer_clear(3);
    timer_start(1);

    vranlc(0, &t1, A, x);

    /* Compute AN = A ^ (2 * NK) (mod 2^46). */
    t1 = A;
    for (i = 1; i <= MK+1; i++) {
        t2 = randlc(&t1, t1);
    }
    an = t1;
    tt = S;
    gc = 0.0;
    sx = 0.0;
    sy = 0.0;
    for (i = 0; i <= NQ - 1; i++) {
        q[i] = 0.0;
    }

    /*
     * Each instance of this loop may be performed independently. We compute
     * the k offsets separately to take into account the fact that some nodes
     * have more numbers to generate than others.
     */
    k_offset = -1;

#pragma omp parallel copyin(x)
    {
        /* Shadow the outer scratch variables so each thread works on
         * private copies; the threadprivate x[] is copied in above. */
        double t1, t2, t3, t4, x1, x2;
        int kk, i, ik, l;
        double qq[NQ];          /* private copy of q[0:NQ-1] */

        for (i = 0; i < NQ; i++) qq[i] = 0.0;

#pragma omp for reduction(+:sx,sy) schedule(static)
        for (k = 1; k <= np; k++) {
            kk = k_offset + k;
            t1 = S;
            t2 = an;

            /* Find starting seed t1 for this kk (binary powering of the
             * RNG multiplier). */
            for (i = 1; i <= 100; i++) {
                ik = kk / 2;
                if (2 * ik != kk) t3 = randlc(&t1, t2);
                if (ik == 0) break;
                t3 = randlc(&t2, t2);
                kk = ik;
            }

            /* Compute uniform pseudorandom numbers. */
            if (TIMERS_ENABLED == TRUE) timer_start(3);
            /* x-1 compensates for vranlc's Fortran-style 1-based indexing. */
            vranlc(2*NK, &t1, A, x-1);
            if (TIMERS_ENABLED == TRUE) timer_stop(3);

            /*
             * Compute Gaussian deviates by acceptance-rejection method and
             * tally counts in concentric square annuli. This loop is not
             * vectorizable.
             */
            if (TIMERS_ENABLED == TRUE) timer_start(2);
            for (i = 0; i < NK; i++) {
                x1 = 2.0 * x[2*i] - 1.0;
                x2 = 2.0 * x[2*i+1] - 1.0;
                t1 = pow2(x1) + pow2(x2);
                if (t1 <= 1.0) {
                    t2 = sqrt(-2.0 * log(t1) / t1);
                    t3 = (x1 * t2);             /* Xi */
                    t4 = (x2 * t2);             /* Yi */
                    l = max(fabs(t3), fabs(t4)); /* annulus index (int truncation intended) */
                    qq[l] += 1.0;               /* counts */
                    sx = sx + t3;               /* sum of Xi */
                    sy = sy + t4;               /* sum of Yi */
                }
            }
            if (TIMERS_ENABLED == TRUE) timer_stop(2);
        }

        /* Merge this thread's private annulus counts into the shared q[]. */
#pragma omp critical(ep)
        {
            for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];
        }

#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    } /* end of parallel region */

    for (i = 0; i <= NQ-1; i++) {
        gc = gc + q[i];
    }

    timer_stop(1);
    tm = timer_read(1);
    nit = 0;

    /* Verify sx/sy against the published reference sums for each class. */
    if (M == 24) {
        if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) &&
           (fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 25) {
        if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) &&
            (fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 28) {
        if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) &&
            (fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 30) {
        if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) &&
            (fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 32) {
        if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) &&
            (fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    }

    /* Millions of random numbers generated per second. */
    Mops = pow(2.0, M+1)/tm/1000000.0;

    printf("EP Benchmark Results: \n"
           "CPU Time = %10.4f\n"
           "N = 2^%5d\n"
           "No. Gaussian Pairs = %15.0f\n"
           "Sums = %25.15e %25.15e\n"
           "Counts:\n",
           tm, M, gc, sx, sy);
    for (i = 0; i <= NQ-1; i++) {
        printf("%3d %15.0f\n", i, q[i]);
    }

    c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads,
                    tm, Mops,
                    "Random numbers generated",
                    verified, NPBVERSION, COMPILETIME,
                    CS1, CS2, CS3, CS4, CS5, CS6, CS7);

    if (TIMERS_ENABLED == TRUE) {
        printf("Total time: %f", timer_read(1));
        printf("Gaussian pairs: %f", timer_read(2));
        printf("Random numbers: %f", timer_read(3));
    }

    return 0;
}
|
GB_binop__islt_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__islt_int8
// A.*B function (eWiseMult): GB_AemultB__islt_int8
// A*D function (colscale): GB_AxD__islt_int8
// D*A function (rowscale): GB_DxB__islt_int8
// C+=B function (dense accum): GB_Cdense_accumB__islt_int8
// C+=b function (dense accum): GB_Cdense_accumb__islt_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_int8
// C=scalar+B GB_bind1st__islt_int8
// C=scalar+B' GB_bind1st_tran__islt_int8
// C=A+scalar GB_bind2nd__islt_int8
// C=A'+scalar GB_bind2nd_tran__islt_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij < bij)
// Type and operator plumbing consumed by the kernel templates that are
// #include'd by the functions below.  This block is emitted by the code
// generator; hand edits would be overwritten on regeneration.
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// "(none)" means no CBLAS acceleration exists for ISLT/int8.
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT8 || GxB_NO_ISLT_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled stub: the dense ewise3-accum kernel only exists for MIN, MAX,
// PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV, so the generator emitted no
// function for ISLT (hence the "(none)" placeholder under #if 0).
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Kernel body is supplied by the #include'd template, specialized via the
// GB_* macros above.  Returns GrB_NO_VALUE when this operator/type pair is
// compiled out (GB_DISABLE), letting the caller fall back to the generic
// worker.
GrB_Info GB_Cdense_ewise3_noaccum__islt_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The *_slice arrays describe a precomputed partition of B's entries into
// ntasks balanced tasks (see GB_ek_slice); the template consumes them.
GrB_Info GB_Cdense_accumB__islt_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__islt_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the inner block already returned); harmless generator artifact
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// A_is_pattern / D_is_pattern: when true the template uses only structure,
// not values, of that input.
GrB_Info GB_AxD__islt_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__islt_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Frees the ek-slice workspaces that GB_add_template.c may allocate; the
// template invokes GB_FREE_ALL on its error paths as well.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd worker: result pattern is the union of A and B, optionally
// masked by M (structural if Mask_struct, complemented if Mask_comp).
GrB_Info GB_AaddB__islt_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult worker: result pattern is the intersection of A and B.
// Uses the same GB_FREE_ALL workspace cleanup as the eWiseAdd worker above.
GrB_Info GB_AemultB__islt_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Bb is B's bitmap (NULL if B is not bitmap); GBB(Bb,p) is true when entry
// p is present.  anz is the number of entries scanned.
GrB_Info GB_bind1st__islt_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Mirror of GB_bind1st above: the scalar is bound to the second operand.
GrB_Info GB_bind2nd__islt_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB_bind1st_tran__islt_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any later kernels; no run-time effect
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
// No GB_ATYPE redefinition is needed here: A is already the 1st operand.
GrB_Info GB_bind2nd_tran__islt_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
intruder-locks.c | /* =============================================================================
*
* intruder.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "decoder.h"
#include "detector.h"
#include "dictionary.h"
#include "packet.h"
#include "stream.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
/* Option letters accepted on the command line; the ASCII code of each
 * letter doubles as its index into global_params[] below. */
enum param_types {
PARAM_ATTACK = (unsigned char)'a',
PARAM_LENGTH = (unsigned char)'l',
PARAM_NUM = (unsigned char)'n',
PARAM_SEED = (unsigned char)'s',
PARAM_THREAD = (unsigned char)'t',
};
/* Default value for each option when not given on the command line. */
enum param_defaults {
PARAM_DEFAULT_ATTACK = 10,
PARAM_DEFAULT_LENGTH = 16,
PARAM_DEFAULT_NUM = 1 << 20,
PARAM_DEFAULT_SEED = 1,
PARAM_DEFAULT_THREAD = 1,
};
/* Runtime parameter table, indexed directly by option character. */
long global_params[256] = { /* 256 = ascii limit */
[PARAM_ATTACK] = PARAM_DEFAULT_ATTACK,
[PARAM_LENGTH] = PARAM_DEFAULT_LENGTH,
[PARAM_NUM] = PARAM_DEFAULT_NUM,
[PARAM_SEED] = PARAM_DEFAULT_SEED,
[PARAM_THREAD] = PARAM_DEFAULT_THREAD,
};
/* Argument bundle handed to every worker thread by main(). */
typedef struct arg {
/* input: */
stream_t* streamPtr;
decoder_t* decoderPtr;
/* output: */
vector_t** errorVectors;
} arg_t;
/* =============================================================================
* displayUsage
* =============================================================================
*/
/*
 * displayUsage -- print the recognized options with their defaults to
 * stdout, then terminate the process with exit status 1.
 */
static void
displayUsage (const char* appName)
{
    /* One row per recognized command-line flag. */
    static const struct {
        char flag;
        const char* text;
        int dflt;
    } rows[] = {
        { 'a', "Percent [a]ttack",    PARAM_DEFAULT_ATTACK },
        { 'l', "Max data [l]ength",   PARAM_DEFAULT_LENGTH },
        { 'n', "[n]umber of flows",   PARAM_DEFAULT_NUM },
        { 's', "Random [s]eed",       PARAM_DEFAULT_SEED },
        { 't', "Number of [t]hreads", PARAM_DEFAULT_THREAD },
    };
    unsigned long r;

    printf("Usage: %s [options]\n", appName);
    puts("\nOptions: (defaults)\n");
    for (r = 0; r < sizeof(rows) / sizeof(rows[0]); r++) {
        printf(" %c <UINT> %s (%i)\n", rows[r].flag, rows[r].text, rows[r].dflt);
    }
    exit(1);
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
/*
 * parseArgs -- fill global_params[] from the command line.  Any
 * unrecognized option or stray non-option argument is counted in opterr
 * and triggers displayUsage() (which exits).
 */
static void
parseArgs (long argc, char* const argv[])
{
    long idx;
    long c;

    opterr = 0;   /* we report errors ourselves */

    while ((c = getopt(argc, argv, "a:l:n:s:t:")) != -1) {
        /* Every recognized flag stores its numeric argument in the slot
         * named by its own character code. */
        if (c == 'a' || c == 'l' || c == 'n' || c == 's' || c == 't') {
            global_params[(unsigned char)c] = atol(optarg);
        } else {
            opterr++;
        }
    }

    for (idx = optind; idx < argc; idx++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[idx]);
        opterr++;
    }

    if (opterr) {
        displayUsage(argv[0]);
    }
}
/* =============================================================================
* processPackets
* =============================================================================
*/
/*
 * processPackets -- per-thread worker.  Repeatedly pulls a packet off the
 * shared stream, feeds it to the shared flow decoder, and scans any flow
 * that became complete for attack signatures.  Flow ids of detected
 * attacks are pushed onto this thread's private error vector.  The shared
 * structures are accessed only inside TM transactions / single locks.
 */
void
processPackets (void* argPtr)
{
TM_THREAD_ENTER();
long threadId = thread_getId();
stream_t* streamPtr = ((arg_t*)argPtr)->streamPtr;
decoder_t* decoderPtr = ((arg_t*)argPtr)->decoderPtr;
vector_t** errorVectors = ((arg_t*)argPtr)->errorVectors;
/* Each thread owns a private detector; only stream/decoder are shared. */
detector_t* detectorPtr = PDETECTOR_ALLOC();
assert(detectorPtr);
PDETECTOR_ADDPREPROCESSOR(detectorPtr, &preprocessor_toLower);
vector_t* errorVectorPtr = errorVectors[threadId];
while (1) {
char* bytes;
/* NOTE(review): `locks` appears unused unless SINGLE_LOCK expands to
 * reference it -- confirm against the tm.h lock-based build. */
unsigned int locks[1];
/* Transaction 1: atomically dequeue the next packet. */
TM_BEGIN();
SINGLE_LOCK(streamPtr);
bytes = TMSTREAM_GETPACKET(streamPtr);
SINGLE_UNLOCK(streamPtr);
TM_END();
/* NULL packet => the stream is exhausted; this thread is done. */
if (!bytes) {
break;
}
packet_t* packetPtr = (packet_t*)bytes;
long flowId = packetPtr->flowId;
error_t error;
/* Transaction 2: insert the fragment into the shared decoder. */
TM_BEGIN();
error = TMDECODER_PROCESS(decoderPtr,
bytes,
(PACKET_HEADER_LENGTH + packetPtr->length));
TM_END();
if (error) {
/*
 * Currently, stream_generate() does not create these errors.
 */
assert(0);
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr, (void*)flowId);
assert(status);
}
char* data;
long decodedFlowId;
/* Transaction 3: atomically claim a fully reassembled flow, if any. */
TM_BEGIN();
SINGLE_LOCK(decoderPtr);
data = TMDECODER_GETCOMPLETE(decoderPtr, &decodedFlowId);
SINGLE_UNLOCK(decoderPtr);
TM_END();
if (data) {
/* Detection runs outside any transaction on thread-private state. */
error_t error = PDETECTOR_PROCESS(detectorPtr, data);
P_FREE(data);
if (error) {
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr,
(void*)decodedFlowId);
assert(status);
}
}
}
PDETECTOR_FREE(detectorPtr);
TM_THREAD_EXIT();
}
/* =============================================================================
* main
* =============================================================================
*/
/*
 * Program entry point (MAIN is a simulator-aware macro from thread.h).
 * Sets up the packet stream, decoder, and per-thread error vectors, runs
 * processPackets on every thread, reports the elapsed time, and tears
 * everything down.  GOTO_SIM/GOTO_REAL switch between simulated and
 * native execution in simulator builds.
 */
MAIN(argc, argv)
{
GOTO_REAL();
/*
 * Initialization
 */
parseArgs(argc, (char** const)argv);
long numThread = global_params[PARAM_THREAD];
SIM_GET_NUM_CPU(numThread);
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
long percentAttack = global_params[PARAM_ATTACK];
long maxDataLength = global_params[PARAM_LENGTH];
long numFlow = global_params[PARAM_NUM];
long randomSeed = global_params[PARAM_SEED];
printf("Percent attack = %li\n", percentAttack);
printf("Max data length = %li\n", maxDataLength);
printf("Num flow = %li\n", numFlow);
printf("Random seed = %li\n", randomSeed);
/* Build the input: a dictionary of signatures and a fragmented packet
 * stream in which roughly percentAttack%% of flows contain an attack. */
dictionary_t* dictionaryPtr = dictionary_alloc();
assert(dictionaryPtr);
stream_t* streamPtr = stream_alloc(percentAttack);
assert(streamPtr);
long numAttack = stream_generate(streamPtr,
dictionaryPtr,
numFlow,
randomSeed,
maxDataLength);
printf("Num attack = %li\n", numAttack);
decoder_t* decoderPtr = decoder_alloc();
assert(decoderPtr);
/* One error vector per thread so workers never contend on results. */
vector_t** errorVectors = (vector_t**)malloc(numThread * sizeof(vector_t*));
assert(errorVectors);
long i;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = vector_alloc(numFlow);
assert(errorVectorPtr);
errorVectors[i] = errorVectorPtr;
}
arg_t arg;
arg.streamPtr = streamPtr;
arg.decoderPtr = decoderPtr;
arg.errorVectors = errorVectors;
/*
 * Run transactions
 */
TIMER_T startTime;
TIMER_READ(startTime);
GOTO_SIM();
#ifdef OTM
#pragma omp parallel
{
processPackets((void*)&arg);
}
#else
thread_start(processPackets, (void*)&arg);
#endif
GOTO_REAL();
TIMER_T stopTime;
TIMER_READ(stopTime);
printf("\nTime = %lf\n", TIMER_DIFF_SECONDS(startTime, stopTime));
/*
 * Check solution
 */
/* Solution check is disabled in this lock-based variant; the reference
 * version validates every reported flow id against the stream. */
/*long numFound = 0;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = errorVectors[i];
long e;
long numError = vector_getSize(errorVectorPtr);
numFound += numError;
for (e = 0; e < numError; e++) {
long flowId = (long)vector_at(errorVectorPtr, e);
bool_t status = stream_isAttack(streamPtr, flowId);
assert(status);
}
}*/
/*printf("Num found = %li\n", numFound);
assert(numFound == numAttack);*/
/*
 * Clean up
 */
for (i = 0; i < numThread; i++) {
vector_free(errorVectors[i]);
}
free(errorVectors);
decoder_free(decoderPtr);
stream_free(streamPtr);
dictionary_free(dictionaryPtr);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
GOTO_SIM();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of intruder.c
*
* =============================================================================
*/
|
for_loop.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
/*
 * OMPT lit test: verifies that ompt_callback_sync_region(_wait) fires for
 * the implicit barrier at the end of a worksharing loop and at the end of
 * the parallel region, for both master and worker threads.  The CHECK
 * lines below are consumed by FileCheck and must not be altered.
 */
int main()
{
int y[] = {0,1,2,3};
#pragma omp parallel num_threads(2)
{
//implicit barrier at end of for loop
int i;
#pragma omp for
for (i = 0; i < 4; i++)
{
y[i]++;
}
/* records this code location so codeptr_ra can be matched below */
print_current_address();
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// master thread implicit barrier at loop end
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// master thread implicit barrier at parallel end
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// worker thread implicit barrier at loop end (has a valid codeptr_ra)
// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}
// worker thread implicit barrier after parallel (codeptr_ra is NULL)
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
return 0;
}
|
dnn.c | //------------------------------------------------------------------------------
// LAGraph/Test/DNN/dnn: run all neural networks from http://graphchallenge.org
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph/Test/DNN/dnn: test for LAGraph_dnn. Contributed by Tim Davis,
// Texas A&M University.
// Usage: ./build/dnn nproblems
// nproblems is the # of test problems to solve. If not present, it defaults
// to 12 (run all 12 DNN's). The problems are solved in order from small to
// big. The Makefile just runs the first and smallest problem.
// NOTE: this test currently uses many GxB_* extensions in
// SuiteSparse:GraphBLAS. It optionally uses OpenMP.
#define LAGRAPH_EXPERIMENTAL_ASK_BEFORE_BENCHMARKING
#include <LAGraph.h>
#define LAGRAPH_FREE_ALL ;
int main (int argc, char **argv)
{
//--------------------------------------------------------------------------
// start LAGraph and GraphBLAS
//--------------------------------------------------------------------------
GrB_Info info ;
LAGRAPH_OK (LAGraph_init ( )) ;
//--------------------------------------------------------------------------
// problem size definitions
//--------------------------------------------------------------------------
// The 12 problems and their sizes are hard-coded below.
// It would be better to define these from the input files, but the problem
// data files are not formatted in a way that makes this easy to do. A
// Matrix Market file format would be better (which can specify the type
// and size of each matrix), with the additional of a problem specification
// file that defines each of the 12 problems to solve.
// Each problem is defined by a set of files in the DNN_DATA directory,
// which can be obtained from http://graphchallenge.org . The simplest way
// to redefine the location of the data files is to make ./dnn_data a
// symbolic link, and leave DNN_DATA unchanged. The .gitignore file will
// prevent dnn_data from syncing to github, so you could also simply change
// ./dnn_data to a true directory and place all files there. Or, change
// the DNN_DATA macro to point to your data files.
#define DNN_DATA "./dnn_data"
// Each of the 12 problems is defined by the # of neurons at each layer, N
// = (1024, 4096, 16384, 65536), and the # of layers, L = (120, 480, or
// 1920). Each problem has the same number of features (F = 60000). The
// input files for a given problem (N,L) are as follows:
// Input feature vectors: an F-by-N sparse matrix
// ./dnn_data/MNIST/sparse-images-(N).tsv
// Neural network layers, for i = 1 to L, each an N-by-N sparse matrix:
// ./dnn_data/DNN/neuron(N)/n(N)-l(i).tsv
// True categories, a list of integers, one per line:
// ./dnn_data/DNN/neuron(N)-l(L)-categories.tsv
// The Bias vectors are defined with the single scalar, neuralNetBias[ ],
// with one scalar for each value of N. This scalar is used to construct
// the diagonal Bias matrices for each layer. All the layers share the
// same matrix, but they are treated as different matrices here. In a more
// general problem, the Bias matrices would differ for each layer and
// perhaps for each neuron. As a result, this test is not permitted to
// exploit the fact that all neurons are biased the same way.
// Note that for a given number of neurons, N, each of the 3 problems for
// different layers shares the same weight matrices for the first layers.
// That is, the first 120 layers of the (1024,480) problem are the same as
// the 120 layers of the (1024,120) problem. This is not exploited in
// LAGraph_dnn, but it is exploited here, simply to reduce the time to load
// the problems.
int len = 1024 ;
char filename [len] ;
#define NMAXLAYERS 3
int maxLayers [NMAXLAYERS] = { 120, 480, 1920 } ;
// #define NMAXNEURONS 1
// int Nneurons [NMAXNEURONS] = { 65536 } ;
// double neuralNetBias [NMAXNEURONS] = { -0.45 } ;
#define NMAXNEURONS 4
int Nneurons [NMAXNEURONS] = { 1024, 4096, 16384, 65536 } ;
double neuralNetBias [NMAXNEURONS] = { -0.3, -0.35, -0.4, -0.45 } ;
int nfeatures = 60000 ;
GrB_Matrix Y0 = NULL, Y = NULL, W [65536], Bias [65536] ;
GrB_Vector TrueCategories = NULL, Categories = NULL, C = NULL ;
for (int layer = 0 ; layer < 65536 ; layer++)
{
W [layer] = NULL ;
Bias [layer] = NULL ;
}
#undef LAGRAPH_FREE_ALL
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (&TrueCategories) ; \
GrB_free (&Categories) ; \
GrB_free (&C) ; \
GrB_free (&Y) ; \
GrB_free (&Y0) ; \
for (int layer = 0 ; layer < 65536 ; layer++) \
{ \
GrB_free (& (W [layer])) ; \
GrB_free (& (Bias [layer])) ; \
} \
}
// select the type. GrB_FP32 is faster and passes all the tests.
// GrB_Type type = GrB_FP64 ;
GrB_Type type = GrB_FP32 ;
printf ("type: ") ;
if (type == GrB_FP64) printf ("double\n") ; else printf ("float\n") ;
// get the max # of threads that can be used
int nthreads_max = LAGraph_get_nthreads ( ) ;
printf ("max # of nthreads: %d\n", nthreads_max) ;
#define NNTHREADS 12
int nthreads_list [NNTHREADS] =
{ 1, 2, 4, 8, 16, 20, 32, 40, 64, 128, 160, 256 } ;
// #define NNTHREADS 1
// int nthreads_list [NNTHREADS] = { 40 } ;
// determine the # of problems to solve
int nproblems = NMAXNEURONS * NMAXLAYERS ;
if (argc > 1)
{
sscanf (argv [1], "%d", &nproblems) ;
}
printf ("# of problems to solve: %d\n", nproblems) ;
int problem = 0 ;
//--------------------------------------------------------------------------
// run all problems
//--------------------------------------------------------------------------
for (int kn = 0 ; kn < NMAXNEURONS ; kn++)
{
//----------------------------------------------------------------------
// check if this problem is to be solved
//----------------------------------------------------------------------
if (problem > nproblems) continue ;
//----------------------------------------------------------------------
// get the number of nneurons and neural bias
//----------------------------------------------------------------------
double tic [2] ;
LAGraph_tic (tic) ;
int nneurons = Nneurons [kn] ;
double b = neuralNetBias [kn] ;
printf ("\n# neurons: %d bias: %g\n", nneurons, b) ;
//----------------------------------------------------------------------
// read in the initial feature vectors
//----------------------------------------------------------------------
sprintf (filename, "%s/MNIST/sparse-images-%d.tsv", DNN_DATA, nneurons);
FILE *f = fopen (filename, "r") ;
if (!f) { printf ("cannot open %s\n", filename) ; abort ( ) ; }
LAGRAPH_OK (LAGraph_tsvread (&Y0, f, type, nfeatures, nneurons)) ;
fclose (f) ;
double t = LAGraph_toc (tic) ;
printf ("# features: %" PRIu64 " read time: %g sec\n", nfeatures, t) ;
GrB_Index nvals ;
LAGRAPH_OK (GrB_Matrix_nvals (&nvals, Y0)) ;
printf ("# entries in Y0: %g million\n", (double) nvals / 1e6) ;
fflush (stdout) ;
//----------------------------------------------------------------------
// run each problem size (for all #'s of layers)
//----------------------------------------------------------------------
for (int kl = 0 ; kl < NMAXLAYERS ; kl++)
{
//------------------------------------------------------------------
// check if this problem is to be solved
//------------------------------------------------------------------
problem++ ;
if (problem > nproblems) continue ;
//------------------------------------------------------------------
// get the number of layers in this neural net
//------------------------------------------------------------------
int nlayers = maxLayers [kl] ;
printf ("\n--------------------------------------"
"neurons per layer: %d layers: %d\n", nneurons, nlayers) ;
//------------------------------------------------------------------
// read in the layers in parallel
//------------------------------------------------------------------
LAGraph_tic (tic) ;
int first_layer = (kl == 0) ? 0 : maxLayers [kl-1] ;
bool ok = true ;
// assume the I/O system can handle 2-way parallelism
#pragma omp parallel for schedule(dynamic,1) reduction(&&:ok) \
num_threads (2)
for (int layer = first_layer ; layer < nlayers ; layer++)
{
// read the neuron layer: W [layer]
char my_filename [1024] ;
sprintf (my_filename, "%s/DNN/neuron%d/n%d-l%d.tsv", DNN_DATA,
nneurons, nneurons, layer+1) ;
FILE *my_file = fopen (my_filename, "r") ;
bool my_ok = true ;
if (!my_file)
{
printf ("cannot open %s\n", my_filename) ;
my_ok = false ;
continue ;
}
GrB_Info my_info = LAGraph_tsvread (&(W [layer]), my_file,
type, nneurons, nneurons) ;
fclose (my_file) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
// construct the bias matrix: Bias [layer]. Note that all Bias
// matrices are the same for all layers, and all diagonal
// entries are also the same, but this test must not exploit
// that fact.
my_info = GrB_Matrix_new (&(Bias [layer]), type,
nneurons, nneurons) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
for (int i = 0 ; i < nneurons ; i++)
{
my_info = GrB_Matrix_setElement (Bias [layer], b, i, i) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
}
GrB_Index ignore ;
my_info = GrB_Matrix_nvals (&ignore, Bias [layer]) ;
my_ok = my_ok && (my_info == GrB_SUCCESS) ;
ok = ok && my_ok ;
}
if (!ok)
{
printf ("neural read failure\n") ;
abort ( ) ;
}
t = LAGraph_toc (tic) ;
printf ("read net time %g sec\n", t) ;
double nedges = 0 ;
for (int layer = 0 ; layer < nlayers ; layer++)
{
GrB_Index nvals ;
LAGRAPH_OK (GrB_Matrix_nvals (&nvals, W [layer])) ;
nedges += nvals ;
}
printf ("# edges in all layers: %g million\n\n",
(double) nedges / 1e6) ;
fflush (stdout) ;
// read TrueCategories as a boolean nfeatures-by-1 vector
LAGRAPH_OK (GrB_Vector_new (&TrueCategories, GrB_BOOL,
nfeatures)) ;
sprintf (filename, "%s/DNN/neuron%d-l%d-categories.tsv", DNN_DATA,
nneurons, nlayers) ;
f = fopen (filename, "r") ;
bool check_result = (f != NULL) ;
if (check_result)
{
while (1)
{
int category ;
if (fscanf (f, "%d\n", &category) == EOF) break ;
LAGRAPH_OK (GrB_Vector_setElement (TrueCategories,
(bool) true, category-1)) ;
}
fclose (f) ;
}
else
{
printf ("cannot open %s\n", filename) ;
}
//------------------------------------------------------------------
// solve the problem with 1, 2, 4, ..., nthreads_max threads
//------------------------------------------------------------------
double t1 = 0, tcheck = 0 ;
GrB_Index final_ynvals ;
for (int kth = 0 ; kth < NNTHREADS ; kth++)
{
//--------------------------------------------------------------
// set the # of threads to use
//--------------------------------------------------------------
int nthreads = nthreads_list [kth] ;
if (nthreads > nthreads_max) break ;
LAGraph_set_nthreads (nthreads) ;
printf ("nthreads %3d: ", nthreads) ;
fflush (stdout) ;
//--------------------------------------------------------------
// solve the problem
//--------------------------------------------------------------
LAGraph_tic (tic) ;
LAGRAPH_OK (LAGraph_dnn (&Y, W, Bias, nlayers, Y0)) ;
t = LAGraph_toc (tic) ;
printf ("soln time %12.2f sec", t) ;
if (nthreads == 1)
{
t1 = t ;
printf (" ") ;
}
else
{
printf (" speedup %8.2f", t1/t) ;
}
double rate = ((double) nfeatures) * ((double) nedges) / t ;
printf (" rate %10.4f (1e9 edges/sec) ", rate / 1e9) ;
//--------------------------------------------------------------
// check the result
//--------------------------------------------------------------
// this is so fast, it's hardly worth timing ...
LAGraph_tic (tic) ;
LAGRAPH_OK (GrB_Matrix_nvals (&final_ynvals, Y)) ;
// C = sum (Y)
LAGRAPH_OK (GrB_Vector_new (&C, type, nfeatures)) ;
LAGRAPH_OK (GrB_reduce (C, NULL, NULL, GrB_PLUS_FP64, Y, NULL));
// Categories = pattern of C
LAGRAPH_OK (GrB_Vector_new (&Categories, GrB_BOOL, nfeatures)) ;
LAGRAPH_OK (GrB_apply (Categories, NULL, NULL, GxB_ONE_BOOL,
C, NULL)) ;
// write out Categories, as a 1-based file
/*
sprintf (filename, "my_neuron%d-l%d-categories_threads%d.tsv",
nneurons, nlayers, nthreads) ;
FILE *ff = fopen (filename, "w") ;
for (int i = 0 ; i < nfeatures ; i++)
{
bool c = false ;
LAGRAPH_OK (GrB_Vector_extractElement (&c, Categories, i)) ;
if (c) fprintf (ff, "%d\n", i + 1) ;
}
fclose (ff) ;
*/
if (check_result)
{
// check if Categories and TrueCategories are the same
bool isequal ;
LAGRAPH_OK (LAGraph_Vector_isequal (&isequal,
TrueCategories, Categories, NULL)) ;
if (!isequal)
{
// GxB_print (TrueCategories, 3) ;
// GxB_print (Categories, 3) ;
printf ("test failure!\n") ;
// LAGRAPH_FREE_ALL ;
// abort ( ) ;
}
}
printf ("\n") ;
GrB_free (&Categories) ;
GrB_free (&C) ;
GrB_free (&Y) ;
tcheck = LAGraph_toc (tic) ;
}
printf ("\n# entries in final Y: %g million\n",
(double) final_ynvals / 1e6) ;
printf ("check time: %g sec\n", tcheck) ;
LAGraph_set_nthreads (nthreads_max) ;
}
//----------------------------------------------------------------------
// free the problem
//----------------------------------------------------------------------
LAGRAPH_FREE_ALL ;
}
//--------------------------------------------------------------------------
// finalize LAGraph and GraphBLAS
//--------------------------------------------------------------------------
LAGRAPH_OK (LAGraph_finalize ( )) ;
printf ("all tests passed\n") ;
return (GrB_SUCCESS) ;
}
|
rose_loop.c | #include <stdio.h>
#define N 100
#include "libxomp.h"
// ROSE/XOMP outlining artifact: bundle of captured-variable addresses passed
// from main to the outlined parallel region OUT__1__9384__ below.
struct OUT__1__9384___data
{
  float (*x_p)[100];  // address of main's shared array x
  float (*y_p)[100];  // address of main's shared array y
  void *a_p;          // address of main's shared scalar a (erased to void*)
}
;
static void OUT__1__9384__(void *__out_argv);
/*
 * Driver generated by the ROSE/REX OpenMP outliner: fills x with 0..99 and
 * y with zeros, packs their addresses into OUT__1__9384___data, runs the
 * outlined region OUT__1__9384__ (y = a*x + y) through the XOMP runtime,
 * and checks the last element.
 *
 * Fix: the original declared main with an obsolescent K&R-style parameter
 * list; this uses a standard prototype and returns status explicitly.
 * Behavior is unchanged.
 */
int main(int argc, char **argv)
{
  int status = 0;
  XOMP_init(argc, argv);
  float x[100];
  float y[100];
  float a = 2.0;
  // initialize inputs: x[i] = i, y[i] = 0
  for (int i = 0; i < 100; i++) {
    x[i] = i;
    y[i] = 0;
  }
  struct OUT__1__9384___data __out_argv1__9384__;
  __out_argv1__9384__ . a_p = ((void *)(&a));
  __out_argv1__9384__ . y_p = &y;
  __out_argv1__9384__ . x_p = &x;
  XOMP_parallel_start(OUT__1__9384__,&__out_argv1__9384__,1,0,"/home/awang15/Projects/rexdev/rex_src/tests/nonsmoke/functional/CompileTests/OpenMP_tests/loop.c",10);
  XOMP_parallel_end("/home/awang15/Projects/rexdev/rex_src/tests/nonsmoke/functional/CompileTests/OpenMP_tests/loop.c",14);
  // verify: with a == 2, y[N-1] must equal 2*(N-1) after the saxpy
  if (y[100 - 1] != (100 - 1) * 2.0)
    printf("Error: 2*(N-1) != y[N-1]=%f",y[100 - 1]);
  XOMP_terminate(status);
  return status;
}
/* Outlined body of the parallel region: computes y[i] = a*x[i] + y[i]
 * (a saxpy) over the arrays captured in OUT__1__9384___data. */
static void OUT__1__9384__(void *__out_argv)
{
  /* Unpack the captured-variable bundle prepared by main. */
  struct OUT__1__9384___data *ctx = (struct OUT__1__9384___data *)__out_argv;
  float (*x)[100] = ctx -> x_p;
  float (*y)[100] = ctx -> y_p;
  float *a = (float *)(ctx -> a_p);
  /* Distribute the iterations over the enclosing parallel team. */
  #pragma omp loop bind(parallel)
  for (int i = 0; i < 100; ++i) {
    ( *y)[i] = ( *a) * ( *x)[i] + ( *y)[i];
  }
}
|
dropout-inl.h | /*!
* Copyright (c) 2015 by Contributors
* \file dropout-inl.h
* \brief
* \author Bing Xu
*/
#ifndef MXNET_OPERATOR_DROPOUT_INL_H_
#define MXNET_OPERATOR_DROPOUT_INL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include "./operator_common.h"
#include "./mshadow_op.h"
#if defined(USE_MKL) && defined(_OPENMP)
#include <omp.h>
#include <mkl_vml_functions.h>
#include <mkl_vsl.h>
#endif // USE_MKL && _OPENMP
namespace dropout {
enum DropoutOpInputs {kData};
enum DropoutOpOutputs {kOut, kMask};
enum DropoutOpForwardResource {kRandom};
} // namespace dropout
namespace mxnet {
namespace op {
#if defined(USE_MKL) && defined(_OPENMP)
// Fill r[0..n-1] with Bernoulli(p) draws (1 with probability p, 0 otherwise)
// using MKL VSL, with the work split into contiguous chunks across OpenMP
// threads. Each thread advances its own copy of the shared-seed MCG31 stream
// by its chunk offset (vslSkipAheadStream), so the concatenated output equals
// what a single serial stream would produce.
static void bernoulli_generate(int n, double p, int* r) {
  // Seed once, outside the parallel region (rand() is not thread-safe).
  int seed = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  int nthr = omp_get_max_threads();
  # pragma omp parallel num_threads(nthr)
  {
    const int ithr = omp_get_thread_num();
    // Ceiling-divide n into nthr chunks; the last chunk may be short.
    const int avg_amount = (n + nthr - 1) / nthr;
    const int my_offset = ithr * avg_amount;
    const int my_amount = std::min(my_offset + avg_amount, n) - my_offset;
    if (my_amount > 0) {
      VSLStreamStatePtr stream;
      vslNewStream(&stream, VSL_BRNG_MCG31, seed);
      vslSkipAheadStream(stream, my_offset);
      viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount,
                     r + my_offset, p);
      vslDeleteStream(&stream);
    }
  }
}
#endif // USE_MKL && _OPENMP
// Operator parameters. p is the dropout fraction: the probability that an
// input element is zeroed during training (the operator keeps 1 - p).
struct DropoutParam : public dmlc::Parameter<DropoutParam> {
  float p;  // fraction dropped, constrained to [0, 1], default 0.5
  DMLC_DECLARE_PARAMETER(DropoutParam) {
    DMLC_DECLARE_FIELD(p).set_default(0.5)
    .set_range(0, 1)
    .describe("Fraction of the input that gets dropped out during training time.");
  }
};  // struct DropoutParam
// Dropout operator (inverted dropout): during training each input element is
// zeroed with probability p and survivors are scaled by 1/(1-p) so the
// expected activation matches inference, where the op is the identity.
// Outputs: kOut (result) and, while training, kMask (the applied mask).
//
// Fix: the MKL/OpenMP fast paths multiplied by the raw 0/1 mask without the
// 1/pkeep_ scaling that the non-MKL expression applies, so MKL builds
// produced activations (and gradients) scaled differently from inference.
// Both paths now apply the same inverted-dropout scale.
template<typename xpu, typename DType>
class DropoutOp : public Operator {
 public:
  explicit DropoutOp(DropoutParam param) {
    // Cache the keep probability; p itself is not needed afterwards.
    this->pkeep_ = 1.0f - param.p;
  }

  // Forward: out = data * mask (training) or out = data (inference).
  virtual void Forward(const OpContext &ctx,
                       const std::vector<TBlob> &in_data,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &out_data,
                       const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(in_data.size(), 1U);
    if (ctx.is_train) {
      CHECK_EQ(out_data.size(), 2U);
    }
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s);
    if (ctx.is_train) {
      Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
#if !defined(__CUDACC__) && defined(USE_MKL) && defined(_OPENMP)
      DType* outptr = out.dptr_;
      DType* dataptr = data.dptr_;
      // The mask buffer is reused as int storage for the MKL Bernoulli draw;
      // assumes sizeof(DType) >= sizeof(int) -- TODO(review): confirm.
      int* maskptr = reinterpret_cast<int*>(mask.dptr_);
      int count = mask.shape_[0]*mask.shape_[1];
      // maskptr[i] becomes 1 with probability pkeep_, else 0.
      bernoulli_generate(count, this->pkeep_, maskptr);
      // Apply the same 1/pkeep_ scaling as the non-MKL path below.
      const DType scale = static_cast<DType>(1.0f / this->pkeep_);
      #pragma omp parallel for
      for (int i = 0; i < count; ++i) {
        outptr[i] = dataptr[i] * maskptr[i] * scale;
      }
#else
      Random<xpu> *prnd = ctx.requested[dropout::kRandom].get_random<xpu, real_t>(s);
      // mask = (uniform < pkeep_) * (1/pkeep_); then out = data * mask.
      mask = tcast<DType>(F<mshadow_op::threshold>(
             prnd->uniform(mask.shape_), pkeep_) * (1.0f / pkeep_));
      Assign(out, req[dropout::kOut], data * mask);
#endif  // USE_MKL && _OPENMP
    } else {
      // Inference: inverted dropout needs no rescaling here.
      Assign(out, req[dropout::kOut], F<mshadow_op::identity>(data));
    }
  }

  // Backward: gdata = grad * mask, reusing the mask saved by Forward.
  virtual void Backward(const OpContext &ctx,
                        const std::vector<TBlob> &out_grad,
                        const std::vector<TBlob> &in_data,
                        const std::vector<TBlob> &out_data,
                        const std::vector<OpReqType> &req,
                        const std::vector<TBlob> &in_grad,
                        const std::vector<TBlob> &aux_states) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(out_grad.size(), 1U);
    CHECK_EQ(in_grad.size(), 1U);
    Stream<xpu> *s = ctx.get_stream<xpu>();
    Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s);
    Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s);
#if !defined(__CUDACC__) && defined(USE_MKL) && defined(_OPENMP)
    DType* ingradptr = gdata.dptr_;
    DType* outgradptr = grad.dptr_;
    int* maskptr = reinterpret_cast<int*>(mask.dptr_);
    int count = mask.shape_[0]*mask.shape_[1];
    // The MKL mask is 0/1; scale to match the non-MKL gdata = grad * mask,
    // whose mask already carries the 1/pkeep_ factor.
    const DType scale = static_cast<DType>(1.0f / this->pkeep_);
    #pragma omp parallel for
    for (int i = 0; i < count; ++i) {
      ingradptr[i] = outgradptr[i] * maskptr[i] * scale;
    }
#else  // USE_MKL && _OPENMP
    Assign(gdata, req[dropout::kData], grad * mask);
#endif  // USE_MKL && _OPENMP
  }

 private:
  real_t pkeep_;  // probability an element is kept (1 - p)
};  // class DropoutOp
template<typename xpu>
Operator *CreateOp(DropoutParam param, int dtype);
#if DMLC_USE_CXX11
// Operator property class for Dropout: shape/type inference, resource
// requests, and graph-rewrite metadata (in-place options, dependencies).
class DropoutProp : public OperatorProperty {
 public:
  void Init(const std::vector<std::pair<std::string, std::string> >& kwargs) override {
    param_.Init(kwargs);
  }

  std::map<std::string, std::string> GetParams() const override {
    return param_.__DICT__();
  }

  // Output and mask both take the input's shape.
  bool InferShape(std::vector<TShape> *in_shape,
                  std::vector<TShape> *out_shape,
                  std::vector<TShape> *aux_shape) const override {
    using namespace mshadow;
    CHECK_EQ(in_shape->size(), 1U);
    const TShape &dshape = in_shape->at(0);
    if (dshape.ndim() == 0) return false;  // input shape not yet known
    out_shape->clear();
    out_shape->push_back(dshape);  // kOut
    out_shape->push_back(dshape);  // kMask
    return true;
  }

  // Every output takes the input dtype; the input dtype must be specified.
  bool InferType(std::vector<int> *in_type,
                 std::vector<int> *out_type,
                 std::vector<int> *aux_type) const override {
    CHECK_EQ(in_type->size(), 1U);
    int dtype = in_type->at(0);
    if (dtype == -1) {
      LOG(FATAL) << "input type to dropout is not specified.";
      return false;
    }
    size_t nout = this->ListOutputs().size();
    out_type->clear();
    for (size_t i = 0; i < nout; ++i) out_type->push_back(dtype);
    return true;
  }

  OperatorProperty* Copy() const override {
    auto ptr = new DropoutProp();
    ptr->param_ = param_;
    return ptr;
  }

  std::string TypeString() const override {
    return "Dropout";
  }

  // Backward needs only the output gradient and the saved mask.
  std::vector<int> DeclareBackwardDependency(
    const std::vector<int> &out_grad,
    const std::vector<int> &in_data,
    const std::vector<int> &out_data) const override {
    return {out_grad[dropout::kOut], out_data[dropout::kMask]};
  }

  // The input gradient may overwrite the output-gradient buffer.
  std::vector<std::pair<int, void*> > BackwardInplaceOption(
    const std::vector<int> &out_grad,
    const std::vector<int> &in_data,
    const std::vector<int> &out_data,
    const std::vector<void*> &in_grad) const override {
    return {{out_grad[dropout::kOut], in_grad[dropout::kData]}};
  }

  // The output may overwrite the input buffer.
  std::vector<std::pair<int, void*> > ForwardInplaceOption(
    const std::vector<int> &in_data,
    const std::vector<void*> &out_data) const override {
    return {{in_data[dropout::kData], out_data[dropout::kOut]}};
  }

  // Forward requests a random-number resource for mask generation.
  std::vector<ResourceRequest> ForwardResource(
    const std::vector<TShape> &in_shape) const override {
    return {ResourceRequest::kRandom};
  }

  // The mask output is internal; only "output" is user-visible.
  int NumVisibleOutputs() const override {
    return 1;
  }

  int NumOutputs() const override {
    return 2;
  }

  std::vector<std::string> ListOutputs() const override {
    return {"output", "mask"};
  }

  // Dtype-unaware creation is unsupported; use CreateOperatorEx.
  Operator* CreateOperator(Context ctx) const override {
    LOG(FATAL) << "Not Implemented";
    return NULL;
  }

  Operator* CreateOperatorEx(Context ctx, std::vector<TShape> *in_shape,
                             std::vector<int> *in_type) const override;

 private:
  DropoutParam param_;  // configured dropout fraction
};  // class DropoutProp
#endif // DMLC_USE_CXX11
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_DROPOUT_INL_H_
|
mergeSortSeq.c | //#include <omp.h>
#include <omp.h>
double time, timeBefore, timeAfter, timeAuxBefore, timeAuxAfter;
/*
 * Merge the two sorted runs A[iBegin..iMiddle-1] (left) and
 * A[iMiddle..iEnd-1] (right) into B[iBegin..iEnd-1].
 * Stable: on equal keys the element from the left run is taken first.
 */
void TopDownMerge(int* A, int iBegin, int iMiddle, int iEnd, int* B) {
    int left = iBegin;    /* next unconsumed element of the left run */
    int right = iMiddle;  /* next unconsumed element of the right run */
    int out;
    for (out = iBegin; out < iEnd; out++) {
        /* Take from the left run while it has elements and its head is
         * <= the right head (or the right run is exhausted). */
        int takeLeft = (left < iMiddle) && (right >= iEnd || A[left] <= A[right]);
        if (takeLeft) {
            B[out] = A[left++];
        } else {
            B[out] = A[right++];
        }
    }
}
/* Copy B[iBegin..iEnd-1] back into the same positions of A (iEnd exclusive). */
void CopyArray(int* B, int iBegin, int iEnd, int* A) {
    int idx = iBegin;
    while (idx < iEnd) {
        A[idx] = B[idx];
        ++idx;
    }
}
// iBegin is inclusive; iEnd is exclusive (A[iEnd] is not in the set)
// Recursively sorts A[iBegin..iEnd-1] using B as scratch: the two halves are
// sorted in parallel (two OpenMP sections), merged into B sequentially, then
// copied back into A.
// NOTE(review): omp_get_wtime() is called but "#include <omp.h>" at the top
// of this file is commented out; without it the implicit declaration returns
// int, corrupting the double timing values -- confirm the build includes it.
// NOTE(review): timeAuxBefore/timeAuxAfter are file-scope globals overwritten
// at every recursion level and by both parallel sections' subtrees, so only
// the outermost merge's interval reliably survives -- verify this is intended.
void TopDownSplitMerge(int* A, int iBegin, int iEnd, int* B) {
    if (iEnd - iBegin < 2) // if run size == 1
        return; // consider it sorted
    // recursively split runs into two halves until run size == 1,
    // then merge them and return back up the call chain
    int iMiddle = (iEnd + iBegin) / 2; // iMiddle = mid point, it keeps the lower, doesn't round up
    // Sort the two halves concurrently; they touch disjoint ranges of A and B.
    #pragma omp parallel sections num_threads(2)
    {
        // printf("Num threads inside %d \n", omp_get_num_threads());
        #pragma omp section
        {
            TopDownSplitMerge(A, iBegin, iMiddle, B); // split / merge left
        }
        #pragma omp section
        {
            TopDownSplitMerge(A, iMiddle, iEnd, B); // split / merge right half
        }
    }
    timeAuxBefore = omp_get_wtime(); //get the time before the Sequential part
    TopDownMerge(A, iBegin, iMiddle, iEnd, B); // merge the two half runs
    CopyArray(B, iBegin, iEnd, A); // copy the merged runs back to A
    timeAuxAfter = omp_get_wtime(); //compute the result
}
/*void printTimeOutFile(double time){
FILE *out;
out = fopen("timeOut.out", "a");
fprintf(out, "S %f\n", time);
fclose(out);
}*/
// Entry point: sorts A[0..n-1] (B is an n-element scratch buffer) and prints
// the measured wall-clock time to stdout.
// The interval timeAuxAfter - timeAuxBefore (set by the last, outermost
// merge+copy inside TopDownSplitMerge) is added to the start timestamp, i.e.
// subtracted from the printed total.
// NOTE(review): this excludes only the topmost sequential merge; sequential
// work at deeper recursion levels is still counted -- confirm that is the
// intended measurement.
void TopDownMergeSort(int* A, int* B, int n) {
    //printf("NUM THREADS %d\n", omp_get_num_threads());
    //start = -omp_get_wtime();
    timeBefore = omp_get_wtime();
    TopDownSplitMerge(A, 0, n, B);
    timeAfter = omp_get_wtime();
    //end = omp_get_wtime();
    // shift the start forward by the outermost sequential merge's duration
    timeBefore += timeAuxAfter-timeAuxBefore;
    time = timeAfter - timeBefore;
    printf("%f\n", time);
    //printTimeOutFile(end - start);
}
|
fixed_version.c | #include <stdio.h>
/*
 * Builds and prints a histogram-style lookup table:
 *   H[i]   = i                                  (filled in parallel)
 *   LUT[i] = (sum of H[0..i]) * scale_factor    (sequential prefix sum)
 *
 * Fix: the original attached "reduction(+: sum)" to the initialization
 * loop, which never modifies sum -- a misleading leftover clause, now
 * removed.  The prefix-sum loop carries a true loop dependence on sum and
 * therefore must stay sequential.  Behavior is unchanged.
 */
int main(){
    int sum = 0;
    int DATA_MAG = 100;
    int H[100];
    int scale_factor = 10;
    /* Independent iterations: safe to run in parallel. */
    #pragma omp parallel for
    for (int i = 0; i < DATA_MAG; i++) {
        H[i] = i;
    }
    int LUT[100];
    /* Running prefix sum: each iteration depends on the previous one. */
    for (int i = 0; i < DATA_MAG; i++)
    {
        sum += H[i];
        LUT[i] = sum * scale_factor;
    }
    for (int i = 0; i < 100; i++) {
        printf("%d \n", LUT[i]);
    }
    return 0;
}
|
UtilitiesBase.h | //
// UtilitiesBase.h
// Gauss
//
// Created by David Levin on 6/1/17.
//
// Some useful methods for dealing with aggregating data across physical systems and what not
#ifndef UtilitiesBase_h
#define UtilitiesBase_h
#include <Assembler.h>
#include <DOFParticle.h>
#include <DOFRotation.h>
#include <DOFPair.h>
#include <DOFList.h>
#include <PhysicalSystem.h>
#include <igl/boundary_facets.h>
#include <igl/writeOBJ.h>
// Total energy of the world at its current state: the sum of every system's
// energy plus every force's energy.
template<typename World>
double getEnergy(World &world) {
    double energy = 0.0;
    forEach(world.getSystemList(), [&energy, &world](auto a) {
        energy += a->getEnergy(world.getState());
    });
    forEach(world.getForceList(), [&energy, &world](auto a) {
        energy += a->getEnergy(world.getState());
    });
    return energy;
}
// Sum of the body-force energy (e.g. gravity potential) contributed by each
// system at the world's current state.
template<typename World>
double getBodyForceEnergy(World &world) {
    double energy = 0.0;
    forEach(world.getSystemList(), [&energy, &world](auto a) {
        energy += a->getBodyForceEnergy(world.getState());
    });
    return energy;
}
// Assemble the global mass matrix (numQDot x numQDot) from every system's
// contribution using the ASSEMBLE* assembler macros.
template<typename Matrix, typename World>
void getMassMatrix(Matrix &massMatrix, World &world) {
    //get mass matrix
    ASSEMBLEMATINIT(massMatrix, world.getNumQDotDOFs(), world.getNumQDotDOFs());
    ASSEMBLELIST(massMatrix, world.getSystemList(), getMassMatrix);
    ASSEMBLEEND(massMatrix);
}
// Assemble the global stiffness matrix (numQDot x numQDot), gathering
// contributions from both the systems and the force list.
template<typename Matrix, typename World>
void getStiffnessMatrix(Matrix &stiffnessMatrix, World &world) {
    //get stiffness matrix
    ASSEMBLEMATINIT(stiffnessMatrix, world.getNumQDotDOFs(), world.getNumQDotDOFs());
    ASSEMBLELIST(stiffnessMatrix, world.getSystemList(), getStiffnessMatrix);
    ASSEMBLELIST(stiffnessMatrix, world.getForceList(), getStiffnessMatrix);
    ASSEMBLEEND(stiffnessMatrix);
}
// Assemble the global force vector (length numQDot) from the force list and
// every system's own forces.
template<typename Matrix, typename World>
void getForceVector(Matrix &forceVector, World &world) {
    ASSEMBLEVECINIT(forceVector, world.getNumQDotDOFs());
    ASSEMBLELIST(forceVector, world.getForceList(), getForce);
    ASSEMBLELIST(forceVector, world.getSystemList(), getForce);
    ASSEMBLEEND(forceVector);
}
// Assemble the force vector for a single system only, sized by that system's
// scalar DOF count. The negative offset remaps the system's global DOF ids
// so its first DOF lands at row 0 of the local vector.
template<typename Matrix, typename System, typename World>
void getForceVector(Matrix &forceVector, System &system, World &world) {
    ASSEMBLEVECINIT(forceVector, system.getQ().getNumScalarDOF());
    forceVector.setOffset(-system.getQ().getGlobalId(), 0);
    system.getForce(forceVector, world.getState());
    //ASSEMBLELIST(forceVector, world.getForceList(), getForce);
    //ASSEMBLELIST(forceVector, world.getSystemList(), getForce);
    ASSEMBLEEND(forceVector);
}
// Assemble only the internal (e.g. elastic) forces of a single system, in
// that system's local DOF numbering (see the offset remap above).
template<typename Matrix, typename System, typename World>
void getInternalForceVector(Matrix &forceVector, System &system, World &world) {
    ASSEMBLEVECINIT(forceVector, system.getQ().getNumScalarDOF());
    forceVector.setOffset(-system.getQ().getGlobalId(), 0);
    system.getInternalForce(forceVector, world.getState());
    //ASSEMBLELIST(forceVector, world.getForceList(), getForce);
    //ASSEMBLELIST(forceVector, world.getSystemList(), getForce);
    ASSEMBLEEND(forceVector);
}
// Assemble the internal forces of every system into one global vector
// (length numQDot).
template<typename Matrix, typename World>
void getInternalForceVector(Matrix &forceVector, World &world) {
    ASSEMBLEVECINIT(forceVector, world.getNumQDotDOFs());
    ASSEMBLELIST(forceVector, world.getSystemList(), getInternalForce);
    ASSEMBLEEND(forceVector);
}
//get strain energy
// Sum of the strain (deformation) energy of every system at the current state.
template<typename World>
double getStrainEnergy(World &world) {
    double energy = 0.0;
    forEach(world.getSystemList(), [&energy, &world](auto a) {
        energy += a->getStrainEnergy(world.getState());
    });
    return energy;
}
//add in the constraints
// Assemble the constraint Jacobian (numConstraints x numQDot) from the
// gradient of every constraint in the world.
template<typename Matrix, typename World>
void getConstraintMatrix(Matrix &constraintMatrix, World &world) {
    ASSEMBLEMATINIT(constraintMatrix, world.getNumConstraints(), world.getNumQDotDOFs());
    ASSEMBLELIST(constraintMatrix, world.getConstraintList(), getGradient);
    ASSEMBLEEND(constraintMatrix);
}
// Given a function templated on system type, run it on a system identified
// by a (type, index) pair into the world's heterogeneous system lists.
// Fixes: constructors use member-initializer lists instead of assignment in
// the body (same observable state); header-comment typo corrected.
struct SystemIndex {

    // Default: sentinel type() == -1, meaning a fixed object that needs no
    // collision response.
    inline SystemIndex() : m_type(-1), m_index(0) {}

    inline SystemIndex(unsigned int type, unsigned int index)
        : m_type(type), m_index(index) {}

    // Mutable and const accessors for the raw fields.
    inline int & type() { return m_type; }
    inline int & index() { return m_index; }
    inline const int & type() const { return m_type; }
    inline const int & index() const { return m_index; }

    int m_type;  //-1 is fixed object, doesn't need collision response
    int m_index; //index of object in respective systems list
};
// Functor used by apply() below: given the typed storage for one system
// category (a container), it pulls out entry index.index() and invokes func
// on that system pointer with the remaining parameters.
class PassSystem {
public:
    template<typename Func, typename TupleParam, typename ...Params>
    inline decltype(auto) operator()(TupleParam &tuple, Func &func, SystemIndex &index, Params ...params) {
        return func(tuple[index.index()], params...);
    }
};
// Dispatch func onto the system addressed by index: index.type() selects the
// typed sub-storage, PassSystem then selects entry index.index() within it.
// NOTE(review): declared decltype(auto) but has no return statement, so the
// deduced return type is void and the inner apply's result is discarded --
// confirm whether a `return` was intended here.
template<typename SystemList, typename Func, typename ...Params>
inline decltype(auto) apply(SystemList &list, SystemIndex index, Func &func, Params ...params) {
    PassSystem A;
    apply(list.getStorage(), index.type(), A, func, index, params...);
}
// Overload taking func by value (e.g. for temporaries/lambdas); otherwise
// identical to the by-reference overload above.
// NOTE(review): same missing-return observation as above -- deduced void.
template<typename SystemList, typename Func, typename ...Params>
inline decltype(auto) apply(SystemList &list, SystemIndex index, Func func, Params ...params) {
    PassSystem A;
    apply(list.getStorage(), index.type(), A, func, index, params...);
}
// Fallback geometry writer: used for geometry types without a dedicated
// specialization; warns and writes nothing.
template<typename Geometry>
inline void writeGeoToFile(std::string filename, Geometry &geo, Eigen::VectorXd &u) {
    std::cout<<"This write GEO method does nothing\n";
}
// Writer for (vertices, elements) matrix-pair geometry: adds the displacement
// u (packed as 3 scalars per vertex) to the rest positions and writes the
// deformed boundary surface as an OBJ.
template<>
inline void writeGeoToFile<std::pair<Eigen::MatrixXd &, Eigen::MatrixXi &> >(std::string filename, std::pair<Eigen::MatrixXd &, Eigen::MatrixXi &> &geo, Eigen::VectorXd &u) {
    Eigen::MatrixXi B; //boundary facets
    // reinterpret the flat displacement vector as a 3 x (nverts) matrix
    Eigen::MatrixXd uMat = Eigen::Map<Eigen::MatrixXd>(u.data(), 3, u.rows()/3);
    std::cout<<"Writing "<<filename<<"\n";
    //get the boundary facets for my data then write everything to disk
    igl::boundary_facets(geo.second, B);
    // reverse each facet's vertex order -- presumably to flip the winding so
    // normals face outward; TODO(review) confirm against the mesh convention
    B = B.rowwise().reverse().eval();
    igl::writeOBJ(filename, geo.first+uMat.transpose(), B);
}
//write obj file for each object in scene something like 'simname_objindex_frame_index.obj'
// For every system in the world, fetch its geometry and current displacement
// and write one OBJ per object, with the frame number zero-padded to 10
// digits in the filename.
template<typename World>
inline void writeWorldToOBJ(std::string folder, std::string simName, World &world, unsigned int frameNumber) {
    //iterate through world, get geometry for each system and write to OBJ
    std::cout<<"WARNING Only works for FEM Systems Currently\n";
    //build protostring for file names
    std::string firstPart = folder+"/"+simName;
    unsigned int numObjects = world.getNumSystems();
    //Loop through every object, check if any points are on the wrong side of the floor, if so
    //record collision
    forEachIndex(world.getSystemList(), [&world, &firstPart, &numObjects, &frameNumber](auto type, auto index, auto &a) {
        auto geo = a->getGeometry();
        // flatten (type, index) into a single object id for the filename
        // NOTE(review): type*numObjects uses the total system count, not the
        // per-type count -- ids are unique but not dense; confirm intent.
        int objID = type*numObjects + index;
        // left-pad frameNumber with '0' to 10 characters
        // NOTE(review): if frameNumber ever has more than 10 digits the
        // size_t subtraction underflows; confirm frame counts stay < 1e10.
        std::string padFrameNumber = std::string(10-std::to_string(frameNumber).size(), '0').append(std::to_string(frameNumber));
        std::string outputFile = firstPart + "_"+std::to_string(objID)+"_"+padFrameNumber+".obj";
        //get object displacements
        Eigen::VectorXd disp = mapDOFEigen(a->getQ(), world.getState());
        writeGeoToFile(outputFile, geo, disp);
    });
}
//Specific map for rotations
// Maps the four scalars backing a rotation DOF in `state` as an Eigen
// quaternion without copying; writes through the returned map mutate the
// state directly.
template<typename DataType, unsigned int Property>
inline Eigen::Map<Eigen::Quaternion<DataType> > mapDOFEigenQuat(const DOFRotation<DataType, Property> &dof, const State<DataType> &state) {
    std::tuple<double *, unsigned int> qPtr = dof.getPtr(state);
    return Eigen::Map<Eigen::Quaternion<DataType> >(std::get<0>(qPtr));
}
//Initializers for DOFS
//Default Initializers just zeros things out
// Primary template: reaching this constructor means no specialization exists
// for DOFType, which is a programming error.
// Fix: the original terminated with exit(0), reporting *success* from an
// error path; the process now exits with a nonzero status instead.
template<typename DOFType>
class InitializeDOFClass
{
public:
    template<typename State>
    explicit inline InitializeDOFClass(DOFType &dof, State &state) {
        std::cout<<"Should not be here \n";
        exit(1);
    }
};
// Rotation DOF initializer: zero all four quaternion scalars, then set
// component [3] to 1 -- the identity quaternion in Eigen's (x,y,z,w)
// coefficient order (w stored last); presumably matches DOFRotation's
// layout -- TODO(review) confirm.
template<typename DataType, unsigned int PropertyIndex>
class InitializeDOFClass<DOFRotation<DataType,PropertyIndex> >
{
public:
    template<typename State>
    explicit inline InitializeDOFClass(DOFRotation<DataType,PropertyIndex> &dof, State &state) {
        auto statePtr = dof.getPtr(state);
        std::memset(std::get<0>(statePtr), 0, sizeof(DataType)*std::get<1>(statePtr));
        std::get<0>(statePtr)[3] = 1.0;
    }
};
// Particle DOF initializer: zero every scalar backing the DOF in the state.
template<typename DataType, unsigned int PropertyIndex>
class InitializeDOFClass<DOFParticle<DataType,PropertyIndex> >
{
public:
    template<typename State>
    explicit inline InitializeDOFClass(DOFParticle<DataType,PropertyIndex> &dof, State &state) {
        //standard initializer sets everything to zero
        auto statePtr = dof.getPtr(state);
        std::memset(std::get<0>(statePtr), 0, sizeof(DataType)*std::get<1>(statePtr));
    }
};
// Pair DOF initializer: delegate to the initializer of each half in turn.
template<typename DataType, unsigned int PropertyIndex, template<typename A, unsigned int B> class DOF1, template<typename A, unsigned int B> class DOF2>
class InitializeDOFClass< DOFPair<DataType, DOF1, DOF2, PropertyIndex> >
{
public:
    template<typename State>
    explicit inline InitializeDOFClass(DOFPair<DataType,DOF1, DOF2, PropertyIndex> &dof, State &state) {
        InitializeDOFClass<DOF1<DataType, PropertyIndex> >(dof.first(), state);
        InitializeDOFClass<DOF2<DataType, PropertyIndex>>(dof.second(), state);
    }
};
//Initialize DOF List
// List DOF initializer: initialize every entry, one OpenMP iteration each.
// NOTE(review): OpenMP versions before 3.0 require a signed loop variable on
// `parallel for`; confirm the toolchain accepts the unsigned ii here.
template<typename DataType, unsigned int PropertyIndex, template<typename A, unsigned int B> class DOF>
class InitializeDOFClass< DOFList<DataType, DOF, PropertyIndex> >
{
public:
    template<typename State>
    explicit inline InitializeDOFClass(DOFList<DataType,DOF, PropertyIndex> &dof, State &state) {
        //parallelize
        #pragma omp parallel for
        for(unsigned int ii=0; ii< dof.getNumDOFs(); ++ii) {
            InitializeDOF(dof[ii], state);
        }
    }
};
// Convenience dispatcher: constructing InitializeDOFClass<DOF> performs the
// initialization as a side effect of its constructor.
template<typename DOF, typename DataType>
inline void InitializeDOF(DOF &dof, State<DataType> &state) {
    InitializeDOFClass<DOF>(dof, state);
}
//Initialize everything
// Initialize the position (Q) and velocity (QDot) DOFs of every system in
// the world to their defaults.
template<typename World>
void initializeDOFs(World &world) {
    forEach(world.getSystemList(), [&world](auto a){
        InitializeDOF(a->getQ(), world.getState());
        InitializeDOF(a->getQDot(), world.getState());
    });
}
//incrementing DOFs
// Generic increment q += a * qDot, applied scalar-by-scalar over the raw
// state memory of the two DOFs (an axpy), parallelized with OpenMP.
template<typename QDOF, typename QDOTDOF, typename DataType>
class IncrementDOFClass
{
public:
    template<typename State>
    explicit inline IncrementDOFClass(QDOF &q, QDOTDOF &qDot, DataType a, State &state) {
        //normal addition
        #pragma omp parallel for
        for(unsigned int ii=0; ii<q.getNumScalarDOF(); ++ii) {
            std::get<0>(q.getPtr(state))[ii] += a*std::get<0>(qDot.getPtr(state))[ii];
        }
    }
};
//deal with rotations (standard addition doesn't work)
// Rotation update: builds the incremental rotation from the angular-velocity
// DOF (angle = a*|omega|, axis = omega/|omega|) and pre-multiplies the
// current quaternion.
// NOTE(review): if the angular velocity is exactly zero, normalized() on a
// zero vector produces NaNs that propagate through AngleAxis -- confirm
// callers never pass a zero qDot, or guard this case.
template<typename DataType>
class IncrementDOFClass<DOFRotation<DataType,0>, DOFParticle<DataType,1>, DataType >
{
public:
    template<typename State>
    explicit inline IncrementDOFClass(DOFRotation<DataType,0> &q, DOFParticle<DataType,1> &qDot, DataType a, State &state) {
        //convert angular velocity to quaternion and post multiply to update current rotation
        mapDOFEigenQuat(q, state) = Eigen::Quaternion<DataType>(Eigen::AngleAxis<DataType>(a*mapDOFEigen(qDot, state).norm(), mapDOFEigen(qDot, state).normalized()))*mapDOFEigenQuat(q, state);
    }
};
//Pair
// Pair update: increments each half of a DOF pair by delegating to the
// matching per-component IncrementDOFClass.
// NOTE(review): the omp task has no enclosing taskwait/taskgroup here; if
// this runs inside a parallel region the constructor may return before the
// task finishes, racing with the caller -- confirm the intended threading
// context (outside a parallel region the task executes immediately).
template<typename DataType, template<typename A, unsigned int B> class DOF1, template<typename A, unsigned int B> class DOF2,
template<typename A, unsigned int B> class DOF3, template<typename A, unsigned int B> class DOF4>
class IncrementDOFClass<DOFPair<DataType, DOF1, DOF2, 0>, DOFPair<DataType, DOF3, DOF4, 1>, DataType >
{
public:
    template<typename State>
    explicit inline IncrementDOFClass(DOFPair<DataType, DOF1, DOF2, 0> &q, DOFPair<DataType, DOF3, DOF4, 1> &qDot, DataType a, State &state) {
        #pragma omp task shared(a, state, q, qDot)
        {
            IncrementDOFClass<DOF1<DataType, 0>, DOF2<DataType, 1>, DataType >(q.first(), qDot.first(), a, state);
            IncrementDOFClass<DOF3<DataType, 0>, DOF4<DataType, 1>, DataType>(q.second(), qDot.second(), a, state);
        }
    }
};
//Rigid bodies (my rigid body velocities are in body space so we need to convert to world space)
//q = (rotation quaternion, center-of-mass position); qDot = (angular velocity, body-space linear velocity).
//Statement order matters: R0 must be captured *before* the rotational update,
//because the stored linear velocity is expressed in the old body frame.
template<typename DataType>
class IncrementDOFClass<DOFPair<DataType, DOFRotation, DOFParticle, 0>, DOFPair<DataType, DOFParticle, DOFParticle, 1>, DataType >
{
public:
    template<typename State>
    explicit inline IncrementDOFClass(DOFPair<DataType, DOFRotation, DOFParticle, 0> &q, DOFPair<DataType, DOFParticle, DOFParticle, 1> &qDot, DataType a, State &state) {
        //update center of mass position in the world space
        //R0 = rotation before this step; used below to lift the body-space velocity to world space
        auto R0 = mapDOFEigenQuat(q.first(), state).toRotationMatrix();
        //update the rotation
        IncrementDOFClass<DOFRotation<DataType, 0>, DOFParticle<DataType, 1>, DataType >(q.first(), qDot.first(), a, state);
        //update position
        mapDOFEigen(q.second(), state) += a*R0*mapDOFEigen(qDot.second(), state);
        //update body space velocity
        //re-express the (unchanged world-space) velocity in the new body frame: v_new = R_new^T * R0 * v_old
        mapDOFEigen(qDot.second(), state) = mapDOFEigenQuat(q.first(), state).toRotationMatrix().transpose()*R0*mapDOFEigen(qDot.second(), state);
    }
};
//List
//Increment every element of a DOFList in parallel, dispatching each
//element-wise update to the matching specialization.
template<template<typename A, unsigned int B> class DOF0, template<typename A, unsigned int B> class DOF1, typename DataType>
class IncrementDOFClass<DOFList<DataType, DOF0, 0>, DOFList<DataType, DOF1, 1>, DataType>
{
public:
    template<typename State>
    explicit inline IncrementDOFClass(DOFList<DataType,DOF0, 0> &q, DOFList<DataType,DOF1, 1> &qDot, DataType a, State &state) {
        //one independent update per list entry
        #pragma omp parallel for
        for(unsigned int dofIndex = 0; dofIndex < q.getNumDOFs(); ++dofIndex) {
            IncrementDOFClass<DOF0<DataType,0>, DOF1<DataType,1>, DataType>(q[dofIndex], qDot[dofIndex], a, state);
        }
    }
};
template<typename QDOF, typename QDOTDOF, typename DataType, typename State>
inline void incrementDOF(QDOF &q, QDOTDOF &qDot, DataType a, State &state) {
IncrementDOFClass<QDOF, QDOTDOF, DataType>(q, qDot, a, state);
}
//build one that specifically works for the rigid body DOF pair
//updateState is a convenience method for the ubiquitous operation
// q = q + dt*qDot, where "+" is appropriate to the particular DOF type.
template<typename World, typename State, typename DataType>
inline void updateState(World &world, State & state, DataType dt) {
    //one task per system; each task updates that system's DOFs in the state
    forEach(world.getSystemList(), [&world, &state, &dt](auto system) {
        #pragma omp task shared(world, state, dt)
        {
            //iterate through dofs in this system and do the update
            incrementDOF(system->getQ(), system->getQDot(), dt, state);
        }
    });
}
#endif /* UtilitiesBase_h */
|
blas_server_omp.c | /*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
//#include <sys/mman.h>
#include "common.h"
#ifndef USE_OPENMP
#include "blas_server.c"
#else
int blas_server_avail = 0;
void goto_set_num_threads(int num_threads) {
if (num_threads < 1) num_threads = blas_num_threads;
if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;
if (num_threads > blas_num_threads) {
blas_num_threads = num_threads;
}
blas_cpu_number = num_threads;
omp_set_num_threads(blas_cpu_number);
#if defined(ARCH_MIPS64)
//set parameters for different number of threads.
blas_set_parameter();
#endif
}
/* Public OpenBLAS API alias for goto_set_num_threads(). */
void openblas_set_num_threads(int num_threads) {
  goto_set_num_threads(num_threads);
}
/* Query the CPU count and mark the BLAS "server" as available.  Under
 * OpenMP no worker threads are spawned here, only bookkeeping.
 * Always returns 0 (success). */
int blas_thread_init(void){
  blas_get_cpu_number();
  blas_server_avail = 1;
  return 0;
}
/* Counterpart to blas_thread_init(): mark the server unavailable.
 * Always returns 0 (success). */
int BLASFUNC(blas_thread_shutdown)(void){
  blas_server_avail = 0;
  return 0;
}
/* Invoke a "legacy"-interface BLAS kernel.
 * The mode bit-field selects the kernel's exact prototype:
 *   BLAS_COMPLEX  -> alpha has two scalar components (real, imag);
 *   BLAS_XDOUBLE  -> extended precision (only when EXPRECISION is built);
 *   BLAS_DOUBLE   -> double; otherwise single precision.
 * func is cast to the matching function-pointer type and called with the
 * dimensions, alpha, the A/B/C operands with leading dimensions, and the
 * scratch buffer sb. */
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){

      if (!(mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
	if (mode & BLAS_XDOUBLE){
	  /* REAL / Extended Double */
	  void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble,
			xdouble *, BLASLONG, xdouble *, BLASLONG,
			xdouble *, BLASLONG, void *) = func;

	  afunc(args -> m, args -> n, args -> k,
		((xdouble *)args -> alpha)[0],
		args -> a, args -> lda,
		args -> b, args -> ldb,
		args -> c, args -> ldc, sb);
	} else
#endif
	  if (mode & BLAS_DOUBLE){
	    /* REAL / Double */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
			  double *, BLASLONG, double *, BLASLONG,
			  double *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k,
		  ((double *)args -> alpha)[0],
		  args -> a, args -> lda,
		  args -> b, args -> ldb,
		  args -> c, args -> ldc, sb);
	  } else {
	    /* REAL / Single */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
			  float *, BLASLONG, float *, BLASLONG,
			  float *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k,
		  ((float *)args -> alpha)[0],
		  args -> a, args -> lda,
		  args -> b, args -> ldb,
		  args -> c, args -> ldc, sb);
	  }
      } else {
	/* complex kernels take alpha as two scalars: alpha[0] + i*alpha[1] */
#ifdef EXPRECISION
	if (mode & BLAS_XDOUBLE){
	  /* COMPLEX / Extended Double */
	  void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble,
			xdouble *, BLASLONG, xdouble *, BLASLONG,
			xdouble *, BLASLONG, void *) = func;

	  afunc(args -> m, args -> n, args -> k,
		((xdouble *)args -> alpha)[0],
		((xdouble *)args -> alpha)[1],
		args -> a, args -> lda,
		args -> b, args -> ldb,
		args -> c, args -> ldc, sb);
	} else
#endif
	  if (mode & BLAS_DOUBLE){
	    /* COMPLEX / Double */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double,
			  double *, BLASLONG, double *, BLASLONG,
			  double *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k,
		  ((double *)args -> alpha)[0],
		  ((double *)args -> alpha)[1],
		  args -> a, args -> lda,
		  args -> b, args -> ldb,
		  args -> c, args -> ldc, sb);
	  } else {
	    /* COMPLEX / Single */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float,
			  float *, BLASLONG, float *, BLASLONG,
			  float *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k,
		  ((float *)args -> alpha)[0],
		  ((float *)args -> alpha)[1],
		  args -> a, args -> lda,
		  args -> b, args -> ldb,
		  args -> c, args -> ldc, sb);
	  }
      }
}
/* Execute one queued BLAS work item on the calling (OpenMP) thread.
 * If the caller supplied no scratch areas (sa/sb) and this is not a
 * raw pthread-compat job, a single buffer is allocated and carved into
 * an A-panel (sa) and a B-panel (sb); the sb offset is the A-panel size
 * for the job's precision, rounded up via the GEMM_ALIGN mask
 * ((x + GEMM_ALIGN) & ~GEMM_ALIGN, i.e. GEMM_ALIGN is 2^k - 1).
 * With CONSISTENT_FPCSR the saved x87/SSE control words are restored
 * first so every thread rounds identically. */
static void exec_threads(blas_queue_t *queue){

  void *buffer, *sa, *sb;

  buffer = NULL;
  sa = queue -> sa;
  sb = queue -> sb;

#ifdef CONSISTENT_FPCSR
  __asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
  __asm__ __volatile__ ("fldcw %0"   : : "m" (queue -> x87_mode));
#endif

  if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) {

    /* one allocation serves both panels */
    buffer = blas_memory_alloc(2);

    if (sa == NULL) sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);

    if (sb == NULL) {
      /* place sb just past the largest A-panel this precision can use */
      if (!(queue -> mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
	if (queue -> mode & BLAS_XDOUBLE){
	  sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
					  + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
	} else
#endif
	  if (queue -> mode & BLAS_DOUBLE){
	    sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
					    + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
	  } else {
	    sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
					    + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
	  }
      } else {
	/* complex panels hold 2 scalars per element */
#ifdef EXPRECISION
	if (queue -> mode & BLAS_XDOUBLE){
	  sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
					  + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
	} else
#endif
	  if (queue -> mode & BLAS_DOUBLE){
	    sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
					    + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
	  } else {
	    sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
					    + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
	  }
      }
    }
  }

  /* dispatch on the calling convention encoded in mode */
  if (queue -> mode & BLAS_LEGACY) {
    legacy_exec(queue -> routine, queue -> mode, queue -> args, sb);
  } else
    if (queue -> mode & BLAS_PTHREAD) {
      /* pthread-compat job: routine takes a single opaque argument */
      void (*pthreadcompat)(void *) = queue -> routine;
      (pthreadcompat)(queue -> args);
    } else {
      int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;

      (routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
    }

  if (buffer != NULL) blas_memory_free(buffer);
}
/* Run num queued work items in parallel via OpenMP and return 0.
 * No-op (still 0) for an empty or NULL queue.  With CONSISTENT_FPCSR the
 * caller's FPU/SSE control words are captured into every queue entry
 * first, so the workers reproduce the caller's rounding mode. */
int exec_blas(BLASLONG num, blas_queue_t *queue){

  BLASLONG i;

  if ((num <= 0) || (queue == NULL)) return 0;

#ifdef CONSISTENT_FPCSR
  for (i = 0; i < num; i ++) {
    __asm__ __volatile__ ("fnstcw %0"  : "=m" (queue[i].x87_mode));
    __asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode));
  }
#endif

#pragma omp parallel for schedule(static)
  for (i = 0; i < num; i ++) {
#ifndef USE_SIMPLE_THREADED_LEVEL3
    /* tell each worker which slice of the problem it owns */
    queue[i].position = i;
#endif

    exec_threads(&queue[i]);
  }

  return 0;
}
#endif
|
diagmv_x_sky_u.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
/* y := alpha*x + beta*y for a *unit*-diagonal skyline matrix: the
 * diagonal of A is implicitly all ones, so diag(A)*x is just x and A's
 * stored values are never read (A only supplies the row count). */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_SKY *A,
                                      const ALPHA_Number *x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number *y)
{
    const ALPHA_INT rows = A->rows;
    const ALPHA_INT threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(threads)
#endif
    for (ALPHA_INT r = 0; r < rows; ++r)
    {
        /* y[r] = beta * y[r] + alpha * x[r] */
        alpha_mul(y[r], beta, y[r]);
        alpha_madde(y[r], alpha, x[r]);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point; always delegates to the OpenMP implementation
 * (which degrades to a serial loop when _OPENMP is not defined). */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_SKY *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return ONAME_omp(alpha, A, x, beta, y);
}
|
Lfold.c | /* Last changed Time-stamp: <2007-09-04 11:16:01 ivo> */
/*
minimum free energy
RNA secondary structure prediction
with maximum distance base pairs
c Ivo Hofacker, Peter Stadler
Vienna RNA package
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include <limits.h>
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "pair_mat.h"
#include "params.h"
#include "loop_energies.h"
#include "gquad.h"
#include "Lfold.h"
#ifdef USE_SVM
#include "svm.h"
#include "svm_utils.h"
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#define PAREN
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define NEW_NINIO 1 /* new asymetry penalty */
/*@unused@*/
#define MAXSECTORS 500 /* dimension for a backtrack array */
#define LOCALITY 0. /* locality parameter for base-pairs */
#define INT_CLOSE_TO_UNDERFLOW(i) ((i) <= (INT_MIN/16))
#define UNDERFLOW_CORRECTION (INT_MIN/32)
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE paramT *P = NULL;
PRIVATE int **c = NULL; /* energy array, given that i-j pair */
PRIVATE int *cc = NULL; /* linear array for calculating canonical structures */
PRIVATE int *cc1 = NULL; /* " " */
PRIVATE int *f3 = NULL; /* energy of 5' end */
PRIVATE int **fML = NULL; /* multi-loop auxiliary energy array */
PRIVATE int *Fmi = NULL; /* holds row i of fML (avoids jumps in memory) */
PRIVATE int *DMLi = NULL; /* DMLi[j] holds MIN(fML[i,k]+fML[k+1,j]) */
PRIVATE int *DMLi1 = NULL; /* MIN(fML[i+1,k]+fML[k+1,j]) */
PRIVATE int *DMLi2 = NULL; /* MIN(fML[i+2,k]+fML[k+1,j]) */
PRIVATE char **ptype = NULL; /* precomputed array of pair types */
PRIVATE short *S = NULL, *S1 = NULL;
PRIVATE unsigned int length;
PRIVATE char *prev = NULL;
#ifdef USE_SVM
PRIVATE struct svm_model *avg_model = NULL;
PRIVATE struct svm_model *sd_model = NULL;
#endif
PRIVATE int with_gquad = 0;
PRIVATE int **ggg = NULL;
#ifdef _OPENMP
#ifdef USE_SVM
#pragma omp threadprivate(P, c, cc, cc1, f3, fML, Fmi, DMLi, DMLi1, DMLi2, ptype, S, S1, length, sd_model, avg_model, ggg, with_gquad, prev)
#else
#pragma omp threadprivate(P, c, cc, cc1, f3, fML, Fmi, DMLi, DMLi1, DMLi2, ptype, S, S1, length, ggg, with_gquad, prev)
#endif
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void initialize_Lfold(int length, int maxdist);
PRIVATE void update_fold_params(void);
PRIVATE void get_arrays(unsigned int size, int maxdist);
PRIVATE void free_arrays(int maxdist);
PRIVATE void make_ptypes(const short *S, int i, int maxdist, int n);
PRIVATE char *backtrack(const char *sequence, int start, int maxdist);
PRIVATE int fill_arrays(const char *sequence, int maxdist, int zsc, double min_z, int *underflow);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*--------------------------------------------------------------------------*/
/* Allocate the sliding-window DP arrays and (re)load the energy
 * parameters.  Must run before fill_arrays(). */
PRIVATE void initialize_Lfold(int length, int maxdist){
  if (length<1) nrerror("initialize_Lfold: argument must be greater 0");
  get_arrays((unsigned) length, maxdist);
  update_fold_params();
}
/*--------------------------------------------------------------------------*/
/* Allocate the DP storage for a window of width maxdist over a sequence
 * of the given size.  The row-indexed arrays (c, fML, ptype) get row
 * storage only for the top maxdist+5 rows; fill_arrays() recycles those
 * rows downward as the window slides toward position 1. */
PRIVATE void get_arrays(unsigned int size, int maxdist){
  int i;
  c     = (int **)  space(sizeof(int *) *(size+1));
  fML   = (int **)  space(sizeof(int *) *(size+1));
  ptype = (char **) space(sizeof(char *)*(size+1));
  f3    = (int *)   space(sizeof(int)   *(size+2)); /* has to be one longer */
  cc    = (int *)   space(sizeof(int)   *(maxdist+5));
  cc1   = (int *)   space(sizeof(int)   *(maxdist+5));
  Fmi   = (int *)   space(sizeof(int)   *(maxdist+5));
  DMLi  = (int *)   space(sizeof(int)   *(maxdist+5));
  DMLi1 = (int *)   space(sizeof(int)   *(maxdist+5));
  DMLi2 = (int *)   space(sizeof(int)   *(maxdist+5));
  /* one pass allocates the row storage for all three row-indexed arrays */
  for (i=size; (i>(int)size-maxdist-5) && (i>=0); i--) {
    c[i]     = (int *)  space(sizeof(int) *(maxdist+5));
    fML[i]   = (int *)  space(sizeof(int) *(maxdist+5));
    ptype[i] = (char *) space(sizeof(char)*(maxdist+5));
  }
}
/*--------------------------------------------------------------------------*/
/* Release everything allocated by get_arrays() (plus the gquad matrix).
 * NOTE: fill_arrays() recycles row pointers downward as the window
 * slides (c[i-1] = c[i+maxdist+4]), so on exit the live rows sit at the
 * *low* indices [0, maxdist+5) -- which is exactly what is freed here,
 * mirroring the allocation at the high indices. */
PRIVATE void free_arrays(int maxdist){
  int i;
  /* `length` is unsigned; i >= 0 throughout, so the mixed comparison is safe */
  for (i=0; (i<maxdist+5) && (i<=length); i++){
    free(c[i]);
    free(fML[i]);
    free(ptype[i]);
  }
  free(c);
  free(fML);
  free(ptype);
  free(f3);
  free(cc);
  free(cc1);
  free(Fmi);
  free(DMLi);
  free(DMLi1);
  free(DMLi2);

  /* ggg is only allocated when g-quadruplex support is on */
  if(ggg){
    for (i=0; (i<maxdist+5) && (i<=length); i++){
      free(ggg[i]);
    }
    free(ggg);
    ggg = NULL;
  }

  f3 = cc = cc1 = Fmi = DMLi = DMLi1 = DMLi2 = NULL;
  c = fML = NULL;
  ptype = NULL;
}
/*--------------------------------------------------------------------------*/
/* Backward-compatible wrapper: local MFE folding without the z-score
 * filter (zsc = 0). */
PUBLIC float Lfold(const char *string, char *structure, int maxdist){
  return Lfoldz(string, structure, maxdist, 0, 0.0);
}
/* Compute the local minimum free energy over windows of width maxdist
 * and return it in kcal/mol.  When zsc is nonzero (requires USE_SVM)
 * only structures with z-score <= min_z are reported.  Local structures
 * themselves are printed to stdout by fill_arrays(); `structure` is not
 * filled in by this implementation. */
PUBLIC float
Lfoldz( const char *string,
        char *structure,
        int maxdist,
        int zsc,
        double min_z){

  int i, energy, underflow;
  float mfe_local;

  /* `length` is a file-scope (threadprivate) global used by the helpers */
  length = (int) strlen(string);
  if (maxdist>length) maxdist = length;

  initialize_Lfold(length, maxdist);
  /* re-derive parameters if the global temperature changed since last call */
  if (fabs(P->temperature - temperature)>1e-6) update_fold_params();

  with_gquad  = P->model_details.gquad;
  S   = encode_sequence(string, 0);
  S1  = encode_sequence(string, 1);

  /* pre-compute pair types for the initial (rightmost) window */
  for (i=length; i>=(int)length-(int)maxdist-4 && i>0; i--)
    make_ptypes(S, i, maxdist, length);

#ifdef USE_SVM  /*svm*/
  if(zsc){
    avg_model = svm_load_model_string(avg_model_string);
    sd_model  = svm_load_model_string(sd_model_string);
  }
#endif

  /* keep track of how many times we were close to an integer underflow */
  underflow = 0;
  energy = fill_arrays(string, maxdist, zsc, min_z, &underflow);

#ifdef USE_SVM  /*svm*/
  if(zsc){
    svm_destroy_model(avg_model);
    svm_destroy_model(sd_model);
  }
#endif

  free(S); free(S1);
  free_arrays(maxdist);

  /* undo the underflow corrections applied inside fill_arrays() */
  mfe_local = (underflow > 0) ? ((float)underflow * (float)(UNDERFLOW_CORRECTION)) / 100. : 0.;
  mfe_local += (float)energy/100.;

  return mfe_local;
}
/* Core sliding-window DP.  Fills "c" (energy given i pairs j), "fML"
 * (multi-loop fragment energies) and "f3" (best energy of the 3'-fragment
 * starting at i), printing each local structure as soon as the window
 * slides past it.  Returns the optimal energy f3[1] in dekacal/mol.
 * zsc/min_z gate output by SVM z-score (USE_SVM builds only);
 * *underflow counts how often f3 had to be shifted away from INT_MIN. */
PRIVATE int
fill_arrays(const char *string,
            int maxdist,
            int zsc,
            double min_z,
            int *underflow) {

  /* fill "c", "fML" and "f3" arrays and return optimal energy */

  int   i, j, k, length, energy;
  int   decomp, new_fML;
  int   no_close, type, type_2, tt;
  int   fij;
  /* NOTE(review): lind is only assigned inside the backtrack branches
   * below, but is read in the underflow-correction loop near the end of
   * every iteration -- if a correction fires before any backtrack has
   * run, lind is used uninitialized.  Verify against upstream ViennaRNA. */
  int   lind;

  length = (int) strlen(string);
  prev=NULL;
  for (j=0; j<maxdist+5; j++)
    Fmi[j]=DMLi[j]=DMLi1[j]=DMLi2[j]=INF;
  /* initialize the rightmost window of c and fML */
  for (j=length; j>length-maxdist-4; j--) {
    for (i=(length-maxdist-4>0)?length-maxdist-4:1 ; i<j; i++)
      c[i][j-i] = fML[i][j-i] = INF;
  }

  if(with_gquad){
    ggg = get_gquad_L_matrix(S, length - maxdist - 4, maxdist, length, ggg, P);
  }

  /* main recursion: sweep i from right to left; second index of the
   * row arrays is the offset j-i within the window */
  for (i = length-TURN-1; i >= 1; i--) { /* i,j in [1..length] */
    for (j = i+TURN+1; j <= length && j <= i+maxdist; j++) {
      int p, q;
      type = ptype[i][j-i];

      no_close = (((type==3)||(type==4))&&no_closingGU);

      if (type) {   /* we have a pair */
	int new_c=0, stackEnergy=INF;
	/* hairpin ----------------------------------------------*/
	new_c = (no_close) ? FORBIDDEN : E_Hairpin(j-i-1, type, S1[i+1], S1[j-1], string+i-1, P);

	/*--------------------------------------------------------
	  check for elementary structures involving more than one
	  closing pair.
	  --------------------------------------------------------*/
	/* interior loops: inner pair (p,q), loop size bounded by MAXLOOP */
	for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1) ; p++){
	  int minq = j-i+p-MAXLOOP-2;
	  if (minq<p+1+TURN) minq = p+1+TURN;
	  for (q = minq; q < j; q++) {
	    type_2 = ptype[p][q-p];

	    if (type_2==0) continue;
	    type_2 = rtype[type_2];

	    if (no_closingGU)
	      if (no_close||(type_2==3)||(type_2==4))
		if ((p>i+1)||(q<j-1)) continue;  /* continue unless stack */

	    energy = E_IntLoop(p-i-1, j-q-1, type, type_2, S1[i+1], S1[j-1], S1[p-1], S1[q+1],P);

	    new_c = MIN2(new_c, energy + c[p][q-p]);
	    if ((p==i+1)&&(j==q+1)) stackEnergy = energy; /* remember stack energy */

	  } /* end q-loop */
	} /* end p-loop */

	/* multi-loop decomposition ------------------------*/
	if (!no_close) {
	  decomp = DMLi1[j-1-(i+1)];
	  tt = rtype[type];
	  switch(dangles){
	    /* no dangles */
	    case 0:   decomp += E_MLstem(tt, -1, -1, P);
		      break;
	    /* double dangles */
	    case 2:   decomp += E_MLstem(tt, S1[j-1], S1[i+1], P);
		      break;
	    /* normal dangles, aka dangles = 1 */
	    default:  decomp += E_MLstem(tt, -1, -1, P);
		      decomp = MIN2(decomp, DMLi2[j-1-(i+2)] + E_MLstem(tt, -1, S1[i+1], P) + P->MLbase);
		      decomp = MIN2(decomp, DMLi2[j-2-(i+2)] + E_MLstem(tt, S1[j-1], S1[i+1], P) + 2*P->MLbase);
		      decomp = MIN2(decomp, DMLi1[j-2-(i+1)] + E_MLstem(tt, S1[j-1], -1, P) + P->MLbase);
		      break;
	  }
	  new_c = MIN2(new_c, decomp + P->MLclosing);
	}

	/* coaxial stacking of (i.j) with (i+1.k) or (k+1.j-1) */
	if (dangles==3) {
	  decomp = INF;
	  for (k = i+2+TURN; k < j-2-TURN; k++) {
	    type_2 = ptype[i+1][k-i-1]; type_2 = rtype[type_2];
	    if (type_2)
	      decomp = MIN2(decomp, c[i+1][k-i-1]+P->stack[type][type_2]+
			    fML[k+1][j-1-k-1]);
	    type_2 = ptype[k+1][j-1-k-1]; type_2 = rtype[type_2];
	    if (type_2)
	      decomp = MIN2(decomp, c[k+1][j-1-k-1]+P->stack[type][type_2]+
			    fML[i+1][k-i-1]);
	  }
	  /* no TermAU penalty if coax stack */
	  decomp += 2*P->MLintern[1] + P->MLclosing;
	  new_c = MIN2(new_c, decomp);
	}

	if(with_gquad){
	  /* include all cases where a g-quadruplex may be enclosed by base pair (i,j) */
	  if (!no_close) {
	    tt = rtype[type];
	    energy = E_GQuad_IntLoop_L(i, j, type, S1, ggg, maxdist, P);
	    new_c = MIN2(new_c, energy);
	  }
	}

	/* cc/cc1 hold the current/previous row of candidate energies;
	 * with noLonelyPairs only stacked extensions survive into c */
	new_c = MIN2(new_c, cc1[j-1-(i+1)]+stackEnergy);
	cc[j-i] = new_c;
	if (noLonelyPairs)
	  c[i][j-i] = cc1[j-1-(i+1)]+stackEnergy;
	else
	  c[i][j-i] = cc[j-i];

      } /* end >> if (pair) << */

      else c[i][j-i] = INF;

      /* done with c[i,j], now compute fML[i,j] */
      /* free ends ? -----------------------------------------*/

      new_fML = INF;
      switch(dangles){
	/* no dangles */
	case 0:   new_fML = fML[i+1][j-i-1] + P->MLbase;
		  new_fML = MIN2(new_fML, fML[i][j-1-i] + P->MLbase);
		  new_fML = MIN2(new_fML, c[i][j-i] + E_MLstem(type, -1, -1, P));
		  break;
	/* double dangles */
	case 2:   new_fML = fML[i+1][j-i-1] + P->MLbase;
		  new_fML = MIN2(fML[i][j-1-i] + P->MLbase, new_fML);
		  new_fML = MIN2(new_fML, c[i][j-i] + E_MLstem(type, (i>1) ? S1[i-1] : -1, (j<length) ? S1[j+1] : -1, P));
		  break;
	/* normal dangles, aka dangles = 1 */
	default:  /* i unpaired */
		  new_fML = fML[i+1][j-i-1] + P->MLbase;
		  /* j unpaired */
		  new_fML = MIN2(new_fML, fML[i][j-1-i] + P->MLbase);
		  /* i,j */
		  if(type) new_fML = MIN2(new_fML, c[i][j-i] + E_MLstem(type, -1, -1, P));
		  /* i+1,j */
		  tt = ptype[i+1][j-i-1];
		  if(tt) new_fML = MIN2(new_fML, c[i+1][j-i-1] + E_MLstem(tt, S1[i], -1, P) + P->MLbase);
		  /* i, j-1 */
		  tt = ptype[i][j-1-i];
		  if(tt) new_fML = MIN2(new_fML, c[i][j-1-i] + E_MLstem(tt, -1, S1[j], P) + P->MLbase);
		  /* i+1,j-1 */
		  tt = ptype[i+1][j-1-i-1];
		  if(tt) new_fML = MIN2(new_fML, c[i+1][j-1-i-1] + E_MLstem(tt, S1[i], S1[j], P) + 2*P->MLbase);
		  break;
      }

      if(with_gquad){
	new_fML = MIN2(new_fML, ggg[i][j - i] + E_MLstem(0, -1, -1, P));
      }

      /* modular decomposition: split [i..j] into two multi-loop fragments */
      for (decomp = INF, k = i+1+TURN; k <= j-2-TURN; k++)
	decomp = MIN2(decomp, Fmi[k-i]+fML[k+1][j-k-1]);

      DMLi[j-i] = decomp;               /* store for use in ML decompositon */

      new_fML = MIN2(new_fML, decomp);

      /* coaxial stacking */
      if (dangles==3) {
	/* additional ML decomposition as two coaxially stacked helices */
	for (decomp = INF, k = i+1+TURN; k <= j-2-TURN; k++) {
	  type = ptype[i][k-i]; type = rtype[type];
	  type_2 = ptype[k+1][j-k-1]; type_2 = rtype[type_2];
	  if (type && type_2)
	    decomp = MIN2(decomp,
			  c[i][k-i]+c[k+1][j-k-1]+P->stack[type][type_2]);
	}

	decomp += 2*P->MLintern[1];        /* no TermAU penalty if coax stack */
#if 0
	/* This is needed for Y shaped ML loops with coax stacking of
	   interior pairts, but backtracking will fail if activated */
	DMLi[j-i] = MIN2(DMLi[j-i], decomp);
	DMLi[j-i] = MIN2(DMLi[j-i], DMLi[j-1-i]+P->MLbase);
	DMLi[j-i] = MIN2(DMLi[j-i], DMLi1[j-(i+1)]+P->MLbase);
	new_fML = MIN2(new_fML, DMLi[j-i]);
#endif
	new_fML = MIN2(new_fML, decomp);
      }

      fML[i][j-i] = Fmi[j-i] = new_fML;     /* substring energy */
    } /* for (j...) */

    /* calculate energies of 5' and 3' fragments */
    {
      /* do_backtrack/prev_i survive across i iterations (per thread) */
      static int do_backtrack = 0, prev_i=0;
#pragma omp threadprivate(do_backtrack, prev_i)
      char *ss=NULL;
      double prevz;

      /* first case: i stays unpaired */
      f3[i] = f3[i+1];

      /* next all cases where i is paired */
      switch(dangles){
	/* dont use dangling end and mismatch contributions at all */
	case 0:   for(j=i+TURN+1; j<length && j<=i+maxdist; j++){
		    type = ptype[i][j-i];

		    if(with_gquad){
		      f3[i] = MIN2(f3[i], f3[j+1] + ggg[i][j-i]);
		    }

		    if(type)
		      f3[i] = MIN2(f3[i], f3[j+1] + c[i][j-i] + E_ExtLoop(type, -1, -1, P));
		  }
		  /* j == length needs no f3[j+1] continuation */
		  if(length<=i+maxdist){
		    j=length;

		    if(with_gquad){
		      f3[i] = MIN2(f3[i], ggg[i][j-i]);
		    }

		    type = ptype[i][j-i];
		    if(type)
		      f3[i] = MIN2(f3[i], c[i][j-i] + E_ExtLoop(type, -1, -1, P));
		  }
		  break;
	/* always use dangles on both sides */
	case 2:   for(j=i+TURN+1; j<length && j<=i+maxdist; j++){
		    type = ptype[i][j-i];

		    if(with_gquad){
		      if(ggg[i][j-i] != INF)
			f3[i] = MIN2(f3[i], f3[j+1] + ggg[i][j-i]);
		    }

		    if(type)
		      f3[i] = MIN2(f3[i], f3[j+1] + c[i][j-i] + E_ExtLoop(type, (i>1) ? S1[i-1] : -1, S1[j+1], P));
		  }
		  if(length<=i+maxdist){
		    j=length;

		    if(with_gquad){
		      f3[i] = MIN2(f3[i], ggg[i][j-i]);
		    }

		    type = ptype[i][j-i];
		    if(type)
		      f3[i] = MIN2(f3[i], c[i][j-i] + E_ExtLoop(type, (i>1) ? S1[i-1] : -1, -1, P));
		  }
		  break;
	/* normal dangles, aka dangles = 1 */
	default:  for(j=i+TURN+1; j<length && j<=i+maxdist; j++){
		    type = ptype[i][j-i];

		    if(with_gquad){
		      f3[i] = MIN2(f3[i], f3[j+1] + ggg[i][j-i]);
		    }

		    if(type){
		      /* (i,j) without / with a 3' dangle on j+1 */
		      f3[i] = MIN2(f3[i], f3[j+1] + c[i][j-i] + E_ExtLoop(type, -1, -1, P));
		      f3[i] = MIN2(f3[i], ((j+2<=length) ? f3[j+2] : 0) + c[i][j-i] + E_ExtLoop(type, -1, S1[j+1], P));
		    }
		    /* (i+1,j) with a 5' dangle on i */
		    type = ptype[i+1][j-i-1];
		    if(type){
		      f3[i] = MIN2(f3[i], f3[j+1] + c[i+1][j-i-1] + E_ExtLoop(type, S1[i], -1, P));
		      f3[i] = MIN2(f3[i], ((j + 1 < length) ? f3[j+2] : 0) + c[i+1][j-i-1] + E_ExtLoop(type, S1[i], S1[j+1], P));
		    }
		  }
		  if(length<=i+maxdist){
		    j     = length;

		    if(with_gquad){
		      f3[i] = MIN2(f3[i], ggg[i][j-i]);
		    }

		    type  = ptype[i][j-i];
		    if(type)
		      f3[i] = MIN2(f3[i], c[i][j-i] + E_ExtLoop(type, -1, -1, P));
		    type  = ptype[i+1][j-i-1];
		    if(type)
		      f3[i] = MIN2(f3[i], c[i+1][j-i-1] + E_ExtLoop(type, S1[i], -1, P));
		  }
		  break;
      } /* switch(dangles)... */

      /* backtrack partial structure */
      /* f3 improved at i: remember to backtrack once it stops improving */
      if (f3[i] < f3[i+1]){
	do_backtrack=1;
      }
      else if (do_backtrack) {
	int pairpartner; /*i+1?? is paired with pairpartner*/
	int cc;
	int traced2=0;
	fij = f3[i+1];
	lind=i+1;

	/*start "short" backtrack*/

	/*get paired base: skip positions where f3 is flat*/
	while(fij==f3[lind+1])
	  lind++;

	/*get pairpartner: re-derive which pairing produced f3[lind]*/
	for (pairpartner = lind + TURN; pairpartner <= lind + maxdist; pairpartner++){
	  type = ptype[lind][pairpartner-lind];
	  switch(dangles){
	    case 0:   if(type){
			cc = c[lind][pairpartner-lind] + E_ExtLoop(type, -1, -1, P);
			if(fij == cc + f3[pairpartner + 1])
			  traced2 = 1;
		      }
		      else if(with_gquad) {
			cc = ggg[lind][pairpartner-lind];
			if(fij == cc + f3[pairpartner + 1])
			  traced2 = 1;
		      }
		      break;
	    case 2:   if(type){
			cc = c[lind][pairpartner-lind] + E_ExtLoop(type, (lind > 1) ? S1[lind-1] : -1, (pairpartner < length) ? S1[pairpartner+1] : -1, P);
			if(fij == cc + f3[pairpartner + 1])
			  traced2 = 1;
		      }
		      else if(with_gquad){
			cc = ggg[lind][pairpartner-lind];
			if(fij == cc + f3[pairpartner + 1])
			  traced2 = 1;
		      }
		      break;
	    default:  if(type){
			cc = c[lind][pairpartner-lind] + E_ExtLoop(type, -1, -1, P);
			if(fij == cc + f3[pairpartner + 1]){
			  traced2 = 1;
			  break;
			}
			else if(pairpartner < length){
			  cc = c[lind][pairpartner-lind] + E_ExtLoop(type, -1, S1[pairpartner+1], P);
			  if(fij == cc + f3[pairpartner + 2]){
			    traced2 = 1;
			    break;
			  }
			}
		      }
		      else if(with_gquad){
			cc = ggg[lind][pairpartner-lind];
			if(fij == cc + f3[pairpartner + 1])
			  traced2 = 1;
		      }

		      type = ptype[lind+1][pairpartner-lind-1];
		      if(type){
			cc = c[lind+1][pairpartner-(lind+1)] + E_ExtLoop(type, S1[lind], -1, P);
			if(fij == cc + f3[pairpartner+1]){
			  traced2 = 1;
			  break;
			}
			else if(pairpartner < length){
			  cc = c[lind+1][pairpartner-(lind+1)] + E_ExtLoop(type, S1[lind], S1[pairpartner+1], P);
			  if(fij == cc + f3[pairpartner+2])
			    traced2 = 1;
			}
		      }
		      break;
	  }
	  if(traced2) break;
	}
	if (!traced2) nrerror("backtrack failed in short backtrack 1");
	if (zsc){
#ifdef USE_SVM
	  /* only report the window if its z-score passes the min_z filter */
	  int info_avg;
	  double average_free_energy;
	  double sd_free_energy;
	  double my_z;
	  int *AUGC = get_seq_composition(S, lind-1, MIN2((pairpartner+1),length));
	  /*\svm*/
	  average_free_energy = avg_regression(AUGC[0], AUGC[1], AUGC[2], AUGC[3], AUGC[4], avg_model, &info_avg);
	  if (info_avg == 0)  {
	    double difference;
	    double min_sd = minimal_sd(AUGC[0],AUGC[1],AUGC[2],AUGC[3],AUGC[4]);
	    difference=(fij-f3[pairpartner+1])/100.-average_free_energy;
	    if ( difference - ( min_z * min_sd ) <= 0.0001 ) {
	      sd_free_energy = sd_regression(AUGC[0],AUGC[1],AUGC[2],AUGC[3],AUGC[4],sd_model);
	      my_z=difference/sd_free_energy;
	      if (my_z<=min_z){
		ss = backtrack(string, lind , pairpartner+1);
		/* print the previous hit only if the new one does not contain it */
		if (prev) {
		  if ((i+strlen(ss)<prev_i+strlen(prev)) ||
		      strncmp(ss+prev_i-i,prev,strlen(prev))) { /* ss does not contain prev */
		    if (dangles==2)
		      printf(".%s (%6.2f) %4d z= %.3f\n", prev, (f3[prev_i]-f3[prev_i+strlen(prev)-1])/100., prev_i-1, prevz);
		    else
		      printf("%s (%6.2f) %4d z=%.3f\n ", prev, (f3[prev_i]-f3[prev_i+strlen(prev)])/100., prev_i, prevz);
		  }
		  free(prev);
		}
		prev=ss; prev_i = lind; prevz=my_z;
	      }
	    }
	  }
	  free(AUGC);
	  do_backtrack=0;
#endif
	}
	else {
	  /* original code for Lfold*/
	  ss =  backtrack(string, lind , pairpartner+1);
	  if (prev) {
	    if ((i+strlen(ss)<prev_i+strlen(prev)) || strncmp(ss+prev_i-i,prev,strlen(prev))){
	      /* ss does not contain prev */
	      if (dangles==2){
		printf(".%s (%6.2f) %4d\n", prev, (f3[prev_i]-f3[prev_i+strlen(prev)-1])/100., prev_i-1);
	      } else
		printf("%s (%6.2f) %4d\n", prev, (f3[prev_i]-f3[prev_i+strlen(prev)])/100., prev_i);
	    }
	    free(prev);
	  }
	  prev=ss;
	  prev_i = lind;
	  do_backtrack=0;
	}
      }
      if (i==1) {
	/* sweep finished: flush the last stored structure */
	if (prev) {
	  if(zsc) {
	    if (dangles==2)
	      printf(".%s (%6.2f) %4d z= %.2f\n", prev, (f3[prev_i]-f3[prev_i+strlen(prev)-1])/100., prev_i-1, prevz);
	    else
	      printf("%s (%6.2f) %4dz= %.2f \n", prev, (f3[prev_i]-f3[prev_i+strlen(prev)])/100., prev_i, prevz);
	    free(prev); prev=NULL;
	  }
	  else {
	    if (dangles==2)
	      printf(".%s (%6.2f) %4d\n", prev, (f3[prev_i]-f3[prev_i+strlen(prev)-1])/100., prev_i-1);
	    else
	      printf("%s (%6.2f) %4d\n", prev, (f3[prev_i]-f3[prev_i+strlen(prev)])/100., prev_i);
	  }
	} else if ((f3[i]<0) && (!zsc)) do_backtrack=1;

	if (do_backtrack) {
	  int pairpartner; /*i+1?? is paired with pairpartner*/
	  int cc;
	  /* average_free_energy etc. are only used in the USE_SVM branch */
	  double average_free_energy;
	  double sd_free_energy;
	  int info_avg;
	  double my_z;
	  int traced2 = 0;
	  fij = f3[i];
	  lind=i;
	  while(fij==f3[lind+1]) lind++;
	  /*get pairpartner*/
	  for(pairpartner = lind + TURN; pairpartner <= lind + maxdist; pairpartner++){
	    type = ptype[lind][pairpartner-lind];
	    switch(dangles){
	      case 0:   if(type){
			  cc = c[lind][pairpartner-lind] + E_ExtLoop(type, -1, -1, P);
			  if(fij == cc + f3[pairpartner + 1])
			    traced2 = 1;
			}
			else if(with_gquad){
			  cc = ggg[lind][pairpartner-lind];
			  if(fij == cc + f3[pairpartner + 1])
			    traced2 = 1;
			}
			break;
	      case 2:   if(type){
			  cc = c[lind][pairpartner-lind] + E_ExtLoop(type, (lind > 1) ? S1[lind-1] : -1, (pairpartner < length) ? S1[pairpartner+1] : -1, P);
			  if(fij == cc + f3[pairpartner + 1])
			    traced2 = 1;
			}
			else if(with_gquad){
			  cc = ggg[lind][pairpartner-lind];
			  if(fij == cc + f3[pairpartner + 1])
			    traced2 = 1;
			}
			break;
	      default:  if(type){
			  cc = c[lind][pairpartner-lind] + E_ExtLoop(type, -1, -1, P);
			  if(fij == cc + f3[pairpartner + 1]){
			    traced2 = 1;
			    break;
			  }
			  else if(pairpartner < length){
			    cc = c[lind][pairpartner-lind] + E_ExtLoop(type, -1, S1[pairpartner + 1], P);
			    /* NOTE(review): backtrack 1 above compares the
			     * 3'-dangle case against f3[pairpartner + 2];
			     * the "+ 1" here looks inconsistent -- verify
			     * against upstream ViennaRNA. */
			    if(fij == cc + f3[pairpartner + 1]){
			      traced2 = 1;
			      break;
			    }
			  }
			}
			else if(with_gquad){
			  cc = ggg[lind][pairpartner-lind];
			  if(fij == cc + f3[pairpartner + 1])
			    traced2 = 1;
			}

			type = ptype[lind+1][pairpartner-lind-1];
			if(type){
			  cc = c[lind+1][pairpartner-(lind+1)] + E_ExtLoop(type, S1[lind], -1, P);
			  if(fij == cc + f3[pairpartner+1]){
			    traced2 = 1;
			    break;
			  }
			  else if (pairpartner < length){
			    cc = c[lind+1][pairpartner-(lind+1)] + E_ExtLoop(type, S1[lind], S1[pairpartner+1], P);
			    if(fij == cc + f3[pairpartner + 2]){
			      traced2 =1;
			      break;
			    }
			  }
			}
	      }
	      if(traced2) break;
	    }
	  if (!traced2) nrerror("backtrack failed in short backtrack 2");
	  if(zsc){
#ifdef USE_SVM
	    int *AUGC = get_seq_composition(S, lind-1, MIN2((pairpartner+1),length));
	    average_free_energy = avg_regression(AUGC[0],AUGC[1],AUGC[2],AUGC[3],AUGC[4],avg_model,&info_avg);
	    if (info_avg == 0)  {
	      double difference;
	      double min_sd = minimal_sd(AUGC[0],AUGC[1],AUGC[2],AUGC[3],AUGC[4]);
	      difference=(fij-f3[pairpartner+1])/100.-average_free_energy;
	      if ( difference - ( min_z * min_sd ) <= 0.0001 ) {
		sd_free_energy = sd_regression(AUGC[0],AUGC[1],AUGC[2],AUGC[3],AUGC[4],sd_model);
		my_z=difference/sd_free_energy;
		if (my_z<=min_z){
		  ss = backtrack(string, lind , pairpartner+1);
		  printf("%s (%6.2f) %4d z= %.2f\n", ss, (f3[lind]-f3[lind+strlen(ss)-1])/100., lind, my_z);
		}
	      }
	    }
	    free(AUGC);
#endif
	  }
	  else {
	    ss =  backtrack(string, lind , pairpartner+1);
	    if (dangles==2)
	      printf("%s (%6.2f) %4d\n", ss, (f3[lind]-f3[lind+strlen(ss)-1])/100., 1);
	    else
	      printf("%s (%6.2f) %4d\n", ss, (f3[lind]-f3[lind+strlen(ss)])/100., 1);
	    free(ss);
	  }
	}
	do_backtrack=0;
      }
    }
    {
      int ii, *FF; /* rotate the auxilliary arrays */
      /* check for values close to integer underflow */
      if(INT_CLOSE_TO_UNDERFLOW(f3[i])){
	/* correct f3 free energies and increase underflow counter */
	int cnt, cnt2;
	for(cnt=i; cnt <= length && cnt <= lind + maxdist + 2; cnt++) {
	  f3[cnt] -= UNDERFLOW_CORRECTION;
	}
	(*underflow)++;
      }
      /* shift the per-row helper arrays by one position (i-1 becomes current) */
      FF = DMLi2; DMLi2 = DMLi1; DMLi1 = DMLi; DMLi = FF;
      FF = cc1; cc1=cc; cc=FF;
      for (j=0; j< maxdist+5; j++){
	cc[j] = Fmi[j] = DMLi[j] = INF;
      }
      /* recycle the row that just left the window for position i-1 */
      if (i+maxdist+4<=length) {
	c[i-1] = c[i+maxdist+4];
	c[i+maxdist+4] = NULL;
	fML[i-1] = fML[i+maxdist+4];
	fML[i+maxdist+4]=NULL;
	ptype[i-1] = ptype[i+maxdist+4];
	ptype[i+maxdist+4] = NULL;
	if (i>1){
	  make_ptypes(S, i-1, maxdist, length);
	  if(with_gquad){
	    ggg = get_gquad_L_matrix(S, i - 1, maxdist, length, ggg, P);
	  }
	}
	for (ii=0; ii<maxdist+5; ii++) {
	  c[i-1][ii] = INF;
	  fML[i-1][ii] = INF;
	}
      }
    }
  }

  return f3[1];
}
PRIVATE char *backtrack(const char *string, int start, int maxdist){
/*------------------------------------------------------------------
trace back through the "c", "f3" and "fML" arrays to get the
base pairing list. No search for equivalent structures is done.
This is fast, since only few structure elements are recalculated.
------------------------------------------------------------------*/
/* Returns a freshly allocated dot-bracket string for the local structure
   starting at `start` spanning at most `maxdist` bases. Uses an explicit
   stack of "sectors" (intervals + array flag) instead of recursion.
   NOTE(review): relies on file-scope DP arrays (c, f3, fML, ggg, ptype)
   and globals (length, dangles, with_gquad) filled by the forward pass. */
sect sector[MAXSECTORS]; /* backtracking sectors */
int i, j, k, energy, new, no_close, type, type_2, tt, s=0;
char *structure;
/* length = strlen(string); */
sector[++s].i = start;
sector[s].j = MIN2(length, maxdist+1);
sector[s].ml = (backtrack_type=='M') ? 1 : ((backtrack_type=='C')?2:0);
/* '-' marks positions not yet decided; trailing '-' are trimmed at the end */
structure = (char *) space((MIN2(length-start, maxdist)+3)*sizeof(char));
for (i=0; i<=MIN2(length-start, maxdist); i++) structure[i] = '-';
while (s>0) {
int ml, fij, cij, traced, i1, j1, d3, d5, mm, mm5, mm3, mm53, p, q, jj=0, gq=0;
int canonical = 1; /* (i,j) closes a canonical structure */
i = sector[s].i;
j = sector[s].j;
ml = sector[s--].ml; /* ml is a flag indicating if backtracking is to
occur in the fML- (1) or in the f-array (0) */
if (ml==2) {
/* ml==2: (i,j) is known to pair; jump straight to the pair-closing code */
structure[i-start] = '(';
structure[j-start] = ')';
goto repeat1;
}
if (j < i+TURN+1) continue; /* no more pairs in this interval */
fij = (ml)? fML[i][j-i] : f3[i];
if (ml == 0) { /* backtrack in f3 */
/* case: position i is unpaired in the optimal structure */
if (fij == f3[i+1]) {
sector[++s].i = i+1;
sector[s].j = j;
sector[s].ml = ml;
continue;
}
/* i or i+1 is paired. Find pairing partner */
/* The three dangle models test different 5'/3' dangling-end contributions;
   each scans k downward for the split f3[i] = pair(i..k) + f3[k+1]. */
switch(dangles){
case 0: for(traced = 0, k=j; k>i+TURN; k--){
if(with_gquad){
if(fij == ggg[i][k-i] + f3[k+1]){
/* found the decomposition */
traced = i; jj = k + 1; gq = 1;
break;
}
}
jj = k+1;
type = ptype[i][k-i];
if(type)
if(fij == c[i][k-i] + E_ExtLoop(type, -1, -1, P) + f3[k+1]){
traced = i;
break;
}
}
break;
case 2: for(traced = 0, k=j; k>i+TURN; k--){
if(with_gquad){
if(fij == ggg[i][k-i] + f3[k+1]){
/* found the decomposition */
traced = i; jj = k + 1; gq = 1;
break;
}
}
jj = k+1;
type = ptype[i][k-i];
if(type)
if(fij == c[i][k-i] + E_ExtLoop(type, (i>1) ? S1[i-1] : -1, (k<length) ? S1[k+1] : -1, P) + f3[k+1]){
traced = i;
break;
}
}
break;
default: for(traced = 0,k=j; k>i+TURN; k--){
if(with_gquad){
if(fij == ggg[i][k-i] + f3[k+1]){
/* found the decomposition */
traced = i; jj = k + 1; gq = 1;
break;
}
}
jj = k+1;
/* dangles==1: try pair starting at i+1 (5' dangle on i) ... */
type = ptype[i+1][k-(i+1)];
if(type){
if(fij == c[i+1][k-(i+1)] + E_ExtLoop(type, S1[i], -1, P) + f3[k+1]){
traced=i+1;
}
if(k < length){
if(fij == c[i+1][k-(i+1)] + E_ExtLoop(type, S1[i], S1[k+1], P) + f3[k+2]){
traced = i+1;
jj = k+2;
}
}
}
/* ... and pair starting at i (possible 3' dangle after k) */
type = ptype[i][k-i];
if(type){
if(fij == c[i][k-i] + E_ExtLoop(type, -1, -1, P) + f3[k+1]){
traced = i;
}
if(k<length){
if(fij == c[i][k-i] + E_ExtLoop(type, -1, S1[k+1], P) + f3[k+2]){
traced = i;
jj = k+2;
}
}
}
if(traced) break;
}
break;
} /* switch(dangles)...*/
if (!traced) nrerror("backtrack failed in f3");
if (j==length) { /* backtrack only one component, unless j==length */
sector[++s].i = jj;
sector[s].j = j;
sector[s].ml = ml;
}
i=traced; j=k;
if(with_gquad && gq){
/* goto backtrace of gquadruplex */
goto repeat_gquad;
}
structure[i-start] = '('; structure[j-start] = ')';
if (((jj==j+2) || (dangles==2)) && (j < length)) structure[j+1-start] = '.';
goto repeat1;
}
else { /* trace back in fML array */
if (fML[i][j-1-i]+P->MLbase == fij) { /* 3' end is unpaired */
sector[++s].i = i;
sector[s].j = j-1;
sector[s].ml = ml;
continue;
}
if (fML[i+1][j-(i+1)]+P->MLbase == fij) { /* 5' end is unpaired */
sector[++s].i = i+1;
sector[s].j = j;
sector[s].ml = ml;
continue;
}
if(with_gquad){
if(fij == ggg[i][j-i] + E_MLstem(0, -1, -1, P)){
/* go to backtracing of quadruplex */
goto repeat_gquad;
}
}
/* (i,j) itself is a multiloop stem; dangle model decides which
   enclosed pair / unpaired-flank combination reproduces fij */
switch(dangles){
case 0: tt = ptype[i][j-i];
if(fij == c[i][j-i] + E_MLstem(tt, -1, -1, P)){
structure[i-start] = '(';
structure[j-start] = ')';
goto repeat1;
}
break;
case 2: tt = ptype[i][j-i];
if(fij == c[i][j-i] + E_MLstem(tt, (i>1) ? S1[i-1] : -1, (j < length) ? S1[j+1] : -1, P)){
structure[i-start] = '(';
structure[j-start] = ')';
goto repeat1;
}
break;
default: tt = ptype[i][j-i];
if(fij == c[i][j-i] + E_MLstem(tt, -1, -1, P)){
structure[i-start] = '(';
structure[j-start] = ')';
goto repeat1;
}
tt = ptype[i+1][j-(i+1)];
if(fij == c[i+1][j-(i+1)] + E_MLstem(tt, S1[i], -1, P) + P->MLbase){
structure[++i-start] = '(';
structure[j-start] = ')';
goto repeat1;
}
tt = ptype[i][j-1-i];
if(fij == c[i][j-1-i] + E_MLstem(tt, -1, S1[j], P) + P->MLbase){
structure[i-start] = '(';
structure[--j-start] = ')';
goto repeat1;
}
tt = ptype[i+1][j-1-(i+1)];
if(fij == c[i+1][j-1-(i+1)] + E_MLstem(tt, S1[i], S1[j], P) + 2*P->MLbase){
structure[++i-start] = '(';
structure[--j-start] = ')';
goto repeat1;
}
break;
} /* switch(dangles)... */
/* modular decomposition */
/* split the multiloop interval into two fML parts at k */
for (k = i+1+TURN; k <= j-2-TURN; k++)
if (fij == (fML[i][k-i]+fML[k+1][j-(k+1)]))
break;
if ((dangles==3)&&(k>j-2-TURN)) { /* must be coax stack */
ml = 2;
for (k = i+1+TURN; k <= j-2-TURN; k++) {
type = ptype[i][k-i]; type= rtype[type];
type_2 = ptype[k+1][j-(k+1)]; type_2= rtype[type_2];
if (type && type_2)
if (fij == c[i][k-i]+c[k+1][j-(k+1)]+P->stack[type][type_2]+
2*P->MLintern[1])
break;
}
}
sector[++s].i = i;
sector[s].j = k;
sector[s].ml = ml;
sector[++s].i = k+1;
sector[s].j = j;
sector[s].ml = ml;
if (k>j-2-TURN) nrerror("backtrack failed in fML");
continue;
}
repeat1:
/*----- begin of "repeat:" -----*/
/* Here (i,j) is known to pair; decide whether it closes a hairpin,
   an interior loop, or a multiloop, and push the resulting sectors. */
if (canonical) cij = c[i][j-i];
type = ptype[i][j-i];
if (noLonelyPairs)
if (cij == c[i][j-i]) {
/* (i.j) closes canonical structures, thus
(i+1.j-1) must be a pair */
type_2 = ptype[i+1][j-1-(i+1)]; type_2 = rtype[type_2];
cij -= P->stack[type][type_2];
structure[i+1-start] = '('; structure[j-1-start] = ')';
i++; j--;
canonical=0;
goto repeat1;
}
canonical = 1;
no_close = (((type==3)||(type==4))&&no_closingGU);
if (no_close) {
if (cij == FORBIDDEN) continue;
} else
if (cij == E_Hairpin(j-i-1, type, S1[i+1], S1[j-1],string+i-1, P))
continue;
/* try interior loops: enclosed pair (p,q) within MAXLOOP unpaired bases */
for (p = i+1; p <= MIN2(j-2-TURN,i+MAXLOOP+1); p++) {
int minq;
minq = j-i+p-MAXLOOP-2;
if (minq<p+1+TURN) minq = p+1+TURN;
for (q = j-1; q >= minq; q--) {
type_2 = ptype[p][q-p];
if (type_2==0) continue;
type_2 = rtype[type_2];
if (no_closingGU)
if (no_close||(type_2==3)||(type_2==4))
if ((p>i+1)||(q<j-1)) continue; /* continue unless stack */
/* energy = oldLoopEnergy(i, j, p, q, type, type_2); */
energy = E_IntLoop(p-i-1, j-q-1, type, type_2, S1[i+1], S1[j-1], S1[p-1], S1[q+1],P);
new = energy+c[p][q-p];
traced = (cij == new);
if (traced) {
structure[p-start] = '(';
structure[q-start] = ')';
i = p, j = q;
goto repeat1;
}
}
}
/* end of repeat: --------------------------------------------------*/
/* (i.j) must close a multi-loop */
tt = rtype[type];
i1 = i+1; j1 = j-1;
if(with_gquad){
/*
The case that is handled here actually resembles something like
an interior loop where the enclosing base pair is of regular
kind and the enclosed pair is not a canonical one but a g-quadruplex
that should then be decomposed further...
*/
if(backtrack_GQuad_IntLoop_L(cij, i, j, type, S, ggg, maxdist, &p, &q, P)){
i = p; j = q;
goto repeat_gquad;
}
}
sector[s+1].ml = sector[s+2].ml = 1;
switch(dangles){
case 0: mm = P->MLclosing + E_MLstem(tt, -1, -1, P);
for(k = i+2+TURN; k < j-2-TURN; k++){
if(cij == fML[i+1][k-(i+1)] + fML[k+1][j-1-(k+1)] + mm)
break;
}
break;
case 2: mm = P->MLclosing + E_MLstem(tt, S1[j-1], S1[i+1], P);
for(k = i+2+TURN; k < j-2-TURN; k++){
if(cij == fML[i+1][k-(i+1)] + fML[k+1][j-1-(k+1)] + mm)
break;
}
break;
default: mm = P->MLclosing + E_MLstem(tt, -1, -1, P);
mm5 = P->MLclosing + E_MLstem(tt, S1[j-1], -1, P) + P->MLbase;
mm3 = P->MLclosing + E_MLstem(tt, -1, S1[i+1], P) + P->MLbase;
mm53 = P->MLclosing + E_MLstem(tt, S1[j-1], S1[i+1], P) + 2*P->MLbase;
for(k = i+2+TURN; k < j-2-TURN; k++){
if(cij == fML[i+1][k-(i+1)] + fML[k+1][j-1-(k+1)] + mm)
break;
else if(cij == fML[i+2][k-(i+2)] + fML[k+1][j-1-(k+1)] + mm3){
i1 = i+2;
break;
}
else if(cij == fML[i+1][k-(i+1)] + fML[k+1][j-2-(k+1)] + mm5){
j1 = j-2;
break;
}
else if(cij == fML[i+2][k-(i+2)] + fML[k+1][j-2-(k+1)] + mm53){
i1 = i+2;
j1 = j-2;
break;
}
/* coaxial stacking of (i.j) with (i+1.k) or (k.j-1) */
/* use MLintern[1] since coax stacked pairs don't get TerminalAU */
if (dangles==3) {
int en;
type_2 = ptype[i+1][k-(i+1)]; type_2 = rtype[type_2];
if (type_2) {
en = c[i+1][k-(i+1)]+P->stack[type][type_2]+fML[k+1][j-1-(k+1)];
if (cij == en+2*P->MLintern[1]+P->MLclosing) {
ml = 2;
sector[s+1].ml = 2;
break;
}
}
type_2 = ptype[k+1][j-1-(k+1)]; type_2 = rtype[type_2];
if (type_2) {
en = c[k+1][j-1-(k+1)]+P->stack[type][type_2]+fML[i+1][k-(i+1)];
if (cij == en+2*P->MLintern[1]+P->MLclosing) {
sector[s+2].ml = 2;
break;
}
}
}
}
break;
} /* switch(dangles)... */
if (k<=j-3-TURN) { /* found the decomposition */
sector[++s].i = i1;
sector[s].j = k;
sector[++s].i = k+1;
sector[s].j = j1;
} else {
#if 0
/* Y shaped ML loops fon't work yet */
if (dangles==3) {
/* (i,j) must close a Y shaped ML loop with coax stacking */
if (cij == fML[i+1][j-2-(i+2)] + mm + d3 + d5 + P->MLbase + P->MLbase) {
i1 = i+2;
j1 = j-2;
} else if (cij == fML[i+1][j-2-(i+1)] + mm + d5 + P->MLbase)
j1 = j-2;
else if (cij == fML[i+2][j-1-(i+2)] + mm + d3 + P->MLbase)
i1 = i+2;
else /* last chance */
if (cij != fML[i+1][j-1-(i+1)] + mm + P->MLbase)
fprintf(stderr, "backtracking failed in repeat");
/* if we arrive here we can express cij via fML[i1,j1]+dangles */
sector[++s].i = i1;
sector[s].j = j1;
}
else
#endif
nrerror("backtracking failed in repeat");
}
continue; /* this is a workarround to not accidentally proceed in the following block */
repeat_gquad:
/*
now we do some fancy stuff to backtrace the stacksize and linker lengths
of the g-quadruplex that should reside within position i,j
*/
{
int l[3], L, a;
L = -1;
get_gquad_pattern_mfe(S, i, j, P, &L, l);
if(L != -1){
/* fill the G's of the quadruplex into the structure string */
for(a=0;a<L;a++){
structure[i+a-start] = '+';
structure[i+L+l[0]+a-start] = '+';
structure[i+L+l[0]+L+l[1]+a-start] = '+';
structure[i+L+l[0]+L+l[1]+L+l[2]+a-start] = '+';
}
goto repeat_gquad_exit;
}
nrerror("backtracking failed in repeat_gquad");
}
repeat_gquad_exit:
asm("nop");
}
/* trim trailing '-' placeholders, then turn remaining '-' into '.' (unpaired) */
for (i=strlen(structure)-1; i>0 && structure[i] == '-'; i--)
structure[i] = '\0';
for (;i>=0; i--)
if (structure[i]=='-') structure[i]='.';
return structure;
}
PRIVATE void update_fold_params(void){
if(P) free(P);
P = scale_parameters();
make_pair_matrix();
}
/*---------------------------------------------------------------------------*/
PRIVATE void make_ptypes(const short *S, int i, int maxdist, int n) {
int j,k, type;
for (k=TURN+1; k<maxdist; k++) {
j = i+k;
if (j>n) continue;
type = pair[S[i]][S[j]];
if (noLonelyPairs && type) {
if (!ptype[i+1][j-1-i-1])
if (j==n || i==1 || (!pair[S[i-1]][S[j+1]])) type=0;
}
ptype[i][j-i]=type;
}
}
|
openmp_wrapper.h | /*!
* Copyright (c) 2017 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_OPENMP_WRAPPER_H_
#define LIGHTGBM_OPENMP_WRAPPER_H_
#ifdef _OPENMP
#include <omp.h>
#include <LightGBM/utils/log.h>
#include <exception>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>
/*!
 * \brief Captures the first exception thrown inside an OpenMP parallel
 *        region and rethrows it on the spawning thread.
 *
 * Exceptions must not escape an OpenMP worker thread, so worker code
 * calls CaptureException() inside its catch blocks and the owner calls
 * ReThrow() (directly or via the destructor) after the parallel region.
 */
class ThreadExceptionHelper {
 public:
  ThreadExceptionHelper() {
    ex_ptr_ = nullptr;
  }
  ~ThreadExceptionHelper() {
    ReThrow();
  }
  /*!
   * \brief Rethrow the captured exception, if any.
   *
   * The stored pointer is cleared *before* throwing: otherwise an explicit
   * ReThrow() would leave ex_ptr_ set, and the destructor — running during
   * stack unwinding — would rethrow from a noexcept destructor, which
   * calls std::terminate().
   */
  void ReThrow() {
    if (ex_ptr_ != nullptr) {
      std::exception_ptr tmp_ex_ptr = ex_ptr_;
      ex_ptr_ = nullptr;
      std::rethrow_exception(tmp_ex_ptr);
    }
  }
  /*! \brief Record std::current_exception(); only the first capture wins. */
  void CaptureException() {
    // only catch first exception.
    if (ex_ptr_ != nullptr) { return; }
    std::unique_lock<std::mutex> guard(lock_);
    // double-check under the lock: another thread may have captured first
    if (ex_ptr_ != nullptr) { return; }
    ex_ptr_ = std::current_exception();
  }

 private:
  std::exception_ptr ex_ptr_;  // first captured exception (null if none)
  std::mutex lock_;            // guards the write of ex_ptr_
};
#define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper
#define OMP_LOOP_EX_BEGIN() try {
#define OMP_LOOP_EX_END() } \
catch(std::exception& ex) { Log::Warning(ex.what()); omp_except_helper.CaptureException(); } \
catch(...) { omp_except_helper.CaptureException(); }
#define OMP_THROW_EX() omp_except_helper.ReThrow()
#else
#ifdef _MSC_VER
#pragma warning(disable: 4068) // disable unknown pragma warning
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** Fall here if no OPENMP support, so just
simulate a single thread running.
All #pragma omp should be ignored by the compiler **/
inline void omp_set_num_threads(int) {}
inline void omp_set_nested(int) {}
inline int omp_get_num_threads() {return 1;}
inline int omp_get_thread_num() {return 0;}
#ifdef __cplusplus
}; // extern "C"
#endif
#define OMP_INIT_EX()
#define OMP_LOOP_EX_BEGIN()
#define OMP_LOOP_EX_END()
#define OMP_THROW_EX()
#endif
#endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
|
myFunc.h | #include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <malloc.h>
#include <math.h>
#include <omp.h>
#define MIC_DEV 0
#define ALLOC alloc_if(1) free_if(0)
#define FREE alloc_if(0) free_if(1)
#define REUSE alloc_if(0) free_if(0)
#pragma offload_attribute (push, target (mic))
typedef struct userData {
// Data information
int nExamples;
__declspec(align(64)) float * restrict example;
// Timing information
double timeObjFunc;
int countObjFunc;
double timeDataLoad;
double minTime, maxTime;
} userData_t;
// helper macros to index into the example array
#define IN(i,nExamples,j) (i*nExamples+j)
#define OUT(i,nExamples,j) ((i+N_INPUT)*nExamples+j)
inline double getTime() { return(omp_get_wtime());}
// Define the Sigmoid
#ifdef USE_LINEAR
char *desc="generated_PCA_func LINEAR()";
inline float G(float x) { return( x ) ;}
#define G_ESTIMATE 0
#elif USE_TANH
char *desc="generated_func tanh()";
inline float G(float x) { return( tanhf(x) ) ;}
#define G_ESTIMATE 7 // estimate 7 flops for G
#elif LOGISTIC
char *desc="generated func logistic()";
inline float G(float x) { return( 1.f/(1.f+expf(-x)) ) ;}
#define G_ESTIMATE 7 // estimate flops for G
#else // Use Elliott function
char *desc="generated func Eliott activation: x/(1+fabsf(x))";
inline float G(float x) { return( x/(1.f+fabsf(x)) ) ;}
#define G_ESTIMATE 3 // estimate flops for G
#endif
#include "fcn.h"
/* Objective function for the optimizer: sum-of-squared prediction errors
   over all examples, evaluated (optionally) on the MIC coprocessor.
   n            - number of parameters (unused here; fixed at N_PARAM)
   x            - current parameter vector (double precision)
   grad         - gradient output; not implemented, any non-NULL aborts
   my_func_data - opaque pointer to the userData_t context
   Returns sqrt(sum of squared residuals). Also updates the timing
   statistics (min/max/total time, call count) in the context. */
double objFunc(unsigned n, const double * restrict x, double * restrict grad,
void * restrict my_func_data)
{
double err;
userData_t *uData = (userData_t *) my_func_data;
if(grad) {
/* gradient-based optimizers are not supported; fail hard rather than
   silently returning garbage derivatives */
//fprintf(stderr,"Gradient not implemented!\n");
exit(1);
}
double runTime=getTime();
int nExamples = uData->nExamples;
__declspec(align(64)) float * restrict example = uData->example;
#pragma offload target(mic:MIC_DEV) in(x:length(N_PARAM)) out(err) in(example:length(0) REUSE)
{
err=0.; // initialize error here in case offload selected
// convert from double to float for speed
__declspec(align(64)) float P[N_PARAM];
for(int i=0; i < N_PARAM; i++) P[i]=x[i];
#pragma omp parallel for reduction(+ : err)
for(int i=0; i < nExamples; i++) {
float d=myFunc(i, P, example, nExamples, NULL);
err += d*d;
}
}
runTime = getTime() - runTime;
// Note a maxTime of zero means this is the first call
if(uData->maxTime == 0.) {
uData->maxTime = uData->minTime = runTime;
}
uData->maxTime = (uData->maxTime > runTime)?uData->maxTime:runTime;
uData->minTime = (uData->minTime < runTime)?uData->minTime:runTime;
uData->timeObjFunc += runTime;
uData->countObjFunc++;
return sqrt(err);
}
#pragma offload_attribute (pop)
/* Tear-down: print run statistics (thread count, load/objective timings,
   and optional flop estimates), free the example buffer on the MIC device,
   then free it on the host. The context's example pointer is NULLed. */
void fini(userData_t *uData)
{
int nThreads=0;
// The intel recommended way to get the number of threads in offload mode.
#pragma offload target(mic:MIC_DEV) out(nThreads)
{
#pragma omp parallel
{
#pragma omp single
{
nThreads = omp_get_num_threads();
}
}
}
printf("number OMP threads %d\n", nThreads);
printf("DataLoadTime %g\n", uData->timeDataLoad);
printf("AveObjTime %g, countObjFunc %d, totalObjTime %g\n",
uData->timeObjFunc/uData->countObjFunc, uData->countObjFunc, uData->timeObjFunc);
#ifdef FLOP_ESTIMATE
printf("Estimated flops in myFunc %d, estimated average GFlop/s %g\n", FLOP_ESTIMATE,
(((double)uData->nExamples*FLOP_ESTIMATE)/(uData->timeObjFunc/uData->countObjFunc)/1.e9) )
;
printf("Estimated maximum GFlop/s %g, minimum GFLop/s %g\n",
(((double)uData->nExamples*FLOP_ESTIMATE)/(uData->minTime)/1.e9),
(((double)uData->nExamples*FLOP_ESTIMATE)/(uData->maxTime)/1.e9) );
#endif
// free example vector if using offload mode
__declspec(align(64)) float * restrict example = uData->example;
#pragma offload target(mic:MIC_DEV) in(example: length(0) FREE)
{}
// free on the host
free(example); uData->example=NULL;
}
/* init -- load the binary example file into the context and push it to the
   MIC device.
   filename - path to the data file, or "-" for stdin
   uData    - context to fill (nExamples, example buffer, load time)
   File layout: int32 nInput, int32 nOutput, int32 nExamples, followed by
   per-example float inputs and outputs (see IN()/OUT() index macros).
   Exits with an error message on open failure, header mismatch, allocation
   failure, or a short read (the original ignored fread() results and could
   silently train on an uninitialized buffer). */
void init(char*filename, userData_t *uData)
{
FILE *fn=stdin;
if(strcmp("-", filename) != 0)
fn=fopen(filename,"r");
if(!fn) {
fprintf(stderr,"Cannot open %s\n",filename);
exit(1);
}
// read the header information
double startTime=getTime();
int32_t nInput, nOutput;
int32_t nExamples;
if(fread(&nInput,sizeof(int32_t), 1, fn) != 1) {
fprintf(stderr,"Cannot read header (nInput)!\n");
exit(1);
}
if(nInput != N_INPUT) {
fprintf(stderr,"Number of inputs incorrect!\n");
exit(1);
}
if(fread(&nOutput,sizeof(int32_t), 1, fn) != 1) {
fprintf(stderr,"Cannot read header (nOutput)!\n");
exit(1);
}
if(nOutput != N_OUTPUT) {
fprintf(stderr,"Number of outputs incorrect!\n");
exit(1);
}
if(fread(&nExamples,sizeof(int32_t), 1, fn) != 1) {
fprintf(stderr,"Cannot read header (nExamples)!\n");
exit(1);
}
if(nExamples <= 0) {
fprintf(stderr,"Number of examples incorrect!\n");
exit(1);
}
uData->nExamples = nExamples;
// aligned allocation of the data
uData->example=(float*) memalign(64,nExamples*EXAMPLE_SIZE*sizeof(float));
if(!uData->example) {
fprintf(stderr,"Not enough memory for examples!\n");
exit(1);
}
// read the data; abort on a short read instead of using garbage values
for(int exIndex=0; exIndex < uData->nExamples; exIndex++) {
for(int i=0; i < nInput; i++)
if(fread(&uData->example[IN(i,uData->nExamples, exIndex)],1, sizeof(float), fn) != sizeof(float)) {
fprintf(stderr,"Premature end of data (example %d)!\n", exIndex);
exit(1);
}
for(int i=0; i < nOutput; i++)
if(fread(&uData->example[OUT(i,uData->nExamples, exIndex)],1, sizeof(float), fn) != sizeof(float)) {
fprintf(stderr,"Premature end of data (example %d)!\n", exIndex);
exit(1);
}
}
// transfer the examples to the device once; later offloads REUSE them
__declspec(align(64)) float * restrict example = uData->example;
int Xsiz = uData->nExamples*EXAMPLE_SIZE; // single variable works around a compiler bug
#pragma offload target(mic:MIC_DEV) in(example: length(Xsiz) ALLOC)
{}
uData->timeDataLoad = getTime() - startTime;
if(fn!=stdin) fclose(fn);
}
|
pi-v8.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#include "extrae_user_events.h"
#define PROGRAM 1000
#define PI_COMPUTATION 1
#define END 0
#endif
/* Approximate pi as the midpoint-rule integral of 4/(1+x^2) on [0,1],
   parallelized with an OpenMP reduction. Usage: pi <num_steps>. */
int main(int argc, char *argv[]) {
  double x, sum=0.0, pi=0.0;
#if _DEBUG_
  double start,end;
#endif
  int i;
  const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
  if (argc < 2) {
    /* pass a literal format string rather than user-shaped data */
    fprintf(stderr, "%s", Usage);
    exit(1);
  }
  int num_steps = atoi(argv[1]);
  /* atoi() yields 0 on non-numeric input; num_steps <= 0 would make
     `step` infinite and the loop a no-op, so reject it up front */
  if (num_steps <= 0) {
    fprintf(stderr, "%s", Usage);
    exit(1);
  }
  double step = 1.0/(double) num_steps;
#if _DEBUG_
  start= omp_get_wtime();
#else
  Extrae_event (PROGRAM, PI_COMPUTATION);
#endif
  /* do computation -- using all available threads */
#pragma omp parallel private(i,x) reduction(+:sum)
  {
#if _DEBUG_
    int id = omp_get_thread_num();
#endif
    /* static,1 interleaves iterations round-robin across threads */
#pragma omp for schedule(static,1)
    for (i=0; i < num_steps; i++) {
      x = (i+0.5)*step;
      sum += 4.0/(1.0+x*x);
#if _DEBUG_
      printf("thread id:%d it:%d\n",id,i);
#endif
    }
  }
  pi = step * sum;
#if _DEBUG_
  end = omp_get_wtime();
  printf("Wall clock execution time = %.9f seconds\n", end-start);
#else
  Extrae_event (PROGRAM, END);
#endif
  /* print results */
  printf("Value of pi = %12.10f\n", pi);
  return EXIT_SUCCESS;
}
|
dcraw.c | #ifndef IGNOREALL
/*
dcraw.c -- Dave Coffin's raw photo decoder
Copyright 1997-2015 by Dave Coffin, dcoffin a cybercom o net
This is a command-line ANSI C program to convert raw photos from
any digital camera on any computer running any operating system.
No license is required to download and use dcraw.c. However,
to lawfully redistribute dcraw, you must either (a) offer, at
no extra charge, full source code* for all executable files
containing RESTRICTED functions, (b) distribute this code under
the GPL Version 2 or later, (c) remove all RESTRICTED functions,
re-implement them, or copy them from an earlier, unrestricted
Revision of dcraw.c, or (d) purchase a license from the author.
The functions that process Foveon images have been RESTRICTED
since Revision 1.237. All other code remains free for all uses.
*If you have not modified dcraw.c in any way, a link to my
homepage qualifies as "full source code".
$Revision: 1.476 $
$Date: 2015/05/25 02:29:14 $
*/
/*@out DEFINES
#ifndef USE_JPEG
#define NO_JPEG
#endif
#ifndef USE_JASPER
#define NO_JASPER
#endif
@end DEFINES */
#define NO_LCMS
#define DCRAW_VERBOSE
//@out DEFINES
#define DCRAW_VERSION "9.26"
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#define _USE_MATH_DEFINES
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <float.h>
#include <limits.h>
#include <math.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/types.h>
//@end DEFINES
#if defined(DJGPP) || defined(__MINGW32__)
#define fseeko fseek
#define ftello ftell
#else
#define fgetc getc_unlocked
#endif
//@out DEFINES
#ifdef __CYGWIN__
#include <io.h>
#endif
#if defined WIN32 || defined(__MINGW32__)
#include <sys/utime.h>
#include <winsock2.h>
#pragma comment(lib, "ws2_32.lib")
#define snprintf _snprintf
#define strcasecmp stricmp
#define strncasecmp strnicmp
//@end DEFINES
typedef __int64 INT64;
typedef unsigned __int64 UINT64;
//@out DEFINES
#else
#include <unistd.h>
#include <utime.h>
#include <netinet/in.h>
typedef long long INT64;
typedef unsigned long long UINT64;
#endif
#ifdef NODEPS
#define NO_JASPER
#define NO_JPEG
#define NO_LCMS
#endif
#ifndef NO_JASPER
#include <jasper/jasper.h> /* Decode Red camera movies */
#endif
#ifndef NO_JPEG
#include <jpeglib.h> /* Decode compressed Kodak DC120 photos */
#endif /* and Adobe Lossy DNGs */
#ifndef NO_LCMS
#ifdef USE_LCMS
#include <lcms.h> /* Support color profiles */
#else
#include <lcms2.h> /* Support color profiles */
#endif
#endif
#ifdef LOCALEDIR
#include <libintl.h>
#define _(String) gettext(String)
#else
#define _(String) (String)
#endif
#ifdef LJPEG_DECODE
#error Please compile dcraw.c by itself.
#error Do not link it with ljpeg_decode.
#endif
#ifndef LONG_BIT
#define LONG_BIT (8 * sizeof(long))
#endif
//@end DEFINES
#if !defined(uchar)
#define uchar unsigned char
#endif
#if !defined(ushort)
#define ushort unsigned short
#endif
/*
All global variables are defined here, and all functions that
access them are prefixed with "CLASS". Note that a thread-safe
C++ class cannot have non-const static local variables.
*/
FILE *ifp, *ofp;
short order;
const char *ifname;
char *meta_data, xtrans[6][6], xtrans_abs[6][6];
char cdesc[5], desc[512], make[64], model[64], model2[64], artist[64], software[64];
float flash_used, canon_ev, iso_speed, shutter, aperture, focal_len;
time_t timestamp;
off_t strip_offset, data_offset;
off_t thumb_offset, meta_offset, profile_offset;
unsigned shot_order, kodak_cbpp, exif_cfa, unique_id;
unsigned thumb_length, meta_length, profile_length;
unsigned thumb_misc, *oprof, fuji_layout, shot_select = 0, multi_out = 0;
unsigned tiff_nifds, tiff_samples, tiff_bps, tiff_compress;
unsigned black, maximum, mix_green, raw_color, zero_is_bad;
unsigned zero_after_ff, is_raw, dng_version, is_foveon, data_error;
unsigned tile_width, tile_length, gpsdata[32], load_flags;
unsigned flip, tiff_flip, filters, colors;
ushort raw_height, raw_width, height, width, top_margin, left_margin;
ushort shrink, iheight, iwidth, fuji_width, thumb_width, thumb_height;
ushort *raw_image, (*image)[4], cblack[4102];
ushort white[8][8], curve[0x10000], cr2_slice[3], sraw_mul[4];
double pixel_aspect, aber[4] = {1, 1, 1, 1}, gamm[6] = {0.45, 4.5, 0, 0, 0, 0};
float bright = 1, user_mul[4] = {0, 0, 0, 0}, threshold = 0;
int mask[8][4];
int half_size = 0, four_color_rgb = 0, document_mode = 0, highlight = 0;
int verbose = 0, use_auto_wb = 0, use_camera_wb = 0, use_camera_matrix = 1;
int output_color = 1, output_bps = 8, output_tiff = 0, med_passes = 0;
int no_auto_bright = 0;
unsigned greybox[4] = {0, 0, UINT_MAX, UINT_MAX};
float cam_mul[4], pre_mul[4], cmatrix[3][4], rgb_cam[3][4];
const double xyz_rgb[3][3] = {/* XYZ from RGB */
{0.412453, 0.357580, 0.180423},
{0.212671, 0.715160, 0.072169},
{0.019334, 0.119193, 0.950227}};
const float d65_white[3] = {0.950456, 1, 1.088754};
int histogram[4][0x2000];
void (*write_thumb)(), (*write_fun)();
void (*load_raw)(), (*thumb_load_raw)();
jmp_buf failure;
struct decode
{
struct decode *branch[2];
int leaf;
} first_decode[2048], *second_decode, *free_decode;
struct tiff_ifd
{
int t_width, t_height, bps, comp, phint, offset, t_flip, samples, bytes;
int t_tile_width, t_tile_length, sample_format, predictor;
float t_shutter;
} tiff_ifd[10];
struct ph1
{
int format, key_off, tag_21a;
int t_black, split_col, black_col, split_row, black_row;
float tag_210;
} ph1;
#define CLASS
//@out DEFINES
#define FORC(cnt) for (c = 0; c < cnt; c++)
#define FORC3 FORC(3)
#define FORC4 FORC(4)
#define FORCC for (c = 0; c < colors && c < 4; c++)
#define SQR(x) ((x) * (x))
#define ABS(x) (((int)(x) ^ ((int)(x) >> 31)) - ((int)(x) >> 31))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define LIM(x, min, max) MAX(min, MIN(x, max))
#define ULIM(x, y, z) ((y) < (z) ? LIM(x, y, z) : LIM(x, z, y))
#define CLIP(x) LIM((int)(x), 0, 65535)
#define CLIP15(x) LIM((int)(x), 0, 32767)
#define SWAP(a, b) \
{ \
a = a + b; \
b = a - b; \
a = a - b; \
}
#define my_swap(type, i, j) \
{ \
type t = i; \
i = j; \
j = t; \
}
static float fMAX(float a, float b) { return MAX(a, b); }
/*
In order to inline this calculation, I make the risky
assumption that all filter patterns can be described
by a repeating pattern of eight rows and two columns
Do not use the FC or BAYER macros with the Leaf CatchLight,
because its pattern is 16x16, not 2x8.
Return values are either 0/1/2/3 = G/M/C/Y or 0/1/2/3 = R/G1/B/G2
PowerShot 600 PowerShot A50 PowerShot Pro70 Pro90 & G1
0xe1e4e1e4: 0x1b4e4b1e: 0x1e4b4e1b: 0xb4b4b4b4:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 G M G M G M 0 C Y C Y C Y 0 Y C Y C Y C 0 G M G M G M
1 C Y C Y C Y 1 M G M G M G 1 M G M G M G 1 Y C Y C Y C
2 M G M G M G 2 Y C Y C Y C 2 C Y C Y C Y
3 C Y C Y C Y 3 G M G M G M 3 G M G M G M
4 C Y C Y C Y 4 Y C Y C Y C
PowerShot A5 5 G M G M G M 5 G M G M G M
0x1e4e1e4e: 6 Y C Y C Y C 6 C Y C Y C Y
7 M G M G M G 7 M G M G M G
0 1 2 3 4 5
0 C Y C Y C Y
1 G M G M G M
2 C Y C Y C Y
3 M G M G M G
All RGB cameras use one of these Bayer grids:
0x16161616: 0x61616161: 0x49494949: 0x94949494:
0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
0 B G B G B G 0 G R G R G R 0 G B G B G B 0 R G R G R G
1 G R G R G R 1 B G B G B G 1 R G R G R G 1 G B G B G B
2 B G B G B G 2 G R G R G R 2 G B G B G B 2 R G R G R G
3 G R G R G R 3 B G B G B G 3 R G R G R G 3 G B G B G B
*/
#define RAWINDEX(row, col) ((row)*raw_width + (col))
#define RAW(row, col) raw_image[(row)*raw_width + (col)]
//@end DEFINES
#define FC(row, col) (filters >> ((((row) << 1 & 14) + ((col)&1)) << 1) & 3)
//@out DEFINES
#define BAYER(row, col) image[((row) >> shrink) * iwidth + ((col) >> shrink)][FC(row, col)]
#define BAYER2(row, col) image[((row) >> shrink) * iwidth + ((col) >> shrink)][fcol(row, col)]
//@end DEFINES
/* @out COMMON
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#define LIBRAW_IO_REDEFINED
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
@end COMMON */
//@out COMMON
/* Return the color index (filter channel) at sensor position (row, col).
   filters == 1 : Leaf CatchLight — 16x16 repeating pattern, looked up in
                  the static table below (offset by the crop margins).
   filters == 9 : Fuji X-Trans — 6x6 repeating pattern from xtrans[][].
   otherwise    : ordinary 2x8 Bayer pattern decoded by the FC() macro. */
int CLASS fcol(int row, int col)
{
static const char filter[16][16] = {
{2, 1, 1, 3, 2, 3, 2, 0, 3, 2, 3, 0, 1, 2, 1, 0}, {0, 3, 0, 2, 0, 1, 3, 1, 0, 1, 1, 2, 0, 3, 3, 2},
{2, 3, 3, 2, 3, 1, 1, 3, 3, 1, 2, 1, 2, 0, 0, 3}, {0, 1, 0, 1, 0, 2, 0, 2, 2, 0, 3, 0, 1, 3, 2, 1},
{3, 1, 1, 2, 0, 1, 0, 2, 1, 3, 1, 3, 0, 1, 3, 0}, {2, 0, 0, 3, 3, 2, 3, 1, 2, 0, 2, 0, 3, 2, 2, 1},
{2, 3, 3, 1, 2, 1, 2, 1, 2, 1, 1, 2, 3, 0, 0, 1}, {1, 0, 0, 2, 3, 0, 0, 3, 0, 3, 0, 3, 2, 1, 2, 3},
{2, 3, 3, 1, 1, 2, 1, 0, 3, 2, 3, 0, 2, 3, 1, 3}, {1, 0, 2, 0, 3, 0, 3, 2, 0, 1, 1, 2, 0, 1, 0, 2},
{0, 1, 1, 3, 3, 2, 2, 1, 1, 3, 3, 0, 2, 1, 3, 2}, {2, 3, 2, 0, 0, 1, 3, 0, 2, 0, 1, 2, 3, 0, 1, 0},
{1, 3, 1, 2, 3, 2, 3, 2, 0, 2, 0, 1, 1, 0, 3, 0}, {0, 2, 0, 3, 1, 0, 0, 1, 1, 3, 3, 2, 3, 2, 2, 1},
{2, 1, 3, 2, 3, 1, 2, 1, 0, 3, 0, 2, 0, 2, 0, 2}, {0, 3, 1, 0, 0, 2, 0, 3, 2, 1, 3, 1, 1, 3, 1, 3}};
if (filters == 1)
return filter[(row + top_margin) & 15][(col + left_margin) & 15];
if (filters == 9)
/* +6 keeps the modulus non-negative for slightly negative coordinates */
return xtrans[(row + 6) % 6][(col + 6) % 6];
return FC(row, col);
}
#if !defined(__FreeBSD__)
/* Bounded strlen replacement for platforms lacking strnlen(): length of s,
   capped at n bytes, scanning no further than s[n-1]. */
static size_t local_strnlen(const char *s, size_t n)
{
const char *nul = (const char *)memchr(s, 0, n);
return nul ? (size_t)(nul - s) : n;
}
/* add OS X version check here ?? */
#define strnlen(a, b) local_strnlen(a, b)
#endif
#ifdef LIBRAW_LIBRARY_BUILD
static int Fuji_wb_list1[] = {LIBRAW_WBI_FineWeather, LIBRAW_WBI_Shade, LIBRAW_WBI_FL_D,
LIBRAW_WBI_FL_L, LIBRAW_WBI_FL_W, LIBRAW_WBI_Tungsten};
static int nFuji_wb_list1 = sizeof(Fuji_wb_list1) / sizeof(int);
static int FujiCCT_K[31] = {2500, 2550, 2650, 2700, 2800, 2850, 2950, 3000, 3100, 3200, 3300,
3400, 3600, 3700, 3800, 4000, 4200, 4300, 4500, 4800, 5000, 5300,
5600, 5900, 6300, 6700, 7100, 7700, 8300, 9100, 10000};
static int Fuji_wb_list2[] = {LIBRAW_WBI_Auto, 0, LIBRAW_WBI_Custom, 6, LIBRAW_WBI_FineWeather, 1,
LIBRAW_WBI_Shade, 8, LIBRAW_WBI_FL_D, 10, LIBRAW_WBI_FL_L, 11,
LIBRAW_WBI_FL_W, 12, LIBRAW_WBI_Tungsten, 2, LIBRAW_WBI_Underwater, 35,
LIBRAW_WBI_Ill_A, 82, LIBRAW_WBI_D65, 83};
static int nFuji_wb_list2 = sizeof(Fuji_wb_list2) / sizeof(int);
static int Oly_wb_list1[] = {LIBRAW_WBI_Shade, LIBRAW_WBI_Cloudy, LIBRAW_WBI_FineWeather,
LIBRAW_WBI_Tungsten, LIBRAW_WBI_Sunset, LIBRAW_WBI_FL_D,
LIBRAW_WBI_FL_N, LIBRAW_WBI_FL_W, LIBRAW_WBI_FL_WW};
static int Oly_wb_list2[] = {LIBRAW_WBI_Auto,
0,
LIBRAW_WBI_Tungsten,
3000,
0x100,
3300,
0x100,
3600,
0x100,
3900,
LIBRAW_WBI_FL_W,
4000,
0x100,
4300,
LIBRAW_WBI_FL_D,
4500,
0x100,
4800,
LIBRAW_WBI_FineWeather,
5300,
LIBRAW_WBI_Cloudy,
6000,
LIBRAW_WBI_FL_N,
6600,
LIBRAW_WBI_Shade,
7500,
LIBRAW_WBI_Custom1,
0,
LIBRAW_WBI_Custom2,
0,
LIBRAW_WBI_Custom3,
0,
LIBRAW_WBI_Custom4,
0};
static int Pentax_wb_list1[] = {LIBRAW_WBI_Daylight, LIBRAW_WBI_Shade, LIBRAW_WBI_Cloudy, LIBRAW_WBI_Tungsten,
LIBRAW_WBI_FL_D, LIBRAW_WBI_FL_N, LIBRAW_WBI_FL_W, LIBRAW_WBI_Flash};
static int Pentax_wb_list2[] = {LIBRAW_WBI_Daylight, LIBRAW_WBI_Shade, LIBRAW_WBI_Cloudy,
LIBRAW_WBI_Tungsten, LIBRAW_WBI_FL_D, LIBRAW_WBI_FL_N,
LIBRAW_WBI_FL_W, LIBRAW_WBI_Flash, LIBRAW_WBI_FL_L};
static int nPentax_wb_list2 = sizeof(Pentax_wb_list2) / sizeof(int);
/* Read up to len bytes from the datastream into buf and force NUL
   termination (the last byte of buf is always overwritten with 0).
   Returns the stream's read() result, or 0 when len == 0.
   NOTE(review): truncates the final data byte when the source fills the
   whole buffer — presumably acceptable for the string fields it reads. */
static int stread(char *buf, size_t len, LibRaw_abstract_datastream *fp)
{
if(len>0)
{
int r = fp->read(buf, len, 1);
buf[len - 1] = 0;
return r;
}
else
return 0;
}
#define stmread(buf, maxlen, fp) stread(buf, MIN(maxlen, sizeof(buf)), fp)
#endif
#if !defined(__GLIBC__) && !defined(__FreeBSD__)
/* Portable memmem() replacement: locate the first occurrence of the byte
   sequence needle[0..needlelen) within haystack[0..haystacklen).
   Returns a pointer to the match, or 0 if not found.
   Fix: when needlelen > haystacklen the original computed
   haystack + haystacklen - needlelen with unsigned (size_t) arithmetic,
   which wraps to a huge bound and scans out of bounds; guard against it. */
char *my_memmem(char *haystack, size_t haystacklen, char *needle, size_t needlelen)
{
char *c;
if (needlelen > haystacklen)
return 0;
for (c = haystack; c <= haystack + haystacklen - needlelen; c++)
if (!memcmp(c, needle, needlelen))
return c;
return 0;
}
#define memmem my_memmem
/* Portable strcasestr() replacement: case-insensitive substring search.
   Returns a pointer to the first match of needle in haystack, or 0.
   Fix: strlen(needle) was recomputed on every haystack position, making
   the scan O(|haystack| * |needle|) in strlen calls alone; hoist the
   loop-invariant length. */
char *my_strcasestr(char *haystack, const char *needle)
{
char *c;
size_t nlen = strlen(needle);
for (c = haystack; *c; c++)
if (!strncasecmp(c, needle, nlen))
return c;
return 0;
}
#define strcasestr my_strcasestr
#endif
#define strbuflen(buf) strnlen(buf, sizeof(buf) - 1)
//@end COMMON
/* Allocation check: no-op when ptr is non-NULL; otherwise report an
   out-of-memory error (tagged with the current input file and the calling
   site name in `where`) and longjmp back to the top-level failure handler. */
void CLASS merror(void *ptr, const char *where)
{
if (ptr)
return;
fprintf(stderr, _("%s: Out of memory in %s\n"), ifname, where);
longjmp(failure, 1);
}
/* Data-error reporter: prints a diagnostic (EOF vs. corruption near the
   current file offset) only for the first error, but counts every call
   in data_error. */
void CLASS derror()
{
  if (data_error == 0)
  {
    fprintf(stderr, "%s: ", ifname);
    if (feof(ifp))
      fprintf(stderr, _("Unexpected end of file\n"));
    else
      fprintf(stderr, _("Corrupt data near 0x%llx\n"), (INT64)ftello(ifp));
  }
  data_error++;
}
//@out COMMON
/* Decode a 16-bit value from two bytes, honoring the file's TIFF byte
   order: "II" (0x4949) is little-endian, "MM" is big-endian. */
ushort CLASS sget2(uchar *s)
{
  return (order == 0x4949) ? (ushort)(s[0] | s[1] << 8)
                           : (ushort)(s[0] << 8 | s[1]);
}
// DNG was written by:
#define nonDNG 0
#define CameraDNG 1
#define AdobeDNG 2
#ifdef LIBRAW_LIBRARY_BUILD
/* Split 'line' (forcibly NUL-terminated at maxlen-1) into whitespace-
   separated tokens in place. Pointers to up to 'maxwords' tokens are
   stored in words[]; the token count is returned. The line buffer is
   modified (separators are overwritten with NULs). */
static int getwords(char *line, char *words[], int maxwords, int maxlen)
{
  line[maxlen - 1] = 0;
  char *p = line;
  int nwords = 0;
  while (1)
  {
    /* Cast to unsigned char: passing a negative plain char to isspace()
       is undefined behavior. */
    while (isspace((unsigned char)*p))
      p++;
    if (*p == '\0')
      return nwords;
    words[nwords++] = p;
    while (!isspace((unsigned char)*p) && *p != '\0')
      p++;
    if (*p == '\0')
      return nwords;
    *p++ = '\0';
    if (nwords >= maxwords)
      return nwords;
  }
}
/* Sanity check for a Sony CameraInfo date field: returns 1 only when all
   six bytes are valid packed BCD (both nibbles in 0..9), else 0. */
static ushort saneSonyCameraInfo(uchar a, uchar b, uchar c, uchar d, uchar e, uchar f)
{
  const uchar v[6] = {a, b, c, d, e, f};
  for (int i = 0; i < 6; i++)
  {
    if ((v[i] >> 4) > 9)
      return 0;
    if ((v[i] & 0x0f) > 9)
      return 0;
  }
  return 1;
}
/* Convert one packed-BCD byte to its decimal value; returns 0 when either
   nibble is out of the 0..9 range (invalid BCD). */
static ushort bcd2dec(uchar data)
{
  const unsigned hi = data >> 4;
  const unsigned lo = data & 0x0f;
  if (hi > 9 || lo > 9)
    return 0;
  return (ushort)(hi * 10 + lo);
}
/* 256-entry substitution table used to de-obfuscate Sony metadata blocks
   (indexed by the obfuscated byte value; the string literal adds a
   terminating NUL, hence the size of 257). */
static uchar SonySubstitution[257] =
    "\x00\x01\x32\xb1\x0a\x0e\x87\x28\x02\xcc\xca\xad\x1b\xdc\x08\xed\x64\x86\xf0\x4f\x8c\x6c\xb8\xcb\x69\xc4\x2c\x03"
    "\x97\xb6\x93\x7c\x14\xf3\xe2\x3e\x30\x8e\xd7\x60\x1c\xa1\xab\x37\xec\x75\xbe\x23\x15\x6a\x59\x3f\xd0\xb9\x96\xb5"
    "\x50\x27\x88\xe3\x81\x94\xe0\xc0\x04\x5c\xc6\xe8\x5f\x4b\x70\x38\x9f\x82\x80\x51\x2b\xc5\x45\x49\x9b\x21\x52\x53"
    "\x54\x85\x0b\x5d\x61\xda\x7b\x55\x26\x24\x07\x6e\x36\x5b\x47\xb7\xd9\x4a\xa2\xdf\xbf\x12\x25\xbc\x1e\x7f\x56\xea"
    "\x10\xe6\xcf\x67\x4d\x3c\x91\x83\xe1\x31\xb3\x6f\xf4\x05\x8a\x46\xc8\x18\x76\x68\xbd\xac\x92\x2a\x13\xe9\x0f\xa3"
    "\x7a\xdb\x3d\xd4\xe7\x3a\x1a\x57\xaf\x20\x42\xb2\x9e\xc3\x8b\xf2\xd5\xd3\xa4\x7e\x1f\x98\x9c\xee\x74\xa5\xa6\xa7"
    "\xd8\x5e\xb0\xb4\x34\xce\xa8\x79\x77\x5a\xc1\x89\xae\x9a\x11\x33\x9d\xf5\x39\x19\x65\x78\x16\x71\xd2\xa9\x44\x63"
    "\x40\x29\xba\xa0\x8f\xe4\xd6\x3b\x84\x0d\xc2\x4e\x58\xdd\x99\x22\x6b\xc9\xbb\x17\x06\xe5\x7d\x66\x43\x62\xf6\xcd"
    "\x35\x90\x2e\x41\x8d\x6d\xaa\x09\x73\x95\x0c\xf1\x1d\xde\x4c\x2f\x2d\xf7\xd1\x72\xeb\xef\x48\xc7\xf8\xf9\xfa\xfb"
    "\xfc\xfd\xfe\xff";
/* Like sget2(), but with the byte-order test inverted: some Canon
   makernote fields are stored with endianness opposite to the file's
   global byte order. */
ushort CLASS sget2Rev(uchar *s) // specific to some Canon Makernotes fields, where they have endian in reverse
{
  return (order == 0x4d4d) ? (ushort)(s[0] | s[1] << 8)
                           : (ushort)(s[0] << 8 | s[1]);
}
#endif
/* Read a 16-bit value from ifp in the file's byte order; bytes left
   unread (short file) keep the 0xff padding. */
ushort CLASS get2()
{
  uchar buf[2];
  buf[0] = buf[1] = 0xff;
  fread(buf, 1, 2, ifp);
  return sget2(buf);
}
/* Decode a 32-bit value from four bytes, honoring the file's TIFF byte
   order ("II" = little-endian, "MM" = big-endian). */
unsigned CLASS sget4(uchar *s)
{
  if (order == 0x4949)
    return s[0] | s[1] << 8 | s[2] << 16 | s[3] << 24;
  return s[0] << 24 | s[1] << 16 | s[2] << 8 | s[3];
}
#define sget4(s) sget4((uchar *)s)
/* Read a 32-bit value from ifp in the file's byte order; bytes left
   unread (short file) keep the 0xff padding. */
unsigned CLASS get4()
{
  uchar buf[4];
  memset(buf, 0xff, sizeof buf);
  fread(buf, 1, 4, ifp);
  return sget4(buf);
}
/* Read one TIFF integer: type 3 (SHORT) is 16-bit, anything else 32-bit. */
unsigned CLASS getint(int type)
{
  if (type == 3)
    return get2();
  return get4();
}
/* Reinterpret the bit pattern of a 32-bit int as an IEEE-754 float
   (used for TIFF type-11 FLOAT values already read via get4()).
   memcpy replaces the original union: type punning through a union is
   undefined behavior in C++, while memcpy between same-size objects is
   well-defined and compiles to the same code. */
float CLASS int_to_float(int i)
{
  float f;
  memcpy(&f, &i, sizeof f);
  return f;
}
/* Read one TIFF/EXIF value of the given type from ifp and return it as a
   double. Type codes: 3=SHORT, 4=LONG, 5=RATIONAL, 8=SSHORT, 9=SLONG,
   10=SRATIONAL, 11=FLOAT, 12=DOUBLE; any other type is read as one byte. */
double CLASS getreal(int type)
{
  union {
    char c[8];
    double d;
  } u, v;
  int i, rev;
  switch (type)
  {
  case 3:
    return (unsigned short)get2();
  case 4:
    return (unsigned int)get4();
  case 5: /* unsigned rational: numerator/denominator, 0 denom -> divide by 1 */
    u.d = (unsigned int)get4();
    v.d = (unsigned int)get4();
    return u.d / (v.d ? v.d : 1);
  case 8:
    return (signed short)get2();
  case 9:
    return (signed int)get4();
  case 10: /* signed rational */
    u.d = (signed int)get4();
    v.d = (signed int)get4();
    return u.d / (v.d ? v.d : 1);
  case 11:
    return int_to_float(get4());
  case 12:
    /* 8-byte IEEE double: rev is 7 (full byte reversal via i^rev) when the
       file's endianness differs from the host's, else 0. */
    rev = 7 * ((order == 0x4949) == (ntohs(0x1234) == 0x1234));
    for (i = 0; i < 8; i++)
      u.c[i ^ rev] = fgetc(ifp);
    return u.d;
  default:
    return fgetc(ifp);
  }
}
/* Read 'count' 16-bit samples from ifp into 'pixel', swapping each pair of
   bytes when the file's endianness differs from the host's. A short read
   is reported through derror(). */
void CLASS read_shorts(ushort *pixel, unsigned count)
{
  if (fread(pixel, 2, count, ifp) < count)
    derror();
  if ((order == 0x4949) == (ntohs(0x1234) == 0x1234))
    swab((char *)pixel, (char *)pixel, count * 2);
}
/* Fill the global 64K 'curve' table by natural cubic-spline interpolation
   through the control points (x_, y_), both scaled by 65535. On allocation
   failure the curve is silently left unchanged. */
void CLASS cubic_spline(const int *x_, const int *y_, const int len)
{
  float **A, *b, *c, *d, *x, *y;
  int i, j;
  /* One calloc holds the row-pointer array, a (2*len)x(2*len) matrix, and
     the b/c/d/x/y work vectors that are carved out of its tail below. */
  A = (float **)calloc(((2 * len + 4) * sizeof **A + sizeof *A), 2 * len);
  if (!A)
    return;
  A[0] = (float *)(A + 2 * len);
  for (i = 1; i < 2 * len; i++)
    A[i] = A[0] + 2 * len * i;
  /* Carve b, c, d, x, y out of the allocation (note i == 2*len here). */
  y = len + (x = i + (d = i + (c = i + (b = A[0] + i * i))));
  for (i = 0; i < len; i++)
  {
    x[i] = x_[i] / 65535.0;
    y[i] = y_[i] / 65535.0;
  }
  /* Segment slopes b[] and interval widths d[]. */
  for (i = len - 1; i > 0; i--)
  {
    b[i] = (y[i] - y[i - 1]) / (x[i] - x[i - 1]);
    d[i - 1] = x[i] - x[i - 1];
  }
  /* Assemble the tridiagonal system for the second derivatives c[]
     (right-hand side stored in column len-1). */
  for (i = 1; i < len - 1; i++)
  {
    A[i][i] = 2 * (d[i - 1] + d[i]);
    if (i > 1)
    {
      A[i][i - 1] = d[i - 1];
      A[i - 1][i] = d[i - 1];
    }
    A[i][len - 1] = 6 * (b[i + 1] - b[i]);
  }
  /* Forward elimination... */
  for (i = 1; i < len - 2; i++)
  {
    float v = A[i + 1][i] / A[i][i];
    for (j = 1; j <= len - 1; j++)
      A[i + 1][j] -= v * A[i][j];
  }
  /* ...and back substitution. */
  for (i = len - 2; i > 0; i--)
  {
    float acc = 0;
    for (j = i; j <= len - 2; j++)
      acc += A[i][j] * c[j];
    c[i] = (A[i][len - 1] - acc) / A[i][i];
  }
  /* Evaluate the spline at every 16-bit input level, clamping to [0,65535]. */
  for (i = 0; i < 0x10000; i++)
  {
    float x_out = (float)(i / 65535.0);
    float y_out = 0;
    for (j = 0; j < len - 1; j++)
    {
      if (x[j] <= x_out && x_out <= x[j + 1])
      {
        float v = x_out - x[j];
        y_out = y[j] + ((y[j + 1] - y[j]) / d[j] - (2 * d[j] * c[j] + c[j + 1] * d[j]) / 6) * v + (c[j] * 0.5) * v * v +
                ((c[j + 1] - c[j]) / (6 * d[j])) * v * v * v;
      }
    }
    curve[i] = y_out < 0.0 ? 0 : (y_out >= 1.0 ? 65535 : (ushort)(y_out * 65535.0 + 0.5));
  }
  free(A);
}
/* Canon 600: set pre_mul[] white-balance multipliers for a given color
   temperature by interpolating between fixed calibration rows. Column 0 of
   mul[] is the temperature key; columns 1..4 are per-channel factors. */
void CLASS canon_600_fixed_wb(int temp)
{
  static const short mul[4][5] = {
      {667, 358, 397, 565, 452}, {731, 390, 367, 499, 517}, {1119, 396, 348, 448, 537}, {1399, 485, 431, 508, 688}};
  int lo, hi, i;
  float frac = 0;
  /* Find the bracketing rows: lo = last row with key <= temp,
     hi = first row with key >= temp. */
  for (lo = 4; --lo;)
    if (*mul[lo] <= temp)
      break;
  for (hi = 0; hi < 3; hi++)
    if (*mul[hi] >= temp)
      break;
  if (lo != hi)
    frac = (float)(temp - *mul[lo]) / (*mul[hi] - *mul[lo]);
  /* pre_mul is the reciprocal of the interpolated channel factor. */
  for (i = 1; i < 5; i++)
    pre_mul[i - 1] = 1 / (frac * mul[hi][i] + (1 - frac) * mul[lo][i]);
}
/* Return values: 0 = white 1 = near white 2 = not white */
/* Canon 600 white-point classifier. ratio[0]/ratio[1] are fixed-point
   (<<10) channel ratios; they are clamped in place, and for "near white"
   ratio[0] is pulled toward the target line. 'mar' is the tolerance. */
int CLASS canon_600_color(int ratio[2], int mar)
{
  int clipped = 0, target, miss;
  /* Clamp ratio[1] to the valid range; flash and ambient use different limits. */
  if (flash_used)
  {
    if (ratio[1] < -104)
    {
      ratio[1] = -104;
      clipped = 1;
    }
    if (ratio[1] > 12)
    {
      ratio[1] = 12;
      clipped = 1;
    }
  }
  else
  {
    if (ratio[1] < -264 || ratio[1] > 461)
      return 2;
    if (ratio[1] < -50)
    {
      ratio[1] = -50;
      clipped = 1;
    }
    if (ratio[1] > 307)
    {
      ratio[1] = 307;
      clipped = 1;
    }
  }
  /* Expected ratio[0] for a white patch at this ratio[1] (piecewise linear). */
  target = flash_used || ratio[1] < 197 ? -38 - (398 * ratio[1] >> 10) : -123 + (48 * ratio[1] >> 10);
  if (target - mar <= ratio[0] && target + 20 >= ratio[0] && !clipped)
    return 0;
  miss = target - ratio[0];
  if (abs(miss) >= mar * 4)
    return 2;
  /* Near white: nudge ratio[0] toward the target, bounded by [-20, mar]. */
  if (miss < -20)
    miss = -20;
  if (miss > mar)
    miss = mar;
  ratio[0] = target - miss;
  return 1;
}
/* Canon 600 auto white balance: scan the image for white / near-white
   2x2 Bayer patches (classified by canon_600_color) and derive pre_mul[]
   from their channel totals. The tolerance 'mar' shrinks as exposure
   (canon_ev) rises; flash shots use a fixed tolerance. */
void CLASS canon_600_auto_wb()
{
  int mar, row, col, i, j, st, count[] = {0, 0};
  int test[8], total[2][8], ratio[2][2], stat[2];
  memset(&total, 0, sizeof total);
  i = canon_ev + 0.5;
  if (i < 10)
    mar = 150;
  else if (i > 12)
    mar = 20;
  else
    mar = 280 - 20 * i;
  if (flash_used)
    mar = 80;
  for (row = 14; row < height - 14; row += 4)
    for (col = 10; col < width; col += 2)
    {
      /* Gather two vertically adjacent 2x2 cells into test[0..3]/test[4..7],
         indexed by Bayer color. */
      for (i = 0; i < 8; i++)
        test[(i & 4) + FC(row + (i >> 1), col + (i & 1))] = BAYER(row + (i >> 1), col + (i & 1));
      /* Reject patches that are too dark/bright or vertically inconsistent. */
      for (i = 0; i < 8; i++)
        if (test[i] < 150 || test[i] > 1500)
          goto next;
      for (i = 0; i < 4; i++)
        if (abs(test[i] - test[i + 4]) > 50)
          goto next;
      /* Classify both cells; st ends up 0 (white) or 1 (near white). */
      for (i = 0; i < 2; i++)
      {
        for (j = 0; j < 4; j += 2)
          ratio[i][j >> 1] = ((test[i * 4 + j + 1] - test[i * 4 + j]) << 10) / test[i * 4 + j];
        stat[i] = canon_600_color(ratio[i], mar);
      }
      if ((st = stat[0] | stat[1]) > 1)
        goto next;
      /* For near-white cells, rebuild the adjusted channel values from the
         (possibly nudged) ratios before accumulating. */
      for (i = 0; i < 2; i++)
        if (stat[i])
          for (j = 0; j < 2; j++)
            test[i * 4 + j * 2 + 1] = test[i * 4 + j * 2] * (0x400 + ratio[i][j]) >> 10;
      for (i = 0; i < 8; i++)
        total[st][i] += test[i];
      count[st]++;
    next:;
    }
  if (count[0] | count[1])
  {
    /* Prefer true-white patches unless near-white outnumbers them 200:1. */
    st = count[0] * 200 < count[1];
    for (i = 0; i < 4; i++)
      pre_mul[i] = 1.0 / (total[st][i] + total[st][i + 4]);
  }
}
/* Canon 600: pick one of six fixed CMY->RGB matrices (scaled by 1024)
   based on the magenta/green and yellow/green multiplier ratios, or the
   flash matrix when flash was used, and load it into rgb_cam. */
void CLASS canon_600_coeff()
{
  static const short table[6][12] = {{-190, 702, -1878, 2390, 1861, -1349, 905, -393, -432, 944, 2617, -2105},
                                     {-1203, 1715, -1136, 1648, 1388, -876, 267, 245, -1641, 2153, 3921, -3409},
                                     {-615, 1127, -1563, 2075, 1437, -925, 509, 3, -756, 1268, 2519, -2007},
                                     {-190, 702, -1886, 2398, 2153, -1641, 763, -251, -452, 964, 3040, -2528},
                                     {-190, 702, -1878, 2390, 1861, -1349, 905, -393, -432, 944, 2617, -2105},
                                     {-807, 1319, -1785, 2297, 1388, -876, 769, -257, -230, 742, 2067, -1555}};
  int t = 0, i, c;
  float mc, yc;
  mc = pre_mul[1] / pre_mul[2];
  yc = pre_mul[3] / pre_mul[2];
  if (mc > 1 && mc <= 1.28 && yc < 0.8789)
    t = 1;
  if (mc > 1.28 && mc <= 2)
  {
    if (yc < 0.8789)
      t = 3;
    else if (yc <= 2)
      t = 4;
  }
  if (flash_used)
    t = 5;
  /* raw_color = 0 signals that rgb_cam is now in effect. */
  for (raw_color = i = 0; i < 3; i++)
    FORCC rgb_cam[i][c] = table[t][i * 4 + c] / 1024.0;
}
/* Canon 600 raw loader: each 1120-byte line packs 8 10-bit pixels into
   every 10 bytes (8 high bytes plus 2 bytes of 2-bit remainders). Rows are
   written interleaved: even rows first, then odd rows. */
void CLASS canon_600_load_raw()
{
  uchar data[1120], *dp;
  ushort *pix;
  int irow, row;
  for (irow = row = 0; irow < height; irow++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (fread(data, 1, 1120, ifp) < 1120)
      derror();
    pix = raw_image + row * raw_width;
    /* Unpack 10 bytes -> 8 pixels: dp[1] and dp[9] hold the low 2 bits. */
    for (dp = data; dp < data + 1120; dp += 10, pix += 8)
    {
      pix[0] = (dp[0] << 2) + (dp[1] >> 6);
      pix[1] = (dp[2] << 2) + (dp[1] >> 4 & 3);
      pix[2] = (dp[3] << 2) + (dp[1] >> 2 & 3);
      pix[3] = (dp[4] << 2) + (dp[1] & 3);
      pix[4] = (dp[5] << 2) + (dp[9] & 3);
      pix[5] = (dp[6] << 2) + (dp[9] >> 2 & 3);
      pix[6] = (dp[7] << 2) + (dp[9] >> 4 & 3);
      pix[7] = (dp[8] << 2) + (dp[9] >> 6);
    }
    /* Advance by two rows; wrap to the odd rows after the even ones. */
    if ((row += 2) > height)
      row = 1;
  }
}
/* Canon 600 post-load correction: subtract the black level, apply a
   row/column-parity gain (fixed-point, >>9), then run the fixed and auto
   white balance and matrix setup. maximum is rescaled to match. */
void CLASS canon_600_correct()
{
  int row, col, val;
  static const short mul[4][2] = {{1141, 1145}, {1128, 1109}, {1178, 1149}, {1128, 1109}};
  for (row = 0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < width; col++)
    {
      if ((val = BAYER(row, col) - black) < 0)
        val = 0;
      val = val * mul[row & 3][col & 1] >> 9;
      BAYER(row, col) = val;
    }
  }
  canon_600_fixed_wb(1311);
  canon_600_auto_wb();
  canon_600_coeff();
  maximum = (0x3ff - black) * 1109 >> 9;
  black = 0;
}
/* Heuristic PowerShot S2 IS detection: probe a fixed in-row offset across
   the first 100 rows; any byte value above 15 identifies the S2 IS. */
int CLASS canon_s2is()
{
  unsigned row = 0;
  while (row < 100)
  {
    fseek(ifp, row * 3340 + 3284, SEEK_SET);
    if (getc(ifp) > 15)
      return 1;
    ++row;
  }
  return 0;
}
/* Bit-reservoir reader with optional Huffman decode.
   nbits < 0 resets the reservoir; nbits == 0 returns 0.
   Without 'huff': return the next nbits bits.
   With 'huff': peek nbits bits, use them to index the table, consume
   huff[c]>>8 bits and return the low byte (the decoded symbol).
   When zero_after_ff is set, a 0xff byte followed by nonzero stops reading
   (JPEG-style marker handling). State lives in TLS unless single-threaded. */
unsigned CLASS getbithuff(int nbits, ushort *huff)
{
#ifdef LIBRAW_NOTHREADS
  static unsigned bitbuf = 0;
  static int vbits = 0, reset = 0;
#else
#define bitbuf tls->getbits.bitbuf
#define vbits tls->getbits.vbits
#define reset tls->getbits.reset
#endif
  unsigned c;
  if (nbits > 25)
    return 0;
  if (nbits < 0)
    return bitbuf = vbits = reset = 0;
  if (nbits == 0 || vbits < 0)
    return 0;
  /* Refill the 32-bit reservoir one byte at a time. */
  while (!reset && vbits < nbits && (c = fgetc(ifp)) != EOF && !(reset = zero_after_ff && c == 0xff && fgetc(ifp)))
  {
    bitbuf = (bitbuf << 8) + (uchar)c;
    vbits += 8;
  }
  /* Extract the top nbits of the valid window. */
  c = bitbuf << (32 - vbits) >> (32 - nbits);
  if (huff)
  {
    vbits -= huff[c] >> 8; /* code length */
    c = (uchar)huff[c];    /* decoded symbol */
  }
  else
    vbits -= nbits;
  if (vbits < 0)
    derror();
  return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#undef reset
#endif
}
#define getbits(n) getbithuff(n, 0)
#define gethuff(h) getbithuff(*h, h + 1)
/*
Construct a decode tree according the specification in *source.
The first 16 bytes specify how many codes should be 1-bit, 2-bit
3-bit, etc. Bytes after that are the leaf values.
For example, if the source is
{ 0,1,4,2,3,1,2,0,0,0,0,0,0,0,0,0,
0x04,0x03,0x05,0x06,0x02,0x07,0x01,0x08,0x09,0x00,0x0a,0x0b,0xff },
then the code is
00 0x04
010 0x03
011 0x05
100 0x06
101 0x02
1100 0x07
1101 0x01
11100 0x08
11101 0x09
11110 0x00
111110 0x0a
1111110 0x0b
1111111 0xff
*/
/* Build a flat Huffman lookup table from the spec at *source (see the
   comment above for the format) and advance *source past the consumed
   bytes. huff[0] holds the maximum code length; each other entry packs
   (code length << 8) | leaf value. Caller frees the result. */
ushort *CLASS make_decoder_ref(const uchar **source)
{
  int max, len, h, i, j;
  const uchar *count;
  ushort *huff;
  /* count[1..16] = number of codes of each bit length. */
  count = (*source += 16) - 17;
  for (max = 16; max && !count[max]; max--)
    ;
  huff = (ushort *)calloc(1 + (1 << max), sizeof *huff);
  merror(huff, "make_decoder()");
  huff[0] = max;
  /* Each code of length 'len' fills 2^(max-len) consecutive table slots. */
  for (h = len = 1; len <= max; len++)
    for (i = 0; i < count[len]; i++, ++*source)
      for (j = 0; j < 1 << (max - len); j++)
        if (h <= 1 << max)
          huff[h++] = len << 8 | **source;
  return huff;
}
/* Convenience wrapper around make_decoder_ref() for callers that do not
   need the advanced source pointer back. */
ushort *CLASS make_decoder(const uchar *source)
{
  const uchar *cursor = source;
  return make_decoder_ref(&cursor);
}
/* Build the pair of Huffman decoders used by Canon CRW compression.
   'table' selects one of three fixed code sets (clamped to 2); huff[0]
   decodes the first value of each block, huff[1] the rest. The caller owns
   (and must free) both tables. */
void CLASS crw_init_tables(unsigned table, ushort *huff[2])
{
  static const uchar first_tree[3][29] = {
      {0, 1, 4, 2, 3, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0x04, 0x03, 0x05, 0x06, 0x02, 0x07, 0x01, 0x08, 0x09, 0x00, 0x0a, 0x0b, 0xff},
      {0, 2, 2, 3, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0,
       0, 0x03, 0x02, 0x04, 0x01, 0x05, 0x00, 0x06, 0x07, 0x09, 0x08, 0x0a, 0x0b, 0xff},
      {0, 0, 6, 3, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0,
       0, 0x06, 0x05, 0x07, 0x04, 0x08, 0x03, 0x09, 0x02, 0x00, 0x0a, 0x01, 0x0b, 0xff},
  };
  static const uchar second_tree[3][180] = {
      {0, 2, 2, 2, 1, 4, 2, 1, 2, 5, 1, 1, 0, 0, 0, 139, 0x03, 0x04,
       0x02, 0x05, 0x01, 0x06, 0x07, 0x08, 0x12, 0x13, 0x11, 0x14, 0x09, 0x15, 0x22, 0x00, 0x21, 0x16, 0x0a, 0xf0,
       0x23, 0x17, 0x24, 0x31, 0x32, 0x18, 0x19, 0x33, 0x25, 0x41, 0x34, 0x42, 0x35, 0x51, 0x36, 0x37, 0x38, 0x29,
       0x79, 0x26, 0x1a, 0x39, 0x56, 0x57, 0x28, 0x27, 0x52, 0x55, 0x58, 0x43, 0x76, 0x59, 0x77, 0x54, 0x61, 0xf9,
       0x71, 0x78, 0x75, 0x96, 0x97, 0x49, 0xb7, 0x53, 0xd7, 0x74, 0xb6, 0x98, 0x47, 0x48, 0x95, 0x69, 0x99, 0x91,
       0xfa, 0xb8, 0x68, 0xb5, 0xb9, 0xd6, 0xf7, 0xd8, 0x67, 0x46, 0x45, 0x94, 0x89, 0xf8, 0x81, 0xd5, 0xf6, 0xb4,
       0x88, 0xb1, 0x2a, 0x44, 0x72, 0xd9, 0x87, 0x66, 0xd4, 0xf5, 0x3a, 0xa7, 0x73, 0xa9, 0xa8, 0x86, 0x62, 0xc7,
       0x65, 0xc8, 0xc9, 0xa1, 0xf4, 0xd1, 0xe9, 0x5a, 0x92, 0x85, 0xa6, 0xe7, 0x93, 0xe8, 0xc1, 0xc6, 0x7a, 0x64,
       0xe1, 0x4a, 0x6a, 0xe6, 0xb3, 0xf1, 0xd3, 0xa5, 0x8a, 0xb2, 0x9a, 0xba, 0x84, 0xa4, 0x63, 0xe5, 0xc5, 0xf3,
       0xd2, 0xc4, 0x82, 0xaa, 0xda, 0xe4, 0xf2, 0xca, 0x83, 0xa3, 0xa2, 0xc3, 0xea, 0xc2, 0xe2, 0xe3, 0xff, 0xff},
      {0, 2, 2, 1, 4, 1, 4, 1, 3, 3, 1, 0, 0, 0, 0, 140, 0x02, 0x03,
       0x01, 0x04, 0x05, 0x12, 0x11, 0x06, 0x13, 0x07, 0x08, 0x14, 0x22, 0x09, 0x21, 0x00, 0x23, 0x15, 0x31, 0x32,
       0x0a, 0x16, 0xf0, 0x24, 0x33, 0x41, 0x42, 0x19, 0x17, 0x25, 0x18, 0x51, 0x34, 0x43, 0x52, 0x29, 0x35, 0x61,
       0x39, 0x71, 0x62, 0x36, 0x53, 0x26, 0x38, 0x1a, 0x37, 0x81, 0x27, 0x91, 0x79, 0x55, 0x45, 0x28, 0x72, 0x59,
       0xa1, 0xb1, 0x44, 0x69, 0x54, 0x58, 0xd1, 0xfa, 0x57, 0xe1, 0xf1, 0xb9, 0x49, 0x47, 0x63, 0x6a, 0xf9, 0x56,
       0x46, 0xa8, 0x2a, 0x4a, 0x78, 0x99, 0x3a, 0x75, 0x74, 0x86, 0x65, 0xc1, 0x76, 0xb6, 0x96, 0xd6, 0x89, 0x85,
       0xc9, 0xf5, 0x95, 0xb4, 0xc7, 0xf7, 0x8a, 0x97, 0xb8, 0x73, 0xb7, 0xd8, 0xd9, 0x87, 0xa7, 0x7a, 0x48, 0x82,
       0x84, 0xea, 0xf4, 0xa6, 0xc5, 0x5a, 0x94, 0xa4, 0xc6, 0x92, 0xc3, 0x68, 0xb5, 0xc8, 0xe4, 0xe5, 0xe6, 0xe9,
       0xa2, 0xa3, 0xe3, 0xc2, 0x66, 0x67, 0x93, 0xaa, 0xd4, 0xd5, 0xe7, 0xf8, 0x88, 0x9a, 0xd7, 0x77, 0xc4, 0x64,
       0xe2, 0x98, 0xa5, 0xca, 0xda, 0xe8, 0xf3, 0xf6, 0xa9, 0xb2, 0xb3, 0xf2, 0xd2, 0x83, 0xba, 0xd3, 0xff, 0xff},
      {0, 0, 6, 2, 1, 3, 3, 2, 5, 1, 2, 2, 8, 10, 0, 117, 0x04, 0x05,
       0x03, 0x06, 0x02, 0x07, 0x01, 0x08, 0x09, 0x12, 0x13, 0x14, 0x11, 0x15, 0x0a, 0x16, 0x17, 0xf0, 0x00, 0x22,
       0x21, 0x18, 0x23, 0x19, 0x24, 0x32, 0x31, 0x25, 0x33, 0x38, 0x37, 0x34, 0x35, 0x36, 0x39, 0x79, 0x57, 0x58,
       0x59, 0x28, 0x56, 0x78, 0x27, 0x41, 0x29, 0x77, 0x26, 0x42, 0x76, 0x99, 0x1a, 0x55, 0x98, 0x97, 0xf9, 0x48,
       0x54, 0x96, 0x89, 0x47, 0xb7, 0x49, 0xfa, 0x75, 0x68, 0xb6, 0x67, 0x69, 0xb9, 0xb8, 0xd8, 0x52, 0xd7, 0x88,
       0xb5, 0x74, 0x51, 0x46, 0xd9, 0xf8, 0x3a, 0xd6, 0x87, 0x45, 0x7a, 0x95, 0xd5, 0xf6, 0x86, 0xb4, 0xa9, 0x94,
       0x53, 0x2a, 0xa8, 0x43, 0xf5, 0xf7, 0xd4, 0x66, 0xa7, 0x5a, 0x44, 0x8a, 0xc9, 0xe8, 0xc8, 0xe7, 0x9a, 0x6a,
       0x73, 0x4a, 0x61, 0xc7, 0xf4, 0xc6, 0x65, 0xe9, 0x72, 0xe6, 0x71, 0x91, 0x93, 0xa6, 0xda, 0x92, 0x85, 0x62,
       0xf3, 0xc5, 0xb2, 0xa4, 0x84, 0xba, 0x64, 0xa5, 0xb3, 0xd2, 0x81, 0xe5, 0xd3, 0xaa, 0xc4, 0xca, 0xf2, 0xb1,
       0xe4, 0xd1, 0x83, 0x63, 0xea, 0xc3, 0xe2, 0x82, 0xf1, 0xa3, 0xc2, 0xa1, 0xc1, 0xe3, 0xa2, 0xe1, 0xff, 0xff}};
  if (table > 2)
    table = 2;
  huff[0] = make_decoder(first_tree[table]);
  huff[1] = make_decoder(second_tree[table]);
}
/*
Return 0 if the image starts with compressed data,
1 if it starts with uncompressed low-order bits.
In Canon compressed data, 0xff is always followed by 0x00.
*/
int CLASS canon_has_lowbits()
{
  uchar test[0x4000];
  int ret = 1, i;
  fseek(ifp, 0, SEEK_SET);
  fread(test, 1, sizeof test, ifp);
  /* Scan past the 540-byte header: in Canon-compressed data every 0xff is
     followed by 0x00, so one 0xff with a nonzero successor proves the file
     starts with uncompressed low-order bits. */
  for (i = 540; i < sizeof test - 1; i++)
    if (test[i] == 0xff)
    {
      if (test[i + 1])
        return 1;
      ret = 0;
    }
  return ret;
}
/* Canon CRW raw loader: Huffman-decode 8x8 difference blocks into 10-bit
   samples (two running bases for the two Bayer columns), then, when the
   file carries a separate low-bits plane, merge the extra 2 bits per pixel
   stored near the start of the file. */
void CLASS canon_load_raw()
{
  ushort *pixel, *prow, *huff[2];
  int nblocks, lowbits, i, c, row, r, save, val;
  int block, diffbuf[64], leaf, len, diff, carry = 0, pnum = 0, base[2];
  crw_init_tables(tiff_compress, huff);
  lowbits = canon_has_lowbits();
  if (!lowbits)
    maximum = 0x3ff;
  fseek(ifp, 540 + lowbits * raw_height * raw_width / 4, SEEK_SET);
  zero_after_ff = 1;
  getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (row = 0; row < raw_height; row += 8)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      pixel = raw_image + row * raw_width;
      nblocks = MIN(8, raw_height - row) * raw_width >> 6;
      for (block = 0; block < nblocks; block++)
      {
        memset(diffbuf, 0, sizeof diffbuf);
        /* Decode up to 64 differences; leaf 0 ends the block early,
           0xff is skipped, and the high nibble of a leaf skips zeros. */
        for (i = 0; i < 64; i++)
        {
          leaf = gethuff(huff[i > 0]);
          if (leaf == 0 && i)
            break;
          if (leaf == 0xff)
            continue;
          i += leaf >> 4;
          len = leaf & 15;
          if (len == 0)
            continue;
          diff = getbits(len);
          /* JPEG-style sign extension. */
          if ((diff & (1 << (len - 1))) == 0)
            diff -= (1 << len) - 1;
          if (i < 64)
            diffbuf[i] = diff;
        }
        diffbuf[0] += carry;
        carry = diffbuf[0];
        /* Accumulate differences onto the per-column bases (reset to 512
           at the start of each raw row); overflow past 10 bits is an error. */
        for (i = 0; i < 64; i++)
        {
          if (pnum++ % raw_width == 0)
            base[0] = base[1] = 512;
          if ((pixel[(block << 6) + i] = base[i & 1] += diffbuf[i]) >> 10)
            derror();
        }
      }
      /* Merge the 2-bit low-order plane stored at offset 26. */
      if (lowbits)
      {
        save = ftell(ifp);
        fseek(ifp, 26 + row * raw_width / 4, SEEK_SET);
        for (prow = pixel, i = 0; i < raw_width * 2; i++)
        {
          c = fgetc(ifp);
          for (r = 0; r < 8; r += 2, prow++)
          {
            val = (*prow << 2) + ((c >> r) & 3);
            if (raw_width == 2672 && val < 512)
              val += 2;
            *prow = val;
          }
        }
        fseek(ifp, save, SEEK_SET);
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    FORC(2) free(huff[c]);
    throw;
  }
#endif
  FORC(2) free(huff[c]);
}
//@end COMMON
/* Decoder state for one lossless-JPEG (ITU T.81) stream. */
struct jhead
{
  /* algo: low byte of the SOF marker; psv: predictor selection value;
     sraw: nonzero for Canon sRAW component layout; vpred: per-component
     vertical predictors. */
  int algo, bits, high, wide, clrs, sraw, psv, restart, vpred[6];
  /* huff[] may alias shared tables; free[] tracks the ones this stream
     owns. row is the two-line rolling decode buffer. */
  ushort quant[64], idct[64], *huff[20], *free[20], *row;
};
//@out COMMON
/* Parse lossless-JPEG headers from ifp into *jh, stopping at SOS.
   Returns 1 on success (and allocates jh->row unless info_only), 0 on any
   malformed input. Limits: 1024 markers, sane bits/colors/dimensions. */
int CLASS ljpeg_start(struct jhead *jh, int info_only)
{
  ushort c, tag, len;
  int cnt = 0;
  uchar data[0x10000];
  const uchar *dp;
  memset(jh, 0, sizeof *jh);
  jh->restart = INT_MAX;
  if ((fgetc(ifp), fgetc(ifp)) != 0xd8) /* SOI marker */
    return 0;
  do
  {
    if (feof(ifp))
      return 0;
    if (cnt++ > 1024)
      return 0; // 1024 tags limit
    if (!fread(data, 2, 2, ifp))
      return 0;
    tag = data[0] << 8 | data[1];
    len = (data[2] << 8 | data[3]) - 2;
    if (tag <= 0xff00)
      return 0;
    fread(data, 1, len, ifp);
    switch (tag)
    {
    case 0xffc3: // start of frame; lossless, Huffman
      jh->sraw = ((data[7] >> 4) * (data[7] & 15) - 1) & 3;
      /* fall through: SOF3 shares the frame-parameter parsing below */
    case 0xffc1:
    case 0xffc0:
      jh->algo = tag & 0xff;
      jh->bits = data[0];
      jh->high = data[1] << 8 | data[2];
      jh->wide = data[3] << 8 | data[4];
      jh->clrs = data[5] + jh->sraw;
      if (len == 9 && !dng_version)
        getc(ifp);
      break;
    case 0xffc4: // define Huffman tables
      if (info_only)
        break;
      /* Table ids above 19 (c & -20) terminate the scan. */
      for (dp = data; dp < data + len && !((c = *dp++) & -20);)
        jh->free[c] = jh->huff[c] = make_decoder_ref(&dp);
      break;
    case 0xffda: // start of scan
      jh->psv = data[1 + data[0] * 2];
      jh->bits -= data[3 + data[0] * 2] & 15;
      break;
    case 0xffdb: // define quantization table
      FORC(64) jh->quant[c] = data[c * 2 + 1] << 8 | data[c * 2 + 2];
      break;
    case 0xffdd: // define restart interval
      jh->restart = data[0] << 8 | data[1];
    }
  } while (tag != 0xffda);
  if (jh->bits > 16 || jh->clrs > 6 || !jh->bits || !jh->high || !jh->wide || !jh->clrs)
    return 0;
  if (info_only)
    return 1;
  if (!jh->huff[0])
    return 0;
  /* Missing tables alias the previous one; sRAW remaps chroma/luma tables. */
  FORC(19) if (!jh->huff[c + 1]) jh->huff[c + 1] = jh->huff[c];
  if (jh->sraw)
  {
    FORC(4) jh->huff[2 + c] = jh->huff[1];
    FORC(jh->sraw) jh->huff[1 + c] = jh->huff[0];
  }
  jh->row = (ushort *)calloc(jh->wide * jh->clrs, 4);
  merror(jh->row, "ljpeg_start()");
  return zero_after_ff = 1;
}
/* Release the Huffman tables this stream owns (only slots 0..3 are ever
   allocated by ljpeg_start) and its row buffer. */
void CLASS ljpeg_end(struct jhead *jh)
{
  for (int i = 0; i < 4; i++)
  {
    if (jh->free[i])
      free(jh->free[i]);
  }
  free(jh->row);
}
/* Decode one Huffman-coded difference (lossless-JPEG DC style): the
   Huffman symbol gives the bit length, the following bits the magnitude.
   Length 16 is the -32768 escape (except for DNG versions below 1.1.0.0). */
int CLASS ljpeg_diff(ushort *huff)
{
  int len, diff;
  if (!huff)
#ifdef LIBRAW_LIBRARY_BUILD
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
    longjmp(failure, 2);
#endif
  len = gethuff(huff);
  if (len == 16 && (!dng_version || dng_version >= 0x1010000))
    return -32768;
  diff = getbits(len);
  /* JPEG-style sign extension: a clear top bit means a negative value. */
  if ((diff & (1 << (len - 1))) == 0)
    diff -= (1 << len) - 1;
  return diff;
}
/* Decode one lossless-JPEG scan line into the rolling two-line buffer and
   return a pointer to the completed line. Handles restart markers (reset
   predictors, resync on an 0xffdN marker) and the seven JPEG predictor
   modes selected by jh->psv. */
ushort *CLASS ljpeg_row(int jrow, struct jhead *jh)
{
  int col, c, diff, pred, spred = 0;
  ushort mark = 0, *row[3];
  /* Restart interval boundary: reset vertical predictors and, except on
     the first row, skip forward to the next restart marker. */
  if (jrow * jh->wide % jh->restart == 0)
  {
    FORC(6) jh->vpred[c] = 1 << (jh->bits - 1);
    if (jrow)
    {
      fseek(ifp, -2, SEEK_CUR);
      do
        mark = (mark << 8) + (c = fgetc(ifp));
      while (c != EOF && mark >> 4 != 0xffd);
    }
    getbits(-1);
  }
  /* row[0]/row[1] alternate between the two halves of jh->row. */
  FORC3 row[c] = jh->row + jh->wide * jh->clrs * ((jrow + c) & 1);
  for (col = 0; col < jh->wide; col++)
    FORC(jh->clrs)
    {
      diff = ljpeg_diff(jh->huff[c]);
      if (jh->sraw && c <= jh->sraw && (col | c))
        pred = spred;
      else if (col)
        pred = row[0][-jh->clrs];
      else
        pred = (jh->vpred[c] += diff) - diff;
      /* Predictors 1-7 per the JPEG lossless spec (need both neighbors). */
      if (jrow && col)
        switch (jh->psv)
        {
        case 1:
          break;
        case 2:
          pred = row[1][0];
          break;
        case 3:
          pred = row[1][-jh->clrs];
          break;
        case 4:
          pred = pred + row[1][0] - row[1][-jh->clrs];
          break;
        case 5:
          pred = pred + ((row[1][0] - row[1][-jh->clrs]) >> 1);
          break;
        case 6:
          pred = row[1][0] + ((pred - row[1][-jh->clrs]) >> 1);
          break;
        case 7:
          pred = (pred + row[1][0]) >> 1;
          break;
        default:
          pred = 0;
        }
      if ((**row = pred + diff) >> jh->bits)
        derror();
      if (c <= jh->sraw)
        spred = **row;
      row[0]++;
      row[1]++;
    }
  return row[2];
}
/* Load a lossless-JPEG-compressed raw (Canon CR2 and similar). Decoded
   values go through 'curve' into raw_image; CR2 vertical slices are
   unwrapped via cr2_slice[], and a couple of model quirks (interleaved
   rows for load_flags&1, the 3984-wide column shift) are handled inline. */
void CLASS lossless_jpeg_load_raw()
{
  int jwide, jhigh, jrow, jcol, val, jidx, i, j, row = 0, col = 0;
  struct jhead jh;
  ushort *rp;
  if (!ljpeg_start(&jh, 0))
    return;
  if (jh.wide < 1 || jh.high < 1 || jh.clrs < 1 || jh.bits < 1)
#ifdef LIBRAW_LIBRARY_BUILD
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
    longjmp(failure, 2);
#endif
  jwide = jh.wide * jh.clrs;
  jhigh = jh.high;
  if (jh.clrs == 4 && jwide >= raw_width * 2)
    jhigh *= 2;
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (jrow = 0; jrow < jh.high; jrow++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      rp = ljpeg_row(jrow, &jh);
      if (load_flags & 1)
        row = jrow & 1 ? height - 1 - jrow / 2 : jrow / 2;
      for (jcol = 0; jcol < jwide; jcol++)
      {
        val = curve[*rp++];
        /* Map the linear JPEG index into the sliced CR2 layout: slice i,
           then row/column within that slice (the last slice may be wider). */
        if (cr2_slice[0])
        {
          jidx = jrow * jwide + jcol;
          i = jidx / (cr2_slice[1] * raw_height);
          if ((j = i >= cr2_slice[0]))
            i = cr2_slice[0];
          jidx -= i * (cr2_slice[1] * raw_height);
          row = jidx / cr2_slice[1 + j];
          col = jidx % cr2_slice[1 + j] + i * cr2_slice[1];
        }
        if (raw_width == 3984 && (col -= 2) < 0)
          col += (row--, raw_width);
        if (row > raw_height)
#ifdef LIBRAW_LIBRARY_BUILD
          throw LIBRAW_EXCEPTION_IO_CORRUPT;
#else
          longjmp(failure, 3);
#endif
        if ((unsigned)row < raw_height)
          RAW(row, col) = val;
        if (++col >= raw_width)
          col = (row++, 0);
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    ljpeg_end(&jh);
    throw;
  }
#endif
  ljpeg_end(&jh);
}
/* Canon sRAW/mRAW loader: decode the lossless-JPEG stream into the
   subsampled Y/Cb/Cr planes of image[], interpolate the missing chroma
   samples, then (unless LIBRAW_PROCESSING_SRAW_NO_RGB) convert to RGB with
   model-dependent coefficients. The chroma bias ('hue') depends on the
   camera model and firmware version parsed from model2. */
void CLASS canon_sraw_load_raw()
{
  struct jhead jh;
  short *rp = 0, (*ip)[4];
  int jwide, slice, scol, ecol, row, col, jrow = 0, jcol = 0, pix[3], c;
  int v[3] = {0, 0, 0}, ver, hue;
#ifdef LIBRAW_LIBRARY_BUILD
  int saved_w = width, saved_h = height;
#endif
  char *cp;
  if (!ljpeg_start(&jh, 0) || jh.clrs < 4)
    return;
  jwide = (jh.wide >>= 1) * jh.clrs;
#ifdef LIBRAW_LIBRARY_BUILD
  if (load_flags & 256)
  {
    width = raw_width;
    height = raw_height;
  }
  try
  {
#endif
    /* Pass 1: decode the JPEG data slice by slice into image[]. */
    for (ecol = slice = 0; slice <= cr2_slice[0]; slice++)
    {
      scol = ecol;
      ecol += cr2_slice[1] * 2 / jh.clrs;
      if (!cr2_slice[0] || ecol > raw_width - 1)
        ecol = raw_width & -2;
      for (row = 0; row < height; row += (jh.clrs >> 1) - 1)
      {
#ifdef LIBRAW_LIBRARY_BUILD
        checkCancel();
#endif
        ip = (short(*)[4])image + row * width;
        for (col = scol; col < ecol; col += 2, jcol += jh.clrs)
        {
          if ((jcol %= jwide) == 0)
            rp = (short *)ljpeg_row(jrow++, &jh);
          if (col >= width)
            continue;
#ifdef LIBRAW_LIBRARY_BUILD
          if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_INTERPOLATE)
          {
            /* Keep raw Y samples; mark uninterpolated chroma with 8192. */
            FORC(jh.clrs - 2)
            {
              ip[col + (c >> 1) * width + (c & 1)][0] = rp[jcol + c];
              ip[col + (c >> 1) * width + (c & 1)][1] = ip[col + (c >> 1) * width + (c & 1)][2] = 8192;
            }
            ip[col][1] = rp[jcol + jh.clrs - 2] - 8192;
            ip[col][2] = rp[jcol + jh.clrs - 1] - 8192;
          }
          else if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_RGB)
          {
            FORC(jh.clrs - 2)
            ip[col + (c >> 1) * width + (c & 1)][0] = rp[jcol + c];
            ip[col][1] = rp[jcol + jh.clrs - 2] - 8192;
            ip[col][2] = rp[jcol + jh.clrs - 1] - 8192;
          }
          else
#endif
          {
            FORC(jh.clrs - 2)
            ip[col + (c >> 1) * width + (c & 1)][0] = rp[jcol + c];
            ip[col][1] = rp[jcol + jh.clrs - 2] - 16384;
            ip[col][2] = rp[jcol + jh.clrs - 1] - 16384;
          }
        }
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    ljpeg_end(&jh);
    throw;
  }
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_INTERPOLATE)
  {
    ljpeg_end(&jh);
    maximum = 0x3fff;
    height = saved_h;
    width = saved_w;
    return;
  }
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    /* Parse the firmware version out of model2 ("d.d.d"). */
    for (cp = model2; *cp && !isdigit(*cp); cp++)
      ;
    sscanf(cp, "%d.%d.%d", v, v + 1, v + 2);
    ver = (v[0] * 1000 + v[1]) * 1000 + v[2];
    hue = (jh.sraw + 1) << 2;
    if (unique_id >= 0x80000281 || (unique_id == 0x80000218 && ver > 1000006))
      hue = jh.sraw << 1;
    ip = (short(*)[4])image;
    rp = ip[0];
    /* Pass 2: fill in chroma by vertical then horizontal averaging. */
    for (row = 0; row < height; row++, ip += width)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      if (row & (jh.sraw >> 1))
      {
        for (col = 0; col < width; col += 2)
          for (c = 1; c < 3; c++)
            if (row == height - 1)
            {
              ip[col][c] = ip[col - width][c];
            }
            else
            {
              ip[col][c] = (ip[col - width][c] + ip[col + width][c] + 1) >> 1;
            }
      }
      for (col = 1; col < width; col += 2)
        for (c = 1; c < 3; c++)
          if (col == width - 1)
            ip[col][c] = ip[col - 1][c];
          else
            ip[col][c] = (ip[col - 1][c] + ip[col + 1][c] + 1) >> 1;
    }
#ifdef LIBRAW_LIBRARY_BUILD
    if (!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SRAW_NO_RGB))
#endif
      /* Pass 3: YCbCr -> RGB, coefficients chosen by camera model. */
      for (; rp < ip[0]; rp += 4)
      {
#ifdef LIBRAW_LIBRARY_BUILD
        checkCancel();
#endif
        if (unique_id == 0x80000218 || unique_id == 0x80000250 || unique_id == 0x80000261 || unique_id == 0x80000281 ||
            unique_id == 0x80000287)
        {
          rp[1] = (rp[1] << 2) + hue;
          rp[2] = (rp[2] << 2) + hue;
          pix[0] = rp[0] + ((50 * rp[1] + 22929 * rp[2]) >> 14);
          pix[1] = rp[0] + ((-5640 * rp[1] - 11751 * rp[2]) >> 14);
          pix[2] = rp[0] + ((29040 * rp[1] - 101 * rp[2]) >> 14);
        }
        else
        {
          if (unique_id < 0x80000218)
            rp[0] -= 512;
          pix[0] = rp[0] + rp[2];
          pix[2] = rp[0] + rp[1];
          pix[1] = rp[0] + ((-778 * rp[1] - (rp[2] << 11)) >> 12);
        }
        FORC3 rp[c] = CLIP15(pix[c] * sraw_mul[c] >> 10);
      }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    ljpeg_end(&jh);
    throw;
  }
  height = saved_h;
  width = saved_w;
#endif
  ljpeg_end(&jh);
  maximum = 0x3fff;
}
/* Store one decoded DNG pixel (all tiff_samples components at *rp) into
   raw_image or image[], applying 'curve' and advancing *rp past the pixel.
   For two-sample files, shot_select picks which sample is used. */
void CLASS adobe_copy_pixel(unsigned row, unsigned col, ushort **rp)
{
  int c;
  if (tiff_samples == 2 && shot_select)
    (*rp)++;
  if (raw_image)
  {
    if (row < raw_height && col < raw_width)
      RAW(row, col) = curve[**rp];
    *rp += tiff_samples;
  }
  else
  {
#ifdef LIBRAW_LIBRARY_BUILD
    if (row < raw_height && col < raw_width)
      FORC(tiff_samples)
      image[row * raw_width + col][c] = curve[(*rp)[c]];
    *rp += tiff_samples;
#else
    if (row < height && col < width)
      FORC(tiff_samples)
      image[row * width + col][c] = curve[(*rp)[c]];
    *rp += tiff_samples;
#endif
  }
  /* Undo the shot_select offset so *rp advances by exactly tiff_samples. */
  if (tiff_samples == 2 && shot_select)
    (*rp)--;
}
/* Decode one 8x8 DCT block (baseline-JPEG style: DC difference plus
   run-length/size AC coefficients in zigzag order), dequantize, apply the
   separable 2-D inverse DCT, and leave the clipped result in jh->idct. */
void CLASS ljpeg_idct(struct jhead *jh)
{
  int c, i, j, len, skip, coef;
  float work[3][8][8];
  static float cs[106] = {0};
  static const uchar zigzag[80] = {0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33,
                                   40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36,
                                   29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54,
                                   47, 55, 62, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63};
  /* Lazily build the cosine table used by both 1-D passes. */
  if (!cs[0])
    FORC(106) cs[c] = cos((c & 31) * M_PI / 16) / 2;
  memset(work, 0, sizeof work);
  /* DC coefficient: running predictor plus Huffman-coded difference. */
  work[0][0][0] = jh->vpred[0] += ljpeg_diff(jh->huff[0]) * jh->quant[0];
  for (i = 1; i < 64; i++)
  {
    len = gethuff(jh->huff[16]);
    i += skip = len >> 4; /* high nibble: zero run length */
    if (!(len &= 15) && skip < 15)
      break; /* EOB */
    coef = getbits(len);
    if ((coef & (1 << (len - 1))) == 0)
      coef -= (1 << len) - 1;
    ((float *)work)[zigzag[i]] = coef * jh->quant[i];
  }
  FORC(8) work[0][0][c] *= M_SQRT1_2;
  FORC(8) work[0][c][0] *= M_SQRT1_2;
  /* Two 1-D IDCT passes: rows, then columns. */
  for (i = 0; i < 8; i++)
    for (j = 0; j < 8; j++)
      FORC(8) work[1][i][j] += work[0][i][c] * cs[(j * 2 + 1) * c];
  for (i = 0; i < 8; i++)
    for (j = 0; j < 8; j++)
      FORC(8) work[2][i][j] += work[1][c][j] * cs[(i * 2 + 1) * c];
  FORC(64) jh->idct[c] = CLIP(((float *)work[2])[c] + 0.5);
}
/* Load a DNG compressed with JPEG: tiles are visited left-to-right,
   top-to-bottom; each tile is either lossy 8x8 DCT (SOF1, algo 0xc1) or
   lossless Huffman (SOF3, algo 0xc3), and pixels are emitted through
   adobe_copy_pixel(). */
void CLASS lossless_dng_load_raw()
{
  unsigned save, trow = 0, tcol = 0, jwide, jrow, jcol, row, col, i, j;
  struct jhead jh;
  ushort *rp;
  while (trow < raw_height)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    save = ftell(ifp);
    if (tile_length < INT_MAX)
      fseek(ifp, get4(), SEEK_SET);
    if (!ljpeg_start(&jh, 0))
      break;
    jwide = jh.wide;
    if (filters)
      jwide *= jh.clrs;
    jwide /= MIN(is_raw, tiff_samples);
#ifdef LIBRAW_LIBRARY_BUILD
    try
    {
#endif
      switch (jh.algo)
      {
      case 0xc1: /* lossy: decode 8x8 IDCT blocks */
        jh.vpred[0] = 16384;
        getbits(-1);
        for (jrow = 0; jrow + 7 < jh.high; jrow += 8)
        {
#ifdef LIBRAW_LIBRARY_BUILD
          checkCancel();
#endif
          for (jcol = 0; jcol + 7 < jh.wide; jcol += 8)
          {
            ljpeg_idct(&jh);
            rp = jh.idct;
            row = trow + jcol / tile_width + jrow * 2;
            col = tcol + jcol % tile_width;
            for (i = 0; i < 16; i += 2)
              for (j = 0; j < 8; j++)
                adobe_copy_pixel(row + i, col + j, &rp);
          }
        }
        break;
      case 0xc3: /* lossless: decode row by row */
        for (row = col = jrow = 0; jrow < jh.high; jrow++)
        {
#ifdef LIBRAW_LIBRARY_BUILD
          checkCancel();
#endif
          rp = ljpeg_row(jrow, &jh);
          /* Single-sample file whose JPEG components together span one raw
             row: walk all clrs*jwide values linearly. */
          if (tiff_samples == 1 && jh.clrs > 1 && jh.clrs * jwide == raw_width)
            for (jcol = 0; jcol < jwide * jh.clrs; jcol++)
            {
              adobe_copy_pixel(trow + row, tcol + col, &rp);
              if (++col >= tile_width || col >= raw_width)
                row += 1 + (col = 0);
            }
          else
            for (jcol = 0; jcol < jwide; jcol++)
            {
              adobe_copy_pixel(trow + row, tcol + col, &rp);
              if (++col >= tile_width || col >= raw_width)
                row += 1 + (col = 0);
            }
        }
      }
#ifdef LIBRAW_LIBRARY_BUILD
    }
    catch (...)
    {
      ljpeg_end(&jh);
      throw;
    }
#endif
    fseek(ifp, save + 4, SEEK_SET);
    if ((tcol += tile_width) >= raw_width)
      trow += tile_length + (tcol = 0);
    ljpeg_end(&jh);
  }
}
/* Load an uncompressed DNG: each row is either 16-bit samples (read and
   byte-swapped via read_shorts) or a packed bitstream of tiff_bps-bit
   samples, then distributed through adobe_copy_pixel(). */
void CLASS packed_dng_load_raw()
{
  ushort *pixel, *rp;
  int row, col;
  pixel = (ushort *)calloc(raw_width, tiff_samples * sizeof *pixel);
  merror(pixel, "packed_dng_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (row = 0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      if (tiff_bps == 16)
        read_shorts(pixel, raw_width * tiff_samples);
      else
      {
        getbits(-1); /* reset the bit reservoir at each row */
        for (col = 0; col < raw_width * tiff_samples; col++)
          pixel[col] = getbits(tiff_bps);
      }
      for (rp = pixel, col = 0; col < raw_width; col++)
        adobe_copy_pixel(row, col, &rp);
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(pixel);
    throw;
  }
#endif
  free(pixel);
}
/* Pentax PEF loader: build a 12-bit Huffman table from the code spec at
   meta_offset, then decode per-pixel differences with two vertical
   predictors (first two columns) and two alternating horizontal ones. */
void CLASS pentax_load_raw()
{
  ushort bit[2][15], huff[4097];
  int dep, row, col, diff, c, i;
  ushort vpred[2][2] = {{0, 0}, {0, 0}}, hpred[2];
  fseek(ifp, meta_offset, SEEK_SET);
  dep = (get2() + 12) & 15;
  fseek(ifp, 12, SEEK_CUR);
  /* bit[0] = code values, bit[1] = code lengths; expand into a flat
     4096-entry lookup table (huff[0] = max code length 12). */
  FORC(dep) bit[0][c] = get2();
  FORC(dep) bit[1][c] = fgetc(ifp);
  FORC(dep)
  for (i = bit[0][c]; i <= ((bit[0][c] + (4096 >> bit[1][c]) - 1) & 4095);)
    huff[++i] = bit[1][c] << 8 | c;
  huff[0] = 12;
  fseek(ifp, data_offset, SEEK_SET);
  getbits(-1);
  for (row = 0; row < raw_height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < raw_width; col++)
    {
      diff = ljpeg_diff(huff);
      if (col < 2)
        hpred[col] = vpred[row & 1][col] += diff;
      else
        hpred[col & 1] += diff;
      RAW(row, col) = hpred[col & 1];
      if (hpred[col & 1] >> tiff_bps)
        derror();
    }
  }
}
#ifdef LIBRAW_LIBRARY_BUILD
/* Nikon Coolscan (scanner NEF) loader: read packed RGB rows (8- or 16-bit
   per channel) straight into image[], applying the user-selected gamma
   curve built here via gamma_curve(). */
void CLASS nikon_coolscan_load_raw()
{
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
  int bypp = tiff_bps <= 8 ? 1 : 2;
  int bufsize = width * 3 * bypp;
  if (tiff_bps <= 8)
    gamma_curve(1.0 / imgdata.params.coolscan_nef_gamma, 0., 1, 255);
  else
    gamma_curve(1.0 / imgdata.params.coolscan_nef_gamma, 0., 1, 65535);
  fseek(ifp, data_offset, SEEK_SET);
  unsigned char *buf = (unsigned char *)malloc(bufsize);
  unsigned short *ubuf = (unsigned short *)buf;
  for (int row = 0; row < raw_height; row++)
  {
    /* NOTE(review): the fread result is captured but never checked — a
       short read silently leaves stale buffer contents. */
    int red = fread(buf, 1, bufsize, ifp);
    unsigned short(*ip)[4] = (unsigned short(*)[4])image + row * width;
    if (tiff_bps <= 8)
      for (int col = 0; col < width; col++)
      {
        ip[col][0] = curve[buf[col * 3]];
        ip[col][1] = curve[buf[col * 3 + 1]];
        ip[col][2] = curve[buf[col * 3 + 2]];
        ip[col][3] = 0;
      }
    else
      for (int col = 0; col < width; col++)
      {
        ip[col][0] = curve[ubuf[col * 3]];
        ip[col][1] = curve[ubuf[col * 3 + 1]];
        ip[col][2] = curve[ubuf[col * 3 + 2]];
        ip[col][3] = 0;
      }
  }
  free(buf);
}
#endif
/*
   Nikon NEF compressed raw: selects one of six fixed Huffman trees by
   bit depth and lossy/lossless mode, optionally rebuilds the tone
   curve by linear interpolation of sparse sample points, and decodes
   differences with horizontal/vertical predictors.  Lossy streams may
   switch to a second tree at the "split" row.
*/
void CLASS nikon_load_raw()
{
  static const uchar nikon_tree[][32] = {
      {0, 1, 5, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0, /* 12-bit lossy */
       5, 4, 3, 6, 2, 7, 1, 0, 8, 9, 11, 10, 12},
      {0, 1, 5, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0, /* 12-bit lossy after split */
       0x39, 0x5a, 0x38, 0x27, 0x16, 5, 4, 3, 2, 1, 0, 11, 12, 12},
      {0, 1, 4, 2, 3, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 12-bit lossless */
       5, 4, 6, 3, 7, 2, 8, 1, 9, 0, 10, 11, 12},
      {0, 1, 4, 3, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0, /* 14-bit lossy */
       5, 6, 4, 7, 8, 3, 9, 2, 1, 0, 10, 11, 12, 13, 14},
      {0, 1, 5, 1, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, /* 14-bit lossy after split */
       8, 0x5c, 0x4b, 0x3a, 0x29, 7, 6, 5, 4, 3, 2, 1, 0, 13, 14},
      {0, 1, 4, 2, 2, 3, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, /* 14-bit lossless */
       7, 6, 8, 5, 9, 4, 10, 3, 11, 12, 2, 0, 1, 13, 14}};
  ushort *huff, ver0, ver1, vpred[2][2], hpred[2], csize;
  int i, min, max, step = 0, tree = 0, split = 0, row, col, len, shl, diff;
  fseek(ifp, meta_offset, SEEK_SET);
  ver0 = fgetc(ifp);
  ver1 = fgetc(ifp);
  if (ver0 == 0x49 || ver1 == 0x58)
    fseek(ifp, 2110, SEEK_CUR);
  if (ver0 == 0x46)
    tree = 2;
  if (tiff_bps == 14)
    tree += 3;
  read_shorts(vpred[0], 4);
  max = 1 << tiff_bps & 0x7fff;
  if ((csize = get2()) > 1)
    step = max / (csize - 1);
  if (ver0 == 0x44 && ver1 == 0x20 && step > 0)
  {
    // Sparse curve: read csize anchor points, interpolate the rest.
    for (i = 0; i < csize; i++)
      curve[i * step] = get2();
    for (i = 0; i < max; i++)
      curve[i] = (curve[i - i % step] * (step - i % step) + curve[i - i % step + step] * (i % step)) / step;
    fseek(ifp, meta_offset + 562, SEEK_SET);
    split = get2();
  }
  else if (ver0 != 0x46 && csize <= 0x4001)
    read_shorts(curve, max = csize);
  // Trim trailing duplicate curve entries to find the real maximum.
  while (curve[max - 2] == curve[max - 1])
    max--;
  huff = make_decoder(nikon_tree[tree]);
  fseek(ifp, data_offset, SEEK_SET);
  getbits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (min = row = 0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      if (split && row == split)
      {
        // Lossy NEFs switch Huffman trees partway down the frame.
        free(huff);
        huff = make_decoder(nikon_tree[tree + 1]);
        max += (min = 16) << 1;
      }
      for (col = 0; col < raw_width; col++)
      {
        i = gethuff(huff);
        len = i & 15;
        shl = i >> 4;
        diff = ((getbits(len - shl) << 1) + 1) << shl >> 1;
        if ((diff & (1 << (len - 1))) == 0)
          diff -= (1 << len) - !shl;
        if (col < 2)
          hpred[col] = vpred[row & 1][col] += diff;
        else
          hpred[col & 1] += diff;
        if ((ushort)(hpred[col & 1] + min) >= max)
          derror();
        RAW(row, col) = curve[LIM((short)hpred[col & 1], 0, 0x3fff)];
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(huff);
    throw;
  }
#endif
  free(huff);
}
/*
   Nikon "small NEF" YUV format: 6 bytes encode two pixels' worth of
   12-bit Y/U/V components; each pixel is converted to RGB, mapped
   through the curve, and divided by the camera white-balance
   multipliers.
*/
void CLASS nikon_yuv_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  int row, col, yuv[4], rgb[3], b, c;
  UINT64 bitbuf = 0;
  float cmul[4];
  FORC4 { cmul[c] = cam_mul[c] > 0.001f ? cam_mul[c] : 1.f; }
  for (row = 0; row < raw_height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < raw_width; col++)
    {
      if (!(b = col & 1))
      {
        // Even column: refill the 48-bit buffer holding two pixels.
        bitbuf = 0;
        FORC(6) bitbuf |= (UINT64)fgetc(ifp) << c * 8;
        FORC(4) yuv[c] = (bitbuf >> c * 12 & 0xfff) - (c >> 1 << 11);
      }
      rgb[0] = yuv[b] + 1.370705 * yuv[3];
      rgb[1] = yuv[b] - 0.337633 * yuv[2] - 0.698001 * yuv[3];
      rgb[2] = yuv[b] + 1.732446 * yuv[2];
      FORC3 image[row * width + col][c] = curve[LIM(rgb[c], 0, 0xfff)] / cmul[c];
    }
  }
}
/*
Returns 1 for a Coolpix 995, 0 for anything else.
*/
/*
   Heuristic: the last 2000 bytes of an E995 file are dominated by the
   byte values 0x00, 0x55, 0xaa and 0xff (at least 200 hits of each).

   Fix: fgetc() returns -1 (EOF) when the stream runs short (e.g. a
   truncated file less than 2000 bytes long), and the original
   histo[fgetc(ifp)]++ would then write out of bounds.  Stop counting
   at EOF instead.
*/
int CLASS nikon_e995()
{
  int i, ch, histo[256];
  const uchar often[] = {0x00, 0x55, 0xaa, 0xff};
  memset(histo, 0, sizeof histo);
  fseek(ifp, -2000, SEEK_END);
  for (i = 0; i < 2000; i++)
  {
    if ((ch = fgetc(ifp)) < 0)
      break; // truncated stream: avoid indexing histo[-1]
    histo[ch]++;
  }
  for (i = 0; i < 4; i++)
    if (histo[often[i]] < 200)
      return 0;
  return 1;
}
/*
Returns 1 for a Coolpix 2100, 0 for anything else.
*/
/*
   Scan 1024 twelve-byte groups from the start of the file; a Coolpix
   2100 file matches a fixed bit pattern in every group.
*/
int CLASS nikon_e2100()
{
  uchar probe[12];
  fseek(ifp, 0, SEEK_SET);
  for (int blk = 0; blk < 1024; blk++)
  {
    fread(probe, 1, 12, ifp);
    int pattern = ((probe[2] & probe[4] & probe[7] & probe[9]) >> 4) & probe[1] & probe[6] & probe[8] & probe[11] & 3;
    if (pattern != 3)
      return 0;
  }
  return 1;
}
/*
   Several cameras write indistinguishable files; identify them by a
   6-bit signature assembled from two bytes at offset 3072 and patch
   make/model accordingly.
*/
void CLASS nikon_3700()
{
  uchar dp[24];
  static const struct
  {
    int bits;
    char t_make[12], t_model[15];
  } table[] = {
      {0x00, "Pentax", "Optio 33WR"}, {0x03, "Nikon", "E3200"}, {0x32, "Nikon", "E3700"}, {0x33, "Olympus", "C740UZ"}};
  fseek(ifp, 3072, SEEK_SET);
  fread(dp, 1, 24, ifp);
  int key = (dp[8] & 3) << 4 | (dp[20] & 3);
  for (size_t idx = 0; idx < sizeof table / sizeof *table; idx++)
  {
    if (key == table[idx].bits)
    {
      strcpy(make, table[idx].t_make);
      strcpy(model, table[idx].t_model);
    }
  }
}
/*
Separates a Minolta DiMAGE Z2 from a Nikon E4300.
*/
/*
   Counts nonzero bytes in the file's 424-byte tail; more than 20
   nonzero bytes indicates the DiMAGE Z2.
*/
int CLASS minolta_z2()
{
  char tail[424];
  int nonzero = 0;
  fseek(ifp, -sizeof tail, SEEK_END);
  fread(tail, 1, sizeof tail, ifp);
  for (size_t i = 0; i < sizeof tail; i++)
    nonzero += (tail[i] != 0);
  return nonzero > 20;
}
//@end COMMON
void CLASS jpeg_thumb();
//@out COMMON
/*
   Copy an embedded 8-bit RGB thumbnail to the output as binary PPM.
*/
void CLASS ppm_thumb()
{
  thumb_length = thumb_width * thumb_height * 3;
  char *buf = (char *)malloc(thumb_length);
  merror(buf, "ppm_thumb()");
  fprintf(ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
  fread(buf, 1, thumb_length, ifp);
  fwrite(buf, 1, thumb_length, ofp);
  free(buf);
}
/*
   Convert an embedded 16-bit RGB thumbnail to 8-bit binary PPM by
   keeping the high byte of each sample.
*/
void CLASS ppm16_thumb()
{
  thumb_length = thumb_width * thumb_height * 3;
  char *buf = (char *)calloc(thumb_length, 2);
  merror(buf, "ppm16_thumb()");
  read_shorts((ushort *)buf, thumb_length);
  /* In-place narrowing: sample i's high byte lands at byte offset i. */
  for (int i = 0; i < thumb_length; i++)
    buf[i] = ((ushort *)buf)[i] >> 8;
  fprintf(ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
  fwrite(buf, 1, thumb_length, ofp);
  free(buf);
}
/*
   Emit a planar (layered) thumbnail as PGM/PPM: channel count and
   channel order come from thumb_misc, and the map[] table reorders
   planes ("012" or "102") while writing interleaved output.
*/
void CLASS layer_thumb()
{
  int i, c;
  char *thumb, map[][4] = {"012", "102"};
  colors = thumb_misc >> 5 & 7;
  thumb_length = thumb_width * thumb_height;
  thumb = (char *)calloc(colors, thumb_length);
  merror(thumb, "layer_thumb()");
  // P5 = grayscale (1 channel), P6 = RGB (3 channels).
  fprintf(ofp, "P%d\n%d %d\n255\n", 5 + (colors >> 1), thumb_width, thumb_height);
  fread(thumb, thumb_length, colors, ifp);
  for (i = 0; i < thumb_length; i++)
    FORCC putc(thumb[i + thumb_length * (map[thumb_misc >> 8][c] - '0')], ofp);
  free(thumb);
}
/*
   Expand a 16-bit 5-6-5 packed thumbnail to 8-bit-per-channel PPM.
*/
void CLASS rollei_thumb()
{
  thumb_length = thumb_width * thumb_height;
  ushort *px = (ushort *)calloc(thumb_length, 2);
  merror(px, "rollei_thumb()");
  fprintf(ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
  read_shorts(px, thumb_length);
  for (unsigned i = 0; i < thumb_length; i++)
  {
    /* 5 bits, 6 bits, 5 bits -> three 8-bit channels. */
    putc(px[i] << 3, ofp);
    putc(px[i] >> 5 << 2, ofp);
    putc(px[i] >> 11 << 3, ofp);
  }
  free(px);
}
/*
   Rollei packed raw: each 10-byte record carries five 16-bit values
   for "iten" positions plus packed 10-bit values for "isix"
   positions; every target index is bounds-checked against maxpixel
   before the store.
*/
void CLASS rollei_load_raw()
{
  uchar pixel[10];
  unsigned iten = 0, isix, i, buffer = 0, todo[16];
#ifdef LIBRAW_LIBRARY_BUILD
  if(raw_width > 32767 || raw_height > 32767)
    throw LIBRAW_EXCEPTION_IO_BADFILE;
#endif
  unsigned maxpixel = raw_width*(raw_height+7);
  isix = raw_width * raw_height * 5 / 8;
  while (fread(pixel, 1, 10, ifp) == 10)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (i = 0; i < 10; i += 2)
    {
      todo[i] = iten++;
      todo[i + 1] = pixel[i] << 8 | pixel[i + 1];
      buffer = pixel[i] >> 2 | buffer << 6;
    }
    for (; i < 16; i += 2)
    {
      todo[i] = isix++;
      todo[i + 1] = buffer >> (14 - i) * 5;
    }
    for (i = 0; i < 16; i += 2)
      if(todo[i] < maxpixel)
        raw_image[todo[i]] = (todo[i + 1] & 0x3ff);
      else
        derror();
  }
  maximum = 0x3ff;
}
/* Bounds-checked raw accessor: out-of-range coordinates read as 0. */
int CLASS raw(unsigned row, unsigned col)
{
  if (row >= raw_height || col >= raw_width)
    return 0;
  return RAW(row, col);
}
void CLASS phase_one_flat_field(int is_float, int nc)
{
ushort head[8];
unsigned wide, high, y, x, c, rend, cend, row, col;
float *mrow, num, mult[4];
read_shorts(head, 8);
if (head[2] * head[3] * head[4] * head[5] == 0)
return;
wide = head[2] / head[4] + (head[2] % head[4] != 0);
high = head[3] / head[5] + (head[3] % head[5] != 0);
mrow = (float *)calloc(nc * wide, sizeof *mrow);
merror(mrow, "phase_one_flat_field()");
for (y = 0; y < high; y++)
{
#ifdef LIBRAW_LIBRARY_BUILD
checkCancel();
#endif
for (x = 0; x < wide; x++)
for (c = 0; c < nc; c += 2)
{
num = is_float ? getreal(11) : get2() / 32768.0;
if (y == 0)
mrow[c * wide + x] = num;
else
mrow[(c + 1) * wide + x] = (num - mrow[c * wide + x]) / head[5];
}
if (y == 0)
continue;
rend = head[1] + y * head[5];
for (row = rend - head[5]; row < raw_height && row < rend && row < head[1] + head[3] - head[5]; row++)
{
for (x = 1; x < wide; x++)
{
for (c = 0; c < nc; c += 2)
{
mult[c] = mrow[c * wide + x - 1];
mult[c + 1] = (mrow[c * wide + x] - mult[c]) / head[4];
}
cend = head[0] + x * head[4];
for (col = cend - head[4]; col < raw_width && col < cend && col < head[0] + head[2] - head[4]; col++)
{
c = nc > 2 ? FC(row - top_margin, col - left_margin) : 0;
if (!(c & 1))
{
c = RAW(row, col) * mult[c];
RAW(row, col) = LIM(c, 0, 65535);
}
for (c = 0; c < nc; c += 2)
mult[c] += mult[c + 1];
}
}
for (x = 0; x < wide; x++)
for (c = 0; c < nc; c += 2)
mrow[c * wide + x] += mrow[(c + 1) * wide + x];
}
}
free(mrow);
}
/*
   Walk the Phase One correction records stored at meta_offset and
   apply them in file order: polynomial tone curves (tags 0x419 right
   half, 0x41a whole image), sensor-defect repair (0x400), flat fields
   (0x401, 0x410/0x416, 0x40b), quadrant linearization (0x41f),
   quadrant multipliers (0x41e) and combined quadrant curves (0x431).
   Tag 0x412 records are only located here; the best-matching one is
   applied after the scan.  Returns 0 on success or
   LIBRAW_CANCELLED_BY_CALLBACK if the user cancelled.
*/
int CLASS phase_one_correct()
{
  unsigned entries, tag, data, save, col, row, type;
  int len, i, j, k, cip, val[4], dev[4], sum, max;
  int head[9], diff, mindiff = INT_MAX, off_412 = 0;
  /* static */ const signed char dir[12][2] = {{-1, -1}, {-1, 1}, {1, -1}, {1, 1}, {-2, 0}, {0, -2},
                                               {0, 2}, {2, 0}, {-2, -2}, {-2, 2}, {2, -2}, {2, 2}};
  float poly[8], num, cfrac, frac, mult[2], *yval[2] = {NULL, NULL};
  ushort *xval[2];
  int qmult_applied = 0, qlin_applied = 0;
#ifdef LIBRAW_LIBRARY_BUILD
  if (!meta_length)
#else
  if (half_size || !meta_length)
#endif
    return 0;
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Phase One correction...\n"));
#endif
  fseek(ifp, meta_offset, SEEK_SET);
  order = get2();
  fseek(ifp, 6, SEEK_CUR);
  fseek(ifp, meta_offset + get4(), SEEK_SET);
  entries = get4();
  get4();
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    while (entries--)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      tag = get4();
      len = get4();
      data = get4();
      save = ftell(ifp);
      fseek(ifp, meta_offset + data, SEEK_SET);
      if (tag == 0x419)
      { /* Polynomial curve */
        for (get4(), i = 0; i < 8; i++)
          poly[i] = getreal(11);
        poly[3] += (ph1.tag_210 - poly[7]) * poly[6] + 1;
        for (i = 0; i < 0x10000; i++)
        {
          num = (poly[5] * i + poly[3]) * i + poly[1];
          curve[i] = LIM(num, 0, 65535);
        }
        goto apply; /* apply to right half */
      }
      else if (tag == 0x41a)
      { /* Polynomial curve */
        for (i = 0; i < 4; i++)
          poly[i] = getreal(11);
        for (i = 0; i < 0x10000; i++)
        {
          for (num = 0, j = 4; j--;)
            num = num * i + poly[j];
          curve[i] = LIM(num + i, 0, 65535);
        }
      apply: /* apply to whole image */
        for (row = 0; row < raw_height; row++)
        {
#ifdef LIBRAW_LIBRARY_BUILD
          checkCancel();
#endif
          // (tag & 1) restricts tag 0x419 to columns right of split_col.
          for (col = (tag & 1) * ph1.split_col; col < raw_width; col++)
            RAW(row, col) = curve[RAW(row, col)];
        }
      }
      else if (tag == 0x400)
      { /* Sensor defects */
        while ((len -= 8) >= 0)
        {
          col = get2();
          row = get2();
          type = get2();
          get2();
          if (col >= raw_width)
            continue;
          if (type == 131 || type == 137) /* Bad column */
            for (row = 0; row < raw_height; row++)
              if (FC(row - top_margin, col - left_margin) == 1)
              {
                // Green site: replace with mean of 4 diagonal neighbors,
                // discarding the largest outlier.
                for (sum = i = 0; i < 4; i++)
                  sum += val[i] = raw(row + dir[i][0], col + dir[i][1]);
                for (max = i = 0; i < 4; i++)
                {
                  dev[i] = abs((val[i] << 2) - sum);
                  if (dev[max] < dev[i])
                    max = i;
                }
                RAW(row, col) = (sum - val[max]) / 3.0 + 0.5;
              }
              else
              {
                for (sum = 0, i = 8; i < 12; i++)
                  sum += raw(row + dir[i][0], col + dir[i][1]);
                RAW(row, col) = 0.5 + sum * 0.0732233 + (raw(row, col - 2) + raw(row, col + 2)) * 0.3535534;
              }
          else if (type == 129)
          { /* Bad pixel */
            if (row >= raw_height)
              continue;
            j = (FC(row - top_margin, col - left_margin) != 1) * 4;
            for (sum = 0, i = j; i < j + 8; i++)
              sum += raw(row + dir[i][0], col + dir[i][1]);
            RAW(row, col) = (sum + 4) >> 3;
          }
        }
      }
      else if (tag == 0x401)
      { /* All-color flat fields */
        phase_one_flat_field(1, 2);
      }
      else if (tag == 0x416 || tag == 0x410)
      {
        phase_one_flat_field(0, 2);
      }
      else if (tag == 0x40b)
      { /* Red+blue flat field */
        phase_one_flat_field(0, 4);
      }
      else if (tag == 0x412)
      {
        // Remember the 0x412 record closest to tag_21a; applied below.
        fseek(ifp, 36, SEEK_CUR);
        diff = abs(get2() - ph1.tag_21a);
        if (mindiff > diff)
        {
          mindiff = diff;
          off_412 = ftell(ifp) - 38;
        }
      }
      else if (tag == 0x41f && !qlin_applied)
      { /* Quadrant linearization */
        ushort lc[2][2][16], ref[16];
        int qr, qc;
        for (qr = 0; qr < 2; qr++)
          for (qc = 0; qc < 2; qc++)
            for (i = 0; i < 16; i++)
              lc[qr][qc][i] = get4();
        for (i = 0; i < 16; i++)
        {
          int v = 0;
          for (qr = 0; qr < 2; qr++)
            for (qc = 0; qc < 2; qc++)
              v += lc[qr][qc][i];
          ref[i] = (v + 2) >> 2;
        }
        for (qr = 0; qr < 2; qr++)
        {
          for (qc = 0; qc < 2; qc++)
          {
            int cx[19], cf[19];
            for (i = 0; i < 16; i++)
            {
              cx[1 + i] = lc[qr][qc][i];
              cf[1 + i] = ref[i];
            }
            cx[0] = cf[0] = 0;
            cx[17] = cf[17] = ((unsigned int)ref[15] * 65535) / lc[qr][qc][15];
            cf[18] = cx[18] = 65535;
            cubic_spline(cx, cf, 19);
            for (row = (qr ? ph1.split_row : 0); row < (qr ? raw_height : ph1.split_row); row++)
            {
#ifdef LIBRAW_LIBRARY_BUILD
              checkCancel();
#endif
              for (col = (qc ? ph1.split_col : 0); col < (qc ? raw_width : ph1.split_col); col++)
                RAW(row, col) = curve[RAW(row, col)];
            }
          }
        }
        qlin_applied = 1;
      }
      else if (tag == 0x41e && !qmult_applied)
      { /* Quadrant multipliers */
        float qmult[2][2] = {{1, 1}, {1, 1}};
        get4();
        get4();
        get4();
        get4();
        qmult[0][0] = 1.0 + getreal(11);
        get4();
        get4();
        get4();
        get4();
        get4();
        qmult[0][1] = 1.0 + getreal(11);
        get4();
        get4();
        get4();
        qmult[1][0] = 1.0 + getreal(11);
        get4();
        get4();
        get4();
        qmult[1][1] = 1.0 + getreal(11);
        for (row = 0; row < raw_height; row++)
        {
#ifdef LIBRAW_LIBRARY_BUILD
          checkCancel();
#endif
          for (col = 0; col < raw_width; col++)
          {
            i = qmult[row >= ph1.split_row][col >= ph1.split_col] * RAW(row, col);
            RAW(row, col) = LIM(i, 0, 65535);
          }
        }
        qmult_applied = 1;
      }
      else if (tag == 0x431 && !qmult_applied)
      { /* Quadrant combined */
        ushort lc[2][2][7], ref[7];
        int qr, qc;
        for (i = 0; i < 7; i++)
          ref[i] = get4();
        for (qr = 0; qr < 2; qr++)
          for (qc = 0; qc < 2; qc++)
            for (i = 0; i < 7; i++)
              lc[qr][qc][i] = get4();
        for (qr = 0; qr < 2; qr++)
        {
          for (qc = 0; qc < 2; qc++)
          {
            int cx[9], cf[9];
            for (i = 0; i < 7; i++)
            {
              cx[1 + i] = ref[i];
              cf[1 + i] = ((unsigned)ref[i] * lc[qr][qc][i]) / 10000;
            }
            cx[0] = cf[0] = 0;
            cx[8] = cf[8] = 65535;
            cubic_spline(cx, cf, 9);
            for (row = (qr ? ph1.split_row : 0); row < (qr ? raw_height : ph1.split_row); row++)
            {
#ifdef LIBRAW_LIBRARY_BUILD
              checkCancel();
#endif
              for (col = (qc ? ph1.split_col : 0); col < (qc ? raw_width : ph1.split_col); col++)
                RAW(row, col) = curve[RAW(row, col)];
            }
          }
        }
        qmult_applied = 1;
        qlin_applied = 1;
      }
      fseek(ifp, save, SEEK_SET);
    }
    if (off_412)
    {
      // Apply the selected 0x412 row-dependent luma correction.
      fseek(ifp, off_412, SEEK_SET);
      for (i = 0; i < 9; i++)
        head[i] = get4() & 0x7fff;
      yval[0] = (float *)calloc(head[1] * head[3] + head[2] * head[4], 6);
      merror(yval[0], "phase_one_correct()");
      yval[1] = (float *)(yval[0] + head[1] * head[3]);
      xval[0] = (ushort *)(yval[1] + head[2] * head[4]);
      xval[1] = (ushort *)(xval[0] + head[1] * head[3]);
      get2();
      for (i = 0; i < 2; i++)
        for (j = 0; j < head[i + 1] * head[i + 3]; j++)
          yval[i][j] = getreal(11);
      for (i = 0; i < 2; i++)
        for (j = 0; j < head[i + 1] * head[i + 3]; j++)
          xval[i][j] = get2();
      for (row = 0; row < raw_height; row++)
      {
#ifdef LIBRAW_LIBRARY_BUILD
        checkCancel();
#endif
        for (col = 0; col < raw_width; col++)
        {
          cfrac = (float)col * head[3] / raw_width;
          cfrac -= cip = cfrac;
          num = RAW(row, col) * 0.5;
          for (i = cip; i < cip + 2; i++)
          {
            for (k = j = 0; j < head[1]; j++)
              if (num < xval[0][k = head[1] * i + j])
                break;
            frac = (j == 0 || j == head[1]) ? 0 : (xval[0][k] - num) / (xval[0][k] - xval[0][k - 1]);
            mult[i - cip] = yval[0][k - 1] * frac + yval[0][k] * (1 - frac);
          }
          i = ((mult[0] * (1 - cfrac) + mult[1] * cfrac) * row + num) * 2;
          RAW(row, col) = LIM(i, 0, 65535);
        }
      }
      free(yval[0]);
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    if (yval[0])
      free(yval[0]);
    return LIBRAW_CANCELLED_BY_CALLBACK;
  }
#endif
  return 0;
}
/*
   Phase One uncompressed raw: reads the whole frame as 16-bit shorts
   and, for "format" files, de-scrambles pixel pairs with two XOR keys
   and a format-dependent bit mask.  In library builds the per-column
   and per-row black reference data is also copied aside.
*/
void CLASS phase_one_load_raw()
{
  int a, b, i;
  ushort akey, bkey, t_mask;
  fseek(ifp, ph1.key_off, SEEK_SET);
  akey = get2();
  bkey = get2();
  t_mask = ph1.format == 1 ? 0x5555 : 0x1354;
#ifdef LIBRAW_LIBRARY_BUILD
  if (ph1.black_col || ph1.black_row)
  {
    imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height * 2, sizeof(ushort));
    merror(imgdata.rawdata.ph1_cblack, "phase_one_load_raw()");
    imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width * 2, sizeof(ushort));
    merror(imgdata.rawdata.ph1_rblack, "phase_one_load_raw()");
    if (ph1.black_col)
    {
      fseek(ifp, ph1.black_col, SEEK_SET);
      read_shorts((ushort *)imgdata.rawdata.ph1_cblack[0], raw_height * 2);
    }
    if (ph1.black_row)
    {
      fseek(ifp, ph1.black_row, SEEK_SET);
      read_shorts((ushort *)imgdata.rawdata.ph1_rblack[0], raw_width * 2);
    }
  }
#endif
  fseek(ifp, data_offset, SEEK_SET);
  read_shorts(raw_image, raw_width * raw_height);
  if (ph1.format)
    // Swap masked bit groups between each even/odd pixel pair.
    for (i = 0; i < raw_width * raw_height; i += 2)
    {
      a = raw_image[i + 0] ^ akey;
      b = raw_image[i + 1] ^ bkey;
      raw_image[i + 0] = (a & t_mask) | (b & ~t_mask);
      raw_image[i + 1] = (b & t_mask) | (a & ~t_mask);
    }
}
/*
   Phase One bit/Huffman reader over a 64-bit accumulator.
   nbits == -1 resets the buffer, nbits == 0 is a no-op; otherwise the
   top nbits are peeked.  With a huff table the decoded symbol's bit
   length is consumed and its value returned; without one the peeked
   bits themselves are consumed and returned.  State lives in TLS in
   threaded builds (macros below), otherwise in function statics.
*/
unsigned CLASS ph1_bithuff(int nbits, ushort *huff)
{
#ifndef LIBRAW_NOTHREADS
#define bitbuf tls->ph1_bits.bitbuf
#define vbits tls->ph1_bits.vbits
#else
  static UINT64 bitbuf = 0;
  static int vbits = 0;
#endif
  unsigned c;
  if (nbits == -1)
    return bitbuf = vbits = 0;
  if (nbits == 0)
    return 0;
  if (vbits < nbits)
  {
    // Refill 32 bits at a time from the stream.
    bitbuf = bitbuf << 32 | get4();
    vbits += 32;
  }
  c = bitbuf << (64 - vbits) >> (64 - nbits);
  if (huff)
  {
    vbits -= huff[c] >> 8;
    return (uchar)huff[c];
  }
  vbits -= nbits;
  return c;
#ifndef LIBRAW_NOTHREADS
#undef bitbuf
#undef vbits
#endif
}
#define ph1_bits(n) ph1_bithuff(n, 0)
#define ph1_huff(h) ph1_bithuff(*h, h + 1)
/*
   Phase One compressed ("IIQ") raw: rows are addressed through a
   strip-offset table; each row is decoded with per-column 1- or
   2-level code-length selection and a two-pixel predictor.  The
   dcraw build subtracts black references inline; the library build
   stores raw values and keeps the black data in imgdata.rawdata.
*/
void CLASS phase_one_load_raw_c()
{
  static const int length[] = {8, 7, 6, 9, 11, 10, 5, 12, 14, 13};
  int *offset, len[2], pred[2], row, col, i, j;
  ushort *pixel;
  short(*c_black)[2], (*r_black)[2];
#ifdef LIBRAW_LIBRARY_BUILD
  if (ph1.format == 6)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  // Single allocation: row buffer, offset table, then black tables.
  pixel = (ushort *)calloc(raw_width * 3 + raw_height * 4, 2);
  merror(pixel, "phase_one_load_raw_c()");
  offset = (int *)(pixel + raw_width);
  fseek(ifp, strip_offset, SEEK_SET);
  for (row = 0; row < raw_height; row++)
    offset[row] = get4();
  c_black = (short(*)[2])(offset + raw_height);
  fseek(ifp, ph1.black_col, SEEK_SET);
  if (ph1.black_col)
    read_shorts((ushort *)c_black[0], raw_height * 2);
  r_black = c_black + raw_height;
  fseek(ifp, ph1.black_row, SEEK_SET);
  if (ph1.black_row)
    read_shorts((ushort *)r_black[0], raw_width * 2);
#ifdef LIBRAW_LIBRARY_BUILD
  // Copy data to internal copy (ever if not read)
  if (ph1.black_col || ph1.black_row)
  {
    imgdata.rawdata.ph1_cblack = (short(*)[2])calloc(raw_height * 2, sizeof(ushort));
    merror(imgdata.rawdata.ph1_cblack, "phase_one_load_raw_c()");
    memmove(imgdata.rawdata.ph1_cblack, (ushort *)c_black[0], raw_height * 2 * sizeof(ushort));
    imgdata.rawdata.ph1_rblack = (short(*)[2])calloc(raw_width * 2, sizeof(ushort));
    merror(imgdata.rawdata.ph1_rblack, "phase_one_load_raw_c()");
    memmove(imgdata.rawdata.ph1_rblack, (ushort *)r_black[0], raw_width * 2 * sizeof(ushort));
  }
#endif
  for (i = 0; i < 256; i++)
    curve[i] = i * i / 3.969 + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (row = 0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      fseek(ifp, data_offset + offset[row], SEEK_SET);
      ph1_bits(-1);
      pred[0] = pred[1] = 0;
      for (col = 0; col < raw_width; col++)
      {
        if (col >= (raw_width & -8))
          len[0] = len[1] = 14;
        else if ((col & 7) == 0)
          // Every 8 columns: re-read the code lengths for both phases.
          for (i = 0; i < 2; i++)
          {
            for (j = 0; j < 5 && !ph1_bits(1); j++)
              ;
            if (j--)
              len[i] = length[j * 2 + ph1_bits(1)];
          }
        if ((i = len[col & 1]) == 14)
          pixel[col] = pred[col & 1] = ph1_bits(16);
        else
          pixel[col] = pred[col & 1] += ph1_bits(i) + 1 - (1 << (i - 1));
        if (pred[col & 1] >> 16)
          derror();
        if (ph1.format == 5 && pixel[col] < 256)
          pixel[col] = curve[pixel[col]];
      }
#ifndef LIBRAW_LIBRARY_BUILD
      for (col = 0; col < raw_width; col++)
      {
        int shift = ph1.format == 8 ? 0 : 2;
        i = (pixel[col] << shift) - ph1.t_black + c_black[row][col >= ph1.split_col] +
            r_black[col][row >= ph1.split_row];
        if (i > 0)
          RAW(row, col) = i;
      }
#else
      if (ph1.format == 8)
        memmove(&RAW(row, 0), &pixel[0], raw_width * 2);
      else
        for (col = 0; col < raw_width; col++)
          RAW(row, col) = pixel[col] << 2;
#endif
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(pixel);
    throw;
  }
#endif
  free(pixel);
  maximum = 0xfffc - ph1.t_black;
}
/*
   Hasselblad 3FR/FFF raw: a lossless-JPEG-style Huffman stream with
   per-pair difference codes and, for multi-shot files, several
   samples per position.  Decodes into raw_image (selected shot)
   and/or the full-color image[] array when present.
*/
void CLASS hasselblad_load_raw()
{
  struct jhead jh;
  int shot, row, col, *back[5], len[2], diff[12], pred, sh, f, s, c;
  unsigned upix, urow, ucol;
  ushort *ip;
  if (!ljpeg_start(&jh, 0))
    return;
  order = 0x4949;
  ph1_bits(-1);
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    back[4] = (int *)calloc(raw_width, 3 * sizeof **back);
    merror(back[4], "hasselblad_load_raw()");
    FORC3 back[c] = back[4] + c * raw_width;
    cblack[6] >>= sh = tiff_samples > 1;
    shot = LIM(shot_select, 1, tiff_samples) - 1;
    for (row = 0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      // Rotate the three history rows used by the predictors.
      FORC4 back[(c + 3) & 3] = back[c];
      for (col = 0; col < raw_width; col += 2)
      {
        for (s = 0; s < tiff_samples * 2; s += 2)
        {
          FORC(2) len[c] = ph1_huff(jh.huff[0]);
          FORC(2)
          {
            diff[s + c] = ph1_bits(len[c]);
            if ((diff[s + c] & (1 << (len[c] - 1))) == 0)
              diff[s + c] -= (1 << len[c]) - 1;
            if (diff[s + c] == 65535)
              diff[s + c] = -32768;
          }
        }
        for (s = col; s < col + 2; s++)
        {
          pred = 0x8000 + load_flags;
          if (col)
            pred = back[2][s - 2];
          if (col && row > 1)
            switch (jh.psv)
            {
            case 11:
              pred += back[0][s] / 2 - back[0][s - 2] / 2;
              break;
            }
          f = (row & 1) * 3 ^ ((col + s) & 1);
          FORC(tiff_samples)
          {
            pred += diff[(s & 1) * tiff_samples + c];
            upix = pred >> sh & 0xffff;
            if (raw_image && c == shot)
              RAW(row, s) = upix;
            if (image)
            {
              urow = row - top_margin + (c & 1);
              ucol = col - left_margin - ((c >> 1) & 1);
              ip = &image[urow * width + ucol][f];
              if (urow < height && ucol < width)
                *ip = c < 4 ? upix : (*ip + upix) >> 1;
            }
          }
          back[2][s] = pred;
        }
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(back[4]);
    ljpeg_end(&jh);
    throw;
  }
#endif
  free(back[4]);
  ljpeg_end(&jh);
  if (image)
    mix_green = 1;
}
/*
   Leaf HDR raw: tiled 16-bit data, one pass per sample plane.
   CFA files copy the selected shot's rows straight into raw_image;
   full-color files gather each plane into image[].
*/
void CLASS leaf_hdr_load_raw()
{
  ushort *pixel = 0;
  unsigned tile = 0, r, c, row, col;
  if (!filters || !raw_image)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    if(!image)
      throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
    pixel = (ushort *)calloc(raw_width, sizeof *pixel);
    merror(pixel, "leaf_hdr_load_raw()");
  }
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    FORC(tiff_samples)
    for (r = 0; r < raw_height; r++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      if (r % tile_length == 0)
      {
        // New tile: follow the next entry of the tile-offset table.
        fseek(ifp, data_offset + 4 * tile++, SEEK_SET);
        fseek(ifp, get4(), SEEK_SET);
      }
      if (filters && c != shot_select)
        continue;
      if (filters && raw_image)
        pixel = raw_image + r * raw_width;
      read_shorts(pixel, raw_width);
      if (!filters && image && (row = r - top_margin) < height)
        for (col = 0; col < width; col++)
          image[row * width + col][c] = pixel[col + left_margin];
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    if (!filters)
      free(pixel);
    throw;
  }
#endif
  if (!filters)
  {
    maximum = 0xffff;
    raw_color = 1;
    free(pixel);
  }
}
/*
   Plain 16-bit raw: read the whole frame as shorts, then optionally
   right-shift by load_flags and flag values that exceed the derived
   bit depth (but only inside the visible crop).
*/
void CLASS unpacked_load_raw()
{
  int row, col, bits = 0;
  // Smallest bit count whose range covers 'maximum'.
  while (1 << ++bits < maximum)
    ;
  read_shorts(raw_image, raw_width * raw_height);
  fseek(ifp,-2,SEEK_CUR); // avoid EOF error
  if (maximum < 0xffff || load_flags)
    for (row = 0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col = 0; col < raw_width; col++)
        if ((RAW(row, col) >>= load_flags) >> bits && (unsigned)(row - top_margin) < height &&
            (unsigned)(col - left_margin) < width)
          derror();
    }
}
/*
   Variant of unpacked_load_raw() for files stored bottom-to-top:
   rows are read in reverse order, with the same shift and range
   checking per pixel.
*/
void CLASS unpacked_load_raw_reversed()
{
  int row, col, bits = 0;
  while (1 << ++bits < maximum)
    ;
  for (row = raw_height - 1; row >= 0; row--)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    read_shorts(&raw_image[row * raw_width], raw_width);
    for (col = 0; col < raw_width; col++)
      if ((RAW(row, col) >>= load_flags) >> bits && (unsigned)(row - top_margin) < height &&
          (unsigned)(col - left_margin) < width)
        derror();
  }
}
/*
   Sinar 4-shot raw: four sub-exposures, each offset by one pixel.
   With raw_image present only the selected shot is loaded as plain
   shorts; otherwise all four shots are merged into the full-color
   image[] array, placing each by its half-pixel offset.
*/
void CLASS sinar_4shot_load_raw()
{
  ushort *pixel;
  unsigned shot, row, col, r, c;
  if (raw_image)
  {
    shot = LIM(shot_select, 1, 4) - 1;
    fseek(ifp, data_offset + shot * 4, SEEK_SET);
    fseek(ifp, get4(), SEEK_SET);
    unpacked_load_raw();
    return;
  }
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  pixel = (ushort *)calloc(raw_width, sizeof *pixel);
  merror(pixel, "sinar_4shot_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (shot = 0; shot < 4; shot++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      fseek(ifp, data_offset + shot * 4, SEEK_SET);
      fseek(ifp, get4(), SEEK_SET);
      for (row = 0; row < raw_height; row++)
      {
        read_shorts(pixel, raw_width);
        if ((r = row - top_margin - (shot >> 1 & 1)) >= height)
          continue;
        for (col = 0; col < raw_width; col++)
        {
          if ((c = col - left_margin - (shot & 1)) >= width)
            continue;
          image[r * width + c][(row & 1) * 3 ^ (~col & 1)] = pixel[col];
        }
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(pixel);
    throw;
  }
#endif
  free(pixel);
  mix_green = 1;
}
/*
   Imacon full-color raw: rows of interleaved 16-bit RGB triples read
   directly into image[].  The library build buffers a whole row per
   read; the dcraw build reads pixel by pixel.
*/
void CLASS imacon_full_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  int row, col;
#ifdef LIBRAW_LIBRARY_BUILD
  unsigned short *buf = (unsigned short *)malloc(width * 3 * sizeof(unsigned short));
  merror(buf, "imacon_full_load_raw");
#endif
  for (row = 0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
    read_shorts(buf, width * 3);
    unsigned short(*rowp)[4] = &image[row * width];
    for (col = 0; col < width; col++)
    {
      rowp[col][0] = buf[col * 3];
      rowp[col][1] = buf[col * 3 + 1];
      rowp[col][2] = buf[col * 3 + 2];
      rowp[col][3] = 0;
    }
#else
    for (col = 0; col < width; col++)
      read_shorts(image[row * width + col], 3);
#endif
  }
#ifdef LIBRAW_LIBRARY_BUILD
  free(buf);
#endif
}
/*
   Generic bit-packed raw loader driven by load_flags:
     bit 0: padded rows with a check byte every 10 columns,
     bits 1-2: interleaved half-frame ordering with a mid-file seek,
     bits 3-4: bytes consumed per refill (8/16/24/32 bits),
     bit 6: swap even/odd columns,
     bit 7: round row size up to an even byte count.
   Values of tiff_bps bits are peeled off a 64-bit accumulator.
*/
void CLASS packed_load_raw()
{
  int vbits = 0, bwide, rbits, bite, half, irow, row, col, val, i;
  UINT64 bitbuf = 0;
  bwide = raw_width * tiff_bps / 8;
  bwide += bwide & load_flags >> 7;
  rbits = bwide * 8 - raw_width * tiff_bps; // padding bits at row end
  if (load_flags & 1)
    bwide = bwide * 16 / 15;
  bite = 8 + (load_flags & 24);
  half = (raw_height + 1) >> 1;
  for (irow = 0; irow < raw_height; irow++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    row = irow;
    if (load_flags & 2 && (row = irow % half * 2 + irow / half) == 1 && load_flags & 4)
    {
      if (vbits = 0, tiff_compress)
        fseek(ifp, data_offset - (-half * bwide & -2048), SEEK_SET);
      else
      {
        fseek(ifp, 0, SEEK_END);
        fseek(ifp, ftell(ifp) >> 3 << 2, SEEK_SET);
      }
    }
    if(feof(ifp)) throw LIBRAW_EXCEPTION_IO_EOF;
    for (col = 0; col < raw_width; col++)
    {
      for (vbits -= tiff_bps; vbits < 0; vbits += bite)
      {
        bitbuf <<= bite;
        for (i = 0; i < bite; i += 8)
          bitbuf |= (unsigned)(fgetc(ifp) << i);
      }
      val = bitbuf << (64 - tiff_bps - vbits) >> (64 - tiff_bps);
      RAW(row, col ^ (load_flags >> 6 & 1)) = val;
      if (load_flags & 1 && (col % 10) == 9 && fgetc(ifp) && row < height + top_margin && col < width + left_margin)
        derror();
    }
    vbits -= rbits;
  }
}
#ifdef LIBRAW_LIBRARY_BUILD
ushort raw_stride;
/*
   Parse the Broadcom (Raspberry Pi) 'BRCM' header: reads geometry,
   computes the 32-byte-aligned packed row stride, and maps the
   header's Bayer-order code to a dcraw filters pattern.
   NOTE(review): the header is fread() directly into the struct, which
   assumes the compiler lays it out without padding — verify on new
   targets.
*/
void CLASS parse_broadcom()
{
  /* This structure is at offset 0xb0 from the 'BRCM' ident. */
  struct
  {
    uint8_t umode[32];
    uint16_t uwidth;
    uint16_t uheight;
    uint16_t padding_right;
    uint16_t padding_down;
    uint32_t unknown_block[6];
    uint16_t transform;
    uint16_t format;
    uint8_t bayer_order;
    uint8_t bayer_format;
  } header;
  header.bayer_order = 0;
  fseek(ifp, 0xb0 - 0x20, SEEK_CUR);
  fread(&header, 1, sizeof(header), ifp);
  // 5 bytes per 4 pixels, rounded up to a 32-byte boundary.
  raw_stride = ((((((header.uwidth + header.padding_right) * 5) + 3) >> 2) + 0x1f) & (~0x1f));
  raw_width = width = header.uwidth;
  raw_height = height = header.uheight;
  filters = 0x16161616; /* default Bayer order is 2, BGGR */
  switch (header.bayer_order)
  {
  case 0: /* RGGB */
    filters = 0x94949494;
    break;
  case 1: /* GBRG */
    filters = 0x49494949;
    break;
  case 3: /* GRBG */
    filters = 0x61616161;
    break;
  }
}
/*
   Broadcom packed raw: 5 bytes hold four 10-bit pixels (four high
   bytes plus one byte of packed low bit pairs).  Byte order is
   reversed in groups of four for little-endian files.
*/
void CLASS broadcom_load_raw()
{
  uchar *data, *dp;
  int rev, row, col, c;
  rev = 3 * (order == 0x4949);
  data = (uchar *)malloc(raw_stride * 2);
  merror(data, "broadcom_load_raw()");
  for (row = 0; row < raw_height; row++)
  {
    if (fread(data + raw_stride, 1, raw_stride, ifp) < raw_stride)
      derror();
    // Unswizzle into the first half of the buffer before unpacking.
    FORC(raw_stride) data[c] = data[raw_stride + (c ^ rev)];
    for (dp = data, col = 0; col < raw_width; dp += 5, col += 4)
      FORC4 RAW(row, col + c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
  }
  free(data);
}
#endif
/*
   Nokia packed raw: same 5-bytes-per-4-pixels layout as the Broadcom
   loader.  For OmniVision sensors, diagonal-correlation statistics on
   the two center rows decide between the two possible green phases of
   the Bayer pattern.
*/
void CLASS nokia_load_raw()
{
  uchar *data, *dp;
  int rev, dwide, row, col, c;
  double sum[] = {0, 0};
  rev = 3 * (order == 0x4949);
  dwide = (raw_width * 5 + 1) / 4;
  data = (uchar *)malloc(dwide * 2);
  merror(data, "nokia_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (row = 0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      if (fread(data + dwide, 1, dwide, ifp) < dwide)
        derror();
      FORC(dwide) data[c] = data[dwide + (c ^ rev)];
      for (dp = data, col = 0; col < raw_width; dp += 5, col += 4)
        FORC4 RAW(row, col + c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(data);
    throw;
  }
#endif
  free(data);
  maximum = 0x3ff;
  if (strncmp(make, "OmniVision", 10))
    return;
  row = raw_height / 2;
  // Compare the two diagonal directions to detect the CFA phase.
  FORC(width - 1)
  {
    sum[c & 1] += SQR(RAW(row, c) - RAW(row + 1, c + 1));
    sum[~c & 1] += SQR(RAW(row + 1, c) - RAW(row, c + 1));
  }
  if (sum[1] > sum[0])
    filters = 0x4b4b4b4b;
}
/*
   Android "tight" packed raw: 5 bytes carry four 10-bit pixels (four
   high bytes followed by a byte of packed low bit pairs); row size is
   rounded up to a 32-pixel multiple.
*/
void CLASS android_tight_load_raw()
{
  int rowbytes = -(-5 * raw_width >> 5) << 3;
  uchar *buf = (uchar *)malloc(rowbytes);
  merror(buf, "android_tight_load_raw()");
  for (int row = 0; row < raw_height; row++)
  {
    if (fread(buf, 1, rowbytes, ifp) < rowbytes)
      derror();
    uchar *dp = buf;
    for (int col = 0; col < raw_width; col += 4, dp += 5)
      for (int c = 0; c < 4; c++)
        RAW(row, col + c) = (dp[c] << 2) | (dp[4] >> (c << 1) & 3);
  }
  free(buf);
}
/*
   Android "loose" packed raw: each 8-byte group (byte-reversed) holds
   six 10-bit pixels in the low 60 bits of a 64-bit word.
*/
void CLASS android_loose_load_raw()
{
  UINT64 pack = 0;
  int rowbytes = (raw_width + 5) / 6 << 3;
  uchar *buf = (uchar *)malloc(rowbytes);
  merror(buf, "android_loose_load_raw()");
  for (int row = 0; row < raw_height; row++)
  {
    if (fread(buf, 1, rowbytes, ifp) < rowbytes)
      derror();
    uchar *dp = buf;
    for (int col = 0; col < raw_width; col += 6, dp += 8)
    {
      pack = 0;
      for (int c = 0; c < 8; c++)
        pack = (pack << 8) | dp[c ^ 7];
      for (int c = 0; c < 6; c++)
        RAW(row, col + c) = (pack >> c * 10) & 0x3ff;
    }
  }
  free(buf);
}
/*
   Canon RMF raw: each 32-bit word packs three 10-bit curve indices.
   Output positions are shifted left by 4 columns, wrapping across the
   row (and two rows up when wrapping past column 0).
*/
void CLASS canon_rmf_load_raw()
{
  int row, col, bits, orow, ocol, c;
#ifdef LIBRAW_LIBRARY_BUILD
  int *words = (int *)malloc(sizeof(int) * (raw_width / 3 + 1));
  merror(words, "canon_rmf_load_raw");
#endif
  for (row = 0; row < raw_height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
    fread(words, sizeof(int), raw_width / 3, ifp);
    for (col = 0; col < raw_width - 2; col += 3)
    {
      bits = words[col / 3];
      FORC3
      {
        orow = row;
        if ((ocol = col + c - 4) < 0)
        {
          // Wrap to the right edge, two rows up (modulo frame height).
          ocol += raw_width;
          if ((orow -= 2) < 0)
            orow += raw_height;
        }
        RAW(orow, ocol) = curve[bits >> (10 * c + 2) & 0x3ff];
      }
    }
#else
    for (col = 0; col < raw_width - 2; col += 3)
    {
      bits = get4();
      FORC3
      {
        orow = row;
        if ((ocol = col + c - 4) < 0)
        {
          ocol += raw_width;
          if ((orow -= 2) < 0)
            orow += raw_height;
        }
        RAW(orow, ocol) = curve[bits >> (10 * c + 2) & 0x3ff];
      }
    }
#endif
  }
#ifdef LIBRAW_LIBRARY_BUILD
  free(words);
#endif
  maximum = curve[0x3ff];
}
/*
   Panasonic stream reader over a 16KB rolling buffer whose halves are
   loaded in load_flags-rotated order.  nb==0 with bytes==NULL resets;
   for pana_encoding 5 it copies 16 raw bytes into bytes[], otherwise
   it returns the next nb bits, consumed backwards through the buffer.
   State lives in TLS in threaded builds (macros below).
*/
unsigned CLASS pana_data(int nb, unsigned *bytes)
{
#ifndef LIBRAW_NOTHREADS
#define vpos tls->pana_data.vpos
#define buf tls->pana_data.buf
#else
  static uchar buf[0x4002];
  static int vpos;
#endif
  int byte;
  if (!nb && !bytes)
    return vpos = 0;
  if (!vpos)
  {
    // Refill: the two halves are swapped according to load_flags.
    fread(buf + load_flags, 1, 0x4000 - load_flags, ifp);
    fread(buf, 1, load_flags, ifp);
  }
  if (pana_encoding == 5)
  {
    for (byte = 0; byte < 16; byte++)
    {
      bytes[byte] = buf[vpos++];
      vpos &= 0x3FFF;
    }
  }
  else
  {
    vpos = (vpos - nb) & 0x1ffff;
    byte = vpos >> 3 ^ 0x3ff0;
    return (buf[byte] | buf[byte + 1] << 8) >> (vpos & 7) & ~((~0u) << nb);
  }
  return 0;
#ifndef LIBRAW_NOTHREADS
#undef vpos
#undef buf
#endif
}
/*
 * Panasonic raw decoder.  Encoding 5 stores pixels in fixed-size byte
 * blocks that are unpacked literally (12- or 14-bit samples); older
 * encodings use a delta-predicted bitstream with a per-pair adaptive
 * shift, processed in groups of 14 columns.
 */
void CLASS panasonic_load_raw()
{
  int row, col, i, j, sh = 0, pred[2], nonz[2];
  unsigned bytes[16];
  ushort *raw_block_data;
  /* 12 bpp packs 10 pixels per 16-byte block, 14 bpp packs 9. */
  int enc_blck_size = pana_bpp == 12 ? 10 : 9;
  pana_data(0, 0); /* reset the stream state */
  if (pana_encoding == 5)
  {
    for (row = 0; row < raw_height; row++)
    {
      raw_block_data = raw_image + row * raw_width;
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col = 0; col < raw_width; col += enc_blck_size)
      {
        pana_data(0, bytes); /* fetch next 16 literal bytes */
        if (pana_bpp == 12)
        {
          /* Little-endian 12-bit unpack: low byte + high nibble pairs. */
          raw_block_data[col] = ((bytes[1] & 0xF) << 8) + bytes[0];
          raw_block_data[col + 1] = 16 * bytes[2] + (bytes[1] >> 4);
          raw_block_data[col + 2] = ((bytes[4] & 0xF) << 8) + bytes[3];
          raw_block_data[col + 3] = 16 * bytes[5] + (bytes[4] >> 4);
          raw_block_data[col + 4] = ((bytes[7] & 0xF) << 8) + bytes[6];
          raw_block_data[col + 5] = 16 * bytes[8] + (bytes[7] >> 4);
          raw_block_data[col + 6] = ((bytes[10] & 0xF) << 8) + bytes[9];
          raw_block_data[col + 7] = 16 * bytes[11] + (bytes[10] >> 4);
          raw_block_data[col + 8] = ((bytes[13] & 0xF) << 8) + bytes[12];
          raw_block_data[col + 9] = 16 * bytes[14] + (bytes[13] >> 4);
        }
        else if (pana_bpp == 14)
        {
          /* 14-bit samples straddle byte boundaries. */
          raw_block_data[col] = bytes[0] + ((bytes[1] & 0x3F) << 8);
          raw_block_data[col + 1] = (bytes[1] >> 6) + 4 * (bytes[2]) +
                                    ((bytes[3] & 0xF) << 10);
          raw_block_data[col + 2] = (bytes[3] >> 4) + 16 * (bytes[4]) +
                                    ((bytes[5] & 3) << 12);
          raw_block_data[col + 3] = ((bytes[5] & 0xFC) >> 2) + (bytes[6] << 6);
          raw_block_data[col + 4] = bytes[7] + ((bytes[8] & 0x3F) << 8);
          raw_block_data[col + 5] = (bytes[8] >> 6) + 4 * bytes[9] + ((bytes[10] & 0xF) << 10);
          raw_block_data[col + 6] = (bytes[10] >> 4) + 16 * bytes[11] + ((bytes[12] & 3) << 12);
          raw_block_data[col + 7] = ((bytes[12] & 0xFC) >> 2) + (bytes[13] << 6);
          raw_block_data[col + 8] = bytes[14] + ((bytes[15] & 0x3F) << 8);
        }
      }
    }
  }
  else
  {
    for (row = 0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col = 0; col < raw_width; col++)
      {
        /* State (predictors and nonzero flags, one per column parity)
           resets every 14 columns. */
        if ((i = col % 14) == 0)
          pred[0] = pred[1] = nonz[0] = nonz[1] = 0;
        if (i % 3 == 2)
          sh = 4 >> (3 - pana_data(2, 0)); /* adaptive shift: 0,1,2 or 4 */
        if (nonz[i & 1])
        {
          if ((j = pana_data(8, 0)))
          {
            /* Apply an 8-bit delta at the current shift, with wraparound. */
            if ((pred[i & 1] -= 0x80 << sh) < 0 || sh == 4)
              pred[i & 1] &= ~((~0u) << sh);
            pred[i & 1] += j << sh;
          }
        }
        else if ((nonz[i & 1] = pana_data(8, 0)) || i > 11)
          pred[i & 1] = nonz[i & 1] << 4 | pana_data(4, 0);
        if ((RAW(row, col) = pred[col & 1]) > 4098 && col < width && row < height)
          derror(); /* decoded value out of the 12-bit range */
      }
    }
  }
}
/*
 * Olympus ORF decoder: Huffman-coded magnitude class plus raw bits, with a
 * per-column-parity carry state (acarry) and a gradient-adaptive spatial
 * predictor using the west/north/northwest neighbours.
 */
void CLASS olympus_load_raw()
{
  ushort huff[4096];
  int row, col, nbits, sign, low, high, i, c, w, n, nw;
  int acarry[2][3], *carry, pred, diff;
  /* Build the static Huffman table: code i (1..12) spans 2048>>i slots. */
  huff[n = 0] = 0xc0c;
  for (i = 12; i--;)
    FORC(2048 >> i) huff[++n] = (i + 1) << 8 | i;
  fseek(ifp, 7, SEEK_CUR);
  getbits(-1); /* reset the bit reader */
  for (row = 0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    memset(acarry, 0, sizeof acarry);
    for (col = 0; col < raw_width; col++)
    {
      carry = acarry[col & 1]; /* separate state per CFA column parity */
      i = 2 * (carry[2] < 3);
      /* nbits tracks the magnitude of the previous residual. */
      for (nbits = 2 + i; (ushort)carry[0] >> (nbits + i); nbits++)
        ;
      low = (sign = getbits(3)) & 3;
      sign = sign << 29 >> 31; /* arithmetic-shift trick: bit 2 -> 0 or -1 */
      if ((high = getbithuff(12, huff)) == 12)
        high = getbits(16 - nbits) >> 1; /* escape: read magnitude directly */
      carry[0] = (high << nbits) | getbits(nbits);
      diff = (carry[0] ^ sign) + carry[1];
      carry[1] = (diff * 3 + carry[1]) >> 5; /* running bias estimate */
      carry[2] = carry[0] > 16 ? 0 : carry[2] + 1;
      if (col >= width)
        continue; /* decode (to keep bit alignment) but discard margin */
      if (row < 2 && col < 2)
        pred = 0;
      else if (row < 2)
        pred = RAW(row, col - 2);
      else if (col < 2)
        pred = RAW(row - 2, col);
      else
      {
        /* Gradient-adaptive predictor on same-color neighbours (2 apart). */
        w = RAW(row, col - 2);
        n = RAW(row - 2, col);
        nw = RAW(row - 2, col - 2);
        if ((w < nw && nw < n) || (n < nw && nw < w))
        {
          if (ABS(w - nw) > 32 || ABS(n - nw) > 32)
            pred = w + n - nw; /* strong edge: planar prediction */
          else
            pred = (w + n) >> 1;
        }
        else
          pred = ABS(w - nw) > ABS(n - nw) ? w : n;
      }
      if ((RAW(row, col) = pred + ((diff << 2) | low)) >> 12)
        derror(); /* result exceeds 12 bits */
    }
  }
}
/*
 * Minolta RD-175 decoder: 1481 blocks of 768 bytes are expanded into a
 * 1534-wide sensor image; odd boxes are horizontally interpolated, even
 * boxes supply every other column.  Rows 984/985 are patched from the
 * trailing blocks via the irow switch below.
 */
void CLASS minolta_rd175_load_raw()
{
  uchar pixel[768];
  unsigned irow, box, row, col;
  for (irow = 0; irow < 1481; irow++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (fread(pixel, 1, 768, ifp) < 768)
      derror();
    box = irow / 82;
    row = irow % 82 * 12 + ((box < 12) ? box | 1 : (box - 12) * 2);
    /* Special-case the last few blocks: two are discarded, three patch
       the final two rows. */
    switch (irow)
    {
    case 1477:
    case 1479:
      continue;
    case 1476:
      row = 984;
      break;
    case 1480:
      row = 985;
      break;
    case 1478:
      row = 985;
      box = 1; /* force the interpolated path below */
    }
    if ((box < 12) && (box & 1))
    {
      /* Odd box: alternate rows, interpolating the missing samples from
         horizontal neighbours; columns 1 and 1533 are fixed up after. */
      for (col = 0; col < 1533; col++, row ^= 1)
        if (col != 1)
          RAW(row, col) = (col + 1) & 2 ? pixel[col / 2 - 1] + pixel[col / 2 + 1] : pixel[col / 2] << 1;
      RAW(row, 1) = pixel[1] << 1;
      RAW(row, 1533) = pixel[765] << 1;
    }
    else
      /* Even box: direct copy into every second column, doubled to match
         the interpolated sums above. */
      for (col = row & 1; col < 1534; col += 2)
        RAW(row, col) = pixel[col / 2] << 1;
  }
  maximum = 0xff << 1;
}
/*
 * Apple QuickTake 100 decoder.  Works in a bordered 484x644 8-bit scratch
 * image: green pixels are predicted from neighbours plus a 4-bit gstep
 * delta, red/blue from a sharpness-dependent 2-bit rstep delta, then the
 * red/blue plane is sharpened and everything is mapped through t_curve[]
 * to 10 bits.
 */
void CLASS quicktake_100_load_raw()
{
  uchar pixel[484][644];
  static const short gstep[16] = {-89, -60, -44, -32, -22, -15, -8, -2, 2, 8, 15, 22, 32, 44, 60, 89};
  static const short rstep[6][4] = {{-3, -1, 1, 3},    {-5, -1, 1, 5},    {-8, -2, 2, 8},
                                    {-13, -3, 3, 13},  {-19, -4, 4, 19},  {-28, -6, 6, 28}};
  /* 8-bit to 10-bit output tone curve. */
  static const short t_curve[256] = {
      0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   11,  12,  13,  14,  15,  16,  17,  18,  19,  20,  21,  22,
      23,  24,  25,  26,  27,  28,  29,  30,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,  45,
      46,  47,  48,  49,  50,  51,  53,  54,  55,  56,  57,  58,  59,  60,  61,  62,  63,  64,  65,  66,  67,  68,
      69,  70,  71,  72,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  86,  88,  90,  92,  94,  97,  99,
      101, 103, 105, 107, 110, 112, 114, 116, 118, 120, 123, 125, 127, 129, 131, 134, 136, 138, 140, 142, 144, 147,
      149, 151, 153, 155, 158, 160, 162, 164, 166, 168, 171, 173, 175, 177, 179, 181, 184, 186, 188, 190, 192, 195,
      197, 199, 201, 203, 205, 208, 210, 212, 214, 216, 218, 221, 223, 226, 230, 235, 239, 244, 248, 252, 257, 261,
      265, 270, 274, 278, 283, 287, 291, 296, 300, 305, 309, 313, 318, 322, 326, 331, 335, 339, 344, 348, 352, 357,
      361, 365, 370, 374, 379, 383, 387, 392, 396, 400, 405, 409, 413, 418, 422, 426, 431, 435, 440, 444, 448, 453,
      457, 461, 466, 470, 474, 479, 483, 487, 492, 496, 500, 508, 519, 531, 542, 553, 564, 575, 587, 598, 609, 620,
      631, 643, 654, 665, 676, 687, 698, 710, 721, 732, 743, 754, 766, 777, 788, 799, 810, 822, 833, 844, 855, 866,
      878, 889, 900, 911, 922, 933, 945, 956, 967, 978, 989, 1001, 1012, 1023};
  int rb, row, col, sharp, val = 0;
#ifdef LIBRAW_LIBRARY_BUILD
  /* The scratch buffer (plus 2-pixel border) only fits up to 640x480. */
  if(width>640 || height > 480)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  getbits(-1);
  memset(pixel, 0x80, sizeof pixel); /* mid-grey border for prediction */
  /* Pass 1: green plane, predicted from three decoded neighbours. */
  for (row = 2; row < height + 2; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 2 + (row & 1); col < width + 2; col += 2)
    {
      val = ((pixel[row - 1][col - 1] + 2 * pixel[row - 1][col + 1] + pixel[row][col - 2]) >> 2) + gstep[getbits(4)];
      pixel[row][col] = val = LIM(val, 0, 255);
      if (col < 4)
        pixel[row][col - 2] = pixel[row + 1][~row & 1] = val; /* seed left border */
      if (row == 2)
        pixel[row - 1][col + 1] = pixel[row - 1][col + 3] = val; /* seed top border */
    }
    pixel[row][col] = val;
  }
  /* Pass 2: red (rb=0) and blue (rb=1) planes with adaptive step size. */
  for (rb = 0; rb < 2; rb++)
    for (row = 2 + rb; row < height + 2; row += 2)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col = 3 - (row & 1); col < width + 2; col += 2)
      {
        if (row < 4 || col < 4)
          sharp = 2;
        else
        {
          /* Local activity picks one of six delta tables. */
          val = ABS(pixel[row - 2][col] - pixel[row][col - 2]) + ABS(pixel[row - 2][col] - pixel[row - 2][col - 2]) +
                ABS(pixel[row][col - 2] - pixel[row - 2][col - 2]);
          sharp = val < 4 ? 0 : val < 8 ? 1 : val < 16 ? 2 : val < 32 ? 3 : val < 48 ? 4 : 5;
        }
        val = ((pixel[row - 2][col] + pixel[row][col - 2]) >> 1) + rstep[sharp][getbits(2)];
        pixel[row][col] = val = LIM(val, 0, 255);
        if (row < 4)
          pixel[row - 2][col + 2] = val;
        if (col < 4)
          pixel[row + 2][col - 2] = val;
      }
    }
  /* Pass 3: sharpen red/blue against horizontal neighbours. */
  for (row = 2; row < height + 2; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 3 - (row & 1); col < width + 2; col += 2)
    {
      val = ((pixel[row][col - 1] + (pixel[row][col] << 2) + pixel[row][col + 1]) >> 1) - 0x100;
      pixel[row][col] = LIM(val, 0, 255);
    }
  }
  /* Pass 4: strip border and expand to 10 bits through the tone curve. */
  for (row = 0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < width; col++)
      RAW(row, col) = t_curve[pixel[row + 2][col + 2]];
  }
  maximum = 0x3ff;
}
/* Fetch one signed token from Huffman tree `tree` (tables built below). */
#define radc_token(tree) ((signed char)getbithuff(8, huff[tree]))
/* Visit the 2x2 group of buffer cells written per decoded token. */
#define FORYX \
  for (y = 1; y < 3; y++) \
    for (x = col + 1; x >= col; x--)
/* Spatial predictor: chroma uses two neighbours, luma a weighted three. */
#define PREDICTOR \
  (c ? (buf[c][y - 1][x] + buf[c][y][x + 1]) / 2 : (buf[c][y - 1][x + 1] + 2 * buf[c][y - 1][x] + buf[c][y][x + 1]) / 4)
#ifdef __GNUC__
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
/* The intentional out-of-pattern buffer walk below trips GCC's aggressive
   loop optimizer; disable it for this function. */
#pragma GCC optimize("no-aggressive-loop-optimizations")
#endif
#endif
/*
 * Kodak RADC decoder (e.g. DC40/DC50): rows are decoded in groups of four
 * using per-channel Huffman trees, token run-lengths and a sliding 3-line
 * prediction buffer, then chroma is reconstructed and the result is mapped
 * through a piecewise-linear curve built from pt[].
 */
void CLASS kodak_radc_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  // All kodak radc images are 768x512
  if (width > 768 || raw_width > 768 || height > 512 || raw_height > 512)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  /* Packed (codelength, value) pairs describing the 18 Huffman trees. */
  static const signed char src[] = {
      1, 1,   2, 3,   3, 4,   4, 2,  5, 7,   6, 5,   7, 6,  7, 8,   1, 0,  2, 1,   3, 3,  3,  4, 4,  5, 2,   6, 7,   7, 6,
      8, 5,   8, 8,   2, 1,   2, 3,  3, 0,   3, 2,   3, 4,  4, 6,   5, 5,  6, 7,   6, 8,  2,  0, 2,  1, 2,   3, 3,   2,
      4, 4,   5, 6,   6, 7,   7, 5,  7, 8,   2, 1,   2, 4,  3, 0,   3, 2,  3, 3,   4, 7,  5,  5, 6,  6, 6,   8, 2,   3,
      3, 1,   3, 2,   3, 4,   3, 5,  3, 6,   4, 7,   5, 0,  5, 8,   2, 3,  2, 6,   3, 0,  3,  1, 4,  4, 4,   5, 4,   7,
      5, 2,   5, 8,   2, 4,   2, 7,  3, 3,   3, 6,   4, 1,  4, 2,   4, 5,  5, 0,   5, 8,  2,  6, 3,  1, 3,   3, 3,   5,
      3, 7,   3, 8,   4, 0,   5, 2,  5, 4,   2, 0,   2, 1,  3, 2,   3, 3,  4, 4,   4, 5,  5,  6, 5,  7, 4,   8, 1,   0,
      2, 2,   2, -2,  1, -3,  1, 3,  2, -17, 2, -5,  2, 5,  2, 17,  2, -7, 2, 2,   2, 9,  2, 18, 2, -18, 2, -9, 2, -2,
      2, 7,   2, -28, 2, 28,  3, -49, 3, -9, 3, 9,   4, 49, 5, -79, 5, 79, 2, -1,  2, 13, 2, 26, 3, 39, 4, -16, 5, 55,
      6, -37, 6, 76,  2, -26, 2, -13, 2, 1,  3, -39, 4, 16, 5, -55, 6, -76, 6, 37};
  ushort huff[19][256];
  int row, col, tree, nreps, rep, step, i, c, s, r, x, y, val;
  short last[3] = {16, 16, 16}, mul[3], buf[3][3][386];
  /* Breakpoints for the output tone curve (input, output pairs). */
  static const ushort pt[] = {0, 0, 1280, 1344, 2320, 3616, 3328, 8000, 4095, 16383, 65535, 16383};
  for (i = 2; i < 12; i += 2)
    for (c = pt[i - 2]; c <= pt[i]; c++)
      curve[c] = (float)(c - pt[i - 2]) / (pt[i] - pt[i - 2]) * (pt[i + 1] - pt[i - 1]) + pt[i - 1] + 0.5;
  /* Expand src[] into canonical Huffman lookup tables. */
  for (s = i = 0; i < sizeof src; i += 2)
    FORC(256 >> src[i])
    ((ushort *)huff)[s++] = src[i] << 8 | (uchar)src[i + 1];
  s = kodak_cbpp == 243 ? 2 : 3;
  FORC(256) huff[18][c] = (8 - s) << 8 | c >> s << s | 1 << (s - 1);
  getbits(-1);
  for (i = 0; i < sizeof(buf) / sizeof(short); i++)
    ((short *)buf)[i] = 2048; /* neutral start value for the predictors */
  for (row = 0; row < height; row += 4)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    FORC3 mul[c] = getbits(6); /* per-channel quantization multipliers */
#ifdef LIBRAW_LIBRARY_BUILD
    if (!mul[0] || !mul[1] || !mul[2])
      throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
    FORC3
    {
      /* Rescale carried-over predictor rows from the previous multiplier. */
      val = ((0x1000000 / last[c] + 0x7ff) >> 12) * mul[c];
      s = val > 65564 ? 10 : 12;
      x = ~((~0u) << (s - 1));
      val <<= 12 - s;
      for (i = 0; i < sizeof(buf[0]) / sizeof(short); i++)
        ((short *)buf[c])[i] = (((short *)buf[c])[i] * val + x) >> s;
      last[c] = mul[c];
      /* Luma (c==0) decodes two 2-row passes, chroma one. */
      for (r = 0; r <= !c; r++)
      {
        buf[c][1][width / 2] = buf[c][2][width / 2] = mul[c] << 7;
        for (tree = 1, col = width / 2; col > 0;)
        {
          if ((tree = radc_token(tree)))
          {
            col -= 2;
            if(col>=0)
            {
              if (tree == 8)
                /* Literal: token value scaled by the multiplier. */
                FORYX buf[c][y][x] = (uchar)radc_token(18) * mul[c];
              else
                FORYX buf[c][y][x] = radc_token(tree + 10) * 16 + PREDICTOR;
            }
          }
          else
            /* Zero token: a run of predicted-only pixels. */
            do
            {
              nreps = (col > 2) ? radc_token(9) + 1 : 1;
              for (rep = 0; rep < 8 && rep < nreps && col > 0; rep++)
              {
                col -= 2;
                if(col>=0)
                  FORYX buf[c][y][x] = PREDICTOR;
                if (rep & 1)
                {
                  step = radc_token(10) << 4;
                  FORYX buf[c][y][x] += step;
                }
              }
            } while (nreps == 9);
        }
        /* Emit the two decoded buffer rows into the raw image. */
        for (y = 0; y < 2; y++)
          for (x = 0; x < width / 2; x++)
          {
            val = (buf[c][y + 1][x] << 4) / mul[c];
            if (val < 0)
              val = 0;
            if (c)
              RAW(row + y * 2 + c - 1, x * 2 + 2 - c) = val;
            else
              RAW(row + r * 2 + y, x * 2 + y) = val;
          }
        /* Slide the bottom prediction row up for the next pass. */
        memcpy(buf[c][0] + !c, buf[c][2], sizeof buf[c][0] - 2 * !c);
      }
    }
    /* Reconstruct the interleaved chroma samples from neighbours. */
    for (y = row; y < row + 4; y++)
      for (x = 0; x < width; x++)
        if ((x + y) & 1)
        {
          r = x ? x - 1 : x + 1;
          s = x + 1 < width ? x + 1 : x - 1;
          val = (RAW(y, x) - 2048) * 2 + (RAW(y, r) + RAW(y, s)) / 2;
          if (val < 0)
            val = 0;
          RAW(y, x) = val;
        }
  }
  for (i = 0; i < height * width; i++)
    raw_image[i] = curve[raw_image[i]];
  maximum = 0x3fff;
}
#undef FORYX
#undef PREDICTOR
#ifdef NO_JPEG
/* libjpeg support compiled out: JPEG-based raw loaders become no-ops. */
void CLASS kodak_jpeg_load_raw() {}
void CLASS lossy_dng_load_raw() {}
#else
#ifndef LIBRAW_LIBRARY_BUILD
/*
 * libjpeg input-source callback used by the dcraw-build Kodak JPEG loader:
 * refills the decoder from ifp, byte-swapping each 16-bit pair because the
 * embedded stream is stored swab'ed.
 */
METHODDEF(boolean)
fill_input_buffer(j_decompress_ptr cinfo)
{
  static uchar jpeg_buffer[4096];
  size_t nbytes;
  nbytes = fread(jpeg_buffer, 1, 4096, ifp);
  swab(jpeg_buffer, jpeg_buffer, nbytes); /* undo the stored byte swap */
  cinfo->src->next_input_byte = jpeg_buffer;
  cinfo->src->bytes_in_buffer = nbytes;
  return TRUE;
}
/*
 * Kodak JPEG-compressed raw (dcraw build): decode the embedded JPEG and
 * redistribute its GBRG-style components into a Bayer mosaic, doubling
 * rows (each decoded scanline feeds two output rows).
 */
void CLASS kodak_jpeg_load_raw()
{
  struct jpeg_decompress_struct cinfo;
  struct jpeg_error_mgr jerr;
  JSAMPARRAY buf;
  JSAMPLE(*pixel)[3];
  int row, col;
  cinfo.err = jpeg_std_error(&jerr);
  jpeg_create_decompress(&cinfo);
  jpeg_stdio_src(&cinfo, ifp);
  /* Override the source callback so input bytes are swab'ed on the fly. */
  cinfo.src->fill_input_buffer = fill_input_buffer;
  jpeg_read_header(&cinfo, TRUE);
  jpeg_start_decompress(&cinfo);
  if ((cinfo.output_width != width) || (cinfo.output_height * 2 != height) || (cinfo.output_components != 3))
  {
    fprintf(stderr, _("%s: incorrect JPEG dimensions\n"), ifname);
    jpeg_destroy_decompress(&cinfo);
    longjmp(failure, 3);
  }
  buf = (*cinfo.mem->alloc_sarray)((j_common_ptr)&cinfo, JPOOL_IMAGE, width * 3, 1);
  while (cinfo.output_scanline < cinfo.output_height)
  {
    row = cinfo.output_scanline * 2;
    jpeg_read_scanlines(&cinfo, buf, 1);
    pixel = (JSAMPLE(*)[3])buf[0];
    for (col = 0; col < width; col += 2)
    {
      /* Greens go to the two diagonal sites (doubled); red/blue sums of
         horizontal neighbours fill the other two. */
      RAW(row + 0, col + 0) = pixel[col + 0][1] << 1;
      RAW(row + 1, col + 1) = pixel[col + 1][1] << 1;
      RAW(row + 0, col + 1) = pixel[col][0] + pixel[col + 1][0];
      RAW(row + 1, col + 0) = pixel[col][2] + pixel[col + 1][2];
    }
  }
  jpeg_finish_decompress(&cinfo);
  jpeg_destroy_decompress(&cinfo);
  maximum = 0xff << 1;
}
#else
/* Minimal libjpeg error-manager wrapper for the LibRaw build. */
struct jpegErrorManager
{
  struct jpeg_error_mgr pub;
};
/* error_exit hook: convert libjpeg's fatal-error path into a C++ throw
   so decode failures unwind through LibRaw's exception handling. */
static void jpegErrorExit(j_common_ptr cinfo)
{
  jpegErrorManager *myerr = (jpegErrorManager *)cinfo->err;
  throw LIBRAW_EXCEPTION_DECODE_JPEG;
}
// LibRaw's Kodak_jpeg_load_raw
/*
 * Kodak JPEG-compressed raw (LibRaw build): reads the whole data_size blob
 * into memory, byte-swaps it, decodes via jpeg_mem_src, and scatters the
 * RGB components into the Bayer mosaic exactly like the dcraw variant.
 * All failures unwind via jpegErrorExit / LIBRAW_EXCEPTION_DECODE_JPEG.
 */
void CLASS kodak_jpeg_load_raw()
{
  if (data_size < 1)
    throw LIBRAW_EXCEPTION_DECODE_JPEG;
  int row, col;
  jpegErrorManager jerr;
  struct jpeg_decompress_struct cinfo;
  cinfo.err = jpeg_std_error(&jerr.pub);
  jerr.pub.error_exit = jpegErrorExit;
  unsigned char *jpg_buf = (unsigned char *)malloc(data_size);
  merror(jpg_buf, "kodak_jpeg_load_raw");
  unsigned char *pixel_buf = (unsigned char *)malloc(width * 3);
  jpeg_create_decompress(&cinfo);
  /* NOTE(review): if this merror() fires (pixel_buf == NULL) it appears to
     throw before the try block below, leaking jpg_buf and the freshly
     created cinfo — verify merror()'s failure behavior. */
  merror(pixel_buf, "kodak_jpeg_load_raw");
  fread(jpg_buf, data_size, 1, ifp);
  swab((char *)jpg_buf, (char *)jpg_buf, data_size); /* stream is stored swapped */
  try
  {
    jpeg_mem_src(&cinfo, jpg_buf, data_size);
    int rc = jpeg_read_header(&cinfo, TRUE);
    if (rc != 1)
      throw LIBRAW_EXCEPTION_DECODE_JPEG;
    jpeg_start_decompress(&cinfo);
    /* Each decoded scanline expands into two Bayer rows. */
    if ((cinfo.output_width != width) || (cinfo.output_height * 2 != height) || (cinfo.output_components != 3))
    {
      throw LIBRAW_EXCEPTION_DECODE_JPEG;
    }
    unsigned char *buf[1];
    buf[0] = pixel_buf;
    while (cinfo.output_scanline < cinfo.output_height)
    {
      checkCancel();
      row = cinfo.output_scanline * 2;
      jpeg_read_scanlines(&cinfo, buf, 1);
      unsigned char(*pixel)[3] = (unsigned char(*)[3])buf[0];
      for (col = 0; col < width; col += 2)
      {
        /* Greens doubled on the diagonal; red/blue neighbour sums fill
           the remaining two mosaic sites. */
        RAW(row + 0, col + 0) = pixel[col + 0][1] << 1;
        RAW(row + 1, col + 1) = pixel[col + 1][1] << 1;
        RAW(row + 0, col + 1) = pixel[col][0] + pixel[col + 1][0];
        RAW(row + 1, col + 0) = pixel[col][2] + pixel[col + 1][2];
      }
    }
  }
  catch (...)
  {
    /* Release libjpeg state and buffers before propagating. */
    jpeg_finish_decompress(&cinfo);
    jpeg_destroy_decompress(&cinfo);
    free(jpg_buf);
    free(pixel_buf);
    throw;
  }
  jpeg_finish_decompress(&cinfo);
  jpeg_destroy_decompress(&cinfo);
  free(jpg_buf);
  free(pixel_buf);
  maximum = 0xff << 1;
}
#endif
#ifndef LIBRAW_LIBRARY_BUILD
void CLASS gamma_curve(double pwr, double ts, int mode, int imax);
#endif
/*
 * Lossy (JPEG-compressed) DNG loader: optionally rebuilds per-channel tone
 * curves from DNG OpcodeList entries (opcode 8 = MapPolynomial) found at
 * meta_offset, then decodes each JPEG tile and writes curve-mapped RGB
 * into image[].
 */
void CLASS lossy_dng_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  struct jpeg_decompress_struct cinfo;
  struct jpeg_error_mgr jerr;
  JSAMPARRAY buf;
  JSAMPLE(*pixel)[3];
  unsigned sorder = order, ntags, opcode, deg, i, j, c;
  unsigned save = data_offset - 4, trow = 0, tcol = 0, row, col;
  ushort cur[3][256];
  double coeff[9], tot;
  if (meta_offset)
  {
    /* Parse the OpcodeList: big-endian, one polynomial per channel. */
    fseek(ifp, meta_offset, SEEK_SET);
    order = 0x4d4d;
    ntags = get4();
    while (ntags--)
    {
      opcode = get4();
      get4();
      get4();
      if (opcode != 8)
      {
        fseek(ifp, get4(), SEEK_CUR); /* skip unrelated opcode payload */
        continue;
      }
      fseek(ifp, 20, SEEK_CUR);
      if ((c = get4()) > 2)
        break; /* channel index out of range */
      fseek(ifp, 12, SEEK_CUR);
      if ((deg = get4()) > 8)
        break; /* polynomial degree out of range */
      for (i = 0; i <= deg && i < 9; i++)
        coeff[i] = getreal(12);
      /* Evaluate the polynomial over 0..1 to build a 16-bit curve. */
      for (i = 0; i < 256; i++)
      {
        for (tot = j = 0; j <= deg; j++)
          tot += coeff[j] * pow(i / 255.0, (int)j);
        cur[c][i] = tot * 0xffff;
      }
    }
    order = sorder; /* restore the caller's byte order */
  }
  else
  {
    /* No opcodes: assume an sRGB-style gamma curve for all channels. */
    gamma_curve(1 / 2.4, 12.92, 1, 255);
    FORC3 memcpy(cur[c], curve, sizeof cur[0]);
  }
  cinfo.err = jpeg_std_error(&jerr);
  jpeg_create_decompress(&cinfo);
  /* Walk the tile table; each entry is a 4-byte offset to a JPEG tile. */
  while (trow < raw_height)
  {
    fseek(ifp, save += 4, SEEK_SET);
    if (tile_length < INT_MAX)
      fseek(ifp, get4(), SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
    if (libraw_internal_data.internal_data.input->jpeg_src(&cinfo) == -1)
    {
      jpeg_destroy_decompress(&cinfo);
      throw LIBRAW_EXCEPTION_DECODE_JPEG;
    }
#else
    jpeg_stdio_src(&cinfo, ifp);
#endif
    jpeg_read_header(&cinfo, TRUE);
    jpeg_start_decompress(&cinfo);
    buf = (*cinfo.mem->alloc_sarray)((j_common_ptr)&cinfo, JPOOL_IMAGE, cinfo.output_width * 3, 1);
#ifdef LIBRAW_LIBRARY_BUILD
    try
    {
#endif
      while (cinfo.output_scanline < cinfo.output_height && (row = trow + cinfo.output_scanline) < height)
      {
#ifdef LIBRAW_LIBRARY_BUILD
        checkCancel();
#endif
        jpeg_read_scanlines(&cinfo, buf, 1);
        pixel = (JSAMPLE(*)[3])buf[0];
        /* Clip the tile at the image's right edge. */
        for (col = 0; col < cinfo.output_width && tcol + col < width; col++)
        {
          FORC3 image[row * width + tcol + col][c] = cur[c][pixel[col][c]];
        }
      }
#ifdef LIBRAW_LIBRARY_BUILD
    }
    catch (...)
    {
      jpeg_destroy_decompress(&cinfo);
      throw;
    }
#endif
    jpeg_abort_decompress(&cinfo);
    /* Advance tile position: left-to-right, then next tile row. */
    if ((tcol += tile_width) >= raw_width)
      trow += tile_length + (tcol = 0);
  }
  jpeg_destroy_decompress(&cinfo);
  maximum = 0xffff;
}
#endif
/*
 * Kodak DC120 loader: each 848-byte scan line is stored circularly
 * rotated by a row-dependent offset; undo the rotation while copying
 * the 8-bit samples into the raw image.
 */
void CLASS kodak_dc120_load_raw()
{
  static const int mulstep[4] = {162, 192, 187, 92};
  static const int addstep[4] = {0, 636, 424, 212};
  uchar line[848];
  int r, c, rot;
  for (r = 0; r < height; r++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    if (fread(line, 1, 848, ifp) < 848)
      derror();
    /* The rotation offset depends on the row with period-4 coefficients. */
    rot = r * mulstep[r & 3] + addstep[r & 3];
    for (c = 0; c < width; c++)
      RAW(r, c) = (ushort)line[(c + rot) % 848];
  }
  maximum = 0xff;
}
/*
 * Generic 8-bit loader: one byte per sample, read row by row and mapped
 * through the tone curve.  Short reads are reported via derror().
 */
void CLASS eight_bit_load_raw()
{
  uchar *line;
  unsigned r, c;
  line = (uchar *)calloc(raw_width, sizeof *line);
  merror(line, "eight_bit_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (r = 0; r < raw_height; r++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      if (fread(line, 1, raw_width, ifp) < raw_width)
        derror();
      for (c = 0; c < raw_width; c++)
        RAW(r, c) = curve[line[c]];
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    /* Don't leak the row buffer when cancellation unwinds. */
    free(line);
    throw;
  }
#endif
  free(line);
  maximum = curve[0xff];
}
/*
 * Kodak C330 loader: 4:2:2-style YCbCr rows (two bytes per pixel; chroma
 * shared across pairs) converted to RGB through curve[].
 */
void CLASS kodak_c330_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  uchar *pixel;
  int row, col, y, cb, cr, rgb[3], c;
  pixel = (uchar *)calloc(raw_width, 2 * sizeof *pixel);
  merror(pixel, "kodak_c330_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (row = 0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      if (fread(pixel, raw_width, 2, ifp) < 2)
        derror();
      /* Skip padding blocks every 32 rows when load_flags requests it. */
      if (load_flags && (row & 31) == 31)
        fseek(ifp, raw_width * 32, SEEK_CUR);
      for (col = 0; col < width; col++)
      {
        /* Luma per pixel; Cb/Cr shared by each 2-pixel group. */
        y = pixel[col * 2];
        cb = pixel[(col * 2 & -4) | 1] - 128;
        cr = pixel[(col * 2 & -4) | 3] - 128;
        rgb[1] = y - ((cb + cr + 2) >> 2);
        rgb[2] = rgb[1] + cb;
        rgb[0] = rgb[1] + cr;
        FORC3 image[row * width + col][c] = curve[LIM(rgb[c], 0, 255)];
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(pixel);
    throw;
  }
#endif
  free(pixel);
  maximum = curve[0xff];
}
/*
 * Kodak C603 loader: YCbCr data where one 3*raw_width read supplies a pair
 * of rows (luma for both rows plus a shared chroma line), converted to RGB
 * through curve[].
 */
void CLASS kodak_c603_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  uchar *pixel;
  int row, col, y, cb, cr, rgb[3], c;
  pixel = (uchar *)calloc(raw_width, 3 * sizeof *pixel);
  merror(pixel, "kodak_c603_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (row = 0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      /* Read a fresh block only on even rows; odd rows reuse it. */
      if (~row & 1)
        if (fread(pixel, raw_width, 3, ifp) < 3)
          derror();
      for (col = 0; col < width; col++)
      {
        y = pixel[width * 2 * (row & 1) + col];
        cb = pixel[width + (col & -2)] - 128;
        cr = pixel[width + (col & -2) + 1] - 128;
        rgb[1] = y - ((cb + cr + 2) >> 2);
        rgb[2] = rgb[1] + cb;
        rgb[0] = rgb[1] + cr;
        FORC3 image[row * width + col][c] = curve[LIM(rgb[c], 0, 255)];
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(pixel);
    throw;
  }
#endif
  free(pixel);
  maximum = curve[0xff];
}
/*
 * Kodak type-262 loader: the file holds per-32-row strips of lossless-JPEG
 * style Huffman residuals; pixels are predicted from the average of two
 * earlier neighbours chosen by checkerboard parity, then mapped via curve[].
 */
void CLASS kodak_262_load_raw()
{
  static const uchar kodak_tree[2][26] = {
      {0, 1, 5, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
      {0, 3, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9}};
  ushort *huff[2];
  uchar *pixel;
  int *strip, ns, c, row, col, chess, pi = 0, pi1, pi2, pred, val;
  FORC(2) huff[c] = make_decoder(kodak_tree[c]);
  ns = (raw_height + 63) >> 5; /* number of strip offsets (over-allocated) */
  /* One buffer: 32 decoded rows followed by the strip-offset table. */
  pixel = (uchar *)malloc(raw_width * 32 + ns * 4);
  merror(pixel, "kodak_262_load_raw()");
  strip = (int *)(pixel + raw_width * 32);
  order = 0x4d4d; /* strip offsets are big-endian */
  FORC(ns) strip[c] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (row = 0; row < raw_height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      if ((row & 31) == 0)
      {
        /* New strip: seek and restart the bit reader and pixel index. */
        fseek(ifp, strip[row >> 5], SEEK_SET);
        getbits(-1);
        pi = 0;
      }
      for (col = 0; col < raw_width; col++)
      {
        /* Pick two same-color neighbours by checkerboard parity, falling
           back toward the left neighbour near the edges. */
        chess = (row + col) & 1;
        pi1 = chess ? pi - 2 : pi - raw_width - 1;
        pi2 = chess ? pi - 2 * raw_width : pi - raw_width + 1;
        if (col <= chess)
          pi1 = -1;
        if (pi1 < 0)
          pi1 = pi2;
        if (pi2 < 0)
          pi2 = pi1;
        if (pi1 < 0 && col > 1)
          pi1 = pi2 = pi - 2;
        pred = (pi1 < 0) ? 0 : (pixel[pi1] + pixel[pi2]) >> 1;
        pixel[pi] = val = pred + ljpeg_diff(huff[chess]);
        if (val >> 8)
          derror(); /* decoded byte out of range */
        val = curve[pixel[pi++]];
        RAW(row, col) = val;
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(pixel);
    throw;
  }
#endif
  free(pixel);
  FORC(2) free(huff[c]);
}
int CLASS kodak_65000_decode(short *out, int bsize)
{
uchar c, blen[768];
ushort raw[6];
INT64 bitbuf = 0;
int save, bits = 0, i, j, len, diff;
save = ftell(ifp);
bsize = (bsize + 3) & -4;
for (i = 0; i < bsize; i += 2)
{
c = fgetc(ifp);
if ((blen[i] = c & 15) > 12 || (blen[i + 1] = c >> 4) > 12)
{
fseek(ifp, save, SEEK_SET);
for (i = 0; i < bsize; i += 8)
{
read_shorts(raw, 6);
out[i] = raw[0] >> 12 << 8 | raw[2] >> 12 << 4 | raw[4] >> 12;
out[i + 1] = raw[1] >> 12 << 8 | raw[3] >> 12 << 4 | raw[5] >> 12;
for (j = 0; j < 6; j++)
out[i + 2 + j] = raw[j] & 0xfff;
}
return 1;
}
}
if ((bsize & 7) == 4)
{
bitbuf = fgetc(ifp) << 8;
bitbuf += fgetc(ifp);
bits = 16;
}
for (i = 0; i < bsize; i++)
{
len = blen[i];
if (bits < len)
{
for (j = 0; j < 32; j += 8)
bitbuf += (INT64)fgetc(ifp) << (bits + (j ^ 8));
bits += 32;
}
diff = bitbuf & (0xffff >> (16 - len));
bitbuf >>= len;
bits -= len;
if ((diff & (1 << (len - 1))) == 0)
diff -= (1 << len) - 1;
out[i] = diff;
}
return 0;
}
/*
 * Kodak 65000 grayscale/Bayer loader: rows are decoded in 256-sample
 * blocks; deltas accumulate into two interleaved predictors (one per
 * column parity), absolute values are used when the decoder fell back
 * to uncompressed mode (ret == 1).
 */
void CLASS kodak_65000_load_raw()
{
  short buf[272]; /* 264 looks enough */
  int row, col, len, pred[2], ret, i;
  for (row = 0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < width; col += 256)
    {
      pred[0] = pred[1] = 0;
      len = MIN(256, width - col);
      ret = kodak_65000_decode(buf, len);
      for (i = 0; i < len; i++)
      {
        /* ret==1: buf holds absolute samples; else accumulate deltas. */
        int idx = ret ? buf[i] : (pred[i & 1] += buf[i]);
        if (idx >= 0 && idx < 0xffff)
        {
          if ((RAW(row, col + i) = curve[idx]) >> 12)
            derror(); /* curve output exceeds 12 bits */
        }
        else
          derror(); /* index outside curve[] */
      }
    }
  }
}
/*
 * Kodak 65000 YCbCr loader: 128-column blocks decode into 2x2 luma groups
 * plus one Cb/Cr delta pair per group; converted to RGB through curve[].
 */
void CLASS kodak_ycbcr_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  short buf[384], *bp;
  int row, col, len, c, i, j, k, y[2][2], cb, cr, rgb[3];
  ushort *ip;
  /* Output bit depth; load_flags may select 10..16 bits. */
  unsigned int bits = (load_flags && load_flags > 9 && load_flags < 17) ? load_flags : 10;
  for (row = 0; row < height; row += 2)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < width; col += 128)
    {
      len = MIN(128, width - col);
      kodak_65000_decode(buf, len * 3); /* 4 luma + 2 chroma per 2x2 */
      y[0][1] = y[1][1] = cb = cr = 0;
      for (bp = buf, i = 0; i < len; i += 2, bp += 2)
      {
        /* Chroma accumulates deltas; elements 4,5 of each group of 6. */
        cb += bp[4];
        cr += bp[5];
        rgb[1] = -((cb + cr + 2) >> 2);
        rgb[2] = rgb[1] + cb;
        rgb[0] = rgb[1] + cr;
        for (j = 0; j < 2; j++)
          for (k = 0; k < 2; k++)
          {
            /* Luma predicts horizontally within each row of the 2x2. */
            if ((y[j][k] = y[j][k ^ 1] + *bp++) >> bits)
              derror();
            ip = image[(row + j) * width + col + i + k];
            FORC3 ip[c] = curve[LIM(y[j][k] + rgb[c], 0, 0xfff)];
          }
      }
    }
  }
}
/*
 * Kodak 65000 RGB loader: 256-pixel blocks of interleaved per-channel
 * deltas (or absolute values when the decoder fell back, ret == 1),
 * written directly into image[].
 */
void CLASS kodak_rgb_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  short buf[768], *bp;
  int row, col, len, c, i, rgb[3], ret;
  ushort *ip = image[0];
  for (row = 0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < width; col += 256)
    {
      len = MIN(256, width - col);
      ret = kodak_65000_decode(buf, len * 3);
      memset(rgb, 0, sizeof rgb); /* predictors reset per block */
      for (bp = buf, i = 0; i < len; i++, ip += 4)
#ifdef LIBRAW_LIBRARY_BUILD
        /* load_flags==12: accept values without the 12-bit range check. */
        if (load_flags == 12)
        {
          FORC3 ip[c] = ret ? (*bp++) : (rgb[c] += *bp++);
        }
        else
#endif
          FORC3 if ((ip[c] = ret ? (*bp++) : (rgb[c] += *bp++)) >> 12) derror();
    }
  }
}
/*
 * Kodak thumbnail loader: reads `colors` shorts per pixel straight into
 * image[] in row-major order.  thumb_misc packs the channel count in its
 * high bits and the bit depth in its low 5 bits.
 */
void CLASS kodak_thumb_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  int i, npix = height * width;
  colors = thumb_misc >> 5;
  /* Row-major walk over every pixel, identical to a row/col double loop. */
  for (i = 0; i < npix; i++)
    read_shorts(image[i], colors);
  maximum = (1 << (thumb_misc & 31)) - 1;
}
/*
 * XOR-decrypt Sony raw data in place.
 *
 * When start is nonzero, (re)seed the 127-word keystream pad from `key`
 * using a linear-congruential expansion; afterwards each 32-bit word of
 * data is XORed with the next pad value of a lagged-Fibonacci-style
 * generator.  Pad state persists between calls (per-thread in LibRaw).
 */
void CLASS sony_decrypt(unsigned *data, int len, int start, int key)
{
#ifndef LIBRAW_NOTHREADS
#define pad tls->sony_decrypt.pad
#define p tls->sony_decrypt.p
#else
  static unsigned pad[128], p;
#endif
  if (start)
  {
    /* Seed the first four words, then expand to 127 with shifted XORs. */
    for (p = 0; p < 4; p++)
      pad[p] = key = key * 48828125 + 1;
    pad[3] = pad[3] << 1 | (pad[0] ^ pad[2]) >> 31;
    for (p = 4; p < 127; p++)
      pad[p] = (pad[p - 4] ^ pad[p - 2]) << 1 | (pad[p - 3] ^ pad[p - 1]) >> 31;
    for (p = 0; p < 127; p++)
      pad[p] = htonl(pad[p]); /* keystream is applied in big-endian order */
  }
  while (len--)
  {
    /* Update the generator and XOR the next keystream word in. */
    *data++ ^= pad[p & 127] = pad[(p + 1) & 127] ^ pad[(p + 65) & 127];
    p++;
  }
#ifndef LIBRAW_NOTHREADS
#undef pad
#undef p
#endif
}
/*
 * Sony (DSC-F828 era) encrypted raw loader: recovers the decryption key
 * from fixed file offsets, then decrypts and byte-swaps each row.
 */
void CLASS sony_load_raw()
{
  uchar head[40];
  ushort *pixel;
  unsigned i, key, row, col;
  /* The key location is indirected through a byte at offset 200896. */
  fseek(ifp, 200896, SEEK_SET);
  fseek(ifp, (unsigned)fgetc(ifp) * 4 - 1, SEEK_CUR);
  order = 0x4d4d;
  key = get4();
  /* Decrypt the 40-byte header to extract the real row key. */
  fseek(ifp, 164600, SEEK_SET);
  fread(head, 1, 40, ifp);
  sony_decrypt((unsigned *)head, 10, 1, key);
  for (i = 26; i-- > 22;)
    key = key << 8 | head[i];
  fseek(ifp, data_offset, SEEK_SET);
  for (row = 0; row < raw_height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    pixel = raw_image + row * raw_width;
    if (fread(pixel, 2, raw_width, ifp) < raw_width)
      derror();
    /* Keystream restarts (start=1) on the first row only. */
    sony_decrypt((unsigned *)pixel, raw_width / 2, !row, key);
    for (col = 0; col < raw_width; col++)
      if ((pixel[col] = ntohs(pixel[col])) >> 14)
        derror(); /* sample exceeds 14 bits */
  }
  maximum = 0x3ff0;
}
/*
 * Sony ARW v1 loader: one shared Huffman table of difference codes; the
 * image is decoded column by column, walking rows in a 2-step interleave
 * (even rows first, then row 1 restarts the odd rows).
 */
void CLASS sony_arw_load_raw()
{
  ushort huff[32770];
  /* (codelength << 8 | value) pairs; each spans 32768 >> length slots. */
  static const ushort tab[18] = {0xf11, 0xf10, 0xe0f, 0xd0e, 0xc0d, 0xb0c, 0xa0b, 0x90a, 0x809,
                                 0x708, 0x607, 0x506, 0x405, 0x304, 0x303, 0x300, 0x202, 0x201};
  int i, c, n, col, row, sum = 0;
  huff[0] = 15;
  for (n = i = 0; i < 18; i++)
    FORC(32768 >> (tab[i] >> 8)) huff[++n] = tab[i];
  getbits(-1);
  for (col = raw_width; col--;)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (row = 0; row < raw_height + 1; row += 2)
    {
      /* After the even rows, wrap around to decode the odd rows. */
      if (row == raw_height)
        row = 1;
      if ((sum += ljpeg_diff(huff)) >> 12)
        derror(); /* running sum exceeds 12 bits */
      if (row < height)
        RAW(row, col) = sum;
    }
  }
}
/*
 * Sony ARW 2.x loader: each 16-byte block encodes 16 same-color pixels
 * (covering 32 columns) as an 11-bit max, 11-bit min, two 4-bit indices
 * locating them, and fourteen 7-bit deltas scaled by a shift derived from
 * max-min.  LibRaw adds raw_processing_options modes that expose base
 * values, deltas, or a posterization metric instead of normal decoding.
 */
void CLASS sony_arw2_load_raw()
{
  uchar *data, *dp;
  ushort pix[16];
  int row, col, val, max, min, imax, imin, sh, bit, i;
  data = (uchar *)malloc(raw_width + 1);
  merror(data, "sony_arw2_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  try
  {
#endif
    for (row = 0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      fread(data, 1, raw_width, ifp);
      for (dp = data, col = 0; col < raw_width - 30; dp += 16)
      {
        /* Unpack the 32-bit block header: max, min and their positions. */
        max = 0x7ff & (val = sget4(dp));
        min = 0x7ff & val >> 11;
        imax = 0x0f & val >> 22;
        imin = 0x0f & val >> 26;
        /* Shift so 7-bit deltas span the max-min range. */
        for (sh = 0; sh < 4 && 0x80 << sh <= max - min; sh++)
          ;
#ifdef LIBRAW_LIBRARY_BUILD
        /* flag checks if outside of loop */
        if (!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_ALLFLAGS) // no flag set
            || (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE))
        {
          /* Standard decode: base min plus scaled deltas. */
          for (bit = 30, i = 0; i < 16; i++)
            if (i == imax)
              pix[i] = max;
            else if (i == imin)
              pix[i] = min;
            else
            {
              pix[i] = ((sget2(dp + (bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
              if (pix[i] > 0x7ff)
                pix[i] = 0x7ff;
              bit += 7;
            }
        }
        else if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_BASEONLY)
        {
          /* Diagnostic: keep only the max/min anchors, zero the deltas. */
          for (bit = 30, i = 0; i < 16; i++)
            if (i == imax)
              pix[i] = max;
            else if (i == imin)
              pix[i] = min;
            else
              pix[i] = 0;
        }
        else if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTAONLY)
        {
          /* Diagnostic: deltas (plus min) only, anchors zeroed. */
          for (bit = 30, i = 0; i < 16; i++)
            if (i == imax)
              pix[i] = 0;
            else if (i == imin)
              pix[i] = 0;
            else
            {
              pix[i] = ((sget2(dp + (bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
              if (pix[i] > 0x7ff)
                pix[i] = 0x7ff;
              bit += 7;
            }
        }
        else if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTAZEROBASE)
        {
          /* Diagnostic: raw scaled deltas without the min offset. */
          for (bit = 30, i = 0; i < 16; i++)
            if (i == imax)
              pix[i] = 0;
            else if (i == imin)
              pix[i] = 0;
            else
            {
              pix[i] = ((sget2(dp + (bit >> 3)) >> (bit & 7) & 0x7f) << sh);
              if (pix[i] > 0x7ff)
                pix[i] = 0x7ff;
              bit += 7;
            }
        }
#else
        /* unaltered dcraw processing */
        for (bit = 30, i = 0; i < 16; i++)
          if (i == imax)
            pix[i] = max;
          else if (i == imin)
            pix[i] = min;
          else
          {
            pix[i] = ((sget2(dp + (bit >> 3)) >> (bit & 7) & 0x7f) << sh) + min;
            if (pix[i] > 0x7ff)
              pix[i] = 0x7ff;
            bit += 7;
          }
#endif
#ifdef LIBRAW_LIBRARY_BUILD
        if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE)
        {
          /* Posterization metric: quantization-step-to-signal ratio per
             pixel, scaled to 0..10000. */
          for (i = 0; i < 16; i++, col += 2)
          {
            unsigned slope = pix[i] < 1001 ? 2 : curve[pix[i] << 1] - curve[(pix[i] << 1) - 2];
            unsigned step = 1 << sh;
            RAW(row, col) = curve[pix[i] << 1] > black + imgdata.params.sony_arw2_posterization_thr
                                ? LIM(((slope * step * 1000) / (curve[pix[i] << 1] - black)), 0, 10000)
                                : 0;
          }
        }
        else
        {
          for (i = 0; i < 16; i++, col += 2)
            RAW(row, col) = curve[pix[i] << 1];
        }
#else
        for (i = 0; i < 16; i++, col += 2)
          RAW(row, col) = curve[pix[i] << 1] >> 2;
#endif
        /* Blocks cover alternating columns: step back to interleave. */
        col -= col & 1 ? 1 : 31;
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    free(data);
    throw;
  }
  if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE)
    maximum = 10000;
#endif
  free(data);
}
/*
 * Samsung (NX-series) SRW loader: each row has its own data offset; pixels
 * come in 16-wide groups with four adaptive code lengths, predicted either
 * vertically (dir) or from two columns left.  Finally the green diagonal
 * is un-swapped.  Out-of-range predictor indices are bounds-checked
 * against maxpixels (LibRaw hardening).
 */
void CLASS samsung_load_raw()
{
  int row, col, c, i, dir, op[4], len[4];
#ifdef LIBRAW_LIBRARY_BUILD
  if(raw_width> 32768 || raw_height > 32768) // definitely too much for old samsung
    throw LIBRAW_EXCEPTION_IO_BADFILE;
#endif
  unsigned maxpixels = raw_width*(raw_height+7);
  order = 0x4949;
  for (row = 0; row < raw_height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    /* Per-row offset table at strip_offset; then restart the bit reader. */
    fseek(ifp, strip_offset + row * 4, SEEK_SET);
    fseek(ifp, data_offset + get4(), SEEK_SET);
    ph1_bits(-1);
    FORC4 len[c] = row < 2 ? 7 : 4;
    for (col = 0; col < raw_width; col += 16)
    {
      dir = ph1_bits(1); /* 1: predict from rows above; 0: from the left */
      /* Four 2-bit opcodes adjust the four code lengths. */
      FORC4 op[c] = ph1_bits(2);
      FORC4 switch (op[c])
      {
      case 3:
        len[c] = ph1_bits(4);
        break;
      case 2:
        len[c]--;
        break;
      case 1:
        len[c]++;
      }
      for (c = 0; c < 16; c += 2)
      {
        i = len[((c & 1) << 1) | (c >> 3)];
        unsigned idest = RAWINDEX(row, col + c);
        unsigned isrc = (dir ? RAWINDEX(row + (~c | -2), col + c) : col ? RAWINDEX(row, col + (c | -2)) : 0);
        if(idest < maxpixels && isrc < maxpixels) // less than zero is handled by unsigned conversion
          /* Sign-extend the i-bit code and add the chosen predictor. */
          RAW(row, col + c) = ((signed)ph1_bits(i) << (32 - i) >> (32 - i)) + (dir ? RAW(row + (~c | -2), col + c) : col ? RAW(row, col + (c | -2)) : 128);
        else
          derror();
        /* Even offsets first, then odd: restart at c = 0|1 after 14. */
        if (c == 14)
          c = -1;
      }
    }
  }
  /* Undo the sensor's swapped green checkerboard. */
  for (row = 0; row < raw_height - 1; row += 2)
    for (col = 0; col < raw_width - 1; col += 2)
      SWAP(RAW(row, col + 1), RAW(row + 1, col));
}
/*
 * Samsung variant 2 loader: plain lossless-JPEG-style decoding with a
 * fixed Huffman table; two vertical predictors seed each row's first two
 * columns, then horizontal prediction per column parity.
 */
void CLASS samsung2_load_raw()
{
  /* (codelength << 8 | value) pairs expanded into huff[] below. */
  static const ushort tab[14] = {0x304, 0x307, 0x206, 0x205, 0x403, 0x600, 0x709,
                                 0x80a, 0x90b, 0xa0c, 0xa0d, 0x501, 0x408, 0x402};
  ushort huff[1026], vpred[2][2] = {{0, 0}, {0, 0}}, hpred[2];
  int i, c, n, row, col, diff;
  huff[0] = 10;
  for (n = i = 0; i < 14; i++)
    FORC(1024 >> (tab[i] >> 8)) huff[++n] = tab[i];
  getbits(-1);
  for (row = 0; row < raw_height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    for (col = 0; col < raw_width; col++)
    {
      diff = ljpeg_diff(huff);
      if (col < 2)
        /* First two columns: vertical prediction, per row parity. */
        hpred[col] = vpred[row & 1][col] += diff;
      else
        hpred[col & 1] += diff;
      RAW(row, col) = hpred[col & 1];
      if (hpred[col & 1] >> tiff_bps)
        derror(); /* value exceeds the declared bit depth */
    }
  }
}
/*
 * Samsung variant 3 loader (e.g. NX1-era compressed raw): rows decode in
 * 16-pixel tables with a shared magnitude scale (mag), a prediction-mode
 * selector (pmode) and per-channel adaptive code lengths.  The string
 * literals below ("204", "120", "0224468", ...) are compact lookup tables
 * indexed by subtracting the character base ('0'..'4').
 */
void CLASS samsung3_load_raw()
{
  int opt, init, mag, pmode, row, tab, col, pred, diff, i, c;
  ushort lent[3][2], len[4], *prow[2];
  order = 0x4949;
  fseek(ifp, 9, SEEK_CUR);
  opt = fgetc(ifp); /* per-file option bits controlling the modes below */
  init = (get2(), get2());
  for (row = 0; row < raw_height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    /* Rows start on 16-byte boundaries relative to data_offset. */
    fseek(ifp, (data_offset - ftell(ifp)) & 15, SEEK_CUR);
    ph1_bits(-1);
    mag = 0;
    pmode = 7;
    FORC(6)((ushort *)lent)[c] = row < 2 ? 7 : 4;
    prow[row & 1] = &RAW(row - 1, 1 - ((row & 1) << 1)); // green
    prow[~row & 1] = &RAW(row - 2, 0);                   // red and blue
    for (tab = 0; tab + 15 < raw_width; tab += 16)
    {
      /* Magnitude scale updates every 64 columns unless opt bit 2 set. */
      if (~opt & 4 && !(tab & 63))
      {
        i = ph1_bits(2);
        mag = i < 3 ? mag - '2' + "204"[i] : ph1_bits(12);
      }
      if (opt & 2)
        pmode = 7 - 4 * ph1_bits(1);
      else if (!ph1_bits(1))
        pmode = ph1_bits(3);
      if (opt & 1 || !ph1_bits(1))
      {
        /* Refresh the four code lengths, tracked per color phase. */
        FORC4 len[c] = ph1_bits(2);
        FORC4
        {
          i = ((row & 1) << 1 | (c & 1)) % 3;
          len[c] = len[c] < 3 ? lent[i][0] - '1' + "120"[len[c]] : ph1_bits(4);
          lent[i][0] = lent[i][1];
          lent[i][1] = len[c];
        }
      }
      FORC(16)
      {
        col = tab + (((c & 7) << 1) ^ (c >> 3) ^ (row & 1));
        /* pmode 7 (or top rows): horizontal; otherwise average two
           neighbours from the previous same-color row, offset by pmode. */
        pred =
            (pmode == 7 || row < 2)
                ? (tab ? RAW(row, tab - 2 + (col & 1)) : init)
                : (prow[col & 1][col - '4' + "0224468"[pmode]] + prow[col & 1][col - '4' + "0244668"[pmode]] + 1) >> 1;
        diff = ph1_bits(i = len[c >> 2]);
        if (diff >> (i - 1))
          diff -= 1 << i; /* sign extension */
        diff = diff * (mag * 2 + 1) + mag; /* apply magnitude scaling */
        RAW(row, col) = pred + diff;
      }
    }
  }
}
/* Test whether raw row `row` is a "hole" row: the 8-bit `holes` mask is
   indexed by the row number (offset by raw_height) modulo 8. */
#define HOLE(row) ((holes >> (((row)-raw_height) & 7)) & 1)
/* Kudos to Rich Taylor for figuring out SMaL's compression algorithm. */
/*
  Decode one SMaL segment into raw_image[seg[0][0] .. seg[1][0]).

  Each pixel is assembled from three symbols decoded with an adaptive
  range coder: `hist[s]` holds, per symbol stream s, a ring of bin
  thresholds (entries 4..) plus bookkeeping (entries 0..3) that is
  rotated/renormalized as symbols are decoded.  The decoded symbols
  form a signed difference `diff` applied to a per-column-parity
  predictor `pred`.  `holes` marks rows where every other pixel pair
  is skipped (filled later by fill_holes()).  The bit-twiddling below
  mirrors the reverse-engineered format exactly; statement order is
  critical.
*/
void CLASS smal_decode_segment(unsigned seg[2][2], int holes)
{
  uchar hist[3][13] = {{7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0},
                       {7, 7, 0, 0, 63, 55, 47, 39, 31, 23, 15, 7, 0},
                       {3, 3, 0, 0, 63, 47, 31, 15, 0}};
  int low, high = 0xff, carry = 0, nbits = 8;
  int pix, s, count, bin, next, i, sym[3];
  uchar diff, pred[] = {0, 0};
  ushort data = 0, range = 0;
  fseek(ifp, seg[0][1] + 1, SEEK_SET);
  getbits(-1);
  // Clamp the segment end so we never write past the frame.
  if (seg[1][0] > raw_width * raw_height)
    seg[1][0] = raw_width * raw_height;
  for (pix = seg[0][0]; pix < seg[1][0]; pix++)
  {
    for (s = 0; s < 3; s++)
    {
      // Refill the 16-bit window and locate the next 0xff marker byte.
      data = data << nbits | getbits(nbits);
      if (carry < 0)
        carry = (nbits += carry + 1) < 1 ? nbits - 1 : 0;
      while (--nbits >= 0)
        if ((data >> nbits & 0xff) == 0xff)
          break;
      if (nbits > 0)
        data = ((data & ((1 << (nbits - 1)) - 1)) << 1) |
               ((data + (((data & (1 << (nbits - 1)))) << 1)) & ((~0u) << nbits));
      if (nbits >= 0)
      {
        data += getbits(1);
        carry = nbits - 8;
      }
      // Map the coded value into a histogram bin.
      count = ((((data - range + 1) & 0xffff) << 2) - 1) / (high >> 4);
      for (bin = 0; hist[s][bin + 5] > count; bin++)
        ;
      low = hist[s][bin + 5] * (high >> 4) >> 2;
      if (bin)
        high = hist[s][bin + 4] * (high >> 4) >> 2;
      high -= low;
      // Renormalize the range to keep `high` >= 128.
      for (nbits = 0; high << nbits < 128; nbits++)
        ;
      range = (range + low) << nbits;
      high <<= nbits;
      // Adapt: rotate the histogram ring and rebalance thresholds.
      next = hist[s][1];
      if (++hist[s][2] > hist[s][3])
      {
        next = (next + 1) & hist[s][0];
        hist[s][3] = (hist[s][next + 4] - hist[s][next + 5]) >> 2;
        hist[s][2] = 1;
      }
      if (hist[s][hist[s][1] + 4] - hist[s][hist[s][1] + 5] > 1)
      {
        if (bin < hist[s][1])
          for (i = bin; i < hist[s][1]; i++)
            hist[s][i + 5]--;
        else if (next <= bin)
          for (i = hist[s][1]; i < bin; i++)
            hist[s][i + 5]++;
      }
      hist[s][1] = next;
      sym[s] = bin;
    }
    // Combine the three symbols into a signed byte difference.
    diff = sym[2] << 5 | sym[1] << 2 | (sym[0] & 3);
    if (sym[0] & 4)
      diff = diff ? -diff : 0x80;
    if (ftell(ifp) + 12 >= seg[1][1])
      diff = 0;
#ifdef LIBRAW_LIBRARY_BUILD
    if (pix >= raw_width * raw_height)
      throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
    raw_image[pix] = pred[pix & 1] += diff;
    // Skip the pixel pair on hole rows; fill_holes() interpolates them later.
    if (!(pix & 1) && HOLE(pix / raw_width))
      pix += 2;
  }
  maximum = 0xff;
}
/*
  Decode a SMaL v6 raw file: the entire image is stored as one
  compressed segment (no hole rows), whose data offset is the 16-bit
  word at file position 16.
*/
void CLASS smal_v6_load_raw()
{
  unsigned seg[2][2];
  seg[0][0] = 0;                         // first pixel index
  seg[1][0] = raw_width * raw_height;    // one past the last pixel
  seg[1][1] = INT_MAX;                   // no end-of-data cutoff
  fseek(ifp, 16, SEEK_SET);
  seg[0][1] = get2();                    // file offset of the bitstream
  smal_decode_segment(seg, 0);
}
/*
  Return the average of the two middle values among four samples.
  The minimum and maximum of p[0..3] are discarded and the remaining
  pair is averaged with a truncating shift, making the result robust
  against a single outlier.
*/
int CLASS median4(int *p)
{
  int lo = p[0], hi = p[0], total = p[0];
  for (int k = 1; k < 4; k++)
  {
    total += p[k];
    if (p[k] < lo)
      lo = p[k];
    if (p[k] > hi)
      hi = p[k];
  }
  return (total - lo - hi) >> 1;
}
/*
  Interpolate the pixels that smal_decode_segment() skipped on "hole"
  rows.  Odd columns are filled from the four diagonal neighbors via
  median4(); even columns use either the horizontal average (when an
  adjacent hole row would make vertical neighbors unreliable) or the
  median of the four straight neighbors two pixels away.
*/
void CLASS fill_holes(int holes)
{
  int row, col, val[4];
  for (row = 2; row < height - 2; row++)
  {
    if (!HOLE(row))
      continue;
    // Odd columns: median of the four diagonal neighbors.
    for (col = 1; col < width - 1; col += 4)
    {
      val[0] = RAW(row - 1, col - 1);
      val[1] = RAW(row - 1, col + 1);
      val[2] = RAW(row + 1, col - 1);
      val[3] = RAW(row + 1, col + 1);
      RAW(row, col) = median4(val);
    }
    // Even columns: horizontal average near other hole rows, else median.
    for (col = 2; col < width - 2; col += 4)
      if (HOLE(row - 2) || HOLE(row + 2))
        RAW(row, col) = (RAW(row, col - 2) + RAW(row, col + 2)) >> 1;
      else
      {
        val[0] = RAW(row, col - 2);
        val[1] = RAW(row, col + 2);
        val[2] = RAW(row - 2, col);
        val[3] = RAW(row + 2, col);
        RAW(row, col) = median4(val);
      }
  }
}
/*
  Decode a SMaL v9 raw file: up to 255 compressed segments (table at
  the offset stored at file position 67), plus an 8-bit hole-row mask
  at position 78.  A sentinel segment is appended so each decode has
  an end bound, then hole rows are interpolated.
*/
void CLASS smal_v9_load_raw()
{
  unsigned seg[256][2], offset, nseg, holes, i;
  fseek(ifp, 67, SEEK_SET);
  offset = get4();
  nseg = (uchar)fgetc(ifp);
  fseek(ifp, offset, SEEK_SET);
  // Segment table entries alternate (start pixel, file offset);
  // only the offsets (odd i) are rebased by data_offset.
  for (i = 0; i < nseg * 2; i++)
    ((unsigned *)seg)[i] = get4() + data_offset * (i & 1);
  fseek(ifp, 78, SEEK_SET);
  holes = fgetc(ifp);
  fseek(ifp, 88, SEEK_SET);
  // Sentinel segment marking the end of the image.
  seg[nseg][0] = raw_height * raw_width;
  seg[nseg][1] = get4() + data_offset;
  for (i = 0; i < nseg; i++)
    smal_decode_segment(seg + i, holes);
  if (holes)
    fill_holes(holes);
}
/*
  Decode RED REDCODE raw frames (JPEG2000 payload via JasPer).

  The four half-resolution Bayer planes are decoded with JasPer and
  scattered into a (height+2) x (width+2) working buffer `img` with a
  one-pixel border.  The border rows/columns are mirrored, then each
  "green-complement" pixel is sharpened against its four neighbors,
  and finally the buffer is mapped through `curve` into RAW().
  Under LIBRAW_LIBRARY_BUILD, cancellation callbacks are converted to
  an exception only after all JasPer resources are released.
*/
void CLASS redcine_load_raw()
{
#ifndef NO_JASPER
  int c, row, col;
  jas_stream_t *in;
  jas_image_t *jimg;
  jas_matrix_t *jmat;
  jas_seqent_t *data;
  ushort *img, *pix;
  jas_init();
#ifndef LIBRAW_LIBRARY_BUILD
  in = jas_stream_fopen(ifname, "rb");
#else
  in = (jas_stream_t *)ifp->make_jas_stream();
  if (!in)
    throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
#endif
  jas_stream_seek(in, data_offset + 20, SEEK_SET);
  jimg = jas_image_decode(in, -1, 0);
#ifndef LIBRAW_LIBRARY_BUILD
  if (!jimg)
    longjmp(failure, 3);
#else
  if (!jimg)
  {
    jas_stream_close(in);
    throw LIBRAW_EXCEPTION_DECODE_JPEG2000;
  }
#endif
  jmat = jas_matrix_create(height / 2, width / 2);
  merror(jmat, "redcine_load_raw()");
  // Working buffer with a one-pixel border on every side.
  img = (ushort *)calloc((height + 2), (width + 2) * 2);
  merror(img, "redcine_load_raw()");
#ifdef LIBRAW_LIBRARY_BUILD
  bool fastexitflag = false;
  try
  {
#endif
    // Scatter each quarter-resolution component into its Bayer positions.
    FORC4
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      jas_image_readcmpt(jimg, c, 0, 0, width / 2, height / 2, jmat);
      data = jas_matrix_getref(jmat, 0, 0);
      for (row = c >> 1; row < height; row += 2)
        for (col = c & 1; col < width; col += 2)
          img[(row + 1) * (width + 2) + col + 1] = data[(row / 2) * (width / 2) + col / 2];
    }
    // Mirror the top/bottom border rows...
    for (col = 1; col <= width; col++)
    {
      img[col] = img[2 * (width + 2) + col];
      img[(height + 1) * (width + 2) + col] = img[(height - 1) * (width + 2) + col];
    }
    // ...and the left/right border columns.
    for (row = 0; row < height + 2; row++)
    {
      img[row * (width + 2)] = img[row * (width + 2) + 2];
      img[(row + 1) * (width + 2) - 1] = img[(row + 1) * (width + 2) - 3];
    }
    // Sharpen every other pixel against its four direct neighbors.
    for (row = 1; row <= height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      pix = img + row * (width + 2) + (col = 1 + (FC(row, 1) & 1));
      for (; col <= width; col += 2, pix += 2)
      {
        c = (((pix[0] - 0x800) << 3) + pix[-(width + 2)] + pix[width + 2] + pix[-1] + pix[1]) >> 2;
        pix[0] = LIM(c, 0, 4095);
      }
    }
    // Map through the tone curve into the output raster.
    for (row = 0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col = 0; col < width; col++)
        RAW(row, col) = curve[img[(row + 1) * (width + 2) + col + 1]];
    }
#ifdef LIBRAW_LIBRARY_BUILD
  }
  catch (...)
  {
    fastexitflag = true;
  }
#endif
  free(img);
  jas_matrix_destroy(jmat);
  jas_image_destroy(jimg);
  jas_stream_close(in);
#ifdef LIBRAW_LIBRARY_BUILD
  if (fastexitflag)
    throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
#endif
}
//@end COMMON
/* RESTRICTED code starts here */
/*
  Build the Foveon Huffman decode tree recursively.

  On the initial call (code == 0) the `size` codewords are read from
  the file into the static `huff` array and the node pool is reset.
  Each recursion level allocates one node from free_decode: if the
  current prefix `code` matches a codeword exactly it becomes a leaf
  holding the symbol index; otherwise two children are built for the
  prefix extended by 0 and 1.  Codewords pack their length in the top
  5 bits and the bits themselves below (max length 26).
*/
void CLASS foveon_decoder(unsigned size, unsigned code)
{
  static unsigned huff[1024];
  struct decode *cur;
  int i, len;
  if (!code)
  {
    for (i = 0; i < size; i++)
      huff[i] = get4();
    memset(first_decode, 0, sizeof first_decode);
    free_decode = first_decode;
  }
  cur = free_decode++;
  // Guard against corrupt tables blowing out the fixed node pool.
  if (free_decode > first_decode + 2048)
  {
    fprintf(stderr, _("%s: decoder table overflow\n"), ifname);
    longjmp(failure, 2);
  }
  if (code)
    for (i = 0; i < size; i++)
      if (huff[i] == code)
      {
        cur->leaf = i;
        return;
      }
  // Stop extending once the prefix length exceeds 26 bits.
  if ((len = code >> 27) > 26)
    return;
  code = (len + 1) << 27 | (code & 0x3ffffff) << 1;
  cur->branch[0] = free_decode;
  foveon_decoder(size, code);
  cur->branch[1] = free_decode;
  foveon_decoder(size, code + 1);
}
/*
  Write the Foveon thumbnail to `ofp` as a binary PPM (P6).

  If the stored row width `bwide` is non-zero the thumbnail is plain
  uncompressed RGB and is copied row by row.  Otherwise it is
  Huffman-compressed: a 256-entry code table is read via
  foveon_decoder() and each RGB channel is decoded as a running
  per-row predictor.
*/
void CLASS foveon_thumb()
{
  unsigned bwide, row, col, bitbuf = 0, bit = 1, c, i;
  char *buf;
  struct decode *dindex;
  short pred[3];
  bwide = get4();
  fprintf(ofp, "P6\n%d %d\n255\n", thumb_width, thumb_height);
  if (bwide > 0)
  {
    // Uncompressed path: each file row holds at least thumb_width RGB triples.
    if (bwide < thumb_width * 3)
      return;
    buf = (char *)malloc(bwide);
    merror(buf, "foveon_thumb()");
    for (row = 0; row < thumb_height; row++)
    {
      fread(buf, 1, bwide, ifp);
      fwrite(buf, 3, thumb_width, ofp);
    }
    free(buf);
    return;
  }
  foveon_decoder(256, 0);
  for (row = 0; row < thumb_height; row++)
  {
    memset(pred, 0, sizeof pred);
    if (!bit)
      get4();
    for (bit = col = 0; col < thumb_width; col++)
      FORC3
      {
        // Walk the decode tree one bit at a time, refilling bitbuf
        // 32 bits at a time from the file.
        for (dindex = first_decode; dindex->branch[0];)
        {
          if ((bit = (bit - 1) & 31) == 31)
            for (i = 0; i < 4; i++)
              bitbuf = (bitbuf << 8) + fgetc(ifp);
          dindex = dindex->branch[bitbuf >> bit & 1];
        }
        pred[c] += dindex->leaf;
        fputc(pred[c], ofp);
      }
  }
}
/*
  Decode Foveon X3 raw data (SD-series cameras) into image[].

  A 1024-entry difference table is read first.  Depending on
  load_flags the data is either stored as packed 3x10-bit indices per
  32-bit word, or Huffman-coded (tree built by foveon_decoder()) with
  per-row running predictors for the three stacked color layers.
*/
void CLASS foveon_sd_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  struct decode *dindex;
  short diff[1024];
  unsigned bitbuf = 0;
  int pred[3], row, col, bit = -1, c, i;
  read_shorts((ushort *)diff, 1024);
  if (!load_flags)
    foveon_decoder(1024, 0);
  for (row = 0; row < height; row++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    checkCancel();
#endif
    memset(pred, 0, sizeof pred);
    // Older models (model suffix < 14) pad each row with a 32-bit word.
    if (!bit && !load_flags && atoi(model + 2) < 14)
      get4();
    for (col = bit = 0; col < width; col++)
    {
      if (load_flags)
      {
        // Packed path: one 32-bit word holds three 10-bit table indices.
        bitbuf = get4();
        FORC3 pred[2 - c] += diff[bitbuf >> c * 10 & 0x3ff];
      }
      else
        FORC3
        {
          // Huffman path: walk the decode tree bit by bit.
          for (dindex = first_decode; dindex->branch[0];)
          {
            if ((bit = (bit - 1) & 31) == 31)
              for (i = 0; i < 4; i++)
                bitbuf = (bitbuf << 8) + fgetc(ifp);
            dindex = dindex->branch[bitbuf >> bit & 1];
          }
          pred[c] += diff[dindex->leaf];
          // Predictor outside 16-bit range indicates corrupt input.
          if (pred[c] >> 16 && ~pred[c] >> 16)
            derror();
        }
      FORC3 image[row * width + col][c] = pred[c];
    }
  }
}
/*
  Read a 13-symbol Huffman table from the file into `huff`.
  huff[0] = 8 is the table's lookup width; for each symbol the file
  supplies a code length and a byte-aligned code prefix, and the entry
  (length << 8 | symbol) is replicated into the 256 >> length slots
  that share that prefix.  A trailing 16-bit word is skipped.
*/
void CLASS foveon_huff(ushort *huff)
{
  huff[0] = 8;
  for (int sym = 0; sym < 13; sym++)
  {
    int clen = getc(ifp);
    int code = getc(ifp);
    int fill = 256 >> clen;
    for (int j = 1; j <= fill; j++)
      huff[code + j] = clen << 8 | sym;
  }
  get2();
}
/*
  Decode Foveon raw data (DP-series / Merrill) into image[].

  The three color planes are stored separately: roff[] gives each
  plane's 16-byte-aligned offset, and each plane is decoded with the
  lossless-JPEG predictor scheme (vertical predictors seeded at 512
  for the first two columns, horizontal accumulation by column parity)
  using the Huffman table read by foveon_huff().
*/
void CLASS foveon_dp_load_raw()
{
#ifdef LIBRAW_LIBRARY_BUILD
  if (!image)
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  unsigned c, roff[4], row, col, diff;
  ushort huff[512], vpred[2][2], hpred[2];
  fseek(ifp, 8, SEEK_CUR);
  foveon_huff(huff);
  roff[0] = 48;
  // Round each plane offset up to a 16-byte boundary.
  FORC3 roff[c + 1] = -(-(roff[c] + get4()) & -16);
  FORC3
  {
    fseek(ifp, data_offset + roff[c], SEEK_SET);
    getbits(-1);
    vpred[0][0] = vpred[0][1] = vpred[1][0] = vpred[1][1] = 512;
    for (row = 0; row < height; row++)
    {
#ifdef LIBRAW_LIBRARY_BUILD
      checkCancel();
#endif
      for (col = 0; col < width; col++)
      {
        diff = ljpeg_diff(huff);
        if (col < 2)
          hpred[col] = vpred[row & 1][col] += diff;
        else
          hpred[col & 1] += diff;
        image[row * width + col][c] = hpred[col & 1];
      }
    }
  }
}
/*
  Load the Foveon CAMF calibration block into meta_data.

  Type 2 blocks are stored XOR-scrambled with a linear-congruential
  keystream (seeded from the header's `high` field) and are decoded
  in place.  Type 4 blocks are Huffman-compressed 12-bit data:
  meta_data is reallocated to wide*high*3/2 bytes and pairs of
  decoded 12-bit values are packed into 3 bytes.
  NOTE(review): the type-2 path fread()s meta_length bytes without
  validating it against the allocation made by the caller — verify
  the caller sizes meta_data from the same field.
*/
void CLASS foveon_load_camf()
{
  unsigned type, wide, high, i, j, row, col, diff;
  ushort huff[258], vpred[2][2] = {{512, 512}, {512, 512}}, hpred[2];
  fseek(ifp, meta_offset, SEEK_SET);
  type = get4();
  get4();
  get4();
  wide = get4();
  high = get4();
  if (type == 2)
  {
    fread(meta_data, 1, meta_length, ifp);
    // XOR-descramble with an LCG keystream seeded by `high`.
    for (i = 0; i < meta_length; i++)
    {
      high = (high * 1597 + 51749) % 244944;
      wide = high * (INT64)301593171 >> 24;
      meta_data[i] ^= ((((high << 8) - wide) >> 1) + wide) >> 17;
    }
  }
  else if (type == 4)
  {
    free(meta_data);
    meta_data = (char *)malloc(meta_length = wide * high * 3 / 2);
    merror(meta_data, "foveon_load_camf()");
    foveon_huff(huff);
    get4();
    getbits(-1);
    for (j = row = 0; row < high; row++)
    {
      for (col = 0; col < wide; col++)
      {
        diff = ljpeg_diff(huff);
        if (col < 2)
          hpred[col] = vpred[row & 1][col] += diff;
        else
          hpred[col & 1] += diff;
        // Pack each pair of 12-bit samples into three bytes.
        if (col & 1)
        {
          meta_data[j++] = hpred[0] >> 4;
          meta_data[j++] = hpred[0] << 4 | hpred[1] >> 8;
          meta_data[j++] = hpred[1];
        }
      }
    }
  }
#ifdef DCRAW_VERBOSE
  else
    fprintf(stderr, _("%s has unknown CAMF type %d.\n"), ifname, type);
#endif
}
/*
  Look up a named parameter inside a CAMF "CMbP" record.

  The decoded meta_data is a sequence of "CMb*" records; each record's
  length lives at offset 8.  Within a parameter record, offset 12
  points at the block name, offset 16 at a table whose first word is
  the entry count, followed by (name offset, value offset) pairs
  relative to the record's data area.  Returns a pointer into
  meta_data, or 0 if not found.
*/
const char *CLASS foveon_camf_param(const char *block, const char *param)
{
  unsigned idx, num;
  char *pos, *cp, *dp;
  for (idx = 0; idx < meta_length; idx += sget4(pos + 8))
  {
    pos = meta_data + idx;
    if (strncmp(pos, "CMb", 3))
      break; // no longer inside valid records
    if (pos[3] != 'P')
      continue; // not a parameter record
    if (strcmp(block, pos + sget4(pos + 12)))
      continue;
    cp = pos + sget4(pos + 16);
    num = sget4(cp);
    dp = pos + sget4(cp + 4);
    while (num--)
    {
      cp += 8;
      if (!strcmp(param, dp + sget4(cp)))
        return dp + sget4(cp + 4);
    }
  }
  return 0;
}
/*
  Extract a named matrix from a CAMF "CMbM" record.

  dim[0..2] receive the (up to 3) dimensions; missing dimensions stay
  1.  Element type 6 means packed 16-bit values, anything else 32-bit.
  Returns a freshly malloc'd unsigned[] the caller must free(), or 0
  if the matrix is absent, has too many dimensions, or its declared
  size exceeds what meta_data could hold (corruption guard).
*/
void *CLASS foveon_camf_matrix(unsigned dim[3], const char *name)
{
  unsigned i, idx, type, ndim, size, *mat;
  char *pos, *cp, *dp;
  double dsize;
  for (idx = 0; idx < meta_length; idx += sget4(pos + 8))
  {
    pos = meta_data + idx;
    if (strncmp(pos, "CMb", 3))
      break;
    if (pos[3] != 'M')
      continue; // not a matrix record
    if (strcmp(name, pos + sget4(pos + 12)))
      continue;
    dim[0] = dim[1] = dim[2] = 1;
    cp = pos + sget4(pos + 16);
    type = sget4(cp);
    if ((ndim = sget4(cp + 4)) > 3)
      break;
    dp = pos + sget4(cp + 8);
    for (i = ndim; i--;)
    {
      cp += 12;
      dim[i] = sget4(cp);
    }
    // Reject sizes that could not possibly fit inside meta_data.
    if ((dsize = (double)dim[0] * dim[1] * dim[2]) > meta_length / 4)
      break;
    mat = (unsigned *)malloc((size = dsize) * 4);
    merror(mat, "foveon_camf_matrix()");
    for (i = 0; i < size; i++)
      if (type && type != 6)
        mat[i] = sget4(dp + i * 4);
      else
        mat[i] = sget4(dp + i * 2) & 0xffff;
    return mat;
  }
#ifdef DCRAW_VERBOSE
  fprintf(stderr, _("%s: \"%s\" matrix not found!\n"), ifname, name);
#endif
  return 0;
}
/*
  Copy a named CAMF matrix into caller-provided storage.
  `size` is counted in 4-byte elements.  Returns 1 on success, 0 when
  `name` is null or no matrix with that name exists.
*/
int CLASS foveon_fixed(void *ptr, int size, const char *name)
{
  unsigned dim[3];
  if (!name)
    return 0;
  void *src = foveon_camf_matrix(dim, name);
  if (!src)
    return 0;
  memcpy(ptr, src, size * 4);
  free(src);
  return 1;
}
/*
  Column-filtered average over a range of dark-shield samples.
  Each sample is taken with stride 4 and corrected by `cfilt` times
  its difference from the previous sample.  For a two-sample range the
  plain mean is returned; otherwise the minimum and maximum corrected
  values are discarded before averaging.
*/
float CLASS foveon_avg(short *pix, int range[2], float cfilt)
{
  float lo = FLT_MAX, hi = -FLT_MAX, total = 0;
  for (int i = range[0]; i <= range[1]; i++)
  {
    float cur = pix[i * 4];
    float v = cur + (cur - pix[(i - 1) * 4]) * cfilt;
    total += v;
    if (v < lo)
      lo = v;
    if (v > hi)
      hi = v;
  }
  if (range[1] - range[0] == 1)
    return total / 2;
  return (total - lo - hi) / (range[1] - range[0] - 1);
}
/*
  Build a soft-saturation tone curve as a malloc'd short array.
  curve[0] holds the table size; curve[1..size] follow a raised-cosine
  times tanh shape scaled by `mul`.  `filt` controls the table density
  (default 0.8).  Caller owns the returned buffer.
*/
short *CLASS foveon_make_curve(double max, double mul, double filt)
{
  short *curve;
  unsigned i, size;
  double x;
  if (!filt)
    filt = 0.8;
  size = 4 * M_PI * max / filt;
  // Avoid size+1 wrapping to zero in the calloc below.
  if (size == UINT_MAX)
    size--;
  curve = (short *)calloc(size + 1, sizeof *curve);
  merror(curve, "foveon_make_curve()");
  curve[0] = size;
  for (i = 0; i < size; i++)
  {
    x = i * filt / max / 4;
    curve[i + 1] = (cos(x) + 1) / 2 * tanh(i * filt / mul) * mul + 0.5;
  }
  return curve;
}
/*
  Build one tone curve per color channel.  All three curves share the
  same domain (the largest dq/div ratio) but each is scaled by its own
  channel ratio.
*/
void CLASS foveon_make_curves(short **curvep, float dq[3], float div[3], float filt)
{
  double ratio[3], peak = 0;
  int c;
  FORC3
  {
    ratio[c] = dq[c] / div[c];
    if (peak < ratio[c])
      peak = ratio[c];
  }
  FORC3 curvep[c] = foveon_make_curve(peak, ratio[c], filt);
}
/*
  Look up a signed value in a tone curve built by foveon_make_curve().
  curve[0] is the table size; inputs at or beyond it map to 0.
  Negative inputs use the same table mirrored, with the result negated.
*/
int CLASS foveon_apply_curve(short *curve, int i)
{
  if (abs(i) >= curve[0])
    return 0;
  if (i < 0)
    return -curve[1 - i];
  return curve[1 + i];
}
/* Reinterpret the global image buffer as signed shorts for this function:
   Foveon processing works on signed intermediate values. */
#define image ((short(*)[4])image)
/*
  Full Foveon X3 processing pipeline (Sigma SD cameras).

  Stages, in order: load CAMF calibration; measure dark-shield drift;
  derive the white-balance / color transform from CAMF matrices;
  build tone curves; estimate and subtract per-row black levels;
  apply spatial gain and the polynomial color correction per pixel;
  patch bad pixels; sharpen red; fix highlight linearity; smooth
  chroma at full and quarter scale; transform to the output
  colorspace; and finally trim the black border.  The arithmetic is
  fixed-point throughout (mostly 14-bit shifts) and statement order
  is load-bearing; treat this as a faithful transcription of the
  original dcraw algorithm.
*/
void CLASS foveon_interpolate()
{
  static const short hood[] = {-1, -1, -1, 0, -1, 1, 0, -1, 0, 1, 1, -1, 1, 0, 1, 1};
  short *pix, prev[3], *curve[8], (*shrink)[3];
  float cfilt = 0, ddft[3][3][2], ppm[3][3][3];
  float cam_xyz[3][3], correct[3][3], last[3][3], trans[3][3];
  float chroma_dq[3], color_dq[3], diag[3][3], div[3];
  float(*black)[3], (*sgain)[3], (*sgrow)[3];
  float fsum[3], val, frow, num;
  int row, col, c, i, j, diff, sgx, irow, sum, min, max, limit;
  int dscr[2][2], dstb[4], (*smrow[7])[3], total[4], ipix[3];
  int work[3][3], smlast, smred, smred_p = 0, dev[3];
  int satlev[3], keep[4], active[4];
  unsigned dim[3], *badpix;
  double dsum = 0, trsum[3];
  char str[128];
  const char *cp;
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Foveon interpolation...\n"));
#endif
  // Pull all calibration tables from the CAMF block.
  foveon_load_camf();
  foveon_fixed(dscr, 4, "DarkShieldColRange");
  foveon_fixed(ppm[0][0], 27, "PostPolyMatrix");
  foveon_fixed(satlev, 3, "SaturationLevel");
  foveon_fixed(keep, 4, "KeepImageArea");
  foveon_fixed(active, 4, "ActiveImageArea");
  foveon_fixed(chroma_dq, 3, "ChromaDQ");
  foveon_fixed(color_dq, 3, foveon_camf_param("IncludeBlocks", "ColorDQ") ? "ColorDQ" : "ColorDQCamRGB");
  if (foveon_camf_param("IncludeBlocks", "ColumnFilter"))
    foveon_fixed(&cfilt, 1, "ColumnFilter");
  // Dark drift: either from CAMF or measured from the shield areas.
  memset(ddft, 0, sizeof ddft);
  if (!foveon_camf_param("IncludeBlocks", "DarkDrift") || !foveon_fixed(ddft[1][0], 12, "DarkDrift"))
    for (i = 0; i < 2; i++)
    {
      foveon_fixed(dstb, 4, i ? "DarkShieldBottom" : "DarkShieldTop");
      for (row = dstb[1]; row <= dstb[3]; row++)
        for (col = dstb[0]; col <= dstb[2]; col++)
          FORC3 ddft[i + 1][c][1] += (short)image[row * width + col][c];
      FORC3 ddft[i + 1][c][1] /= (dstb[3] - dstb[1] + 1) * (dstb[2] - dstb[0] + 1);
    }
  if (!(cp = foveon_camf_param("WhiteBalanceIlluminants", model2)))
  {
#ifdef DCRAW_VERBOSE
    fprintf(stderr, _("%s: Invalid white balance \"%s\"\n"), ifname, model2);
#endif
    return;
  }
  foveon_fixed(cam_xyz, 9, cp);
  foveon_fixed(correct, 9, foveon_camf_param("WhiteBalanceCorrections", model2));
  // last = correct * cam_xyz
  memset(last, 0, sizeof last);
  for (i = 0; i < 3; i++)
    for (j = 0; j < 3; j++)
      FORC3 last[i][j] += correct[i][c] * cam_xyz[c][j];
// Cofactor matrix of `last` (unnormalized inverse).
#define LAST(x, y) last[(i + x) % 3][(c + y) % 3]
  for (i = 0; i < 3; i++)
    FORC3 diag[c][i] = LAST(1, 1) * LAST(2, 2) - LAST(1, 2) * LAST(2, 1);
#undef LAST
  FORC3 div[c] = diag[c][0] * 0.3127 + diag[c][1] * 0.329 + diag[c][2] * 0.3583;
  sprintf(str, "%sRGBNeutral", model2);
  if (foveon_camf_param("IncludeBlocks", str))
    foveon_fixed(div, 3, str);
  num = 0;
  FORC3 if (num < div[c]) num = div[c];
  FORC3 div[c] /= num;
  // trans = rgb_cam * last, scaled by the per-channel divisors,
  // then renormalized so each row keeps a fixed luminance weight.
  memset(trans, 0, sizeof trans);
  for (i = 0; i < 3; i++)
    for (j = 0; j < 3; j++)
      FORC3 trans[i][j] += rgb_cam[i][c] * last[c][j] * div[j];
  FORC3 trsum[c] = trans[c][0] + trans[c][1] + trans[c][2];
  dsum = (6 * trsum[0] + 11 * trsum[1] + 3 * trsum[2]) / 20;
  for (i = 0; i < 3; i++)
    FORC3 last[i][c] = trans[i][c] * dsum / trsum[i];
  memset(trans, 0, sizeof trans);
  for (i = 0; i < 3; i++)
    for (j = 0; j < 3; j++)
      FORC3 trans[i][j] += (i == c ? 32 : -1) * last[c][j] / 30;
  // Tone curves: 0-2 color, 3-5 chroma, 6-7 combined.
  foveon_make_curves(curve, color_dq, div, cfilt);
  FORC3 chroma_dq[c] /= 3;
  foveon_make_curves(curve + 3, chroma_dq, div, cfilt);
  FORC3 dsum += chroma_dq[c] / div[c];
  curve[6] = foveon_make_curve(dsum, dsum, cfilt);
  curve[7] = foveon_make_curve(dsum * 2, dsum * 2, cfilt);
  sgain = (float(*)[3])foveon_camf_matrix(dim, "SpatialGain");
  if (!sgain)
    return;
  sgrow = (float(*)[3])calloc(dim[1], sizeof *sgrow);
  sgx = (width + dim[1] - 2) / (dim[1] - 1);
  // Per-row black level estimated from the dark-shield columns.
  black = (float(*)[3])calloc(height, sizeof *black);
  for (row = 0; row < height; row++)
  {
    for (i = 0; i < 6; i++)
      ((float *)ddft[0])[i] =
          ((float *)ddft[1])[i] + row / (height - 1.0) * (((float *)ddft[2])[i] - ((float *)ddft[1])[i]);
    FORC3 black[row][c] = (foveon_avg(image[row * width] + c, dscr[0], cfilt) +
                           foveon_avg(image[row * width] + c, dscr[1], cfilt) * 3 - ddft[0][c][0]) /
                              4 -
                          ddft[0][c][1];
  }
  memcpy(black, black + 8, sizeof *black * 8);
  memcpy(black + height - 11, black + height - 22, 11 * sizeof *black);
  memcpy(last, black, sizeof last);
  // Clamp black-level spikes toward their vertical neighbors.
  for (row = 1; row < height - 1; row++)
  {
    FORC3 if (last[1][c] > last[0][c])
    {
      if (last[1][c] > last[2][c])
        black[row][c] = (last[0][c] > last[2][c]) ? last[0][c] : last[2][c];
    }
    else if (last[1][c] < last[2][c]) black[row][c] = (last[0][c] < last[2][c]) ? last[0][c] : last[2][c];
    memmove(last, last + 1, 2 * sizeof last[0]);
    memcpy(last[2], black[row + 1], sizeof last[2]);
  }
  FORC3 black[row][c] = (last[0][c] + last[1][c]) / 2;
  FORC3 black[0][c] = (black[1][c] + black[3][c]) / 2;
  // Exponential smoothing of the black levels, forward then backward.
  val = 1 - exp(-1 / 24.0);
  memcpy(fsum, black, sizeof fsum);
  for (row = 1; row < height; row++)
    FORC3 fsum[c] += black[row][c] = (black[row][c] - black[row - 1][c]) * val + black[row - 1][c];
  memcpy(last[0], black[height - 1], sizeof last[0]);
  FORC3 fsum[c] /= height;
  for (row = height; row--;)
    FORC3 last[0][c] = black[row][c] = (black[row][c] - fsum[c] - last[0][c]) * val + last[0][c];
  memset(total, 0, sizeof total);
  for (row = 2; row < height; row += 4)
    for (col = 2; col < width; col += 4)
    {
      FORC3 total[c] += (short)image[row * width + col][c];
      total[3]++;
    }
  for (row = 0; row < height; row++)
    FORC3 black[row][c] += fsum[c] / 2 + total[c] / (total[3] * 100.0);
  // Main per-pixel pass: dark drift, column filter, black subtraction,
  // polynomial color correction and spatial gain.
  for (row = 0; row < height; row++)
  {
    for (i = 0; i < 6; i++)
      ((float *)ddft[0])[i] =
          ((float *)ddft[1])[i] + row / (height - 1.0) * (((float *)ddft[2])[i] - ((float *)ddft[1])[i]);
    pix = image[row * width];
    memcpy(prev, pix, sizeof prev);
    frow = row / (height - 1.0) * (dim[2] - 1);
    if ((irow = frow) == dim[2] - 1)
      irow--;
    frow -= irow;
    // Interpolate the spatial-gain row for this image row.
    for (i = 0; i < dim[1]; i++)
      FORC3 sgrow[i][c] = sgain[irow * dim[1] + i][c] * (1 - frow) + sgain[(irow + 1) * dim[1] + i][c] * frow;
    for (col = 0; col < width; col++)
    {
      FORC3
      {
        diff = pix[c] - prev[c];
        prev[c] = pix[c];
        ipix[c] = pix[c] + floor((diff + (diff * diff >> 14)) * cfilt - ddft[0][c][1] -
                                 ddft[0][c][0] * ((float)col / width - 0.5) - black[row][c]);
      }
      // Quadratic/cubic/cross terms for the 3x3x3 polynomial matrix.
      FORC3
      {
        work[0][c] = ipix[c] * ipix[c] >> 14;
        work[2][c] = ipix[c] * work[0][c] >> 14;
        work[1][2 - c] = ipix[(c + 1) % 3] * ipix[(c + 2) % 3] >> 14;
      }
      FORC3
      {
        for (val = i = 0; i < 3; i++)
          for (j = 0; j < 3; j++)
            val += ppm[c][i][j] * work[i][j];
        ipix[c] =
            floor((ipix[c] + floor(val)) *
                  (sgrow[col / sgx][c] * (sgx - col % sgx) + sgrow[col / sgx + 1][c] * (col % sgx)) / sgx / div[c]);
        if (ipix[c] > 32000)
          ipix[c] = 32000;
        pix[c] = ipix[c];
      }
      pix += 4;
    }
  }
  free(black);
  free(sgrow);
  free(sgain);
  // Patch bad pixels listed in CAMF using their flagged neighbors.
  if ((badpix = (unsigned *)foveon_camf_matrix(dim, "BadPixels")))
  {
    for (i = 0; i < dim[0]; i++)
    {
      col = (badpix[i] >> 8 & 0xfff) - keep[0];
      row = (badpix[i] >> 20) - keep[1];
      if ((unsigned)(row - 1) > height - 3 || (unsigned)(col - 1) > width - 3)
        continue;
      memset(fsum, 0, sizeof fsum);
      for (sum = j = 0; j < 8; j++)
        if (badpix[i] & (1 << j))
        {
          FORC3 fsum[c] += (short)image[(row + hood[j * 2]) * width + col + hood[j * 2 + 1]][c];
          sum++;
        }
      if (sum)
        FORC3 image[row * width + col][c] = fsum[c] / sum;
    }
    free(badpix);
  }
  /* Array for 5x5 Gaussian averaging of red values */
  smrow[6] = (int(*)[3])calloc(width * 5, sizeof **smrow);
  merror(smrow[6], "foveon_interpolate()");
  for (i = 0; i < 5; i++)
    smrow[i] = smrow[6] + i * width;
  /* Sharpen the reds against these Gaussian averages */
  for (smlast = -1, row = 2; row < height - 2; row++)
  {
    while (smlast < row + 2)
    {
      for (i = 0; i < 6; i++)
        smrow[(i + 5) % 6] = smrow[i];
      pix = image[++smlast * width + 2];
      for (col = 2; col < width - 2; col++)
      {
        smrow[4][col][0] = (pix[0] * 6 + (pix[-4] + pix[4]) * 4 + pix[-8] + pix[8] + 8) >> 4;
        pix += 4;
      }
    }
    pix = image[row * width + 2];
    for (col = 2; col < width - 2; col++)
    {
      smred = (6 * smrow[2][col][0] + 4 * (smrow[1][col][0] + smrow[3][col][0]) + smrow[0][col][0] + smrow[4][col][0] +
               8) >>
              4;
      if (col == 2)
        smred_p = smred;
      i = pix[0] + ((pix[0] - ((smred * 7 + smred_p) >> 3)) >> 3);
      if (i > 32000)
        i = 32000;
      pix[0] = i;
      smred_p = smred;
      pix += 4;
    }
  }
  /* Adjust the brighter pixels for better linearity */
  min = 0xffff;
  FORC3
  {
    i = satlev[c] / div[c];
    if (min > i)
      min = i;
  }
  limit = min * 9 >> 4;
  for (pix = image[0]; pix < image[height * width]; pix += 4)
  {
    if (pix[0] <= limit || pix[1] <= limit || pix[2] <= limit)
      continue;
    min = max = pix[0];
    for (c = 1; c < 3; c++)
    {
      if (min > pix[c])
        min = pix[c];
      if (max < pix[c])
        max = pix[c];
    }
    if (min >= limit * 2)
    {
      pix[0] = pix[1] = pix[2] = max;
    }
    else
    {
      // Blend toward the max channel as min approaches the clip point.
      i = 0x4000 - ((min - limit) << 14) / limit;
      i = 0x4000 - (i * i >> 14);
      i = i * i >> 14;
      FORC3 pix[c] += (max - pix[c]) * i >> 14;
    }
  }
  /*
     Because photons that miss one detector often hit another,
     the sum R+G+B is much less noisy than the individual colors.
     So smooth the hues without smoothing the total.
  */
  for (smlast = -1, row = 2; row < height - 2; row++)
  {
    while (smlast < row + 2)
    {
      for (i = 0; i < 6; i++)
        smrow[(i + 5) % 6] = smrow[i];
      pix = image[++smlast * width + 2];
      for (col = 2; col < width - 2; col++)
      {
        FORC3 smrow[4][col][c] = (pix[c - 4] + 2 * pix[c] + pix[c + 4] + 2) >> 2;
        pix += 4;
      }
    }
    pix = image[row * width + 2];
    for (col = 2; col < width - 2; col++)
    {
      FORC3 dev[c] =
          -foveon_apply_curve(curve[7], pix[c] - ((smrow[1][col][c] + 2 * smrow[2][col][c] + smrow[3][col][c]) >> 2));
      sum = (dev[0] + dev[1] + dev[2]) >> 3;
      FORC3 pix[c] += dev[c] - sum;
      pix += 4;
    }
  }
  for (smlast = -1, row = 2; row < height - 2; row++)
  {
    while (smlast < row + 2)
    {
      for (i = 0; i < 6; i++)
        smrow[(i + 5) % 6] = smrow[i];
      pix = image[++smlast * width + 2];
      for (col = 2; col < width - 2; col++)
      {
        FORC3 smrow[4][col][c] = (pix[c - 8] + pix[c - 4] + pix[c] + pix[c + 4] + pix[c + 8] + 2) >> 2;
        pix += 4;
      }
    }
    pix = image[row * width + 2];
    for (col = 2; col < width - 2; col++)
    {
      for (total[3] = 375, sum = 60, c = 0; c < 3; c++)
      {
        for (total[c] = i = 0; i < 5; i++)
          total[c] += smrow[i][col][c];
        total[3] += total[c];
        sum += pix[c];
      }
      if (sum < 0)
        sum = 0;
      j = total[3] > 375 ? (sum << 16) / total[3] : sum * 174;
      FORC3 pix[c] += foveon_apply_curve(curve[6], ((j * total[c] + 0x8000) >> 16) - pix[c]);
      pix += 4;
    }
  }
  /* Transform the image to a different colorspace */
  for (pix = image[0]; pix < image[height * width]; pix += 4)
  {
    FORC3 pix[c] -= foveon_apply_curve(curve[c], pix[c]);
    sum = (pix[0] + pix[1] + pix[1] + pix[2]) >> 2;
    FORC3 pix[c] -= foveon_apply_curve(curve[c], pix[c] - sum);
    FORC3
    {
      for (dsum = i = 0; i < 3; i++)
        dsum += trans[c][i] * pix[i];
      if (dsum < 0)
        dsum = 0;
      if (dsum > 24000)
        dsum = 24000;
      ipix[c] = dsum + 0.5;
    }
    FORC3 pix[c] = ipix[c];
  }
  /* Smooth the image bottom-to-top and save at 1/4 scale */
  shrink = (short(*)[3])calloc((height / 4), (width / 4) * sizeof *shrink);
  merror(shrink, "foveon_interpolate()");
  for (row = height / 4; row--;)
    for (col = 0; col < width / 4; col++)
    {
      ipix[0] = ipix[1] = ipix[2] = 0;
      for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
          FORC3 ipix[c] += image[(row * 4 + i) * width + col * 4 + j][c];
      FORC3
      if (row + 2 > height / 4)
        shrink[row * (width / 4) + col][c] = ipix[c] >> 4;
      else
        shrink[row * (width / 4) + col][c] =
            (shrink[(row + 1) * (width / 4) + col][c] * 1840 + ipix[c] * 141 + 2048) >> 12;
    }
  /* From the 1/4-scale image, smooth right-to-left */
  for (row = 0; row < (height & ~3); row++)
  {
    ipix[0] = ipix[1] = ipix[2] = 0;
    if ((row & 3) == 0)
      for (col = width & ~3; col--;)
        FORC3 smrow[0][col][c] = ipix[c] =
            (shrink[(row / 4) * (width / 4) + col / 4][c] * 1485 + ipix[c] * 6707 + 4096) >> 13;
    /* Then smooth left-to-right */
    ipix[0] = ipix[1] = ipix[2] = 0;
    for (col = 0; col < (width & ~3); col++)
      FORC3 smrow[1][col][c] = ipix[c] = (smrow[0][col][c] * 1485 + ipix[c] * 6707 + 4096) >> 13;
    /* Smooth top-to-bottom */
    if (row == 0)
      memcpy(smrow[2], smrow[1], sizeof **smrow * width);
    else
      for (col = 0; col < (width & ~3); col++)
        FORC3 smrow[2][col][c] = (smrow[2][col][c] * 6707 + smrow[1][col][c] * 1485 + 4096) >> 13;
    /* Adjust the chroma toward the smooth values */
    for (col = 0; col < (width & ~3); col++)
    {
      for (i = j = 30, c = 0; c < 3; c++)
      {
        i += smrow[2][col][c];
        j += image[row * width + col][c];
      }
      j = (j << 16) / i;
      for (sum = c = 0; c < 3; c++)
      {
        ipix[c] =
            foveon_apply_curve(curve[c + 3], ((smrow[2][col][c] * j + 0x8000) >> 16) - image[row * width + col][c]);
        sum += ipix[c];
      }
      sum >>= 3;
      FORC3
      {
        i = image[row * width + col][c] + ipix[c] - sum;
        if (i < 0)
          i = 0;
        image[row * width + col][c] = i;
      }
    }
  }
  free(shrink);
  free(smrow[6]);
  for (i = 0; i < 8; i++)
    free(curve[i]);
  /* Trim off the black border */
  active[1] -= keep[1];
  active[3] -= 2;
  i = active[2] - active[0];
  for (row = 0; row < active[3] - active[1]; row++)
    memcpy(image[row * i], image[(row + active[1]) * width + active[0]], i * sizeof *image);
  width = i;
  height = row;
}
#undef image
/* RESTRICTED code ends here */
//@out COMMON
/*
  Measure the optically-masked (black) border pixels and derive the
  per-channel black levels.

  The mask[] rectangles are either preset by the loader or derived
  here per decoder (the goto chains set up left/right side strips).
  Every masked pixel is accumulated into mblack[c] (sums) and
  mblack[4+c] (counts) per CFA color, counting zero-valued samples
  in `zero`.  If enough non-zero samples exist, cblack[0..3] become
  the per-channel averages.  In the dcraw (non-LibRaw) build this
  function also crops the visible image out of the raw frame first.
*/
void CLASS crop_masked_pixels()
{
  int row, col;
  unsigned
#ifndef LIBRAW_LIBRARY_BUILD
      r,
      raw_pitch = raw_width * 2, c, m, mblack[8], zero, val;
#else
      c,
      m, zero, val;
// LibRaw exposes the black statistics through imgdata instead of a local.
#define mblack imgdata.color.black_stat
#endif
#ifndef LIBRAW_LIBRARY_BUILD
  if (load_raw == &CLASS phase_one_load_raw || load_raw == &CLASS phase_one_load_raw_c)
    phase_one_correct();
  if (fuji_width)
  {
    // Fuji Super CCD: rotate the 45-degree sensor grid into BAYER().
    for (row = 0; row < raw_height - top_margin * 2; row++)
    {
      for (col = 0; col < fuji_width << !fuji_layout; col++)
      {
        if (fuji_layout)
        {
          r = fuji_width - 1 - col + (row >> 1);
          c = col + ((row + 1) >> 1);
        }
        else
        {
          r = fuji_width - 1 + row - (col >> 1);
          c = row + ((col + 1) >> 1);
        }
        if (r < height && c < width)
          BAYER(r, c) = RAW(row + top_margin, col + left_margin);
      }
    }
  }
  else
  {
    for (row = 0; row < height; row++)
      for (col = 0; col < width; col++)
        BAYER2(row, col) = RAW(row + top_margin, col + left_margin);
  }
#endif
  if (mask[0][3] > 0)
    goto mask_set; // loader already provided mask rectangles
  if (load_raw == &CLASS canon_load_raw || load_raw == &CLASS lossless_jpeg_load_raw)
  {
    mask[0][1] = mask[1][1] += 2;
    mask[0][3] -= 2;
    goto sides;
  }
  if (load_raw == &CLASS canon_600_load_raw || load_raw == &CLASS sony_load_raw ||
      (load_raw == &CLASS eight_bit_load_raw && strncmp(model, "DC2", 3)) || load_raw == &CLASS kodak_262_load_raw ||
      (load_raw == &CLASS packed_load_raw && (load_flags & 32)))
  {
  sides:
    // Masked strips on the left and right of the active area.
    mask[0][0] = mask[1][0] = top_margin;
    mask[0][2] = mask[1][2] = top_margin + height;
    mask[0][3] += left_margin;
    mask[1][1] += left_margin + width;
    mask[1][3] += raw_width;
  }
  if (load_raw == &CLASS nokia_load_raw)
  {
    mask[0][2] = top_margin;
    mask[0][3] = width;
  }
#ifdef LIBRAW_LIBRARY_BUILD
  if (load_raw == &CLASS broadcom_load_raw)
  {
    mask[0][2] = top_margin;
    mask[0][3] = width;
  }
#endif
mask_set:
  memset(mblack, 0, sizeof mblack);
  for (zero = m = 0; m < 8; m++)
    for (row = MAX(mask[m][0], 0); row < MIN(mask[m][2], raw_height); row++)
      for (col = MAX(mask[m][1], 0); col < MIN(mask[m][3], raw_width); col++)
      {
        c = FC(row - top_margin, col - left_margin);
        mblack[c] += val = raw_image[(row)*raw_pitch / 2 + (col)];
        mblack[4 + c]++;
        zero += !val;
      }
  if (load_raw == &CLASS canon_600_load_raw && width < raw_width)
  {
    black = (mblack[0] + mblack[1] + mblack[2] + mblack[3]) / (mblack[4] + mblack[5] + mblack[6] + mblack[7]) - 4;
#ifndef LIBRAW_LIBRARY_BUILD
    canon_600_correct();
#endif
  }
  else if (zero < mblack[4] && mblack[5] && mblack[6] && mblack[7])
  {
    // Per-channel black levels; zero the shared/global black.
    FORC4 cblack[c] = mblack[c] / mblack[4 + c];
    black = cblack[4] = cblack[5] = cblack[6] = 0;
  }
}
#ifdef LIBRAW_LIBRARY_BUILD
#undef mblack
#endif
/*
  Replace zero-valued pixels with the average of the same-color
  neighbors in the surrounding 5x5 window.  All indices are unsigned:
  for row/col < 2 the r/c subtraction wraps to a huge value, which the
  `r < height && c < width` test rejects — this is the border guard.
*/
void CLASS remove_zeroes()
{
  unsigned row, col, tot, n, r, c;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES, 0, 2);
#endif
  for (row = 0; row < height; row++)
    for (col = 0; col < width; col++)
      if (BAYER(row, col) == 0)
      {
        tot = n = 0;
        for (r = row - 2; r <= row + 2; r++)
          for (c = col - 2; c <= col + 2; c++)
            if (r < height && c < width && FC(r, c) == FC(row, col) && BAYER(r, c))
              tot += (n++, BAYER(r, c));
        if (n) // leave the pixel at zero if no usable neighbor exists
          BAYER(row, col) = tot / n;
      }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_REMOVE_ZEROES, 1, 2);
#endif
}
//@end COMMON
/* @out FILEIO
#include <math.h>
#define CLASS LibRaw::
#include "libraw/libraw_types.h"
#define LIBRAW_LIBRARY_BUILD
#include "libraw/libraw.h"
#include "internal/defines.h"
#include "internal/var_defines.h"
@end FILEIO */
// @out FILEIO
/*
Search from the current directory up to the root looking for
a ".badpixels" file, and fix those pixels now.
*/
/*
  Fix dead pixels listed in a ".badpixels" file.

  Each non-comment line gives "col row timestamp"; entries newer than
  the image's timestamp are skipped.  A bad pixel is replaced by the
  average of its same-color neighbors, searching rings of radius 1
  then 2 until at least one neighbor is found.  When `cfname` is null
  (dcraw build only) the file is searched for from the current
  directory up to the root.

  Fix: the neighbor average used to be written unconditionally; if no
  same-color neighbor is found within radius 2 (degenerate sensor
  sizes / exotic CFA layouts), `n` stays 0 and `tot / n` divides by
  zero.  Guard the division exactly as remove_zeroes() does, leaving
  the pixel untouched in that case.
*/
void CLASS bad_pixels(const char *cfname)
{
  FILE *fp = NULL;
#ifndef LIBRAW_LIBRARY_BUILD
  char *fname, *cp, line[128];
  int len, time, row, col, r, c, rad, tot, n, fixed = 0;
#else
  char *cp, line[128];
  int time, row, col, r, c, rad, tot, n;
#ifdef DCRAW_VERBOSE
  int fixed = 0;
#endif
#endif
  if (!filters)
    return; // only meaningful for mosaic (CFA) sensors
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS, 0, 2);
#endif
  if (cfname)
    fp = fopen(cfname, "r");
  // @end FILEIO
  else
  {
    // Grow the buffer until getcwd() fits, leaving room for the suffix.
    for (len = 32;; len *= 2)
    {
      fname = (char *)malloc(len);
      if (!fname)
        return;
      if (getcwd(fname, len - 16))
        break;
      free(fname);
      if (errno != ERANGE)
        return;
    }
#if defined(WIN32) || defined(DJGPP)
    if (fname[1] == ':')
      memmove(fname, fname + 2, len - 2);
    for (cp = fname; *cp; cp++)
      if (*cp == '\\')
        *cp = '/';
#endif
    cp = fname + strlen(fname);
    if (cp[-1] == '/')
      cp--;
    // Walk up the directory tree looking for ".badpixels".
    while (*fname == '/')
    {
      strcpy(cp, "/.badpixels");
      if ((fp = fopen(fname, "r")))
        break;
      if (cp == fname)
        break;
      while (*--cp != '/')
        ;
    }
    free(fname);
  }
  // @out FILEIO
  if (!fp)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_BADPIXELMAP;
#endif
    return;
  }
  while (fgets(line, 128, fp))
  {
    cp = strchr(line, '#');
    if (cp)
      *cp = 0; // strip trailing comment
    if (sscanf(line, "%d %d %d", &col, &row, &time) != 3)
      continue;
    if ((unsigned)col >= width || (unsigned)row >= height)
      continue;
    if (time > timestamp)
      continue; // pixel went bad after this photo was taken
    // Average same-color neighbors, widening the search ring until found.
    for (tot = n = 0, rad = 1; rad < 3 && n == 0; rad++)
      for (r = row - rad; r <= row + rad; r++)
        for (c = col - rad; c <= col + rad; c++)
          if ((unsigned)r < height && (unsigned)c < width && (r != row || c != col) && fcol(r, c) == fcol(row, col))
          {
            tot += BAYER2(r, c);
            n++;
          }
    if (n) // guard against division by zero when no neighbor qualifies
      BAYER2(row, col) = tot / n;
#ifdef DCRAW_VERBOSE
    if (verbose)
    {
      if (!fixed++)
        fprintf(stderr, _("Fixed dead pixels at:"));
      fprintf(stderr, " %d,%d", col, row);
    }
#endif
  }
#ifdef DCRAW_VERBOSE
  if (fixed)
    fputc('\n', stderr);
#endif
  fclose(fp);
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_BAD_PIXELS, 1, 2);
#endif
}
/*
   Subtract a dark frame from the raw data, clamping at zero.
   The dark frame must be a binary 16-bit PGM ("P5", maxval 65535)
   with exactly the same dimensions as the image.  On success the
   black levels are reset to zero, since they are now folded into
   the subtraction.
*/
void CLASS subtract(const char *fname)
{
  FILE *fp;
  int dim[3] = {0, 0, 0}, comment = 0, number = 0, error = 0, nd = 0, c, row, col;
  ushort *pixel;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME, 0, 2);
#endif
  if (!(fp = fopen(fname, "rb")))
  {
#ifdef DCRAW_VERBOSE
    perror(fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_FILE;
#endif
    return;
  }
  /* Parse the PGM header: magic "P5", then width, height, maxval,
     skipping '#' comments up to end of line. */
  if (fgetc(fp) != 'P' || fgetc(fp) != '5')
    error = 1;
  while (!error && nd < 3 && (c = fgetc(fp)) != EOF)
  {
    if (c == '#')
      comment = 1;
    if (c == '\n')
      comment = 0;
    if (comment)
      continue;
    if (isdigit(c))
      number = 1;
    if (number)
    {
      if (isdigit(c))
        dim[nd] = dim[nd] * 10 + c - '0';
      else if (isspace(c))
      {
        number = 0;
        nd++;
      }
      else
        error = 1;
    }
  }
  if (error || nd < 3)
  {
#ifdef DCRAW_VERBOSE
    fprintf(stderr, _("%s is not a valid PGM file!\n"), fname);
#endif
    fclose(fp);
    return;
  }
  else if (dim[0] != width || dim[1] != height || dim[2] != 65535)
  {
#ifdef DCRAW_VERBOSE
    fprintf(stderr, _("%s has the wrong dimensions!\n"), fname);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_DARKFRAME_DIM;
#endif
    fclose(fp);
    return;
  }
  pixel = (ushort *)calloc(width, sizeof *pixel);
  merror(pixel, "subtract()");
  for (row = 0; row < height; row++)
  {
    /* Bugfix: check the fread() result.  The old code ignored it, so a
       truncated dark frame silently subtracted whatever stale data was
       left in the buffer for the remaining rows. */
    if (fread(pixel, 2, width, fp) != (size_t)width)
      break;
    for (col = 0; col < width; col++)
      BAYER(row, col) = MAX(BAYER(row, col) - ntohs(pixel[col]), 0); /* big-endian samples */
  }
  free(pixel);
  fclose(fp);
  /* The dark level is now baked into the data. */
  memset(cblack, 0, sizeof cblack);
  black = 0;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_DARK_FRAME, 1, 2);
#endif
}
//@end FILEIO
//@out COMMON
/* Fixed substitution tables; presumably the key tables for the Nikon
   metadata decryption used elsewhere in this file -- their consumer is
   not visible in this chunk (NOTE(review): confirm against callers). */
static const uchar xlat[2][256] = {
    {0xc1, 0xbf, 0x6d, 0x0d, 0x59, 0xc5, 0x13, 0x9d, 0x83, 0x61, 0x6b, 0x4f, 0xc7, 0x7f, 0x3d, 0x3d, 0x53, 0x59, 0xe3,
     0xc7, 0xe9, 0x2f, 0x95, 0xa7, 0x95, 0x1f, 0xdf, 0x7f, 0x2b, 0x29, 0xc7, 0x0d, 0xdf, 0x07, 0xef, 0x71, 0x89, 0x3d,
     0x13, 0x3d, 0x3b, 0x13, 0xfb, 0x0d, 0x89, 0xc1, 0x65, 0x1f, 0xb3, 0x0d, 0x6b, 0x29, 0xe3, 0xfb, 0xef, 0xa3, 0x6b,
     0x47, 0x7f, 0x95, 0x35, 0xa7, 0x47, 0x4f, 0xc7, 0xf1, 0x59, 0x95, 0x35, 0x11, 0x29, 0x61, 0xf1, 0x3d, 0xb3, 0x2b,
     0x0d, 0x43, 0x89, 0xc1, 0x9d, 0x9d, 0x89, 0x65, 0xf1, 0xe9, 0xdf, 0xbf, 0x3d, 0x7f, 0x53, 0x97, 0xe5, 0xe9, 0x95,
     0x17, 0x1d, 0x3d, 0x8b, 0xfb, 0xc7, 0xe3, 0x67, 0xa7, 0x07, 0xf1, 0x71, 0xa7, 0x53, 0xb5, 0x29, 0x89, 0xe5, 0x2b,
     0xa7, 0x17, 0x29, 0xe9, 0x4f, 0xc5, 0x65, 0x6d, 0x6b, 0xef, 0x0d, 0x89, 0x49, 0x2f, 0xb3, 0x43, 0x53, 0x65, 0x1d,
     0x49, 0xa3, 0x13, 0x89, 0x59, 0xef, 0x6b, 0xef, 0x65, 0x1d, 0x0b, 0x59, 0x13, 0xe3, 0x4f, 0x9d, 0xb3, 0x29, 0x43,
     0x2b, 0x07, 0x1d, 0x95, 0x59, 0x59, 0x47, 0xfb, 0xe5, 0xe9, 0x61, 0x47, 0x2f, 0x35, 0x7f, 0x17, 0x7f, 0xef, 0x7f,
     0x95, 0x95, 0x71, 0xd3, 0xa3, 0x0b, 0x71, 0xa3, 0xad, 0x0b, 0x3b, 0xb5, 0xfb, 0xa3, 0xbf, 0x4f, 0x83, 0x1d, 0xad,
     0xe9, 0x2f, 0x71, 0x65, 0xa3, 0xe5, 0x07, 0x35, 0x3d, 0x0d, 0xb5, 0xe9, 0xe5, 0x47, 0x3b, 0x9d, 0xef, 0x35, 0xa3,
     0xbf, 0xb3, 0xdf, 0x53, 0xd3, 0x97, 0x53, 0x49, 0x71, 0x07, 0x35, 0x61, 0x71, 0x2f, 0x43, 0x2f, 0x11, 0xdf, 0x17,
     0x97, 0xfb, 0x95, 0x3b, 0x7f, 0x6b, 0xd3, 0x25, 0xbf, 0xad, 0xc7, 0xc5, 0xc5, 0xb5, 0x8b, 0xef, 0x2f, 0xd3, 0x07,
     0x6b, 0x25, 0x49, 0x95, 0x25, 0x49, 0x6d, 0x71, 0xc7},
    {0xa7, 0xbc, 0xc9, 0xad, 0x91, 0xdf, 0x85, 0xe5, 0xd4, 0x78, 0xd5, 0x17, 0x46, 0x7c, 0x29, 0x4c, 0x4d, 0x03, 0xe9,
     0x25, 0x68, 0x11, 0x86, 0xb3, 0xbd, 0xf7, 0x6f, 0x61, 0x22, 0xa2, 0x26, 0x34, 0x2a, 0xbe, 0x1e, 0x46, 0x14, 0x68,
     0x9d, 0x44, 0x18, 0xc2, 0x40, 0xf4, 0x7e, 0x5f, 0x1b, 0xad, 0x0b, 0x94, 0xb6, 0x67, 0xb4, 0x0b, 0xe1, 0xea, 0x95,
     0x9c, 0x66, 0xdc, 0xe7, 0x5d, 0x6c, 0x05, 0xda, 0xd5, 0xdf, 0x7a, 0xef, 0xf6, 0xdb, 0x1f, 0x82, 0x4c, 0xc0, 0x68,
     0x47, 0xa1, 0xbd, 0xee, 0x39, 0x50, 0x56, 0x4a, 0xdd, 0xdf, 0xa5, 0xf8, 0xc6, 0xda, 0xca, 0x90, 0xca, 0x01, 0x42,
     0x9d, 0x8b, 0x0c, 0x73, 0x43, 0x75, 0x05, 0x94, 0xde, 0x24, 0xb3, 0x80, 0x34, 0xe5, 0x2c, 0xdc, 0x9b, 0x3f, 0xca,
     0x33, 0x45, 0xd0, 0xdb, 0x5f, 0xf5, 0x52, 0xc3, 0x21, 0xda, 0xe2, 0x22, 0x72, 0x6b, 0x3e, 0xd0, 0x5b, 0xa8, 0x87,
     0x8c, 0x06, 0x5d, 0x0f, 0xdd, 0x09, 0x19, 0x93, 0xd0, 0xb9, 0xfc, 0x8b, 0x0f, 0x84, 0x60, 0x33, 0x1c, 0x9b, 0x45,
     0xf1, 0xf0, 0xa3, 0x94, 0x3a, 0x12, 0x77, 0x33, 0x4d, 0x44, 0x78, 0x28, 0x3c, 0x9e, 0xfd, 0x65, 0x57, 0x16, 0x94,
     0x6b, 0xfb, 0x59, 0xd0, 0xc8, 0x22, 0x36, 0xdb, 0xd2, 0x63, 0x98, 0x43, 0xa1, 0x04, 0x87, 0x86, 0xf7, 0xa6, 0x26,
     0xbb, 0xd6, 0x59, 0x4d, 0xbf, 0x6a, 0x2e, 0xaa, 0x2b, 0xef, 0xe6, 0x78, 0xb6, 0x4e, 0xe0, 0x2f, 0xdc, 0x7c, 0xbe,
     0x57, 0x19, 0x32, 0x7e, 0x2a, 0xd0, 0xb8, 0xba, 0x29, 0x00, 0x3c, 0x52, 0x7d, 0xa8, 0x49, 0x3b, 0x2d, 0xeb, 0x25,
     0x49, 0xfa, 0xa3, 0xaa, 0x39, 0xa7, 0xc5, 0xa7, 0x50, 0x11, 0x36, 0xfb, 0xc6, 0x67, 0x4a, 0xf5, 0xa5, 0x12, 0x65,
     0x7e, 0xb0, 0xdf, 0xaf, 0x4e, 0xb3, 0x61, 0x7f, 0x2f}};
/*
   Solve for and optionally tabulate the output gamma curve.
   pwr and ts are the user gamma parameters (a power segment with a
   linear toe of slope ts).  The crossover points g[2], g[3] and the
   offset g[4] are found by a 48-step bisection; g[5] is a derived
   normalization constant.
   mode == 0: only store the six coefficients in gamm[] and return.
   otherwise: fill curve[0..0xffff], where imax is the input value that
   maps to full scale; after the decrement, mode selects between the
   curve and its inverse (NOTE(review): which branch is the forward
   curve depends on the callers -- confirm before relying on it).
*/
void CLASS gamma_curve(double pwr, double ts, int mode, int imax)
{
  int i;
  double g[6], bnd[2] = {0, 0}, r;
  g[0] = pwr;
  g[1] = ts;
  g[2] = g[3] = g[4] = 0;
  bnd[g[1] >= 1] = 1;
  /* Bisect for the point where the linear toe meets the power segment. */
  if (g[1] && (g[1] - 1) * (g[0] - 1) <= 0)
  {
    for (i = 0; i < 48; i++)
    {
      g[2] = (bnd[0] + bnd[1]) / 2;
      if (g[0])
        bnd[(pow(g[2] / g[1], -g[0]) - 1) / g[0] - 1 / g[2] > -1] = g[2];
      else
        bnd[g[2] / exp(1 - 1 / g[2]) < g[1]] = g[2];
    }
    g[3] = g[2] / g[1];
    if (g[0])
      g[4] = g[2] * (1 / g[0] - 1);
  }
  /* g[5]: normalizer derived from the integral of the curve. */
  if (g[0])
    g[5] = 1 / (g[1] * SQR(g[3]) / 2 - g[4] * (1 - g[3]) + (1 - pow(g[3], 1 + g[0])) * (1 + g[4]) / (1 + g[0])) - 1;
  else
    g[5] = 1 / (g[1] * SQR(g[3]) / 2 + 1 - g[2] - g[3] - g[2] * g[3] * (log(g[3]) - 1)) - 1;
  if (!mode--)
  {
    /* mode 0: just publish the coefficients. */
    memcpy(gamm, g, sizeof gamm);
    return;
  }
  /* Tabulate; inputs at or above imax saturate to 0xffff. */
  for (i = 0; i < 0x10000; i++)
  {
    curve[i] = 0xffff;
    if ((r = (double)i / imax) < 1)
      curve[i] = 0x10000 *
                 (mode ? (r < g[3] ? r * g[1] : (g[0] ? pow(r, g[0]) * (1 + g[4]) - g[4] : log(r) * g[2] + 1))
                       : (r < g[2] ? r / g[1] : (g[0] ? pow((r + g[4]) / (1 + g[4]), 1 / g[0]) : exp((r - 1) / g[2]))));
  }
}
/*
   Compute the pseudoinverse of the size x 3 matrix "in" by
   Gauss-Jordan elimination on the augmented matrix [in^T*in | I],
   storing the result transposed in "out" (size rows of 3).
   A row with a near-zero pivot is left unscaled so degenerate input
   does not divide by (almost) zero.
*/
void CLASS pseudoinverse(double (*in)[3], double (*out)[3], int size)
{
  double work[3][6], num;
  int i, j, k;
  /* Build [in^T*in | I]: right half starts as the identity. */
  for (i = 0; i < 3; i++)
  {
    for (j = 0; j < 6; j++)
      work[i][j] = j == i + 3;
    for (j = 0; j < 3; j++)
      for (k = 0; k < size; k++)
        work[i][j] += in[k][i] * in[k][j];
  }
  /* Gauss-Jordan elimination. */
  for (i = 0; i < 3; i++)
  {
    num = work[i][i];
    /* Pivot test hoisted out of the column loop -- it does not depend
       on j, so the old per-column re-check was redundant work. */
    if (fabs(num) > 0.00001f)
      for (j = 0; j < 6; j++)
        work[i][j] /= num;
    for (k = 0; k < 3; k++)
    {
      if (k == i)
        continue;
      num = work[k][i];
      for (j = 0; j < 6; j++)
        work[k][j] -= work[i][j] * num;
    }
  }
  /* out = (in^T*in)^-1 * in^T, written transposed. */
  for (i = 0; i < size; i++)
    for (j = 0; j < 3; j++)
      for (out[i][j] = k = 0; k < 3; k++)
        out[i][j] += work[j][k + 3] * in[i][k];
}
/*
   Derive the RGB->camera matrix and the per-channel multipliers from a
   camera->XYZ matrix: convert cam_xyz to a camera->RGB matrix, scale
   each row so white maps to white (the scale factor's reciprocal
   becomes pre_mul), then store the pseudoinverse in _rgb_cam.
*/
void CLASS cam_xyz_coeff(float _rgb_cam[3][4], double cam_xyz[4][3])
{
  double cam_rgb[4][3], inverse[4][3], rowsum;
  int ch, col, term;
  /* cam_rgb = cam_xyz * xyz_rgb (multiply out the XYZ colorspace). */
  for (ch = 0; ch < colors; ch++)
    for (col = 0; col < 3; col++)
    {
      cam_rgb[ch][col] = 0;
      for (term = 0; term < 3; term++)
        cam_rgb[ch][col] += cam_xyz[ch][term] * xyz_rgb[term][col];
    }
  /* Normalize each row so that cam_rgb * (1,1,1) is all ones. */
  for (ch = 0; ch < colors; ch++)
  {
    rowsum = 0;
    for (col = 0; col < 3; col++)
      rowsum += cam_rgb[ch][col];
    if (rowsum > 0.00001)
    {
      for (col = 0; col < 3; col++)
        cam_rgb[ch][col] /= rowsum;
      pre_mul[ch] = 1 / rowsum;
    }
    else
    {
      /* Degenerate row: zero it out and use a neutral multiplier. */
      for (col = 0; col < 3; col++)
        cam_rgb[ch][col] = 0.0;
      pre_mul[ch] = 1.0;
    }
  }
  pseudoinverse(cam_rgb, inverse, colors);
  /* _rgb_cam is the transpose of the pseudoinverse. */
  for (ch = 0; ch < 3; ch++)
    for (col = 0; col < colors; col++)
      _rgb_cam[ch][col] = inverse[col][ch];
}
#ifdef COLORCHECK
/*
   Calibration helper: derive a camera->XYZ matrix from a photographed
   GretagMacbeth ColorChecker chart and print it in the format of the
   built-in color tables.  The square coordinates in cut[] are NOT
   filled in here -- the developer must set them by hand before
   compiling with COLORCHECK.
*/
void CLASS colorcheck()
{
#define NSQ 24
  // Coordinates of the GretagMacbeth ColorChecker squares
  // width, height, 1st_column, 1st_row
  int cut[NSQ][4]; // you must set these
  // ColorChecker Chart under 6500-kelvin illumination
  static const double gmb_xyY[NSQ][3] = {{0.400, 0.350, 10.1}, // Dark Skin
                                         {0.377, 0.345, 35.8}, // Light Skin
                                         {0.247, 0.251, 19.3}, // Blue Sky
                                         {0.337, 0.422, 13.3}, // Foliage
                                         {0.265, 0.240, 24.3}, // Blue Flower
                                         {0.261, 0.343, 43.1}, // Bluish Green
                                         {0.506, 0.407, 30.1}, // Orange
                                         {0.211, 0.175, 12.0}, // Purplish Blue
                                         {0.453, 0.306, 19.8}, // Moderate Red
                                         {0.285, 0.202, 6.6},  // Purple
                                         {0.380, 0.489, 44.3}, // Yellow Green
                                         {0.473, 0.438, 43.1}, // Orange Yellow
                                         {0.187, 0.129, 6.1},  // Blue
                                         {0.305, 0.478, 23.4}, // Green
                                         {0.539, 0.313, 12.0}, // Red
                                         {0.448, 0.470, 59.1}, // Yellow
                                         {0.364, 0.233, 19.8}, // Magenta
                                         {0.196, 0.252, 19.8}, // Cyan
                                         {0.310, 0.316, 90.0}, // White
                                         {0.310, 0.316, 59.1}, // Neutral 8
                                         {0.310, 0.316, 36.2}, // Neutral 6.5
                                         {0.310, 0.316, 19.8}, // Neutral 5
                                         {0.310, 0.316, 9.0},  // Neutral 3.5
                                         {0.310, 0.316, 3.1}}; // Black
  double gmb_cam[NSQ][4], gmb_xyz[NSQ][3];
  double inverse[NSQ][3], cam_xyz[4][3], balance[4], num;
  int c, i, j, k, sq, row, col, pass, count[4];
  memset(gmb_cam, 0, sizeof gmb_cam);
  for (sq = 0; sq < NSQ; sq++)
  {
    /* Average each square's raw values per channel (also darkens the
       square in the image, apparently as a visual marker). */
    FORCC count[c] = 0;
    for (row = cut[sq][3]; row < cut[sq][3] + cut[sq][1]; row++)
      for (col = cut[sq][2]; col < cut[sq][2] + cut[sq][0]; col++)
      {
        c = FC(row, col);
        if (c >= colors)
          c -= 2;
        gmb_cam[sq][c] += BAYER2(row, col);
        BAYER2(row, col) = black + (BAYER2(row, col) - black) / 2;
        count[c]++;
      }
    FORCC gmb_cam[sq][c] = gmb_cam[sq][c] / count[c] - black;
    /* Convert the reference xyY values to XYZ. */
    gmb_xyz[sq][0] = gmb_xyY[sq][2] * gmb_xyY[sq][0] / gmb_xyY[sq][1];
    gmb_xyz[sq][1] = gmb_xyY[sq][2];
    gmb_xyz[sq][2] = gmb_xyY[sq][2] * (1 - gmb_xyY[sq][0] - gmb_xyY[sq][1]) / gmb_xyY[sq][1];
  }
  pseudoinverse(gmb_xyz, inverse, NSQ);
  /* Two passes: solve for cam_xyz, rebalance using the white square
     (index 20), then solve again. */
  for (pass = 0; pass < 2; pass++)
  {
    for (raw_color = i = 0; i < colors; i++)
      for (j = 0; j < 3; j++)
        for (cam_xyz[i][j] = k = 0; k < NSQ; k++)
          cam_xyz[i][j] += gmb_cam[k][i] * inverse[k][j];
    cam_xyz_coeff(rgb_cam, cam_xyz);
    FORCC balance[c] = pre_mul[c] * gmb_cam[20][c];
    for (sq = 0; sq < NSQ; sq++)
      FORCC gmb_cam[sq][c] *= balance[c];
  }
  if (verbose)
  {
    /* Print in the source format of the built-in adobe_coeff tables. */
    printf("  { \"%s %s\", %d,\n\t{", make, model, black);
    num = 10000 / (cam_xyz[1][0] + cam_xyz[1][1] + cam_xyz[1][2]);
    FORCC for (j = 0; j < 3; j++) printf("%c%d", (c | j) ? ',' : ' ', (int)(cam_xyz[c][j] * num + 0.5));
    puts(" } },");
  }
#undef NSQ
}
#endif
/*
   One-dimensional "hat" smoothing pass used by the wavelet denoiser:
   temp[i] = 2*base[i] + base[i-sc] + base[i+sc], stepping through base
   with stride st, with the out-of-range taps mirrored at both ends.
*/
void CLASS hat_transform(float *temp, float *base, int st, int size, int sc)
{
  int i = 0;
  /* Left edge: mirror the i-sc tap. */
  while (i < sc)
  {
    temp[i] = 2 * base[st * i] + base[st * (sc - i)] + base[st * (i + sc)];
    i++;
  }
  /* Interior: both taps are in range. */
  while (i + sc < size)
  {
    temp[i] = 2 * base[st * i] + base[st * (i - sc)] + base[st * (i + sc)];
    i++;
  }
  /* Right edge: mirror the i+sc tap. */
  while (i < size)
  {
    temp[i] = 2 * base[st * i] + base[st * (i - sc)] + base[st * (2 * size - 2 - (i + sc))];
    i++;
  }
}
#if !defined(LIBRAW_USE_OPENMP)
/*
   Wavelet denoising of the raw data, controlled by "threshold".
   Each channel is decomposed into 5 levels with the hat_transform
   smoothing kernel; the per-level detail is soft-thresholded and the
   image rebuilt.  For 3-color CFA images the two green channels are
   additionally pulled toward each other afterwards.
*/
void CLASS wavelet_denoise()
{
  float *fimg = 0, *temp, thold, mul[2], avg, diff;
  int scale = 1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
  ushort *window[4];
  /* Per-level weights applied to the user threshold. */
  static const float noise[] = {0.8002, 0.2735, 0.1202, 0.0585, 0.0291, 0.0152, 0.0080, 0.0044};
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Wavelet denoising...\n"));
#endif
  /* Scale the data up to fill the 16-bit range. */
  while (maximum << scale < 0x10000)
    scale++;
  maximum <<= --scale;
  black <<= scale;
  FORC4 cblack[c] <<= scale;
  /* Three float planes plus one scratch row+column; an oversized image
     leaves fimg NULL, which merror() treats as an allocation failure. */
  if ((size = iheight * iwidth) < 0x15550000)
    fimg = (float *)malloc((size * 3 + iheight + iwidth) * sizeof *fimg);
  merror(fimg, "wavelet_denoise()");
  temp = fimg + size * 3;
  if ((nc = colors) == 3 && filters)
    nc++; /* denoise G1 and G3 as separate channels */
  FORC(nc)
  { /* denoise R,G1,B,G3 individually */
    /* Work in the sqrt domain so noise is roughly level-independent. */
    for (i = 0; i < size; i++)
      fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
    for (hpass = lev = 0; lev < 5; lev++)
    {
      /* Ping-pong between the two scratch planes. */
      lpass = size * ((lev & 1) + 1);
      /* Separable smoothing: rows, then columns. */
      for (row = 0; row < iheight; row++)
      {
        hat_transform(temp, fimg + hpass + row * iwidth, 1, iwidth, 1 << lev);
        for (col = 0; col < iwidth; col++)
          fimg[lpass + row * iwidth + col] = temp[col] * 0.25;
      }
      for (col = 0; col < iwidth; col++)
      {
        hat_transform(temp, fimg + lpass + col, iwidth, iheight, 1 << lev);
        for (row = 0; row < iheight; row++)
          fimg[lpass + row * iwidth + col] = temp[row] * 0.25;
      }
      thold = threshold * noise[lev];
      /* Soft-threshold the detail (this level minus the smoothed one)
         and accumulate the surviving detail back into plane 0. */
      for (i = 0; i < size; i++)
      {
        fimg[hpass + i] -= fimg[lpass + i];
        if (fimg[hpass + i] < -thold)
          fimg[hpass + i] += thold;
        else if (fimg[hpass + i] > thold)
          fimg[hpass + i] -= thold;
        else
          fimg[hpass + i] = 0;
        if (hpass)
          fimg[i] += fimg[hpass + i];
      }
      hpass = lpass;
    }
    /* Add the residual low-pass and square back to the linear domain. */
    for (i = 0; i < size; i++)
      image[i][c] = CLIP(SQR(fimg[i] + fimg[lpass + i]) / 0x10000);
  }
  if (filters && colors == 3)
  { /* pull G1 and G3 closer together */
    for (row = 0; row < 2; row++)
    {
      mul[row] = 0.125 * pre_mul[FC(row + 1, 0) | 1] / pre_mul[FC(row, 0) | 1];
      blk[row] = cblack[FC(row, 0) | 1];
    }
    /* Sliding 4-row window of green samples; reuses fimg as storage. */
    for (i = 0; i < 4; i++)
      window[i] = (ushort *)fimg + width * i;
    for (wlast = -1, row = 1; row < height - 1; row++)
    {
      while (wlast < row + 1)
      {
        for (wlast++, i = 0; i < 4; i++)
          window[(i + 3) & 3] = window[i]; /* rotate the row buffers */
        for (col = FC(wlast, 1) & 1; col < width; col += 2)
          window[2][col] = BAYER(wlast, col);
      }
      thold = threshold / 512;
      for (col = (FC(row, 0) & 1) + 1; col < width - 1; col += 2)
      {
        /* Estimate this green from the four diagonal greens of the
           other green channel, then clamp the sqrt-domain difference. */
        avg = (window[0][col - 1] + window[0][col + 1] + window[2][col - 1] + window[2][col + 1] - blk[~row & 1] * 4) *
                  mul[row & 1] +
              (window[1][col] + blk[row & 1]) * 0.5;
        avg = avg < 0 ? 0 : sqrt(avg);
        diff = sqrt((double)BAYER(row, col)) - avg;
        if (diff < -thold)
          diff += thold;
        else if (diff > thold)
          diff -= thold;
        else
          diff = 0;
        BAYER(row, col) = CLIP(SQR(avg + diff) + 0.5);
      }
    }
  }
  free(fimg);
}
#else /* LIBRAW_USE_OPENMP */
/*
   OpenMP variant of wavelet_denoise(): identical algorithm to the
   serial version above, with the per-pixel loops split across threads
   and a per-thread scratch row/column buffer (temp is private and
   allocated inside the parallel region).
*/
void CLASS wavelet_denoise()
{
  float *fimg = 0, *temp, thold, mul[2], avg, diff;
  int scale = 1, size, lev, hpass, lpass, row, col, nc, c, i, wlast, blk[2];
  ushort *window[4];
  /* Per-level weights applied to the user threshold. */
  static const float noise[] = {0.8002, 0.2735, 0.1202, 0.0585, 0.0291, 0.0152, 0.0080, 0.0044};
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Wavelet denoising...\n"));
#endif
  /* Scale the data up to fill the 16-bit range. */
  while (maximum << scale < 0x10000)
    scale++;
  maximum <<= --scale;
  black <<= scale;
  FORC4 cblack[c] <<= scale;
  /* Three float planes; an oversized image leaves fimg NULL, which
     merror() treats as an allocation failure. */
  if ((size = iheight * iwidth) < 0x15550000)
    fimg = (float *)malloc((size * 3 + iheight + iwidth) * sizeof *fimg);
  merror(fimg, "wavelet_denoise()");
  temp = fimg + size * 3;
  if ((nc = colors) == 3 && filters)
    nc++; /* denoise G1 and G3 as separate channels */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp parallel default(shared) private(i, col, row, thold, lev, lpass, hpass, temp, c) firstprivate(scale, size)
#endif
  {
    /* Each thread gets its own scratch row/column buffer. */
    temp = (float *)malloc((iheight + iwidth) * sizeof *fimg);
    FORC(nc)
    { /* denoise R,G1,B,G3 individually */
      /* Work in the sqrt domain so noise is roughly level-independent. */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      for (i = 0; i < size; i++)
        fimg[i] = 256 * sqrt((double)(image[i][c] << scale));
      for (hpass = lev = 0; lev < 5; lev++)
      {
        /* Ping-pong between the two scratch planes. */
        lpass = size * ((lev & 1) + 1);
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (row = 0; row < iheight; row++)
        {
          hat_transform(temp, fimg + hpass + row * iwidth, 1, iwidth, 1 << lev);
          for (col = 0; col < iwidth; col++)
            fimg[lpass + row * iwidth + col] = temp[col] * 0.25;
        }
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (col = 0; col < iwidth; col++)
        {
          hat_transform(temp, fimg + lpass + col, iwidth, iheight, 1 << lev);
          for (row = 0; row < iheight; row++)
            fimg[lpass + row * iwidth + col] = temp[row] * 0.25;
        }
        thold = threshold * noise[lev];
        /* Soft-threshold the detail and accumulate it into plane 0. */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
        for (i = 0; i < size; i++)
        {
          fimg[hpass + i] -= fimg[lpass + i];
          if (fimg[hpass + i] < -thold)
            fimg[hpass + i] += thold;
          else if (fimg[hpass + i] > thold)
            fimg[hpass + i] -= thold;
          else
            fimg[hpass + i] = 0;
          if (hpass)
            fimg[i] += fimg[hpass + i];
        }
        hpass = lpass;
      }
      /* Add the residual low-pass and square back to linear domain. */
#ifdef LIBRAW_LIBRARY_BUILD
#pragma omp for
#endif
      for (i = 0; i < size; i++)
        image[i][c] = CLIP(SQR(fimg[i] + fimg[lpass + i]) / 0x10000);
    }
    free(temp);
  } /* end omp parallel */
  /* The following loops are hard to parallelize: wlast carries a
     loop-to-loop dependency.  The second part should be easier, but a
     correct parallel version has not been worked out yet. */
  if (filters && colors == 3)
  { /* pull G1 and G3 closer together */
    for (row = 0; row < 2; row++)
    {
      mul[row] = 0.125 * pre_mul[FC(row + 1, 0) | 1] / pre_mul[FC(row, 0) | 1];
      blk[row] = cblack[FC(row, 0) | 1];
    }
    /* Sliding 4-row window of green samples; reuses fimg as storage. */
    for (i = 0; i < 4; i++)
      window[i] = (ushort *)fimg + width * i;
    for (wlast = -1, row = 1; row < height - 1; row++)
    {
      while (wlast < row + 1)
      {
        for (wlast++, i = 0; i < 4; i++)
          window[(i + 3) & 3] = window[i]; /* rotate the row buffers */
        for (col = FC(wlast, 1) & 1; col < width; col += 2)
          window[2][col] = BAYER(wlast, col);
      }
      thold = threshold / 512;
      for (col = (FC(row, 0) & 1) + 1; col < width - 1; col += 2)
      {
        /* Estimate this green from the four diagonal greens of the
           other green channel, then clamp the sqrt-domain difference. */
        avg = (window[0][col - 1] + window[0][col + 1] + window[2][col - 1] + window[2][col + 1] - blk[~row & 1] * 4) *
                  mul[row & 1] +
              (window[1][col] + blk[row & 1]) * 0.5;
        avg = avg < 0 ? 0 : sqrt(avg);
        diff = sqrt((double)BAYER(row, col)) - avg;
        if (diff < -thold)
          diff += thold;
        else if (diff > thold)
          diff -= thold;
        else
          diff = 0;
        BAYER(row, col) = CLIP(SQR(avg + diff) + 0.5);
      }
    }
  }
  free(fimg);
}
#endif
/*
   Green equilibration: reduce the G1/G3 imbalance ("green split") of
   Bayer sensors.  For each pixel of the second green channel, compare
   the mean of the four diagonal neighbors from the other green channel
   (m1) with the mean of the four same-channel neighbors two pixels
   away (m2); if both neighborhoods are flat (contrast below thr) and
   the pixel is not near saturation, rescale it by m1/m2.
*/
void CLASS green_matching()
{
  int i, j;
  double m1, m2, c1, c2;
  int o1_1, o1_2, o1_3, o1_4;
  int o2_1, o2_2, o2_3, o2_4;
  ushort(*img)[4];
  const int margin = 3;
  int oj = 2, oi = 2;
  float f;
  const float thr = 0.01f; /* flatness limit, fraction of maximum */
  if (half_size || shrink)
    return;
  /* Find a starting offset whose CFA color is the second green (3). */
  if (FC(oj, oi) != 3)
    oj++;
  if (FC(oj, oi) != 3)
    oi++;
  if (FC(oj, oi) != 3)
    oj--;
  /* Work on a copy so reads are not affected by earlier writes. */
  img = (ushort(*)[4])calloc(height * width, sizeof *image);
  merror(img, "green_matching()");
  memcpy(img, image, height * width * sizeof *image);
  for (j = oj; j < height - margin; j += 2)
    for (i = oi; i < width - margin; i += 2)
    {
      /* Diagonal neighbors from the other green channel. */
      o1_1 = img[(j - 1) * width + i - 1][1];
      o1_2 = img[(j - 1) * width + i + 1][1];
      o1_3 = img[(j + 1) * width + i - 1][1];
      o1_4 = img[(j + 1) * width + i + 1][1];
      /* Same-channel neighbors at distance two. */
      o2_1 = img[(j - 2) * width + i][3];
      o2_2 = img[(j + 2) * width + i][3];
      o2_3 = img[j * width + i - 2][3];
      o2_4 = img[j * width + i + 2][3];
      m1 = (o1_1 + o1_2 + o1_3 + o1_4) / 4.0;
      m2 = (o2_1 + o2_2 + o2_3 + o2_4) / 4.0;
      /* Mean absolute pairwise difference = local contrast. */
      c1 = (abs(o1_1 - o1_2) + abs(o1_1 - o1_3) + abs(o1_1 - o1_4) + abs(o1_2 - o1_3) + abs(o1_3 - o1_4) +
            abs(o1_2 - o1_4)) /
           6.0;
      c2 = (abs(o2_1 - o2_2) + abs(o2_1 - o2_3) + abs(o2_1 - o2_4) + abs(o2_2 - o2_3) + abs(o2_3 - o2_4) +
            abs(o2_2 - o2_4)) /
           6.0;
      if ((img[j * width + i][3] < maximum * 0.95) && (c1 < maximum * thr) && (c2 < maximum * thr))
      {
        f = image[j * width + i][3] * m1 / m2;
        image[j * width + i][3] = f > 0xffff ? 0xffff : f;
      }
    }
  free(img);
}
/*
   White-balance and scale the raw data to fill the 16-bit range.
   Channel multipliers come from, in priority order: user_mul, auto WB
   (gray-world averaging over the greybox), camera-reported WB, with
   fallbacks.  Also subtracts the (possibly patterned) black level and,
   if requested, corrects chromatic aberration via the aber[] factors.
*/
void CLASS scale_colors()
{
  unsigned bottom, right, size, row, col, ur, uc, i, x, y, c, sum[8];
  int val, dark, sat;
  double dsum[8], dmin, dmax;
  float scale_mul[4], fr, fc;
  ushort *img = 0, *pix;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS, 0, 2);
#endif
  if (user_mul[0])
    memcpy(pre_mul, user_mul, sizeof pre_mul);
  if (use_auto_wb || (use_camera_wb && cam_mul[0] == -1))
  {
    /* Auto WB: average each channel over the greybox in 8x8 tiles,
       discarding any tile that contains a near-saturated value. */
    memset(dsum, 0, sizeof dsum);
    bottom = MIN(greybox[1] + greybox[3], height);
    right = MIN(greybox[0] + greybox[2], width);
    for (row = greybox[1]; row < bottom; row += 8)
      for (col = greybox[0]; col < right; col += 8)
      {
        memset(sum, 0, sizeof sum);
        for (y = row; y < row + 8 && y < bottom; y++)
          for (x = col; x < col + 8 && x < right; x++)
            FORC4
            {
              if (filters)
              {
                c = fcol(y, x);
                val = BAYER2(y, x);
              }
              else
                val = image[y * width + x][c];
              if (val > maximum - 25)
                goto skip_block; /* tile touches saturation: discard */
              if ((val -= cblack[c]) < 0)
                val = 0;
              sum[c] += val;
              sum[c + 4]++;
              if (filters) /* CFA data has one color per site */
                break;
            }
        FORC(8) dsum[c] += sum[c]; /* sums in [0..3], counts in [4..7] */
      skip_block:;
      }
    FORC4 if (dsum[c]) pre_mul[c] = dsum[c + 4] / dsum[c];
  }
  if (use_camera_wb && cam_mul[0] != -1)
  {
    /* Camera WB: prefer the white-sample grid if present, otherwise
       the camera-reported multipliers. */
    memset(sum, 0, sizeof sum);
    for (row = 0; row < 8; row++)
      for (col = 0; col < 8; col++)
      {
        c = FC(row, col);
        if ((val = white[row][col] - cblack[c]) > 0)
          sum[c] += val;
        sum[c + 4]++;
      }
#ifdef LIBRAW_LIBRARY_BUILD
    if (load_raw == &LibRaw::nikon_load_sraw)
    {
      // Nikon sRAW: camera WB already applied:
      pre_mul[0] = pre_mul[1] = pre_mul[2] = pre_mul[3] = 1.0;
    }
    else
#endif
        if (sum[0] && sum[1] && sum[2] && sum[3])
      FORC4 pre_mul[c] = (float)sum[c + 4] / sum[c];
    else if (cam_mul[0] && cam_mul[2])
      memcpy(pre_mul, cam_mul, sizeof pre_mul);
    else
    {
#ifdef LIBRAW_LIBRARY_BUILD
      imgdata.process_warnings |= LIBRAW_WARN_BAD_CAMERA_WB;
#endif
#ifdef DCRAW_VERBOSE
      fprintf(stderr, _("%s: Cannot use camera white balance.\n"), ifname);
#endif
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  // Nikon sRAW, daylight
  if (load_raw == &LibRaw::nikon_load_sraw && !use_camera_wb && !use_auto_wb && cam_mul[0] > 0.001f &&
      cam_mul[1] > 0.001f && cam_mul[2] > 0.001f)
  {
    for (c = 0; c < 3; c++)
      pre_mul[c] /= cam_mul[c];
  }
#endif
  /* Guard against zero multipliers (e.g. a missing second green). */
  if (pre_mul[1] == 0)
    pre_mul[1] = 1;
  if (pre_mul[3] == 0)
    pre_mul[3] = colors < 4 ? pre_mul[1] : 1;
  dark = black;
  sat = maximum;
  if (threshold)
    wavelet_denoise();
  maximum -= black;
  /* Normalize the multipliers.  Without highlight recovery, normalize
     by the smallest multiplier so no channel can clip. */
  for (dmin = DBL_MAX, dmax = c = 0; c < 4; c++)
  {
    if (dmin > pre_mul[c])
      dmin = pre_mul[c];
    if (dmax < pre_mul[c])
      dmax = pre_mul[c];
  }
  if (!highlight)
    dmax = dmin;
  FORC4 scale_mul[c] = (pre_mul[c] /= dmax) * 65535.0 / maximum;
#ifdef DCRAW_VERBOSE
  if (verbose)
  {
    fprintf(stderr, _("Scaling with darkness %d, saturation %d, and\nmultipliers"), dark, sat);
    FORC4 fprintf(stderr, " %f", pre_mul[c]);
    fputc('\n', stderr);
  }
#endif
  /* Fold a 2x2-or-smaller patterned black level into cblack[0..3]. */
  if (filters > 1000 && (cblack[4] + 1) / 2 == 1 && (cblack[5] + 1) / 2 == 1)
  {
    FORC4 cblack[FC(c / 2, c % 2)] += cblack[6 + c / 2 % cblack[4] * cblack[5] + c % 2 % cblack[5]];
    cblack[4] = cblack[5] = 0;
  }
  size = iheight * iwidth;
#ifdef LIBRAW_LIBRARY_BUILD
  scale_colors_loop(scale_mul);
#else
  /* Subtract black (flat and patterned) and apply the multipliers. */
  for (i = 0; i < size * 4; i++)
  {
    if (!(val = ((ushort *)image)[i]))
      continue;
    if (cblack[4] && cblack[5])
      val -= cblack[6 + i / 4 / iwidth % cblack[4] * cblack[5] + i / 4 % iwidth % cblack[5]];
    val -= cblack[i & 3];
    val *= scale_mul[i & 3];
    ((ushort *)image)[i] = CLIP(val);
  }
#endif
  if ((aber[0] != 1 || aber[2] != 1) && colors == 3)
  {
    /* Chromatic aberration: radially rescale the R (aber[0]) and B
       (aber[2]) planes about the image center, bilinearly resampled. */
#ifdef DCRAW_VERBOSE
    if (verbose)
      fprintf(stderr, _("Correcting chromatic aberration...\n"));
#endif
    for (c = 0; c < 4; c += 2)
    {
      if (aber[c] == 1)
        continue;
      img = (ushort *)malloc(size * sizeof *img);
      merror(img, "scale_colors()");
      for (i = 0; i < size; i++)
        img[i] = image[i][c];
      for (row = 0; row < iheight; row++)
      {
        ur = fr = (row - iheight * 0.5) * aber[c] + iheight * 0.5;
        if (ur > iheight - 2) /* unsigned: also rejects negative rows */
          continue;
        fr -= ur;
        for (col = 0; col < iwidth; col++)
        {
          uc = fc = (col - iwidth * 0.5) * aber[c] + iwidth * 0.5;
          if (uc > iwidth - 2)
            continue;
          fc -= uc;
          pix = img + ur * iwidth + uc;
          image[row * iwidth + col][c] =
              (pix[0] * (1 - fc) + pix[1] * fc) * (1 - fr) + (pix[iwidth] * (1 - fc) + pix[iwidth + 1] * fc) * fr;
        }
      }
      free(img);
    }
  }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_SCALE_COLORS, 1, 2);
#endif
}
/*
   Prepare the mosaic for interpolation: re-expand shrunken data back
   to full size if needed, patch X-Trans gaps in half-size mode, and
   merge the second green channel (G3 -> G1) for 3-color Bayer data
   unless four-color output was requested.
*/
void CLASS pre_interpolate()
{
  ushort(*img)[4];
  int row, col, c;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE, 0, 2);
#endif
  if (shrink)
  {
    if (half_size)
    {
      height = iheight;
      width = iwidth;
      if (filters == 9)
      {
        /* X-Trans half-size: locate the phase of the empty R/B cells,
           then fill them from their vertical neighbors. */
        for (row = 0; row < 3; row++)
          for (col = 1; col < 4; col++)
            if (!(image[row * width + col][0] | image[row * width + col][2]))
              goto break2;
      break2:
        for (; row < height; row += 3)
          for (col = (col - 1) % 3 + 1; col < width - 1; col += 3)
          {
            img = image + row * width + col;
            for (c = 0; c < 3; c += 2)
              img[0][c] = (img[-1][c] + img[1][c]) >> 1;
          }
      }
    }
    else
    {
      /* Re-expand shrunken data into a full-size sparse mosaic. */
      img = (ushort(*)[4])calloc(height, width * sizeof *img);
      merror(img, "pre_interpolate()");
      for (row = 0; row < height; row++)
        for (col = 0; col < width; col++)
        {
          c = fcol(row, col);
          img[row * width + col][c] = image[(row >> 1) * iwidth + (col >> 1)][c];
        }
      free(image);
      image = img;
      shrink = 0;
    }
  }
  if (filters > 1000 && colors == 3)
  {
    mix_green = four_color_rgb ^ half_size;
    if (four_color_rgb | half_size)
      colors++; /* keep G1/G3 as a separate fourth color */
    else
    {
      /* Merge G3 into G1 and clear the second green from the pattern
         mask (the 0x55555555 bits select every other pattern slot). */
      for (row = FC(1, 0) >> 1; row < height; row += 2)
        for (col = FC(row, 1) & 1; col < width; col += 2)
          image[row * width + col][1] = image[row * width + col][3];
      filters &= ~((filters & 0x55555555U) << 1);
    }
  }
  if (half_size)
    filters = 0; /* half-size data is already fully populated */
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_PRE_INTERPOLATE, 1, 2);
#endif
}
/*
   Fill a border of the given width with simple interpolation: each
   border pixel's missing colors become the average of that color over
   the 3x3 neighborhood.  Relies on unsigned wraparound: row-1 or col-1
   underflows to a huge value, so "y < height" / "x < width" also
   rejects the -1 cases.
*/
void CLASS border_interpolate(int border)
{
  unsigned row, col, y, x, f, c, sum[8];
  for (row = 0; row < height; row++)
    for (col = 0; col < width; col++)
    {
      /* Inside the border band on the left edge?  Jump the column to
         the right band so only border pixels are visited. */
      if (col == border && row >= border && row < height - border)
        col = width - border;
      memset(sum, 0, sizeof sum);
      /* Accumulate per-color sums (0..3) and counts (4..7). */
      for (y = row - 1; y != row + 2; y++)
        for (x = col - 1; x != col + 2; x++)
          if (y < height && x < width)
          {
            f = fcol(y, x);
            sum[f] += image[y * width + x][f];
            sum[f + 4]++;
          }
      f = fcol(row, col);
      FORCC if (c != f && sum[c + 4]) image[row * width + col][c] = sum[c] / sum[c + 4];
    }
}
/*
   Inner loop of bilinear interpolation.  For every interior pixel,
   apply the precomputed code[] table for its position in the CFA tile:
   first a count-prefixed list of (offset, shift, color) triples that
   accumulate weighted neighbor values, then per missing color an
   (index, 256/weight) pair used to normalize the sum.
*/
void CLASS lin_interpolate_loop(int code[16][16][32], int size)
{
  int row;
  for (row = 1; row < height - 1; row++)
  {
    int col;
    for (col = 1; col < width - 1; col++)
    {
      ushort *pix = image[row * width + col];
      int *ip = code[row % size][col % size];
      int sum[4] = {0, 0, 0, 0};
      int n;
      /* Accumulate the weighted neighbors listed in the table. */
      for (n = *ip++; n--; ip += 3)
        sum[ip[2]] += pix[ip[0]] << ip[1];
      /* Normalize: the table stores 256/weight, so shift right by 8. */
      for (n = colors; --n; ip += 2)
        pix[ip[0]] = sum[ip[0]] * ip[1] >> 8;
    }
  }
}
/*
   Bilinear interpolation.  For every position in the size x size CFA
   tile, precompute a code[] list of (offset, shift, color) triples
   describing which neighbors contribute to each missing color, then
   apply the tables to the image via lin_interpolate_loop().
*/
void CLASS lin_interpolate()
{
  int code[16][16][32], size = 16, *ip, sum[4];
  int f, c, x, y, row, col, shift, color;
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Bilinear interpolation...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE, 0, 3);
#endif
  if (filters == 9)
    size = 6; /* X-Trans pattern repeats every 6 pixels */
  border_interpolate(1);
  for (row = 0; row < size; row++)
    for (col = 0; col < size; col++)
    {
      ip = code[row][col] + 1; /* slot 0 holds the triple count */
      f = fcol(row, col);
      memset(sum, 0, sizeof sum);
      /* Gather 3x3 neighbors of a different color; orthogonal
         neighbors get twice the weight of diagonal ones. */
      for (y = -1; y <= 1; y++)
        for (x = -1; x <= 1; x++)
        {
          shift = (y == 0) + (x == 0);
          color = fcol(row + y, col + x);
          if (color == f)
            continue;
          *ip++ = (width * y + x) * 4 + color;
          *ip++ = shift;
          *ip++ = color;
          sum[color] += 1 << shift;
        }
      code[row][col][0] = (ip - code[row][col]) / 3;
      /* Append, per missing color, its index and 256/total-weight. */
      FORCC
      if (c != f)
      {
        *ip++ = c;
        *ip++ = sum[c] > 0 ? 256 / sum[c] : 0;
      }
    }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE, 1, 3);
#endif
  lin_interpolate_loop(code, size);
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE, 2, 3);
#endif
}
/*
This algorithm is officially called:
"Interpolation using a Threshold-based variable number of gradients"
described in http://scien.stanford.edu/pages/labsite/1999/psych221/projects/99/tingchen/algodep/vargra.html
I've extended the basic idea to work with non-Bayer filter arrays.
Gradients are numbered clockwise from NW=0 to W=7.
*/
/*
   Variable Number of Gradients interpolation (see the comment block
   above).  Starts from the bilinear result, builds per-tile-position
   lookup tables of gradient terms, then for each pixel computes eight
   directional gradients, thresholds them, and averages the missing
   colors over the "smooth" directions only.
*/
void CLASS vng_interpolate()
{
  static const signed char *cp,
      /* terms[]: groups of (y1,x1, y2,x2, weight, gradient bitmask) --
         each same-colored pixel pair contributes its difference,
         shifted by weight, to the gradients selected by the mask. */
      terms[] = {-2, -2, +0, -1, 0, 0x01, -2, -2, +0, +0, 1, 0x01, -2, -1, -1, +0, 0, 0x01, -2, -1, +0, -1, 0, 0x02,
                 -2, -1, +0, +0, 0, 0x03, -2, -1, +0, +1, 1, 0x01, -2, +0, +0, -1, 0, 0x06, -2, +0, +0, +0, 1, 0x02,
                 -2, +0, +0, +1, 0, 0x03, -2, +1, -1, +0, 0, 0x04, -2, +1, +0, -1, 1, 0x04, -2, +1, +0, +0, 0, 0x06,
                 -2, +1, +0, +1, 0, 0x02, -2, +2, +0, +0, 1, 0x04, -2, +2, +0, +1, 0, 0x04, -1, -2, -1, +0, 0, -128,
                 -1, -2, +0, -1, 0, 0x01, -1, -2, +1, -1, 0, 0x01, -1, -2, +1, +0, 1, 0x01, -1, -1, -1, +1, 0, -120,
                 -1, -1, +1, -2, 0, 0x40, -1, -1, +1, -1, 0, 0x22, -1, -1, +1, +0, 0, 0x33, -1, -1, +1, +1, 1, 0x11,
                 -1, +0, -1, +2, 0, 0x08, -1, +0, +0, -1, 0, 0x44, -1, +0, +0, +1, 0, 0x11, -1, +0, +1, -2, 1, 0x40,
                 -1, +0, +1, -1, 0, 0x66, -1, +0, +1, +0, 1, 0x22, -1, +0, +1, +1, 0, 0x33, -1, +0, +1, +2, 1, 0x10,
                 -1, +1, +1, -1, 1, 0x44, -1, +1, +1, +0, 0, 0x66, -1, +1, +1, +1, 0, 0x22, -1, +1, +1, +2, 0, 0x10,
                 -1, +2, +0, +1, 0, 0x04, -1, +2, +1, +0, 1, 0x04, -1, +2, +1, +1, 0, 0x04, +0, -2, +0, +0, 1, -128,
                 +0, -1, +0, +1, 1, -120, +0, -1, +1, -2, 0, 0x40, +0, -1, +1, +0, 0, 0x11, +0, -1, +2, -2, 0, 0x40,
                 +0, -1, +2, -1, 0, 0x20, +0, -1, +2, +0, 0, 0x30, +0, -1, +2, +1, 1, 0x10, +0, +0, +0, +2, 1, 0x08,
                 +0, +0, +2, -2, 1, 0x40, +0, +0, +2, -1, 0, 0x60, +0, +0, +2, +0, 1, 0x20, +0, +0, +2, +1, 0, 0x30,
                 +0, +0, +2, +2, 1, 0x10, +0, +1, +1, +0, 0, 0x44, +0, +1, +1, +2, 0, 0x10, +0, +1, +2, -1, 1, 0x40,
                 +0, +1, +2, +0, 0, 0x60, +0, +1, +2, +1, 0, 0x20, +0, +1, +2, +2, 0, 0x10, +1, -2, +1, +0, 0, -128,
                 +1, -1, +1, +1, 0, -120, +1, +0, +1, +2, 0, 0x08, +1, +0, +2, -1, 0, 0x40, +1, +0, +2, +1, 0, 0x10},
      /* chood[]: the eight neighbor offsets, clockwise from NW. */
      chood[] = {-1, -1, -1, 0, -1, +1, 0, +1, +1, +1, +1, 0, +1, -1, 0, -1};
  ushort(*brow[5])[4], *pix;
  int prow = 8, pcol = 2, *ip, *code[16][16], gval[8], gmin, gmax, sum[4];
  int row, col, x, y, x1, x2, y1, y2, t, weight, grads, color, diag;
  int g, diff, thold, num, c;
  lin_interpolate(); /* bilinear pass provides the starting values */
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("VNG interpolation...\n"));
#endif
  if (filters == 1)
    prow = pcol = 16;
  if (filters == 9)
    prow = pcol = 6;
  ip = (int *)calloc(prow * pcol, 1280);
  merror(ip, "vng_interpolate()");
  for (row = 0; row < prow; row++) /* Precalculate for VNG */
    for (col = 0; col < pcol; col++)
    {
      code[row][col] = ip;
      /* Compile terms[] into a per-position list of pixel-pair offsets,
         weights and gradient indices, terminated by INT_MAX. */
      for (cp = terms, t = 0; t < 64; t++)
      {
        y1 = *cp++;
        x1 = *cp++;
        y2 = *cp++;
        x2 = *cp++;
        weight = *cp++;
        grads = *cp++;
        color = fcol(row + y1, col + x1);
        if (fcol(row + y2, col + x2) != color)
          continue;
        diag = (fcol(row, col + 1) == color && fcol(row + 1, col) == color) ? 2 : 1;
        if (abs(y1 - y2) == diag && abs(x1 - x2) == diag)
          continue;
        *ip++ = (y1 * width + x1) * 4 + color;
        *ip++ = (y2 * width + x2) * 4 + color;
        *ip++ = weight;
        for (g = 0; g < 8; g++)
          if (grads & 1 << g)
            *ip++ = g;
        *ip++ = -1;
      }
      *ip++ = INT_MAX;
      /* Then, per direction, the neighbor offset and (if usable) the
         offset used for the color-difference correction term. */
      for (cp = chood, g = 0; g < 8; g++)
      {
        y = *cp++;
        x = *cp++;
        *ip++ = (y * width + x) * 4;
        color = fcol(row, col);
        if (fcol(row + y, col + x) != color && fcol(row + y * 2, col + x * 2) == color)
          *ip++ = (y * width + x) * 8 + color;
        else
          *ip++ = 0;
      }
    }
  /* Three-row output buffer so reads are not disturbed by writes. */
  brow[4] = (ushort(*)[4])calloc(width * 3, sizeof **brow);
  merror(brow[4], "vng_interpolate()");
  for (row = 0; row < 3; row++)
    brow[row] = brow[4] + row * width;
  for (row = 2; row < height - 2; row++)
  { /* Do VNG interpolation */
#ifdef LIBRAW_LIBRARY_BUILD
    if (!((row - 2) % 256))
      RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE, (row - 2) / 256 + 1, ((height - 3) / 256) + 1);
#endif
    for (col = 2; col < width - 2; col++)
    {
      pix = image[row * width + col];
      ip = code[row % prow][col % pcol];
      memset(gval, 0, sizeof gval);
      while ((g = ip[0]) != INT_MAX)
      { /* Calculate gradients */
        diff = ABS(pix[g] - pix[ip[1]]) << ip[2];
        gval[ip[3]] += diff;
        ip += 5;
        if ((g = ip[-1]) == -1)
          continue;
        gval[g] += diff;
        while ((g = *ip++) != -1)
          gval[g] += diff;
      }
      ip++;
      gmin = gmax = gval[0]; /* Choose a threshold */
      for (g = 1; g < 8; g++)
      {
        if (gmin > gval[g])
          gmin = gval[g];
        if (gmax < gval[g])
          gmax = gval[g];
      }
      if (gmax == 0)
      {
        /* Perfectly flat neighborhood: keep the bilinear result. */
        memcpy(brow[2][col], pix, sizeof *image);
        continue;
      }
      thold = gmin + (gmax >> 1);
      memset(sum, 0, sizeof sum);
      color = fcol(row, col);
      for (num = g = 0; g < 8; g++, ip += 2)
      { /* Average the neighbors */
        if (gval[g] <= thold)
        {
          FORCC
          if (c == color && ip[1])
            sum[c] += (pix[c] + pix[ip[1]]) >> 1;
          else
            sum[c] += pix[ip[0] + c];
          num++;
        }
      }
      FORCC
      { /* Save to buffer */
        t = pix[color];
        if (c != color)
          t += (sum[c] - sum[color]) / num;
        brow[2][col][c] = CLIP(t);
      }
    }
    if (row > 3) /* Write buffer to image */
      memcpy(image[(row - 2) * width + 2], brow[0] + 2, (width - 4) * sizeof *image);
    for (g = 0; g < 4; g++)
      brow[(g - 1) & 3] = brow[g]; /* rotate the row buffers */
  }
  /* Flush the last two buffered rows. */
  memcpy(image[(row - 2) * width + 2], brow[0] + 2, (width - 4) * sizeof *image);
  memcpy(image[(row - 1) * width + 2], brow[1] + 2, (width - 4) * sizeof *image);
  free(brow[4]);
  free(code[0][0]);
}
/*
Patterned Pixel Grouping Interpolation by Alain Desbiolles
*/
/*
   Patterned Pixel Grouping interpolation (Alain Desbiolles).
   Pass 1 fills green at R/B sites along the lower-gradient axis;
   pass 2 computes R/B at green sites from aligned neighbors; pass 3
   computes B at R sites (and vice versa) along the better diagonal.
*/
void CLASS ppg_interpolate()
{
  int dir[5] = {1, width, -1, -width, 1}; /* E, S, W, N, E (wraps) */
  int row, col, diff[2], guess[2], c, d, i;
  ushort(*pix)[4];
  border_interpolate(3);
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("PPG interpolation...\n"));
#endif
/* Fill in the green layer with gradients and pattern recognition: */
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE, 0, 3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
  for (row = 3; row < height - 3; row++)
    for (col = 3 + (FC(row, 3) & 1), c = FC(row, col); col < width - 3; col += 2)
    {
      pix = image + row * width + col;
      /* Estimate and gradient along horizontal (i=0) and vertical (i=1). */
      for (i = 0; (d = dir[i]) > 0; i++)
      {
        guess[i] = (pix[-d][1] + pix[0][c] + pix[d][1]) * 2 - pix[-2 * d][c] - pix[2 * d][c];
        diff[i] = (ABS(pix[-2 * d][c] - pix[0][c]) + ABS(pix[2 * d][c] - pix[0][c]) + ABS(pix[-d][1] - pix[d][1])) * 3 +
                  (ABS(pix[3 * d][1] - pix[d][1]) + ABS(pix[-3 * d][1] - pix[-d][1])) * 2;
      }
      /* Keep the estimate from the smoother axis, clamped between the
         two adjacent greens. */
      d = dir[i = diff[0] > diff[1]];
      pix[0][1] = ULIM(guess[i] >> 2, pix[d][1], pix[-d][1]);
    }
/* Calculate red and blue for each green pixel: */
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE, 1, 3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
  for (row = 1; row < height - 1; row++)
    for (col = 1 + (FC(row, 2) & 1), c = FC(row, col + 1); col < width - 1; col += 2)
    {
      pix = image + row * width + col;
      /* c alternates 0/2 with the direction (2 - c). */
      for (i = 0; (d = dir[i]) > 0; c = 2 - c, i++)
        pix[0][c] = CLIP((pix[-d][c] + pix[d][c] + 2 * pix[0][1] - pix[-d][1] - pix[d][1]) >> 1);
    }
/* Calculate blue for red pixels and vice versa: */
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_INTERPOLATE, 2, 3);
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel for default(shared) private(guess, diff, row, col, d, c, i, pix) schedule(static)
#endif
#endif
  for (row = 1; row < height - 1; row++)
    for (col = 1 + (FC(row, 1) & 1), c = 2 - FC(row, col); col < width - 1; col += 2)
    {
      pix = image + row * width + col;
      /* d = dir[i] + dir[i+1] walks the two diagonals. */
      for (i = 0; (d = dir[i] + dir[i + 1]) > 0; i++)
      {
        diff[i] = ABS(pix[-d][c] - pix[d][c]) + ABS(pix[-d][1] - pix[0][1]) + ABS(pix[d][1] - pix[0][1]);
        guess[i] = pix[-d][c] + pix[d][c] + 2 * pix[0][1] - pix[-d][1] - pix[d][1];
      }
      if (diff[0] != diff[1])
        pix[0][c] = CLIP(guess[diff[0] > diff[1]] >> 1);
      else
        pix[0][c] = CLIP((guess[0] + guess[1]) >> 2);
    }
}
/*
  Convert one camera-space pixel to fixed-point CIELab (lab[] holds
  64 * L*/a*/b*).  Calling cielab(0, 0) performs one-time initialization
  of the cube-root lookup table cbrt[] and the camera->XYZ matrix
  xyz_cam[][] (both thread-local unless LIBRAW_NOTHREADS).
*/
void CLASS cielab(ushort rgb[3], short lab[3])
{
  int c, i, j, k;
  float r, xyz[3];
#ifdef LIBRAW_NOTHREADS
  static float cbrt[0x10000], xyz_cam[3][4];
#else
#define cbrt tls->ahd_data.cbrt
#define xyz_cam tls->ahd_data.xyz_cam
#endif
  if (!rgb) /* initialization call */
  {
#ifndef LIBRAW_NOTHREADS
    /* presumably cbrt[0] is preset below -1 to mark an uninitialized
       per-thread table - verify against the tls setup code */
    if (cbrt[0] < -1.0f)
#endif
      for (i = 0; i < 0x10000; i++)
      {
        /* CIE f(t): cube root above the 0.008856 knee, linear below */
        r = i / 65535.0;
        cbrt[i] = r > 0.008856 ? pow(r, 1.f / 3.0f) : 7.787f * r + 16.f / 116.0f;
      }
    /* xyz_cam = (XYZ<-sRGB) * (sRGB<-camera), normalized by D65 white */
    for (i = 0; i < 3; i++)
      for (j = 0; j < colors; j++)
        for (xyz_cam[i][j] = k = 0; k < 3; k++)
          xyz_cam[i][j] += xyz_rgb[i][k] * rgb_cam[k][j] / d65_white[i];
    return;
  }
  xyz[0] = xyz[1] = xyz[2] = 0.5; /* +0.5 rounds the float->int truncation */
  FORCC
  {
    xyz[0] += xyz_cam[0][c] * rgb[c];
    xyz[1] += xyz_cam[1][c] * rgb[c];
    xyz[2] += xyz_cam[2][c] * rgb[c];
  }
  xyz[0] = cbrt[CLIP((int)xyz[0])];
  xyz[1] = cbrt[CLIP((int)xyz[1])];
  xyz[2] = cbrt[CLIP((int)xyz[2])];
  lab[0] = 64 * (116 * xyz[1] - 16);
  lab[1] = 64 * 500 * (xyz[0] - xyz[1]);
  lab[2] = 64 * 200 * (xyz[1] - xyz[2]);
#ifndef LIBRAW_NOTHREADS
#undef cbrt
#undef xyz_cam
#endif
}
#define TS 512 /* Tile Size */
#define fcol(row, col) xtrans[(row + 6) % 6][(col + 6) % 6]
/*
Frank Markesteijn's algorithm for Fuji X-Trans sensors
*/
/*
  X-Trans demosaic (Frank Markesteijn's algorithm) for Fuji 6x6 CFA
  sensors.  Works on TS x TS tiles: for each of `ndir` directions a
  candidate RGB image is built, converted to CIELab and differentiated;
  the most homogeneous directions are averaged into the final result.
  passes == 1 uses 4 directions; passes > 1 uses 8 plus a green
  refinement pass.  Borders are finished by border_interpolate(8).
*/
void CLASS xtrans_interpolate(int passes)
{
  int c, d, f, g, h, i, v, ng, row, col, top, left, mrow, mcol;
#ifdef LIBRAW_LIBRARY_BUILD
  int cstat[4] = {0, 0, 0, 0};
#endif
  int val, ndir, pass, hm[8], avg[4], color[3][8];
  static const short orth[12] = {1, 0, 0, 1, -1, 0, 0, -1, 1, 0, 0, 1},
                     patt[2][16] = {{0, 1, 0, -1, 2, 0, -1, 0, 1, 1, 1, -1, 0, 0, 0, 0},
                                    {0, 1, 0, -2, 1, 0, -2, 0, 1, 1, -2, -2, 1, -1, -1, 1}},
                     dir[4] = {1, TS, TS + 1, TS - 1};
  short allhex[3][3][2][8], *hex;
  ushort min, max, sgrow, sgcol;
  ushort(*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
  short(*lab)[TS][3], (*lix)[3];
  float(*drv)[TS][TS], diff[6], tr;
  char(*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("%d-pass X-Trans interpolation...\n"), passes);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  if (width < TS || height < TS)
    throw LIBRAW_EXCEPTION_IO_CORRUPT; // too small image
  /* Check against right pattern: a valid X-Trans 6x6 tile has ~8 red,
     20 green, 8 blue and no fourth color */
  for (row = 0; row < 6; row++)
    for (col = 0; col < 6; col++)
      cstat[fcol(row, col)]++;
  if (cstat[0] < 6 || cstat[0] > 10 || cstat[1] < 16 || cstat[1] > 24 || cstat[2] < 6 || cstat[2] > 10 || cstat[3])
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
  // Init allhex table to unreasonable values
  for (int i = 0; i < 3; i++)
    for (int j = 0; j < 3; j++)
      for (int k = 0; k < 2; k++)
        for (int l = 0; l < 8; l++)
          allhex[i][j][k][l] = 32700;
#endif
  cielab(0, 0); /* init CIELab conversion tables */
  ndir = 4 << (passes > 1);
  /* one buffer: ndir RGB candidate planes, Lab plane, derivative planes,
     homogeneity maps - offsets below must match this layout */
  buffer = (char *)malloc(TS * TS * (ndir * 11 + 6));
  merror(buffer, "xtrans_interpolate()");
  rgb = (ushort(*)[TS][TS][3])buffer;
  lab = (short(*)[TS][3])(buffer + TS * TS * (ndir * 6));
  drv = (float(*)[TS][TS])(buffer + TS * TS * (ndir * 6 + 6));
  homo = (char(*)[TS][TS])(buffer + TS * TS * (ndir * 10 + 6));
  int minv = 0, maxv = 0, minh = 0, maxh = 0;
  /* Map a green hexagon around each non-green pixel and vice versa: */
  for (row = 0; row < 3; row++)
    for (col = 0; col < 3; col++)
      for (ng = d = 0; d < 10; d += 2)
      {
        g = fcol(row, col) == 1;
        if (fcol(row + orth[d], col + orth[d + 2]) == 1)
          ng = 0;
        else
          ng++;
        if (ng == 4)
        {
          /* (sgrow,sgcol): position of a solitary green pixel */
          sgrow = row;
          sgcol = col;
        }
        if (ng == g + 1)
          FORC(8)
          {
            v = orth[d] * patt[g][c * 2] + orth[d + 1] * patt[g][c * 2 + 1];
            h = orth[d + 2] * patt[g][c * 2] + orth[d + 3] * patt[g][c * 2 + 1];
            /* NOTE(review): minh/maxh accumulate v, not h - looks like a
               copy-paste slip, so the sanity bounds checked below may not
               reflect the true horizontal range.  Verify against upstream
               dcraw/LibRaw before changing. */
            minv = MIN(v, minv);
            maxv = MAX(v, maxv);
            minh = MIN(v, minh);
            maxh = MAX(v, maxh);
            allhex[row][col][0][c ^ (g * 2 & d)] = h + v * width;
            allhex[row][col][1][c ^ (g * 2 & d)] = h + v * TS;
          }
      }
#ifdef LIBRAW_LIBRARY_BUILD
  // Check allhex table initialization
  for (int i = 0; i < 3; i++)
    for (int j = 0; j < 3; j++)
      for (int k = 0; k < 2; k++)
        for (int l = 0; l < 8; l++)
          if (allhex[i][j][k][l] > maxh + maxv * width + 1 || allhex[i][j][k][l] < minh + minv * width - 1)
            throw LIBRAW_EXCEPTION_IO_CORRUPT;
  int retrycount = 0;
#endif
  /* Set green1 and green3 to the minimum and maximum allowed values: */
  for (row = 2; row < height - 2; row++)
    for (min = ~(max = 0), col = 2; col < width - 2; col++)
    {
      if (fcol(row, col) == 1 && (min = ~(max = 0)))
        continue;
      pix = image + row * width + col;
      hex = allhex[row % 3][col % 3][0];
      if (!max)
        FORC(6)
        {
          val = pix[hex[c]][1];
          if (min > val)
            min = val;
          if (max < val)
            max = val;
        }
      pix[0][1] = min;
      pix[0][3] = max;
      /* zig-zag row walk so min/max stay valid across the 3-row period;
         retrycount guards against an endless walk on corrupt data */
      switch ((row - sgrow) % 3)
      {
      case 1:
        if (row < height - 3)
        {
          row++;
          col--;
        }
        break;
      case 2:
        if ((min = ~(max = 0)) && (col += 2) < width - 3 && row > 2)
        {
          row--;
#ifdef LIBRAW_LIBRARY_BUILD
          if (retrycount++ > width * height)
            throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
        }
      }
    }
  /* Tile loop; tiles overlap by 16 pixels to hide seams */
  for (top = 3; top < height - 19; top += TS - 16)
    for (left = 3; left < width - 19; left += TS - 16)
    {
      mrow = MIN(top + TS, height - 3);
      mcol = MIN(left + TS, width - 3);
      /* seed all candidate planes with the raw tile data */
      for (row = top; row < mrow; row++)
        for (col = left; col < mcol; col++)
          memcpy(rgb[0][row - top][col - left], image[row * width + col], 6);
      FORC3 memcpy(rgb[c + 1], rgb[0], sizeof *rgb);
      /* Interpolate green horizontally, vertically, and along both diagonals: */
      for (row = top; row < mrow; row++)
        for (col = left; col < mcol; col++)
        {
          if ((f = fcol(row, col)) == 1)
            continue;
          pix = image + row * width + col;
          hex = allhex[row % 3][col % 3][0];
          color[1][0] = 174 * (pix[hex[1]][1] + pix[hex[0]][1]) - 46 * (pix[2 * hex[1]][1] + pix[2 * hex[0]][1]);
          color[1][1] = 223 * pix[hex[3]][1] + pix[hex[2]][1] * 33 + 92 * (pix[0][f] - pix[-hex[2]][f]);
          FORC(2)
          color[1][2 + c] = 164 * pix[hex[4 + c]][1] + 92 * pix[-2 * hex[4 + c]][1] +
                            33 * (2 * pix[0][f] - pix[3 * hex[4 + c]][f] - pix[-3 * hex[4 + c]][f]);
          /* clamp each estimate between the precomputed min/max greens */
          FORC4 rgb[c ^ !((row - sgrow) % 3)][row - top][col - left][1] = LIM(color[1][c] >> 8, pix[0][1], pix[0][3]);
        }
      for (pass = 0; pass < passes; pass++)
      {
        if (pass == 1)
          memcpy(rgb += 4, buffer, 4 * sizeof *rgb);
        /* Recalculate green from interpolated values of closer pixels: */
        if (pass)
        {
          for (row = top + 2; row < mrow - 2; row++)
            for (col = left + 2; col < mcol - 2; col++)
            {
              if ((f = fcol(row, col)) == 1)
                continue;
              pix = image + row * width + col;
              hex = allhex[row % 3][col % 3][1];
              for (d = 3; d < 6; d++)
              {
                rix = &rgb[(d - 2) ^ !((row - sgrow) % 3)][row - top][col - left];
                val =
                    rix[-2 * hex[d]][1] + 2 * rix[hex[d]][1] - rix[-2 * hex[d]][f] - 2 * rix[hex[d]][f] + 3 * rix[0][f];
                rix[0][1] = LIM(val / 3, pix[0][1], pix[0][3]);
              }
            }
        }
        /* Interpolate red and blue values for solitary green pixels: */
        for (row = (top - sgrow + 4) / 3 * 3 + sgrow; row < mrow - 2; row += 3)
          for (col = (left - sgcol + 4) / 3 * 3 + sgcol; col < mcol - 2; col += 3)
          {
            rix = &rgb[0][row - top][col - left];
            h = fcol(row, col + 1);
            memset(diff, 0, sizeof diff);
            /* i alternates 1 <-> TS (horizontal/vertical axis) */
            for (i = 1, d = 0; d < 6; d++, i ^= TS ^ 1, h ^= 2)
            {
              for (c = 0; c < 2; c++, h ^= 2)
              {
                g = 2 * rix[0][1] - rix[i << c][1] - rix[-i << c][1];
                color[h][d] = g + rix[i << c][h] + rix[-i << c][h];
                if (d > 1)
                  diff[d] += SQR(rix[i << c][1] - rix[-i << c][1] - rix[i << c][h] + rix[-i << c][h]) + SQR(g);
              }
              if (d > 1 && (d & 1))
                if (diff[d - 1] < diff[d])
                  FORC(2) color[c * 2][d] = color[c * 2][d - 1];
              if (d < 2 || (d & 1))
              {
                FORC(2) rix[0][c * 2] = CLIP(color[c * 2][d] / 2);
                rix += TS * TS;
              }
            }
          }
        /* Interpolate red for blue pixels and vice versa: */
        for (row = top + 3; row < mrow - 3; row++)
          for (col = left + 3; col < mcol - 3; col++)
          {
            if ((f = 2 - fcol(row, col)) == 1)
              continue;
            rix = &rgb[0][row - top][col - left];
            c = (row - sgrow) % 3 ? TS : 1;
            h = 3 * (c ^ TS ^ 1);
            for (d = 0; d < 4; d++, rix += TS * TS)
            {
              /* choose the axis with the smaller green variation */
              i = d > 1 || ((d ^ c) & 1) ||
                          ((ABS(rix[0][1] - rix[c][1]) + ABS(rix[0][1] - rix[-c][1])) <
                           2 * (ABS(rix[0][1] - rix[h][1]) + ABS(rix[0][1] - rix[-h][1])))
                      ? c
                      : h;
              rix[0][f] = CLIP((rix[i][f] + rix[-i][f] + 2 * rix[0][1] - rix[i][1] - rix[-i][1]) / 2);
            }
          }
        /* Fill in red and blue for 2x2 blocks of green: */
        for (row = top + 2; row < mrow - 2; row++)
          if ((row - sgrow) % 3)
            for (col = left + 2; col < mcol - 2; col++)
              if ((col - sgcol) % 3)
              {
                rix = &rgb[0][row - top][col - left];
                hex = allhex[row % 3][col % 3][1];
                for (d = 0; d < ndir; d += 2, rix += TS * TS)
                  if (hex[d] + hex[d + 1])
                  {
                    g = 3 * rix[0][1] - 2 * rix[hex[d]][1] - rix[hex[d + 1]][1];
                    for (c = 0; c < 4; c += 2)
                      rix[0][c] = CLIP((g + 2 * rix[hex[d]][c] + rix[hex[d + 1]][c]) / 3);
                  }
                  else
                  {
                    g = 2 * rix[0][1] - rix[hex[d]][1] - rix[hex[d + 1]][1];
                    for (c = 0; c < 4; c += 2)
                      rix[0][c] = CLIP((g + rix[hex[d]][c] + rix[hex[d + 1]][c]) / 2);
                  }
              }
      }
      rgb = (ushort(*)[TS][TS][3])buffer;
      mrow -= top;
      mcol -= left;
      /* Convert to CIELab and differentiate in all directions: */
      for (d = 0; d < ndir; d++)
      {
        for (row = 2; row < mrow - 2; row++)
          for (col = 2; col < mcol - 2; col++)
            cielab(rgb[d][row][col], lab[row][col]);
        for (f = dir[d & 3], row = 3; row < mrow - 3; row++)
          for (col = 3; col < mcol - 3; col++)
          {
            lix = &lab[row][col];
            g = 2 * lix[0][0] - lix[f][0] - lix[-f][0];
            drv[d][row][col] = SQR(g) + SQR((2 * lix[0][1] - lix[f][1] - lix[-f][1] + g * 500 / 232)) +
                               SQR((2 * lix[0][2] - lix[f][2] - lix[-f][2] - g * 500 / 580));
          }
      }
      /* Build homogeneity maps from the derivatives: */
      memset(homo, 0, ndir * TS * TS);
      for (row = 4; row < mrow - 4; row++)
        for (col = 4; col < mcol - 4; col++)
        {
          for (tr = FLT_MAX, d = 0; d < ndir; d++)
            if (tr > drv[d][row][col])
              tr = drv[d][row][col];
          tr *= 8;
          for (d = 0; d < ndir; d++)
            for (v = -1; v <= 1; v++)
              for (h = -1; h <= 1; h++)
                if (drv[d][row + v][col + h] <= tr)
                  homo[d][row][col]++;
        }
      /* Average the most homogeneous pixels for the final result: */
      if (height - top < TS + 4)
        mrow = height - top + 2;
      if (width - left < TS + 4)
        mcol = width - left + 2;
      for (row = MIN(top, 8); row < mrow - 8; row++)
        for (col = MIN(left, 8); col < mcol - 8; col++)
        {
          for (d = 0; d < ndir; d++)
            for (hm[d] = 0, v = -2; v <= 2; v++)
              for (h = -2; h <= 2; h++)
                hm[d] += homo[d][row + v][col + h];
          /* each direction competes with its opposite (d vs d+4) */
          for (d = 0; d < ndir - 4; d++)
            if (hm[d] < hm[d + 4])
              hm[d] = 0;
            else if (hm[d] > hm[d + 4])
              hm[d + 4] = 0;
          for (max = hm[0], d = 1; d < ndir; d++)
            if (max < hm[d])
              max = hm[d];
          max -= max >> 3;
          memset(avg, 0, sizeof avg);
          for (d = 0; d < ndir; d++)
            if (hm[d] >= max)
            {
              FORC3 avg[c] += rgb[d][row][col][c];
              avg[3]++;
            }
          FORC3 image[(row + top) * width + col + left][c] = avg[c] / avg[3];
        }
    }
  free(buffer);
  border_interpolate(8);
}
#undef fcol
/*
Adaptive Homogeneity-Directed interpolation is based on
the work of Keigo Hirakawa, Thomas Parks, and Paul Lee.
*/
#ifdef LIBRAW_LIBRARY_BUILD
/*
  AHD step 1: for one TS x TS tile, compute two candidate green planes -
  out_rgb[0] interpolated horizontally, out_rgb[1] vertically - at every
  non-green CFA position, each estimate clamped between its two green
  neighbours along the interpolation axis.
*/
void CLASS ahd_interpolate_green_h_and_v(int top, int left, ushort (*out_rgb)[TS][TS][3])
{
  const int rowlimit = MIN(top + TS, height - 2);
  const int collimit = MIN(left + TS, width - 2);
  int r, cl, color, guess;
  ushort(*cur)[4];
  for (r = top; r < rowlimit; r++)
  {
    /* start at the first non-green column of this row */
    cl = left + (FC(r, left) & 1);
    color = FC(r, cl);
    for (; cl < collimit; cl += 2)
    {
      cur = image + r * width + cl;
      /* horizontal estimate */
      guess = ((cur[-1][1] + cur[0][color] + cur[1][1]) * 2 - cur[-2][color] - cur[2][color]) >> 2;
      out_rgb[0][r - top][cl - left][1] = ULIM(guess, cur[-1][1], cur[1][1]);
      /* vertical estimate */
      guess = ((cur[-width][1] + cur[0][color] + cur[width][1]) * 2 - cur[-2 * width][color] - cur[2 * width][color]) >> 2;
      out_rgb[1][r - top][cl - left][1] = ULIM(guess, cur[-width][1], cur[width][1]);
    }
  }
}
/*
  AHD step 2, one direction: fill in the two missing chroma channels at
  every interior pixel of inout_rgb (green already present from step 1),
  then convert each completed pixel to fixed-point CIELab in out_lab.
*/
void CLASS ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][3],
                                                                short (*out_lab)[TS][3])
{
  unsigned row, col;
  int c, val;
  ushort(*pix)[4];
  ushort(*rix)[3];
  short(*lix)[3];
  float xyz[3]; /* (not referenced below) */
  const unsigned num_pix_per_row = 4 * width; /* image row stride in ushorts */
  const unsigned rowlimit = MIN(top + TS - 1, height - 3);
  const unsigned collimit = MIN(left + TS - 1, width - 3);
  ushort *pix_above;
  ushort *pix_below;
  int t1, t2;
  for (row = top + 1; row < rowlimit; row++)
  {
    pix = image + row * width + left;
    rix = &inout_rgb[row - top][0];
    lix = &out_lab[row - top][0];
    for (col = left + 1; col < collimit; col++)
    {
      pix++;
      pix_above = &pix[0][0] - num_pix_per_row;
      pix_below = &pix[0][0] + num_pix_per_row;
      rix++;
      lix++;
      c = 2 - FC(row, col);
      if (c == 1) /* green CFA site: both red and blue are missing */
      {
        c = FC(row + 1, col);
        t1 = 2 - c;
        /* color t1 sits left/right of this pixel, color c above/below */
        val = pix[0][1] + ((pix[-1][t1] + pix[1][t1] - rix[-1][1] - rix[1][1]) >> 1);
        rix[0][t1] = CLIP(val);
        val = pix[0][1] + ((pix_above[c] + pix_below[c] - rix[-TS][1] - rix[TS][1]) >> 1);
      }
      else /* red/blue CFA site: opposite color comes from the diagonals */
      {
        t1 = -4 + c; /* -4+c: pixel of color c to the left */
        t2 = 4 + c;  /* 4+c: pixel of color c to the right */
        val = rix[0][1] + ((pix_above[t1] + pix_above[t2] + pix_below[t1] + pix_below[t2] - rix[-TS - 1][1] -
                            rix[-TS + 1][1] - rix[+TS - 1][1] - rix[+TS + 1][1] + 1) >>
                           2);
      }
      rix[0][c] = CLIP(val);
      c = FC(row, col);
      rix[0][c] = pix[0][c]; /* keep the native sensor sample */
      cielab(rix[0], lix[0]);
    }
  }
}
/*
  AHD step 2 driver: run the red/blue interpolation + CIELab conversion
  for both candidate directions (0 = horizontal, 1 = vertical).
*/
void CLASS ahd_interpolate_r_and_b_and_convert_to_cielab(int top, int left, ushort (*inout_rgb)[TS][TS][3],
                                                         short (*out_lab)[TS][TS][3])
{
  for (int d = 0; d < 2; ++d)
    ahd_interpolate_r_and_b_in_rgb_and_convert_to_cielab(top, left, inout_rgb[d], out_lab[d]);
}
/*
  AHD step 3: for each interior tile pixel and each candidate direction,
  count how many of the four axial CIELab neighbours are "similar"
  (within adaptive luminance/chroma thresholds).  Counts are written to
  out_homogeneity_map[row][col][direction].
*/
void CLASS ahd_interpolate_build_homogeneity_map(int top, int left, short (*lab)[TS][TS][3],
                                                 char (*out_homogeneity_map)[TS][2])
{
  int row, col;
  int tr, tc;
  int direction;
  int i;
  short(*lix)[3];
  short(*lixs[2])[3];
  short *adjacent_lix;
  unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
  static const int dir[4] = {-1, 1, -TS, TS}; /* left, right, up, down */
  const int rowlimit = MIN(top + TS - 2, height - 4);
  const int collimit = MIN(left + TS - 2, width - 4);
  int homogeneity;
  char(*homogeneity_map_p)[2];
  memset(out_homogeneity_map, 0, 2 * TS * TS);
  for (row = top + 2; row < rowlimit; row++)
  {
    tr = row - top;
    homogeneity_map_p = &out_homogeneity_map[tr][1];
    for (direction = 0; direction < 2; direction++)
    {
      lixs[direction] = &lab[direction][tr][1];
    }
    for (col = left + 2; col < collimit; col++)
    {
      tc = col - left;
      homogeneity_map_p++;
      for (direction = 0; direction < 2; direction++)
      {
        lix = ++lixs[direction];
        for (i = 0; i < 4; i++)
        {
          /* ldiff: L* difference; abdiff: squared a*b* chroma distance */
          adjacent_lix = lix[dir[i]];
          ldiff[direction][i] = ABS(lix[0][0] - adjacent_lix[0]);
          abdiff[direction][i] = SQR(lix[0][1] - adjacent_lix[1]) + SQR(lix[0][2] - adjacent_lix[2]);
        }
      }
      /* adaptive thresholds: horizontal diffs of direction 0 vs vertical
         diffs of direction 1 */
      leps = MIN(MAX(ldiff[0][0], ldiff[0][1]), MAX(ldiff[1][2], ldiff[1][3]));
      abeps = MIN(MAX(abdiff[0][0], abdiff[0][1]), MAX(abdiff[1][2], abdiff[1][3]));
      for (direction = 0; direction < 2; direction++)
      {
        homogeneity = 0;
        for (i = 0; i < 4; i++)
        {
          if (ldiff[direction][i] <= leps && abdiff[direction][i] <= abeps)
          {
            homogeneity++;
          }
        }
        homogeneity_map_p[0][direction] = homogeneity;
      }
    }
  }
}
/*
  AHD step 4: for every interior tile pixel, sum the 3x3 homogeneity
  counts per direction and write the winning candidate's RGB back into
  image[]; on a tie the two candidates are averaged channel-wise.
*/
void CLASS ahd_interpolate_combine_homogeneous_pixels(int top, int left, ushort (*rgb)[TS][TS][3],
                                                      char (*homogeneity_map)[TS][2])
{
  int row, col;
  int tr, tc;
  int i, j;
  int direction;
  int hm[2];
  int c;
  const int rowlimit = MIN(top + TS - 3, height - 5);
  const int collimit = MIN(left + TS - 3, width - 5);
  ushort(*pix)[4];
  ushort(*rix[2])[3];
  for (row = top + 3; row < rowlimit; row++)
  {
    tr = row - top;
    pix = &image[row * width + left + 2];
    for (direction = 0; direction < 2; direction++)
    {
      rix[direction] = &rgb[direction][tr][2];
    }
    for (col = left + 3; col < collimit; col++)
    {
      tc = col - left;
      pix++;
      for (direction = 0; direction < 2; direction++)
      {
        rix[direction]++;
      }
      /* 3x3 neighbourhood sum of homogeneity counts per direction */
      for (direction = 0; direction < 2; direction++)
      {
        hm[direction] = 0;
        for (i = tr - 1; i <= tr + 1; i++)
        {
          for (j = tc - 1; j <= tc + 1; j++)
          {
            hm[direction] += homogeneity_map[i][j][direction];
          }
        }
      }
      if (hm[0] != hm[1])
      {
        /* clear winner: copy its RGB triple into the output image */
        memcpy(pix[0], rix[hm[1] > hm[0]][0], 3 * sizeof(ushort));
      }
      else
      {
        /* tie: average the two candidates */
        FORC3 { pix[0][c] = (rix[0][0][c] + rix[1][0][c]) >> 1; }
      }
    }
  }
}
/*
  AHD (Adaptive Homogeneity-Directed) demosaic, tiled driver (LibRaw
  build).  Per TS x TS tile: build horizontal/vertical candidate
  interpolations, score them in CIELab space and keep the locally more
  homogeneous one.  Tiles may run in parallel under OpenMP; the progress
  callback can request cancellation.
*/
void CLASS ahd_interpolate()
{
  int i, j, k, top, left;
  float xyz_cam[3][4], r; /* referenced by the OpenMP clauses below */
  char *buffer;
  ushort(*rgb)[TS][TS][3];
  short(*lab)[TS][TS][3];
  char(*homo)[TS][2];
  int terminate_flag = 0;
  cielab(0, 0); /* init conversion tables */
  border_interpolate(5);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp parallel private(buffer, rgb, lab, homo, top, left, i, j, k) shared(xyz_cam, terminate_flag)
#endif
#endif
  {
    /* per-thread scratch, 26*TS*TS bytes: 2 RGB candidates (12) +
       2 Lab planes (12) + homogeneity map (2) */
    buffer = (char *)malloc(26 * TS * TS); /* 1664 kB */
    merror(buffer, "ahd_interpolate()");
    rgb = (ushort(*)[TS][TS][3])buffer;
    lab = (short(*)[TS][TS][3])(buffer + 12 * TS * TS);
    homo = (char(*)[TS][2])(buffer + 24 * TS * TS);
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
#pragma omp for schedule(dynamic)
#endif
#endif
    for (top = 2; top < height - 5; top += TS - 6)
    {
#ifdef LIBRAW_LIBRARY_BUILD
#ifdef LIBRAW_USE_OPENMP
      if (0 == omp_get_thread_num()) /* only one thread reports progress */
#endif
        if (callbacks.progress_cb)
        {
          int rr =
              (*callbacks.progress_cb)(callbacks.progresscb_data, LIBRAW_PROGRESS_INTERPOLATE, top - 2, height - 7);
          if (rr)
            terminate_flag = 1; /* cancel requested; honored per tile */
        }
#endif
      for (left = 2; !terminate_flag && (left < width - 5); left += TS - 6)
      {
        ahd_interpolate_green_h_and_v(top, left, rgb);
        ahd_interpolate_r_and_b_and_convert_to_cielab(top, left, rgb, lab);
        ahd_interpolate_build_homogeneity_map(top, left, lab, homo);
        ahd_interpolate_combine_homogeneous_pixels(top, left, rgb, homo);
      }
    }
    free(buffer);
  }
#ifdef LIBRAW_LIBRARY_BUILD
  if (terminate_flag)
    throw LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK;
#endif
}
#else
/*
  AHD (Adaptive Homogeneity-Directed) demosaic, single-threaded dcraw
  variant (non-LibRaw build).  Per TS x TS tile: build horizontal (0)
  and vertical (1) candidate interpolations, convert to CIELab, build
  homogeneity maps and keep the locally more homogeneous direction,
  averaging on ties.
*/
void CLASS ahd_interpolate()
{
  int i, j, top, left, row, col, tr, tc, c, d, val, hm[2];
  static const int dir[4] = {-1, 1, -TS, TS}; /* left, right, up, down */
  unsigned ldiff[2][4], abdiff[2][4], leps, abeps;
  ushort(*rgb)[TS][TS][3], (*rix)[3], (*pix)[4];
  short(*lab)[TS][TS][3], (*lix)[3];
  char(*homo)[TS][TS], *buffer;
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("AHD interpolation...\n"));
#endif
  cielab(0, 0); /* init conversion tables */
  border_interpolate(5);
  /* 26*TS*TS bytes: 2 RGB candidates (12) + 2 Lab planes (12) + homo (2) */
  buffer = (char *)malloc(26 * TS * TS);
  merror(buffer, "ahd_interpolate()");
  rgb = (ushort(*)[TS][TS][3])buffer;
  lab = (short(*)[TS][TS][3])(buffer + 12 * TS * TS);
  homo = (char(*)[TS][TS])(buffer + 24 * TS * TS);
  for (top = 2; top < height - 5; top += TS - 6)
    for (left = 2; left < width - 5; left += TS - 6)
    {
      /* Interpolate green horizontally and vertically: */
      for (row = top; row < top + TS && row < height - 2; row++)
      {
        col = left + (FC(row, left) & 1);
        for (c = FC(row, col); col < left + TS && col < width - 2; col += 2)
        {
          pix = image + row * width + col;
          val = ((pix[-1][1] + pix[0][c] + pix[1][1]) * 2 - pix[-2][c] - pix[2][c]) >> 2;
          rgb[0][row - top][col - left][1] = ULIM(val, pix[-1][1], pix[1][1]);
          val = ((pix[-width][1] + pix[0][c] + pix[width][1]) * 2 - pix[-2 * width][c] - pix[2 * width][c]) >> 2;
          rgb[1][row - top][col - left][1] = ULIM(val, pix[-width][1], pix[width][1]);
        }
      }
      /* Interpolate red and blue, and convert to CIELab: */
      for (d = 0; d < 2; d++)
        for (row = top + 1; row < top + TS - 1 && row < height - 3; row++)
          for (col = left + 1; col < left + TS - 1 && col < width - 3; col++)
          {
            pix = image + row * width + col;
            rix = &rgb[d][row - top][col - left];
            lix = &lab[d][row - top][col - left];
            if ((c = 2 - FC(row, col)) == 1) /* green CFA site */
            {
              c = FC(row + 1, col);
              val = pix[0][1] + ((pix[-1][2 - c] + pix[1][2 - c] - rix[-1][1] - rix[1][1]) >> 1);
              rix[0][2 - c] = CLIP(val);
              val = pix[0][1] + ((pix[-width][c] + pix[width][c] - rix[-TS][1] - rix[TS][1]) >> 1);
            }
            else /* red/blue site: opposite color from the diagonals */
              val = rix[0][1] + ((pix[-width - 1][c] + pix[-width + 1][c] + pix[+width - 1][c] + pix[+width + 1][c] -
                                  rix[-TS - 1][1] - rix[-TS + 1][1] - rix[+TS - 1][1] - rix[+TS + 1][1] + 1) >>
                                 2);
            rix[0][c] = CLIP(val);
            c = FC(row, col);
            rix[0][c] = pix[0][c]; /* keep the native sensor sample */
            cielab(rix[0], lix[0]);
          }
      /* Build homogeneity maps from the CIELab images: */
      memset(homo, 0, 2 * TS * TS);
      for (row = top + 2; row < top + TS - 2 && row < height - 4; row++)
      {
        tr = row - top;
        for (col = left + 2; col < left + TS - 2 && col < width - 4; col++)
        {
          tc = col - left;
          for (d = 0; d < 2; d++)
          {
            lix = &lab[d][tr][tc];
            for (i = 0; i < 4; i++)
            {
              /* ldiff: L* difference; abdiff: squared chroma distance */
              ldiff[d][i] = ABS(lix[0][0] - lix[dir[i]][0]);
              abdiff[d][i] = SQR(lix[0][1] - lix[dir[i]][1]) + SQR(lix[0][2] - lix[dir[i]][2]);
            }
          }
          leps = MIN(MAX(ldiff[0][0], ldiff[0][1]), MAX(ldiff[1][2], ldiff[1][3]));
          abeps = MIN(MAX(abdiff[0][0], abdiff[0][1]), MAX(abdiff[1][2], abdiff[1][3]));
          for (d = 0; d < 2; d++)
            for (i = 0; i < 4; i++)
              if (ldiff[d][i] <= leps && abdiff[d][i] <= abeps)
                homo[d][tr][tc]++;
        }
      }
      /* Combine the most homogeneous pixels for the final result: */
      for (row = top + 3; row < top + TS - 3 && row < height - 5; row++)
      {
        tr = row - top;
        for (col = left + 3; col < left + TS - 3 && col < width - 5; col++)
        {
          tc = col - left;
          for (d = 0; d < 2; d++)
            for (hm[d] = 0, i = tr - 1; i <= tr + 1; i++)
              for (j = tc - 1; j <= tc + 1; j++)
                hm[d] += homo[d][i][j];
          if (hm[0] != hm[1])
            FORC3 image[row * width + col][c] = rgb[hm[1] > hm[0]][tr][tc][c];
          else
            FORC3 image[row * width + col][c] = (rgb[0][tr][tc][c] + rgb[1][tr][tc][c]) >> 1;
        }
      }
    }
  free(buffer);
}
#endif
#undef TS
/*
  3x3 median filter applied med_passes times to the red-green and
  blue-green difference planes; suppresses impulse chroma noise after
  demosaicing without touching the green channel.
*/
void CLASS median_filter()
{
  ushort(*pix)[4];
  int pass, c, i, j, k, med[9];
  /* Pairs of indices forming a compare/swap network that leaves the
     median of 9 values in med[4]. */
  static const uchar opt[] = /* Optimal 9-element median search */
      {1, 2, 4, 5, 7, 8, 0, 1, 3, 4, 6, 7, 1, 2, 4, 5, 7, 8, 0,
       3, 5, 8, 4, 7, 3, 6, 1, 4, 2, 5, 4, 7, 4, 2, 6, 4, 4, 2};
  for (pass = 1; pass <= med_passes; pass++)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    RUN_CALLBACK(LIBRAW_PROGRESS_MEDIAN_FILTER, pass - 1, med_passes);
#endif
#ifdef DCRAW_VERBOSE
    if (verbose)
      fprintf(stderr, _("Median filter pass %d...\n"), pass);
#endif
    for (c = 0; c < 3; c += 2) /* red (0) and blue (2) */
    {
      /* stash the channel in component 3 so reads see pre-pass values */
      for (pix = image; pix < image + width * height; pix++)
        pix[0][3] = pix[0][c];
      for (pix = image + width; pix < image + width * (height - 1); pix++)
      {
        if ((pix - image + 1) % width < 2) /* skip first/last columns */
          continue;
        /* gather the 3x3 neighbourhood of (c - green) differences */
        for (k = 0, i = -width; i <= width; i += width)
          for (j = i - 1; j <= i + 1; j++)
            med[k++] = pix[j][3] - pix[j][1];
        for (i = 0; i < sizeof opt; i += 2)
          if (med[opt[i]] > med[opt[i + 1]])
            SWAP(med[opt[i]], med[opt[i + 1]]);
        /* med[4] now holds the median difference; add green back */
        pix[0][c] = CLIP(med[4] + pix[0][1]);
      }
    }
  }
}
/*
  Desaturate clipped highlights: wherever any channel exceeds the clip
  level, the pixel is transformed into a luminance/chroma basis (3x3 or
  4x4 depending on `colors`) and its chroma is scaled down to that of
  the clipped version, preserving luminance.  No-op unless colors is
  3 or 4.
*/
void CLASS blend_highlights()
{
  int clip = INT_MAX, row, col, c, i, j;
  /* trans/itrans: forward/inverse luminance-chroma bases, indexed by
     colors-3 (first row is the luminance axis) */
  static const float trans[2][4][4] = {{{1, 1, 1}, {1.7320508, -1.7320508, 0}, {-1, -1, 2}},
                                       {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}};
  static const float itrans[2][4][4] = {{{1, 0.8660254, -0.5}, {1, -0.8660254, -0.5}, {1, 0, 1}},
                                        {{1, 1, 1, 1}, {1, -1, 1, -1}, {1, 1, -1, -1}, {1, -1, -1, 1}}};
  float cam[2][4], lab[2][4], sum[2], chratio;
  if ((unsigned)(colors - 3) > 1) /* only 3- or 4-color images */
    return;
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Blending highlights...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS, 0, 2);
#endif
  /* clip = lowest per-channel saturation point after white balance */
  FORCC if (clip > (i = 65535 * pre_mul[c])) clip = i;
  for (row = 0; row < height; row++)
    for (col = 0; col < width; col++)
    {
      FORCC if (image[row * width + col][c] > clip) break;
      if (c == colors) /* nothing clipped here - leave pixel alone */
        continue;
      /* cam[0]: original pixel, cam[1]: hard-clipped version */
      FORCC
      {
        cam[0][c] = image[row * width + col][c];
        cam[1][c] = MIN(cam[0][c], clip);
      }
      /* transform both; sum[] accumulates chroma energy (rows 1..) */
      for (i = 0; i < 2; i++)
      {
        FORCC for (lab[i][c] = j = 0; j < colors; j++) lab[i][c] += trans[colors - 3][c][j] * cam[i][j];
        for (sum[i] = 0, c = 1; c < colors; c++)
          sum[i] += SQR(lab[i][c]);
      }
      /* shrink original chroma to the clipped version's magnitude */
      chratio = sqrt(sum[1] / sum[0]);
      for (c = 1; c < colors; c++)
        lab[0][c] *= chratio;
      FORCC for (cam[0][c] = j = 0; j < colors; j++) cam[0][c] += itrans[colors - 3][c][j] * lab[0][j];
      FORCC image[row * width + col][c] = cam[0][c] / colors;
    }
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS, 1, 2);
#endif
}
#define SCALE (4 >> shrink)
/*
  Reconstruct clipped highlights.  For each clipped channel c, a
  SCALE-downsampled map of the c/kc ratio (kc = channel with the
  largest multiplier, i.e. the one that clips last) is measured where
  both channels are near saturation, diffused outward into fully
  clipped regions, and finally used to lift clipped pixels of c toward
  kc * ratio.
*/
void CLASS recover_highlights()
{
  float *map, sum, wgt, grow;
  int hsat[4], count, spread, change, val, i;
  unsigned high, wide, mrow, mcol, row, col, kc, c, d, y, x;
  ushort *pixel;
  /* 8-connected neighbourhood offsets, alternating diagonal/axial */
  static const signed char dir[8][2] = {{-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1}, {1, 0}, {1, -1}, {0, -1}};
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Rebuilding highlights...\n"));
#endif
  grow = pow(2.0, 4 - highlight);
  FORCC hsat[c] = 32000 * pre_mul[c];
  /* kc = channel with the largest multiplier: clips last */
  for (kc = 0, c = 1; c < colors; c++)
    if (pre_mul[kc] < pre_mul[c])
      kc = c;
  high = height / SCALE;
  wide = width / SCALE;
  map = (float *)calloc(high, wide * sizeof *map);
  merror(map, "recover_highlights()");
  FORCC if (c != kc)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    RUN_CALLBACK(LIBRAW_PROGRESS_HIGHLIGHTS, c - 1, colors - 1);
#endif
    memset(map, 0, high * wide * sizeof *map);
    /* measure c/kc in cells where c saturates but kc still has signal */
    for (mrow = 0; mrow < high; mrow++)
      for (mcol = 0; mcol < wide; mcol++)
      {
        sum = wgt = count = 0;
        for (row = mrow * SCALE; row < (mrow + 1) * SCALE; row++)
          for (col = mcol * SCALE; col < (mcol + 1) * SCALE; col++)
          {
            pixel = image[row * width + col];
            if (pixel[c] / hsat[c] == 1 && pixel[kc] > 24000)
            {
              sum += pixel[c];
              wgt += pixel[kc];
              count++;
            }
          }
        if (count == SCALE * SCALE) /* only fully-saturated cells count */
          map[mrow * wide + mcol] = sum / wgt;
      }
    /* diffuse known ratios into neighbouring empty cells */
    for (spread = 32 / grow; spread--;)
    {
      for (mrow = 0; mrow < high; mrow++)
        for (mcol = 0; mcol < wide; mcol++)
        {
          if (map[mrow * wide + mcol])
            continue;
          sum = count = 0;
          for (d = 0; d < 8; d++)
          {
            y = mrow + dir[d][0];
            x = mcol + dir[d][1];
            if (y < high && x < wide && map[y * wide + x] > 0)
            {
              /* axial neighbours (odd d) get double weight */
              sum += (1 + (d & 1)) * map[y * wide + x];
              count += 1 + (d & 1);
            }
          }
          if (count > 3)
            /* stored negated so this sweep doesn't read its own output */
            map[mrow * wide + mcol] = -(sum + grow) / (count + grow);
        }
      /* flip the freshly filled (negative) cells positive */
      for (change = i = 0; i < high * wide; i++)
        if (map[i] < 0)
        {
          map[i] = -map[i];
          change = 1;
        }
      if (!change)
        break;
    }
    /* cells never reached keep ratio 1 (no boost) */
    for (i = 0; i < high * wide; i++)
      if (map[i] == 0)
        map[i] = 1;
    /* apply: lift clipped pixels of c toward kc * ratio */
    for (mrow = 0; mrow < high; mrow++)
      for (mcol = 0; mcol < wide; mcol++)
      {
        for (row = mrow * SCALE; row < (mrow + 1) * SCALE; row++)
          for (col = mcol * SCALE; col < (mcol + 1) * SCALE; col++)
          {
            pixel = image[row * width + col];
            if (pixel[c] / hsat[c] > 1)
            {
              val = pixel[kc] * map[mrow * wide + mcol];
              if (pixel[c] < val)
                pixel[c] = CLIP(val);
            }
          }
      }
  }
  free(map);
}
#undef SCALE
/*
  Read one 12-byte TIFF IFD entry at the current file position.
  Outputs the entry's tag, data type and element count; *save receives
  the file offset of the next entry.  If the payload does not fit in
  the 4 inline value bytes, the stream is seeked to the external
  payload (offset relative to `base`).
*/
void CLASS tiff_get(unsigned base, unsigned *tag, unsigned *type, unsigned *len, unsigned *save)
{
#ifdef LIBRAW_IOSPACE_CHECK
  INT64 pos = ftell(ifp);
  INT64 fsize = ifp->size();
  if(fsize < 12 || (fsize-pos) < 12)
    throw LIBRAW_EXCEPTION_IO_EOF;
#endif
  *tag = get2();
  *type = get2();
  *len = get4();
  *save = ftell(ifp) + 4;
  /* "11124811248484"[t]-'0' = byte size of TIFF data type t (types >= 14
     fall back to index 0, size 1).
     NOTE(review): *len * size is a 32-bit product that can wrap for
     hostile files; presumably callers bound *len elsewhere - verify. */
  if (*len * ("11124811248484"[*type < 14 ? *type : 0] - '0') > 4)
    fseek(ifp, get4() + base, SEEK_SET);
}
/*
  Scan a TIFF-style makernote IFD (2-byte entry count followed by
  12-byte entries) for the thumbnail offset tag `toff` and length tag
  `tlen`, storing the results in thumb_offset / thumb_length.
*/
void CLASS parse_thumb_note(int base, unsigned toff, unsigned tlen)
{
  unsigned tag, type, len, save;
  unsigned remaining = get2();
  while (remaining)
  {
    remaining--;
    tiff_get(base, &tag, &type, &len, &save);
    if (tag == toff)
      thumb_offset = get4() + base;
    if (tag == tlen)
      thumb_length = get4();
    fseek(ifp, save, SEEK_SET); /* jump to the next entry */
  }
}
//@end COMMON
int CLASS parse_tiff_ifd(int base);
//@out COMMON
/* powf() guarded against huge exponents: returns 0 when |b| > limup. */
static float powf_lim(float a, float b, float limup)
{
  if (b > limup)
    return 0.f;
  if (b < -limup)
    return 0.f;
  return powf(a, b);
}
/* powf() with the exponent limited to +/-64 (yields 0 outside that range). */
static float libraw_powf64l(float a, float b)
{
  return powf_lim(a, b, 64.f);
}
#ifdef LIBRAW_LIBRARY_BUILD
/*
  Round half away from zero (same contract as C99 roundf()): fold the
  value onto the positive axis, round there, then restore the sign.
*/
static float my_roundf(float x)
{
  const int negative = (x < 0.0f);
  const float ax = negative ? -x : x;
  float t = ceilf(ax);
  if (t - ax > 0.5f)
    t -= 1.0f;
  return negative ? -t : t;
}
/*
  Decode a Canon APEX aperture value stored in 1/64 EV units:
  Av = 2^(in/64).  0xffe0 and 0x7fff are "no data" markers -> 0.
*/
static float _CanonConvertAperture(ushort in)
{
  if (in == (ushort)0xffe0)
    return 0.0f;
  if (in == (ushort)0x7fff)
    return 0.0f;
  return libraw_powf64l(2.0, in / 64.0);
}
/*
  Decode a Canon signed EV value stored in 1/32 EV fixed point, where
  the fractional codes 0x0c and 0x14 stand for exactly 1/3 and 2/3 EV.
*/
static float _CanonConvertEV(short in)
{
  float sign = 1.0f;
  short ev = in;
  if (ev < 0)
  {
    ev = -ev;
    sign = -1.0f;
  }
  short frac = ev & 0x1f;
  ev -= frac; /* whole 1/32 steps, fraction removed */
  float frac_f;
  switch (frac)
  {
  case 0x0c: /* 1/3 EV */
    frac_f = 32.0f / 3.0f;
    break;
  case 0x14: /* 2/3 EV */
    frac_f = 64.0f / 3.0f;
    break;
  default:
    frac_f = (float)frac;
  }
  return sign * ((float)ev + frac_f) / 32.0f;
}
/*
  Map a Canon model ID to sensor format and lens mount info in
  imgdata.lens.makernotes.  A few alternate IDs reported for EOS M
  bodies are first normalized to their 0x80000xxx equivalents.
  Returns the (possibly remapped) ID.
*/
unsigned CLASS setCanonBodyFeatures(unsigned id)
{
  /* normalize alternate EOS M model IDs */
  if (id == 0x03740000) // EOS M3
    id = 0x80000374;
  else if (id == 0x03840000) // EOS M10
    id = 0x80000384;
  else if (id == 0x03940000) // EOS M5
    id = 0x80000394;
  else if (id == 0x04070000) // EOS M6
    id = 0x80000407;
  else if (id == 0x03980000) // EOS M100
    id = 0x80000398;
  imgdata.lens.makernotes.CamID = id;
  /* APS-H bodies, EF mount */
  if ((id == 0x80000001) || // 1D
      (id == 0x80000174) || // 1D2
      (id == 0x80000232) || // 1D2N
      (id == 0x80000169) || // 1D3
      (id == 0x80000281) // 1D4
      )
  {
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSH;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
  }
  /* full-frame bodies, EF mount */
  else if ((id == 0x80000167) || // 1Ds
           (id == 0x80000188) || // 1Ds2
           (id == 0x80000215) || // 1Ds3
           (id == 0x80000269) || // 1DX
           (id == 0x80000328) || // 1DX2
           (id == 0x80000324) || // 1DC
           (id == 0x80000213) || // 5D
           (id == 0x80000218) || // 5D2
           (id == 0x80000285) || // 5D3
           (id == 0x80000349) || // 5D4
           (id == 0x80000382) || // 5DS
           (id == 0x80000401) || // 5DS R
           (id == 0x80000302) // 6D
           )
  {
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
  }
  /* mirrorless EOS M bodies, APS-C, EF-M mount */
  else if ((id == 0x80000331) || // M
           (id == 0x80000355) || // M2
           (id == 0x80000374) || // M3
           (id == 0x80000384) || // M10
           (id == 0x80000394) || // M5
           (id == 0x80000407) || // M6
           (id == 0x80000398) // M100
           )
  {
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF_M;
  }
  /* remaining DSLR IDs (and early D30/D60): APS-C, EF mount; the lens
     mount itself is resolved later from lens data */
  else if ((id == 0x01140000) || // D30
           (id == 0x01668000) || // D60
           (id > 0x80000000))
  {
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Canon_EF;
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Unknown;
  }
  /* everything else: fixed-lens camera */
  else
  {
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
  }
  return id;
}
/*
  Decode the Canon CameraInfo makernote blob.

  The record layout is camera-specific: the switch below selects, per camera
  ID, the byte offsets of the current/min/max focal length, lens ID, focal
  type and lens-name fields inside CameraInfo. Offsets that stay 0 (unknown
  camera) harmlessly read the two bytes zeroed at the top of the function.

  id      - normalized Canon camera ID (see setCanonBodyFeatures)
  CameraInfo - raw tag payload (modified in place: first word is cleared)
  maxlen  - payload size in bytes; every offset is bounds-checked against it
  type    - TIFF type of the tag; type 4 (LONG) payloads carry the camera
            temperature at a length-dependent offset

  Fix vs. previous revision: the "EF-S" lens-name branch wrote "EF-E" into
  LensFeatures_pre; it now stores the matched "EF-S" prefix, consistent with
  the TS-E / MP-E / EF-M branches.
*/
void CLASS processCanonCameraInfo(unsigned id, uchar *CameraInfo, unsigned maxlen, unsigned type)
{
  ushort iCanonLensID = 0, iCanonMaxFocal = 0, iCanonMinFocal = 0, iCanonLens = 0, iCanonCurFocal = 0,
         iCanonFocalType = 0;
  if (maxlen < 16)
    return; // too short
  // Clear the first word so reads through still-zero offsets yield 0.
  CameraInfo[0] = 0;
  CameraInfo[1] = 0;
  if (type == 4)
  {
    // LONG-typed payload: temperature sits N longs from the end, where N
    // depends on the record length.
    if ((maxlen == 94) || (maxlen == 138) || (maxlen == 148) || (maxlen == 156) || (maxlen == 162) || (maxlen == 167) ||
        (maxlen == 171) || (maxlen == 264) || (maxlen > 400))
      imgdata.other.CameraTemperature = sget4(CameraInfo + ((maxlen - 3) << 2));
    else if (maxlen == 72)
      imgdata.other.CameraTemperature = sget4(CameraInfo + ((maxlen - 1) << 2));
    else if ((maxlen == 85) || (maxlen == 93))
      imgdata.other.CameraTemperature = sget4(CameraInfo + ((maxlen - 2) << 2));
    else if ((maxlen == 96) || (maxlen == 104))
      imgdata.other.CameraTemperature = sget4(CameraInfo + ((maxlen - 4) << 2));
  }
  // Per-camera field offsets inside CameraInfo.
  switch (id)
  {
  case 0x80000001: // 1D
  case 0x80000167: // 1DS
    iCanonCurFocal = 10;
    iCanonLensID = 13;
    iCanonMinFocal = 14;
    iCanonMaxFocal = 16;
    // These two models store focal data little-endian; read them here and
    // skip the big-endian (sget2Rev) fallback below.
    if (!imgdata.lens.makernotes.CurFocal)
      imgdata.lens.makernotes.CurFocal = sget2(CameraInfo + iCanonCurFocal);
    if (!imgdata.lens.makernotes.MinFocal)
      imgdata.lens.makernotes.MinFocal = sget2(CameraInfo + iCanonMinFocal);
    if (!imgdata.lens.makernotes.MaxFocal)
      imgdata.lens.makernotes.MaxFocal = sget2(CameraInfo + iCanonMaxFocal);
    imgdata.other.CameraTemperature = 0.0f;
    break;
  case 0x80000174: // 1DMkII
  case 0x80000188: // 1DsMkII
    iCanonCurFocal = 9;
    iCanonLensID = 12;
    iCanonMinFocal = 17;
    iCanonMaxFocal = 19;
    iCanonFocalType = 45;
    break;
  case 0x80000232: // 1DMkII N
    iCanonCurFocal = 9;
    iCanonLensID = 12;
    iCanonMinFocal = 17;
    iCanonMaxFocal = 19;
    break;
  case 0x80000169: // 1DMkIII
  case 0x80000215: // 1DsMkIII
    iCanonCurFocal = 29;
    iCanonLensID = 273;
    iCanonMinFocal = 275;
    iCanonMaxFocal = 277;
    break;
  case 0x80000281: // 1DMkIV
    iCanonCurFocal = 30;
    iCanonLensID = 335;
    iCanonMinFocal = 337;
    iCanonMaxFocal = 339;
    break;
  case 0x80000269: // 1D X
    iCanonCurFocal = 35;
    iCanonLensID = 423;
    iCanonMinFocal = 425;
    iCanonMaxFocal = 427;
    break;
  case 0x80000213: // 5D
    iCanonCurFocal = 40;
    // 5D records the lens ID at one of two offsets; a zero word at offset 12
    // means the ID lives at offset 151 instead.
    if (!sget2Rev(CameraInfo + 12))
      iCanonLensID = 151;
    else
      iCanonLensID = 12;
    iCanonMinFocal = 147;
    iCanonMaxFocal = 149;
    break;
  case 0x80000218: // 5DMkII
    iCanonCurFocal = 30;
    iCanonLensID = 230;
    iCanonMinFocal = 232;
    iCanonMaxFocal = 234;
    break;
  case 0x80000285: // 5DMkIII
    iCanonCurFocal = 35;
    iCanonLensID = 339;
    iCanonMinFocal = 341;
    iCanonMaxFocal = 343;
    break;
  case 0x80000302: // 6D
    iCanonCurFocal = 35;
    iCanonLensID = 353;
    iCanonMinFocal = 355;
    iCanonMaxFocal = 357;
    break;
  case 0x80000250: // 7D
    iCanonCurFocal = 30;
    iCanonLensID = 274;
    iCanonMinFocal = 276;
    iCanonMaxFocal = 278;
    break;
  case 0x80000190: // 40D
    iCanonCurFocal = 29;
    iCanonLensID = 214;
    iCanonMinFocal = 216;
    iCanonMaxFocal = 218;
    iCanonLens = 2347;
    break;
  case 0x80000261: // 50D
    iCanonCurFocal = 30;
    iCanonLensID = 234;
    iCanonMinFocal = 236;
    iCanonMaxFocal = 238;
    break;
  case 0x80000287: // 60D
    iCanonCurFocal = 30;
    iCanonLensID = 232;
    iCanonMinFocal = 234;
    iCanonMaxFocal = 236;
    break;
  case 0x80000325: // 70D
    iCanonCurFocal = 35;
    iCanonLensID = 358;
    iCanonMinFocal = 360;
    iCanonMaxFocal = 362;
    break;
  case 0x80000176: // 450D
    iCanonCurFocal = 29;
    iCanonLensID = 222;
    iCanonLens = 2355;
    break;
  case 0x80000252: // 500D
    iCanonCurFocal = 30;
    iCanonLensID = 246;
    iCanonMinFocal = 248;
    iCanonMaxFocal = 250;
    break;
  case 0x80000270: // 550D
    iCanonCurFocal = 30;
    iCanonLensID = 255;
    iCanonMinFocal = 257;
    iCanonMaxFocal = 259;
    break;
  case 0x80000286: // 600D
  case 0x80000288: // 1100D
    iCanonCurFocal = 30;
    iCanonLensID = 234;
    iCanonMinFocal = 236;
    iCanonMaxFocal = 238;
    break;
  case 0x80000301: // 650D
  case 0x80000326: // 700D
    iCanonCurFocal = 35;
    iCanonLensID = 295;
    iCanonMinFocal = 297;
    iCanonMaxFocal = 299;
    break;
  case 0x80000254: // 1000D
    iCanonCurFocal = 29;
    iCanonLensID = 226;
    iCanonMinFocal = 228;
    iCanonMaxFocal = 230;
    iCanonLens = 2359;
    break;
  }
  // Each field is read only if not already known and only if its offset is
  // inside the payload; out-of-range offsets mark a broken record.
  if (iCanonFocalType)
  {
    if (iCanonFocalType >= maxlen)
      return; // broken;
    imgdata.lens.makernotes.FocalType = CameraInfo[iCanonFocalType];
    if (!imgdata.lens.makernotes.FocalType) // zero means 'fixed' here, replacing with standard '1'
      imgdata.lens.makernotes.FocalType = 1;
  }
  if (!imgdata.lens.makernotes.CurFocal)
  {
    if (iCanonCurFocal >= maxlen)
      return; // broken;
    imgdata.lens.makernotes.CurFocal = sget2Rev(CameraInfo + iCanonCurFocal);
  }
  if (!imgdata.lens.makernotes.LensID)
  {
    if (iCanonLensID >= maxlen)
      return; // broken;
    imgdata.lens.makernotes.LensID = sget2Rev(CameraInfo + iCanonLensID);
  }
  if (!imgdata.lens.makernotes.MinFocal)
  {
    if (iCanonMinFocal >= maxlen)
      return; // broken;
    imgdata.lens.makernotes.MinFocal = sget2Rev(CameraInfo + iCanonMinFocal);
  }
  if (!imgdata.lens.makernotes.MaxFocal)
  {
    if (iCanonMaxFocal >= maxlen)
      return; // broken;
    imgdata.lens.makernotes.MaxFocal = sget2Rev(CameraInfo + iCanonMaxFocal);
  }
  // Lens name: Canon lenses carry a recognizable ASCII mount prefix
  // ("EF-S", "TS-E", "MP-E", "EF-M", or plain "EF"); a first byte < 'A'
  // (65) indicates a non-Canon lens, copied verbatim.
  if (!imgdata.lens.makernotes.Lens[0] && iCanonLens)
  {
    if (iCanonLens + 64 >= maxlen)
      return; // broken;
    if (CameraInfo[iCanonLens] < 65) // non-Canon lens
    {
      memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 64);
    }
    else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-S", 4))
    {
      memcpy(imgdata.lens.makernotes.Lens, "EF-S ", 5);
      // Store the matched mount prefix (was incorrectly "EF-E").
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-S", 4);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
      memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
    }
    else if (!strncmp((char *)CameraInfo + iCanonLens, "TS-E", 4))
    {
      memcpy(imgdata.lens.makernotes.Lens, "TS-E ", 5);
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "TS-E", 4);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
      memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
    }
    else if (!strncmp((char *)CameraInfo + iCanonLens, "MP-E", 4))
    {
      memcpy(imgdata.lens.makernotes.Lens, "MP-E ", 5);
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "MP-E", 4);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
      memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
    }
    else if (!strncmp((char *)CameraInfo + iCanonLens, "EF-M", 4))
    {
      memcpy(imgdata.lens.makernotes.Lens, "EF-M ", 5);
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF-M", 4);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
      memcpy(imgdata.lens.makernotes.Lens + 5, CameraInfo + iCanonLens + 4, 60);
    }
    else
    {
      // Plain "EF" lens: keep the two-letter prefix, insert a space.
      memcpy(imgdata.lens.makernotes.Lens, CameraInfo + iCanonLens, 2);
      memcpy(imgdata.lens.makernotes.LensFeatures_pre, "EF", 2);
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
      imgdata.lens.makernotes.Lens[2] = 32;
      memcpy(imgdata.lens.makernotes.Lens + 3, CameraInfo + iCanonLens + 2, 62);
    }
  }
  return;
}
// Parse Canon makernote tag 0x0001 (CameraSettings): a sequence of 16-bit
// words read in file order. fseek() calls skip words that are not stored;
// bare get2() calls consume single unused words. The exact read order must
// not change -- every get2() advances the stream.
void CLASS Canon_CameraSettings()
{
  fseek(ifp, 10, SEEK_CUR);
  imgdata.shootinginfo.DriveMode = get2();
  get2(); // skip one unused word
  imgdata.shootinginfo.FocusMode = get2();
  fseek(ifp, 18, SEEK_CUR);
  imgdata.shootinginfo.MeteringMode = get2();
  get2(); // skip one unused word
  imgdata.shootinginfo.AFPoint = get2();
  imgdata.shootinginfo.ExposureMode = get2();
  get2(); // skip one unused word
  imgdata.lens.makernotes.LensID = get2();
  imgdata.lens.makernotes.MaxFocal = get2();
  imgdata.lens.makernotes.MinFocal = get2();
  imgdata.lens.makernotes.CanonFocalUnits = get2();
  // Focal lengths are stored in "focal units"; scale to real units when the
  // divisor is meaningful (> 1).
  if (imgdata.lens.makernotes.CanonFocalUnits > 1)
  {
    imgdata.lens.makernotes.MaxFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
    imgdata.lens.makernotes.MinFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
  }
  imgdata.lens.makernotes.MaxAp = _CanonConvertAperture(get2());
  imgdata.lens.makernotes.MinAp = _CanonConvertAperture(get2());
  fseek(ifp, 12, SEEK_CUR);
  imgdata.shootinginfo.ImageStabilization = get2();
}
// Read the six fixed Canon white-balance presets (Daylight, Shade, Cloudy,
// Tungsten, warm fluorescent, Flash) from the current file position. Each
// preset is four 16-bit coefficients in RGGB order (the c ^ (c >> 1) index
// maps 0..3 to R,G1,B,G2 slots). skip1 bytes are skipped after each of the
// first four presets, skip2 bytes after the fifth; nothing follows Flash.
void CLASS Canon_WBpresets(int skip1, int skip2)
{
  static const int kPresetOrder[5] = {LIBRAW_WBI_Daylight, LIBRAW_WBI_Shade, LIBRAW_WBI_Cloudy,
                                      LIBRAW_WBI_Tungsten, LIBRAW_WBI_FL_W};
  int c;
  for (int p = 0; p < 5; p++)
  {
    FORC4 imgdata.color.WB_Coeffs[kPresetOrder[p]][c ^ (c >> 1)] = get2();
    int gap = (p == 4) ? skip2 : skip1;
    if (gap)
      fseek(ifp, gap, SEEK_CUR);
  }
  FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ (c >> 1)] = get2();
}
// Read 15 white-balance-by-color-temperature presets into
// WBCT_Coeffs[i] = {CCT, R, G1, B, G2} (G channels fixed at 1.0).
// The 8-byte per-entry record layout depends on WBCTversion, and for
// version 2 additionally on the camera model (unique_id): both the field
// order and the coefficient scaling (1024/x vs x/512) differ.
void CLASS Canon_WBCTpresets(short WBCTversion)
{
  if (WBCTversion == 0)
    for (int i = 0; i < 15; i++) // tint, as shot R, as shot B, CCT
    {
      imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
      fseek(ifp, 2, SEEK_CUR); // skip tint
      imgdata.color.WBCT_Coeffs[i][1] = 1024.0f / fMAX(get2(), 1.f);
      imgdata.color.WBCT_Coeffs[i][3] = 1024.0f / fMAX(get2(), 1.f);
      imgdata.color.WBCT_Coeffs[i][0] = get2();
    }
  else if (WBCTversion == 1)
    for (int i = 0; i < 15; i++) // as shot R, as shot B, tint, CCT
    {
      imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
      imgdata.color.WBCT_Coeffs[i][1] = 1024.0f / fMAX(get2(), 1.f);
      imgdata.color.WBCT_Coeffs[i][3] = 1024.0f / fMAX(get2(), 1.f);
      fseek(ifp, 2, SEEK_CUR); // skip tint
      imgdata.color.WBCT_Coeffs[i][0] = get2();
    }
  else if ((WBCTversion == 2) && ((unique_id == 0x80000374) || // M3
                                  (unique_id == 0x80000384) || // M10
                                  (unique_id == 0x80000394) || // M5
                                  (unique_id == 0x80000407) || // M6
                                  (unique_id == 0x80000398) || // M100
                                  (unique_id == 0x03970000) || // G7 X Mark II
                                  (unique_id == 0x04100000) || // G9 X Mark II
                                  (unique_id == 0x04180000))) // G1 X Mark III
    for (int i = 0; i < 15; i++) // tint, offset, as shot R, as shot B, CCT
    {
      fseek(ifp, 2, SEEK_CUR); // skip tint
      fseek(ifp, 2, SEEK_CUR); // skip offset
      imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
      imgdata.color.WBCT_Coeffs[i][1] = 1024.0f / fMAX(1.f, get2());
      imgdata.color.WBCT_Coeffs[i][3] = 1024.0f / fMAX(1.f, get2());
      imgdata.color.WBCT_Coeffs[i][0] = get2();
    }
  else if ((WBCTversion == 2) && ((unique_id == 0x03950000) || (unique_id == 0x03930000))) // G5 X, G9 X
    // Same layout as above, but coefficients are scaled x/512 instead of 1024/x.
    for (int i = 0; i < 15; i++) // tint, offset, as shot R, as shot B, CCT
    {
      fseek(ifp, 2, SEEK_CUR); // skip tint
      fseek(ifp, 2, SEEK_CUR); // skip offset
      imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 1.0f;
      imgdata.color.WBCT_Coeffs[i][1] = (float)get2() / 512.0f;
      imgdata.color.WBCT_Coeffs[i][3] = (float)get2() / 512.0f;
      imgdata.color.WBCT_Coeffs[i][0] = get2();
    }
  return;
}
/*
  Decode the (already decrypted) Nikon LensData makernote block.

  LensData - raw lens-data bytes
  len      - block length; selects the record layout:
             len < 20 : compact layouts (9/15/16 bytes), decoded field by
                        field at base offset i; other short lengths are
                        unknown and rejected
             459/590/509/879 : layouts that embed a 64-byte lens-name string

  Lens feature flags come from NikonLensType bits:
    0x01 MF, 0x02 D/G, 0x04 G, 0x08 VR, 0x10 CX (Nikon 1) mount,
    0x20 FT-1 adapter (F lens on CX body).

  Fix vs. previous revision: the len < 20 switch had no default, so an
  unexpected short length (e.g. 10..14, 17..19) left the base offset `i`
  uninitialized and indexed LensData with garbage. Unknown short layouts
  now return without touching lens data.
*/
void CLASS processNikonLensData(uchar *LensData, unsigned len)
{
  ushort i;
  // Bit 0 clear => autofocus lens, set => manual focus.
  if (!(imgdata.lens.nikon.NikonLensType & 0x01))
  {
    imgdata.lens.makernotes.LensFeatures_pre[0] = 'A';
    imgdata.lens.makernotes.LensFeatures_pre[1] = 'F';
  }
  else
  {
    imgdata.lens.makernotes.LensFeatures_pre[0] = 'M';
    imgdata.lens.makernotes.LensFeatures_pre[1] = 'F';
  }
  if (imgdata.lens.nikon.NikonLensType & 0x02)
  {
    if (imgdata.lens.nikon.NikonLensType & 0x04)
      imgdata.lens.makernotes.LensFeatures_suf[0] = 'G';
    else
      imgdata.lens.makernotes.LensFeatures_suf[0] = 'D';
    imgdata.lens.makernotes.LensFeatures_suf[1] = ' ';
  }
  if (imgdata.lens.nikon.NikonLensType & 0x08)
  {
    imgdata.lens.makernotes.LensFeatures_suf[2] = 'V';
    imgdata.lens.makernotes.LensFeatures_suf[3] = 'R';
  }
  if (imgdata.lens.nikon.NikonLensType & 0x10)
  {
    // Nikon 1 (CX mount, 1" sensor) body and lens.
    imgdata.lens.makernotes.LensMount = imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Nikon_CX;
    imgdata.lens.makernotes.CameraFormat = imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_1INCH;
  }
  else
    imgdata.lens.makernotes.LensMount = imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Nikon_F;
  if (imgdata.lens.nikon.NikonLensType & 0x20)
  {
    // F-mount lens on a CX body via the FT-1 adapter.
    strcpy(imgdata.lens.makernotes.Adapter, "FT-1");
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Nikon_F;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Nikon_CX;
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_1INCH;
  }
  // Drop the adapter bit; it is not part of the LensID composed below.
  imgdata.lens.nikon.NikonLensType = imgdata.lens.nikon.NikonLensType & 0xdf;
  if (len < 20)
  {
    // Base offset of the 8-byte lens record inside the compact layouts.
    switch (len)
    {
    case 9:
      i = 2;
      break;
    case 15:
      i = 7;
      break;
    case 16:
      i = 8;
      break;
    default:
      return; // unknown short layout -- don't read through an uninitialized offset
    }
    imgdata.lens.nikon.NikonLensIDNumber = LensData[i];
    imgdata.lens.nikon.NikonLensFStops = LensData[i + 1];
    imgdata.lens.makernotes.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops / 12.0f;
    if (fabsf(imgdata.lens.makernotes.MinFocal) < 1.1f)
    {
      // Focal lengths / apertures are 2^(x/24)-coded with a 5mm base.
      if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 2])
        imgdata.lens.makernotes.MinFocal = 5.0f * libraw_powf64l(2.0f, (float)LensData[i + 2] / 24.0f);
      if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 3])
        imgdata.lens.makernotes.MaxFocal = 5.0f * libraw_powf64l(2.0f, (float)LensData[i + 3] / 24.0f);
      if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 4])
        imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64l(2.0f, (float)LensData[i + 4] / 24.0f);
      if ((imgdata.lens.nikon.NikonLensType ^ (uchar)0x01) || LensData[i + 5])
        imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64l(2.0f, (float)LensData[i + 5] / 24.0f);
    }
    imgdata.lens.nikon.NikonMCUVersion = LensData[i + 6];
    if (i != 2)
    {
      // The 9-byte layout (i == 2) lacks these two extra fields.
      if ((LensData[i - 1]) && (fabsf(imgdata.lens.makernotes.CurFocal) < 1.1f))
        imgdata.lens.makernotes.CurFocal = 5.0f * libraw_powf64l(2.0f, (float)LensData[i - 1] / 24.0f);
      if (LensData[i + 7])
        imgdata.lens.nikon.NikonEffectiveMaxAp = libraw_powf64l(2.0f, (float)LensData[i + 7] / 24.0f);
    }
    // Pack the 7 record bytes plus the lens-type byte into a 64-bit LensID.
    imgdata.lens.makernotes.LensID =
        (unsigned long long)LensData[i] << 56 | (unsigned long long)LensData[i + 1] << 48 |
        (unsigned long long)LensData[i + 2] << 40 | (unsigned long long)LensData[i + 3] << 32 |
        (unsigned long long)LensData[i + 4] << 24 | (unsigned long long)LensData[i + 5] << 16 |
        (unsigned long long)LensData[i + 6] << 8 | (unsigned long long)imgdata.lens.nikon.NikonLensType;
  }
  else if ((len == 459) || (len == 590))
  {
    memcpy(imgdata.lens.makernotes.Lens, LensData + 390, 64);
  }
  else if (len == 509)
  {
    memcpy(imgdata.lens.makernotes.Lens, LensData + 391, 64);
  }
  else if (len == 879)
  {
    memcpy(imgdata.lens.makernotes.Lens, LensData + 680, 64);
  }
  return;
}
// Classify an Olympus body by its 64-bit camera ID (ASCII code packed into
// an integer): Four-Thirds / Micro Four-Thirds interchangeable-lens bodies
// get format and mount set; everything else is treated as a fixed-lens
// camera. Also patches the model name for the E-M10 Mark III ID.
void CLASS setOlympusBodyFeatures(unsigned long long id)
{
  imgdata.lens.makernotes.CamID = id;
  if (id == 0x5330303638ULL)
    strcpy(model, "E-M10MarkIII");
  int interchangeable = (id == 0x4434303430ULL) ||                 // E-1
                        (id == 0x4434303431ULL) ||                 // E-300
                        ((id & 0x00ffff0000ULL) == 0x0030300000ULL);
  if (!interchangeable)
  {
    // Fixed-lens camera: mount info is meaningless, mark both sides.
    imgdata.lens.makernotes.LensMount = imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
    return;
  }
  imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FT;
  if ((id == 0x4434303430ULL) ||                            // E-1
      (id == 0x4434303431ULL) ||                            // E-300
      ((id >= 0x5330303033ULL) && (id <= 0x5330303138ULL)) || // E-330 to E-520
      (id == 0x5330303233ULL) ||                            // E-620
      (id == 0x5330303239ULL) ||                            // E-450
      (id == 0x5330303330ULL) ||                            // E-600
      (id == 0x5330303333ULL))                              // E-5
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FT;   // classic Four-Thirds DSLR
  else
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_mFT;  // mirrorless Micro Four-Thirds
  return;
}
// Dispatch on a Canon makernote tag and fill imgdata fields from the tag
// payload at the current file position. The large tag 0x4001 branch decodes
// the per-generation ColorData record (white balance tables, black/white
// levels); all seeks inside it are relative to the payload start (save1),
// with offsets counted in 16-bit words (hence the << 1 shifts).
//
// tag / type / len: the makernote entry's tag ID, TIFF type, and value
// count as read by the caller.
void CLASS parseCanonMakernotes(unsigned tag, unsigned type, unsigned len)
{
  // 0x0001: camera settings block (drive/focus/metering mode, lens info).
  if (tag == 0x0001)
    Canon_CameraSettings();
  else if (tag == 0x0002) // focal length
  {
    imgdata.lens.makernotes.FocalType = get2();
    imgdata.lens.makernotes.CurFocal = get2();
    // Convert from "focal units" to real units (see Canon_CameraSettings).
    if (imgdata.lens.makernotes.CanonFocalUnits > 1)
    {
      imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
    }
  }
  else if (tag == 0x0004) // shot info
  {
    short tempAp;
    fseek(ifp, 24, SEEK_CUR);
    // Temperature word is biased by 128; zero means "not recorded".
    tempAp = get2();
    if (tempAp != 0)
      imgdata.other.CameraTemperature = (float)(tempAp - 128);
    tempAp = get2();
    if (tempAp != -1)
      imgdata.other.FlashGN = ((float)tempAp) / 32;
    get2();
    // fseek(ifp, 30, SEEK_CUR);
    imgdata.other.FlashEC = _CanonConvertEV((signed short)get2());
    // Rewind 24 bytes (8 - 32) to reach the aperture word.
    fseek(ifp, 8 - 32, SEEK_CUR);
    if ((tempAp = get2()) != 0x7fff)
      imgdata.lens.makernotes.CurAp = _CanonConvertAperture(tempAp);
    if (imgdata.lens.makernotes.CurAp < 0.7f)
    {
      // Implausible aperture: try the alternate location 32 bytes ahead.
      fseek(ifp, 32, SEEK_CUR);
      imgdata.lens.makernotes.CurAp = _CanonConvertAperture(get2());
    }
    if (!aperture)
      aperture = imgdata.lens.makernotes.CurAp;
  }
  else if (tag == 0x000c)
  {
    // 0x000c: body serial number, stored as a 32-bit integer.
    unsigned tS = get4();
    sprintf (imgdata.shootinginfo.BodySerial, "%d", tS);
  }
  else if (tag == 0x0095 && // lens model tag
           !imgdata.lens.makernotes.Lens[0])
  {
    fread(imgdata.lens.makernotes.Lens, 2, 1, ifp);
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
    if (imgdata.lens.makernotes.Lens[0] < 65) // non-Canon lens
      fread(imgdata.lens.makernotes.Lens + 2, 62, 1, ifp);
    else
    {
      // Canon lens: sniff the mount from the name prefix (EF-S/TS-E/MP-E/EF-M
      // vs plain EF) and normalize spacing in the stored name.
      char efs[2];
      imgdata.lens.makernotes.LensFeatures_pre[0] = imgdata.lens.makernotes.Lens[0];
      imgdata.lens.makernotes.LensFeatures_pre[1] = imgdata.lens.makernotes.Lens[1];
      fread(efs, 2, 1, ifp);
      if (efs[0] == 45 && (efs[1] == 83 || efs[1] == 69 || efs[1] == 77))
      { // "EF-S, TS-E, MP-E, EF-M" lenses
        imgdata.lens.makernotes.Lens[2] = imgdata.lens.makernotes.LensFeatures_pre[2] = efs[0];
        imgdata.lens.makernotes.Lens[3] = imgdata.lens.makernotes.LensFeatures_pre[3] = efs[1];
        imgdata.lens.makernotes.Lens[4] = 32;
        if (efs[1] == 83) // 'S' => EF-S
        {
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_S;
          imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
        }
        else if (efs[1] == 77) // 'M' => EF-M
        {
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF_M;
        }
      }
      else
      { // "EF" lenses
        imgdata.lens.makernotes.Lens[2] = 32;
        imgdata.lens.makernotes.Lens[3] = efs[0];
        imgdata.lens.makernotes.Lens[4] = efs[1];
      }
      fread(imgdata.lens.makernotes.Lens + 5, 58, 1, ifp);
    }
  }
  else if (tag == 0x009a)
  {
    // 0x009a: aspect info -> raw crop rectangle.
    get4();
    imgdata.sizes.raw_crop.cwidth = get4();
    imgdata.sizes.raw_crop.cheight = get4();
    imgdata.sizes.raw_crop.cleft = get4();
    imgdata.sizes.raw_crop.ctop = get4();
  }
  else if (tag == 0x00a9)
  {
    // 0x00a9: white balance table (older layout); restore position after.
    long int save1 = ftell(ifp);
    int c;
    fseek(ifp, (0x1 << 1), SEEK_CUR);
    FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
    Canon_WBpresets(0, 0);
    fseek(ifp, save1, SEEK_SET);
  }
  else if (tag == 0x00e0) // sensor info
  {
    // Comma-operator reads discard leading words of the record.
    imgdata.makernotes.canon.SensorWidth = (get2(), get2());
    imgdata.makernotes.canon.SensorHeight = get2();
    imgdata.makernotes.canon.SensorLeftBorder = (get2(), get2(), get2());
    imgdata.makernotes.canon.SensorTopBorder = get2();
    imgdata.makernotes.canon.SensorRightBorder = get2();
    imgdata.makernotes.canon.SensorBottomBorder = get2();
    imgdata.makernotes.canon.BlackMaskLeftBorder = get2();
    imgdata.makernotes.canon.BlackMaskTopBorder = get2();
    imgdata.makernotes.canon.BlackMaskRightBorder = get2();
    imgdata.makernotes.canon.BlackMaskBottomBorder = get2();
  }
  else if (tag == 0x4013)
  {
    // 0x4013: AF micro-adjustment (mode + value as a rational).
    get4();
    imgdata.makernotes.canon.AFMicroAdjMode = get4();
    imgdata.makernotes.canon.AFMicroAdjValue = ((float)get4()) / ((float)get4());
  }
  else if (tag == 0x4001 && len > 500)
  {
    // 0x4001: ColorData. The record length identifies the generation
    // (CanonColorDataVer); offsets below are generation-specific.
    int c;
    long int save1 = ftell(ifp);
    switch (len)
    {
    case 582:
      imgdata.makernotes.canon.CanonColorDataVer = 1; // 20D / 350D
      {
        fseek(ifp, save1 + (0x1e << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x41 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x46 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom2][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x23 << 1), SEEK_SET);
        Canon_WBpresets(2, 2);
        fseek(ifp, save1 + (0x4b << 1), SEEK_SET);
        Canon_WBCTpresets(1); // ABCT
      }
      break;
    case 653:
      imgdata.makernotes.canon.CanonColorDataVer = 2; // 1Dmk2 / 1DsMK2
      {
        fseek(ifp, save1 + (0x18 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x90 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x95 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom2][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x9a << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom3][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x27 << 1), SEEK_SET);
        Canon_WBpresets(2, 12);
        fseek(ifp, save1 + (0xa4 << 1), SEEK_SET);
        Canon_WBCTpresets(1); // ABCT
      }
      break;
    case 796:
      imgdata.makernotes.canon.CanonColorDataVer = 3; // 1DmkIIN / 5D / 30D / 400D
      imgdata.makernotes.canon.CanonColorDataSubVer = get2();
      {
        fseek(ifp, save1 + (0x44 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x49 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Measured][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x71 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x76 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom2][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x7b << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom3][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x80 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x4e << 1), SEEK_SET);
        Canon_WBpresets(2, 12);
        fseek(ifp, save1 + (0x85 << 1), SEEK_SET);
        Canon_WBCTpresets(0); // BCAT
        fseek(ifp, save1 + (0x0c4 << 1), SEEK_SET); // offset 196 short
        int bls = 0;
        FORC4
        bls += (imgdata.makernotes.canon.ChannelBlackLevel[c] = get2());
        imgdata.makernotes.canon.AverageBlackLevel = bls / 4;
      }
      break;
    // 1DmkIII / 1DSmkIII / 1DmkIV / 5DmkII
    // 7D / 40D / 50D / 60D / 450D / 500D
    // 550D / 1000D / 1100D
    case 674:
    case 692:
    case 702:
    case 1227:
    case 1250:
    case 1251:
    case 1337:
    case 1338:
    case 1346:
      imgdata.makernotes.canon.CanonColorDataVer = 4;
      imgdata.makernotes.canon.CanonColorDataSubVer = get2();
      {
        fseek(ifp, save1 + (0x44 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x49 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Measured][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x53 << 1), SEEK_SET);
        Canon_WBpresets(2, 12);
        fseek(ifp, save1 + (0xa8 << 1), SEEK_SET);
        Canon_WBCTpresets(0); // BCAT
        fseek(ifp, save1 + (0x0e7 << 1), SEEK_SET); // offset 231 short
        int bls = 0;
        FORC4
        bls += (imgdata.makernotes.canon.ChannelBlackLevel[c] = get2());
        imgdata.makernotes.canon.AverageBlackLevel = bls / 4;
      }
      // White-level offset varies with the sub-version within generation 4.
      if ((imgdata.makernotes.canon.CanonColorDataSubVer == 4) || (imgdata.makernotes.canon.CanonColorDataSubVer == 5))
      {
        fseek(ifp, save1 + (0x2b8 << 1), SEEK_SET); // offset 696 shorts
        imgdata.makernotes.canon.NormalWhiteLevel = get2();
        imgdata.makernotes.canon.SpecularWhiteLevel = get2();
        FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
      }
      else if ((imgdata.makernotes.canon.CanonColorDataSubVer == 6) ||
               (imgdata.makernotes.canon.CanonColorDataSubVer == 7))
      {
        fseek(ifp, save1 + (0x2cf << 1), SEEK_SET); // offset 719 shorts
        imgdata.makernotes.canon.NormalWhiteLevel = get2();
        imgdata.makernotes.canon.SpecularWhiteLevel = get2();
        FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
      }
      else if (imgdata.makernotes.canon.CanonColorDataSubVer == 9)
      {
        fseek(ifp, save1 + (0x2d3 << 1), SEEK_SET); // offset 723 shorts
        imgdata.makernotes.canon.NormalWhiteLevel = get2();
        imgdata.makernotes.canon.SpecularWhiteLevel = get2();
        FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
      }
      break;
    case 5120:
      imgdata.makernotes.canon.CanonColorDataVer = 5; // PowerSot G10, G12, G5 X, G7 X, G9 X, EOS M3, EOS M5, EOS M6
      {
        // Two sub-layouts within version 5, selected by camera ID.
        if ((unique_id == 0x03970000) || // G7 X Mark II
            (unique_id == 0x04100000) || // G9 X Mark II
            (unique_id == 0x04180000) || // G1 X Mark III
            (unique_id == 0x80000394) || // EOS M5
            (unique_id == 0x80000398) || // EOS M100
            (unique_id == 0x80000407)) // EOS M6
        {
          fseek(ifp, save1 + (0x4f << 1), SEEK_SET);
          FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
          fseek(ifp, 8, SEEK_CUR);
          FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Measured][c ^ (c >> 1)] = get2();
          fseek(ifp, 8, SEEK_CUR);
          FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Other][c ^ (c >> 1)] = get2();
          fseek(ifp, 8, SEEK_CUR);
          Canon_WBpresets(8, 24);
          fseek(ifp, 168, SEEK_CUR);
          FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][c ^ (c >> 1)] = get2();
          fseek(ifp, 24, SEEK_CUR);
          Canon_WBCTpresets(2); // BCADT
          fseek(ifp, 6, SEEK_CUR);
        }
        else
        {
          fseek(ifp, save1 + (0x4c << 1), SEEK_SET);
          FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
          get2();
          FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Measured][c ^ (c >> 1)] = get2();
          get2();
          FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Other][c ^ (c >> 1)] = get2();
          get2();
          Canon_WBpresets(2, 12);
          fseek(ifp, save1 + (0xba << 1), SEEK_SET);
          Canon_WBCTpresets(2); // BCADT
          fseek(ifp, save1 + (0x108 << 1), SEEK_SET); // offset 264 short
        }
        int bls = 0;
        FORC4 bls += (imgdata.makernotes.canon.ChannelBlackLevel[c] = get2());
        imgdata.makernotes.canon.AverageBlackLevel = bls / 4;
      }
      break;
    case 1273:
    case 1275:
      imgdata.makernotes.canon.CanonColorDataVer = 6; // 600D / 1200D
      imgdata.makernotes.canon.CanonColorDataSubVer = get2();
      {
        fseek(ifp, save1 + (0x44 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x49 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Measured][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x67 << 1), SEEK_SET);
        Canon_WBpresets(2, 12);
        fseek(ifp, save1 + (0xbc << 1), SEEK_SET);
        Canon_WBCTpresets(0); // BCAT
        fseek(ifp, save1 + (0x0fb << 1), SEEK_SET); // offset 251 short
        int bls = 0;
        FORC4
        bls += (imgdata.makernotes.canon.ChannelBlackLevel[c] = get2());
        imgdata.makernotes.canon.AverageBlackLevel = bls / 4;
      }
      fseek(ifp, save1 + (0x1e3 << 1), SEEK_SET); // offset 483 shorts
      imgdata.makernotes.canon.NormalWhiteLevel = get2();
      imgdata.makernotes.canon.SpecularWhiteLevel = get2();
      FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
      break;
    // 1DX / 5DmkIII / 6D / 100D / 650D / 700D / EOS M / 7DmkII / 750D / 760D
    case 1312:
    case 1313:
    case 1316:
    case 1506:
      imgdata.makernotes.canon.CanonColorDataVer = 7;
      imgdata.makernotes.canon.CanonColorDataSubVer = get2();
      {
        fseek(ifp, save1 + (0x44 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x49 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Measured][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x80 << 1), SEEK_SET);
        Canon_WBpresets(2, 12);
        fseek(ifp, save1 + (0xd5 << 1), SEEK_SET);
        Canon_WBCTpresets(0); // BCAT
        fseek(ifp, save1 + (0x114 << 1), SEEK_SET); // offset 276 shorts
        int bls = 0;
        FORC4
        bls += (imgdata.makernotes.canon.ChannelBlackLevel[c] = get2());
        imgdata.makernotes.canon.AverageBlackLevel = bls / 4;
      }
      if (imgdata.makernotes.canon.CanonColorDataSubVer == 10)
      {
        fseek(ifp, save1 + (0x1fc << 1), SEEK_SET); // offset 508 shorts
        imgdata.makernotes.canon.NormalWhiteLevel = get2();
        imgdata.makernotes.canon.SpecularWhiteLevel = get2();
        FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
      }
      else if (imgdata.makernotes.canon.CanonColorDataSubVer == 11)
      {
        fseek(ifp, save1 + (0x2dc << 1), SEEK_SET); // offset 732 shorts
        imgdata.makernotes.canon.NormalWhiteLevel = get2();
        imgdata.makernotes.canon.SpecularWhiteLevel = get2();
        FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
      }
      break;
    // 5DS / 5DS R / 80D / 1300D / 5D4 / 800D / 77D / 6D II / 200D
    case 1560:
    case 1592:
    case 1353:
    case 1602:
      imgdata.makernotes.canon.CanonColorDataVer = 8;
      imgdata.makernotes.canon.CanonColorDataSubVer = get2();
      {
        fseek(ifp, save1 + (0x44 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x49 << 1), SEEK_SET);
        FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Measured][c ^ (c >> 1)] = get2();
        fseek(ifp, save1 + (0x85 << 1), SEEK_SET);
        Canon_WBpresets(2, 12);
        fseek(ifp, save1 + (0x107 << 1), SEEK_SET);
        Canon_WBCTpresets(0); // BCAT
        fseek(ifp, save1 + (0x146 << 1), SEEK_SET); // offset 326 shorts
        int bls = 0;
        FORC4
        bls += (imgdata.makernotes.canon.ChannelBlackLevel[c] = get2());
        imgdata.makernotes.canon.AverageBlackLevel = bls / 4;
      }
      if (imgdata.makernotes.canon.CanonColorDataSubVer == 14) // 1300D
      {
        fseek(ifp, save1 + (0x230 << 1), SEEK_SET); // offset 560 shorts
        imgdata.makernotes.canon.NormalWhiteLevel = get2();
        imgdata.makernotes.canon.SpecularWhiteLevel = get2();
        FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
      }
      else
      {
        fseek(ifp, save1 + (0x30e << 1), SEEK_SET); // offset 782 shorts
        imgdata.makernotes.canon.NormalWhiteLevel = get2();
        imgdata.makernotes.canon.SpecularWhiteLevel = get2();
        FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.canon.SpecularWhiteLevel;
      }
      break;
    }
    // Restore the position saved before decoding the ColorData record.
    fseek(ifp, save1, SEEK_SET);
  }
}
// Classify a Pentax body by camera ID: set lens/camera mount and sensor
// format in imgdata.lens.makernotes. Unlisted IDs are treated as
// fixed-lens cameras. (Per-case model names are not recorded here;
// presumably the groups are K-mount APS-C, K-mount full-frame, 645-mount
// medium format, and Q-mount — TODO confirm IDs against the Pentax ID
// tables.)
void CLASS setPentaxBodyFeatures(unsigned id)
{
  imgdata.lens.makernotes.CamID = id;
  switch (id)
  {
  // K-mount, APS-C sensor bodies
  case 0x12994:
  case 0x12aa2:
  case 0x12b1a:
  case 0x12b60:
  case 0x12b62:
  case 0x12b7e:
  case 0x12b80:
  case 0x12b9c:
  case 0x12b9d:
  case 0x12ba2:
  case 0x12c1e:
  case 0x12c20:
  case 0x12cd2:
  case 0x12cd4:
  case 0x12cfa:
  case 0x12d72:
  case 0x12d73:
  case 0x12db8:
  case 0x12dfe:
  case 0x12e6c:
  case 0x12e76:
  case 0x12ef8:
  case 0x12f52:
  case 0x12f70:
  case 0x12f71:
  case 0x12fb6:
  case 0x12fc0:
  case 0x12fca:
  case 0x1301a:
  case 0x13024:
  case 0x1309c:
  case 0x13222:
  case 0x1322c:
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K;
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
    break;
  // K-mount, full-frame body
  case 0x13092:
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_K;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_K;
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
    break;
  // 645-mount, medium-format bodies
  case 0x12e08:
  case 0x13010:
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_645;
    imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_MF;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_645;
    imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_MF;
    break;
  // Q-mount bodies
  case 0x12ee4:
  case 0x12f66:
  case 0x12f7a:
  case 0x1302e:
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Pentax_Q;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Pentax_Q;
    break;
  // Anything else: fixed-lens camera
  default:
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
    imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
  }
  return;
}
// Translate a Pentax makernote ISO code into a real ISO speed via parallel
// lookup tables; codes without a table entry map to 65535 (unknown).
void CLASS PentaxISO(ushort c)
{
  int code[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
                21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
                39, 40, 41, 42, 43, 44, 45, 50, 100, 200, 400, 800, 1600, 3200, 258, 259, 260, 261,
                262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278};
  double value[] = {50, 64, 80, 100, 125, 160, 200, 250, 320, 400, 500, 640,
                    800, 1000, 1250, 1600, 2000, 2500, 3200, 4000, 5000, 6400, 8000, 10000,
                    12800, 16000, 20000, 25600, 32000, 40000, 51200, 64000, 80000, 102400, 128000, 160000,
                    204800, 258000, 325000, 409600, 516000, 650000, 819200, 50, 100, 200, 400, 800,
                    1600, 3200, 50, 70, 100, 140, 200, 280, 400, 560, 800, 1100,
                    1600, 2200, 3200, 4500, 6400, 9000, 12800, 18000, 25600, 36000, 51200};
#define numel (sizeof(code) / sizeof(code[0]))
  for (unsigned idx = 0; idx < numel; idx++)
    if (code[idx] == c)
    {
      iso_speed = value[idx];
      return;
    }
  // No matching code: fall back to the "unknown" sentinel.
  iso_speed = 65535.0f;
}
#undef numel
// Decode Pentax makernote tag 0x0207 (LensInfo): read the raw record from
// the file and extract lens ID, focal length, and aperture limits. The
// record layout (and the base offset iLensData of the packed lens-data
// bytes) depends on both camera ID and record length; iLensData == 0 means
// "layout not recognized" and only the ID bytes (if any) are used.
void CLASS PentaxLensInfo(unsigned id, unsigned len) // tag 0x0207
{
  ushort iLensData = 0;
  uchar *table_buf;
  // NOTE(review): malloc result and fread count are unchecked — presumably
  // acceptable here because callers pass small, validated lengths; confirm.
  table_buf = (uchar *)malloc(MAX(len, 128));
  fread(table_buf, len, 1, ifp);
  if ((id < 0x12b9c) || (((id == 0x12b9c) || // K100D
                          (id == 0x12b9d) || // K110D
                          (id == 0x12ba2)) && // K100D Super
                         ((!table_buf[20] || (table_buf[20] == 0xff)))))
  {
    // Oldest layout (LensInfo1): lens ID in the first two bytes.
    iLensData = 3;
    if (imgdata.lens.makernotes.LensID == -1)
      imgdata.lens.makernotes.LensID = (((unsigned)table_buf[0]) << 8) + table_buf[1];
  }
  else
    switch (len)
    {
    case 90: // LensInfo3
      iLensData = 13;
      if (imgdata.lens.makernotes.LensID == -1)
        imgdata.lens.makernotes.LensID = ((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) << 8) + table_buf[4];
      break;
    case 91: // LensInfo4
      iLensData = 12;
      if (imgdata.lens.makernotes.LensID == -1)
        imgdata.lens.makernotes.LensID = ((unsigned)((table_buf[1] & 0x0f) + table_buf[3]) << 8) + table_buf[4];
      break;
    case 80: // LensInfo5
    case 128:
      iLensData = 15;
      if (imgdata.lens.makernotes.LensID == -1)
        imgdata.lens.makernotes.LensID = ((unsigned)((table_buf[1] & 0x0f) + table_buf[4]) << 8) + table_buf[5];
      break;
    default:
      if (id >= 0x12b9c) // LensInfo2
      {
        iLensData = 4;
        if (imgdata.lens.makernotes.LensID == -1)
          imgdata.lens.makernotes.LensID = ((unsigned)((table_buf[0] & 0x0f) + table_buf[2]) << 8) + table_buf[3];
      }
    }
  if (iLensData)
  {
    // Byte iLensData+9: focal length, coded as (f/10) in the top 6 bits with
    // a power-of-4 scale in the low 2 bits.
    if (table_buf[iLensData + 9] && (fabs(imgdata.lens.makernotes.CurFocal) < 0.1f))
      imgdata.lens.makernotes.CurFocal =
          10 * (table_buf[iLensData + 9] >> 2) * libraw_powf64l(4, (table_buf[iLensData + 9] & 0x03) - 2);
    // Byte iLensData+10: max aperture in the high nibble, min in the low,
    // both quarter-stop coded as 2^(x/4).
    if (table_buf[iLensData + 10] & 0xf0)
      imgdata.lens.makernotes.MaxAp4CurFocal =
          libraw_powf64l(2.0f, (float)((table_buf[iLensData + 10] & 0xf0) >> 4) / 4.0f);
    if (table_buf[iLensData + 10] & 0x0f)
      imgdata.lens.makernotes.MinAp4CurFocal =
          libraw_powf64l(2.0f, (float)((table_buf[iLensData + 10] & 0x0f) + 10) / 4.0f);
    if (iLensData != 12) // all layouts except LensInfo4
    {
      // Bits 1-2 of the first lens-data byte encode the minimum aperture
      // at the short end of the zoom range.
      switch (table_buf[iLensData] & 0x06)
      {
      case 0:
        imgdata.lens.makernotes.MinAp4MinFocal = 22.0f;
        break;
      case 2:
        imgdata.lens.makernotes.MinAp4MinFocal = 32.0f;
        break;
      case 4:
        imgdata.lens.makernotes.MinAp4MinFocal = 45.0f;
        break;
      case 6:
        imgdata.lens.makernotes.MinAp4MinFocal = 16.0f;
        break;
      }
      if (table_buf[iLensData] & 0x70)
        imgdata.lens.makernotes.LensFStops = ((float)(((table_buf[iLensData] & 0x70) >> 4) ^ 0x07)) / 2.0f + 5.0f;
      imgdata.lens.makernotes.MinFocusDistance = (float)(table_buf[iLensData + 3] & 0xf8);
      imgdata.lens.makernotes.FocusRangeIndex = (float)(table_buf[iLensData + 3] & 0x07);
      if ((table_buf[iLensData + 14] > 1) && (fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
        imgdata.lens.makernotes.MaxAp4CurFocal =
            libraw_powf64l(2.0f, (float)((table_buf[iLensData + 14] & 0x7f) - 1) / 32.0f);
    }
    else if ((id != 0x12e76) && // K-5
             (table_buf[iLensData + 15] > 1) && (fabs(imgdata.lens.makernotes.MaxAp4CurFocal) < 0.7f))
    {
      imgdata.lens.makernotes.MaxAp4CurFocal =
          libraw_powf64l(2.0f, (float)((table_buf[iLensData + 15] & 0x7f) - 1) / 32.0f);
    }
  }
  free(table_buf);
  return;
}
/*
 * Record Phase One / Leaf digital-back body features for a given back id.
 *
 * Sets imgdata.lens.makernotes.CamID to `id` and, when the body name has
 * not been filled in yet, looks up the host-camera platform string
 * (Hasselblad V/H, Contax 645, Mamiya, ...) in a static id -> name table.
 *
 * Fix: the original lookup loop kept scanning the whole table after a
 * match; ids are unique in the table, so we now break out on first hit.
 */
void CLASS setPhaseOneFeatures(unsigned id)
{
  ushort i;
  static const struct
  {
    ushort id;
    char t_model[32];
  } p1_unique[] = {
      // Phase One section:
      {1, "Hasselblad V"},
      {10, "PhaseOne/Mamiya"},
      {12, "Contax 645"},
      {16, "Hasselblad V"},
      {17, "Hasselblad V"},
      {18, "Contax 645"},
      {19, "PhaseOne/Mamiya"},
      {20, "Hasselblad V"},
      {21, "Contax 645"},
      {22, "PhaseOne/Mamiya"},
      {23, "Hasselblad V"},
      {24, "Hasselblad H"},
      {25, "PhaseOne/Mamiya"},
      {32, "Contax 645"},
      {34, "Hasselblad V"},
      {35, "Hasselblad V"},
      {36, "Hasselblad H"},
      {37, "Contax 645"},
      {38, "PhaseOne/Mamiya"},
      {39, "Hasselblad V"},
      {40, "Hasselblad H"},
      {41, "Contax 645"},
      {42, "PhaseOne/Mamiya"},
      {44, "Hasselblad V"},
      {45, "Hasselblad H"},
      {46, "Contax 645"},
      {47, "PhaseOne/Mamiya"},
      {48, "Hasselblad V"},
      {49, "Hasselblad H"},
      {50, "Contax 645"},
      {51, "PhaseOne/Mamiya"},
      {52, "Hasselblad V"},
      {53, "Hasselblad H"},
      {54, "Contax 645"},
      {55, "PhaseOne/Mamiya"},
      {67, "Hasselblad V"},
      {68, "Hasselblad H"},
      {69, "Contax 645"},
      {70, "PhaseOne/Mamiya"},
      {71, "Hasselblad V"},
      {72, "Hasselblad H"},
      {73, "Contax 645"},
      {74, "PhaseOne/Mamiya"},
      {76, "Hasselblad V"},
      {77, "Hasselblad H"},
      {78, "Contax 645"},
      {79, "PhaseOne/Mamiya"},
      {80, "Hasselblad V"},
      {81, "Hasselblad H"},
      {82, "Contax 645"},
      {83, "PhaseOne/Mamiya"},
      {84, "Hasselblad V"},
      {85, "Hasselblad H"},
      {86, "Contax 645"},
      {87, "PhaseOne/Mamiya"},
      {99, "Hasselblad V"},
      {100, "Hasselblad H"},
      {101, "Contax 645"},
      {102, "PhaseOne/Mamiya"},
      {103, "Hasselblad V"},
      {104, "Hasselblad H"},
      {105, "PhaseOne/Mamiya"},
      {106, "Contax 645"},
      {112, "Hasselblad V"},
      {113, "Hasselblad H"},
      {114, "Contax 645"},
      {115, "PhaseOne/Mamiya"},
      {131, "Hasselblad V"},
      {132, "Hasselblad H"},
      {133, "Contax 645"},
      {134, "PhaseOne/Mamiya"},
      {135, "Hasselblad V"},
      {136, "Hasselblad H"},
      {137, "Contax 645"},
      {138, "PhaseOne/Mamiya"},
      {140, "Hasselblad V"},
      {141, "Hasselblad H"},
      {142, "Contax 645"},
      {143, "PhaseOne/Mamiya"},
      {148, "Hasselblad V"},
      {149, "Hasselblad H"},
      {150, "Contax 645"},
      {151, "PhaseOne/Mamiya"},
      {160, "A-250"},
      {161, "A-260"},
      {162, "A-280"},
      {167, "Hasselblad V"},
      {168, "Hasselblad H"},
      {169, "Contax 645"},
      {170, "PhaseOne/Mamiya"},
      {172, "Hasselblad V"},
      {173, "Hasselblad H"},
      {174, "Contax 645"},
      {175, "PhaseOne/Mamiya"},
      {176, "Hasselblad V"},
      {177, "Hasselblad H"},
      {178, "Contax 645"},
      {179, "PhaseOne/Mamiya"},
      {180, "Hasselblad V"},
      {181, "Hasselblad H"},
      {182, "Contax 645"},
      {183, "PhaseOne/Mamiya"},
      {208, "Hasselblad V"},
      {211, "PhaseOne/Mamiya"},
      {448, "Phase One 645AF"},
      {457, "Phase One 645DF"},
      {471, "Phase One 645DF+"},
      {704, "Phase One iXA"},
      {705, "Phase One iXA - R"},
      {706, "Phase One iXU 150"},
      {707, "Phase One iXU 150 - NIR"},
      {708, "Phase One iXU 180"},
      {721, "Phase One iXR"},
      // Leaf section:
      {333, "Mamiya"},
      {329, "Universal"},
      {330, "Hasselblad H1/H2"},
      {332, "Contax"},
      {336, "AFi"},
      {327, "Mamiya"},
      {324, "Universal"},
      {325, "Hasselblad H1/H2"},
      {326, "Contax"},
      {335, "AFi"},
      {340, "Mamiya"},
      {337, "Universal"},
      {338, "Hasselblad H1/H2"},
      {339, "Contax"},
      {323, "Mamiya"},
      {320, "Universal"},
      {322, "Hasselblad H1/H2"},
      {321, "Contax"},
      {334, "AFi"},
      {369, "Universal"},
      {370, "Mamiya"},
      {371, "Hasselblad H1/H2"},
      {372, "Contax"},
      {373, "Afi"},
  };
  imgdata.lens.makernotes.CamID = id;
  // Only fill in the body name if nothing set it earlier.
  if (id && !imgdata.lens.makernotes.body[0])
  {
    for (i = 0; i < sizeof p1_unique / sizeof *p1_unique; i++)
      if (id == p1_unique[i].id)
      {
        strcpy(imgdata.lens.makernotes.body, p1_unique[i].t_model);
        break; // ids are unique: stop at the first (only) match
      }
  }
  return;
}
/*
 * Dispatch a single Fujifilm maker-note tag into imgdata.
 * Reads the tag value from the open file via get2()/get4()/getreal();
 * unknown tags are ignored. Cases are listed in ascending tag order.
 */
void CLASS parseFujiMakernotes(unsigned tag, unsigned type)
{
  switch (tag)
  {
  case 0x1002: // white balance preset
    imgdata.makernotes.fuji.WB_Preset = get2();
    break;
  case 0x1011: // flash exposure compensation
    imgdata.other.FlashEC = getreal(type);
    break;
  case 0x1020: // macro mode
    imgdata.makernotes.fuji.Macro = get2();
    break;
  case 0x1021: // focus mode
    imgdata.makernotes.fuji.FocusMode = get2();
    break;
  case 0x1022: // AF mode
    imgdata.makernotes.fuji.AFMode = get2();
    break;
  case 0x1023: // focus point (x, y)
    imgdata.makernotes.fuji.FocusPixel[0] = get2();
    imgdata.makernotes.fuji.FocusPixel[1] = get2();
    break;
  case 0x1034: // EXR mode
    imgdata.makernotes.fuji.ExrMode = get2();
    break;
  case 0x1050: // shutter type (mechanical / electronic)
    imgdata.makernotes.fuji.ShutterType = get2();
    break;
  case 0x1400: // dynamic range
    imgdata.makernotes.fuji.FujiDynamicRange = get2();
    break;
  case 0x1401: // film simulation mode
    imgdata.makernotes.fuji.FujiFilmMode = get2();
    break;
  case 0x1402: // dynamic range setting
    imgdata.makernotes.fuji.FujiDynamicRangeSetting = get2();
    break;
  case 0x1403: // development dynamic range
    imgdata.makernotes.fuji.FujiDevelopmentDynamicRange = get2();
    break;
  case 0x1404: // lens minimal focal length, mm
    imgdata.lens.makernotes.MinFocal = getreal(type);
    break;
  case 0x1405: // lens maximal focal length, mm
    imgdata.lens.makernotes.MaxFocal = getreal(type);
    break;
  case 0x1406: // max aperture at min focal
    imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
    break;
  case 0x1407: // max aperture at max focal
    imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
    break;
  case 0x140b: // auto dynamic range
    imgdata.makernotes.fuji.FujiAutoDynamicRange = get2();
    break;
  case 0x1422: // image stabilization: mode, state, (reserved)
    imgdata.makernotes.fuji.ImageStabilization[0] = get2();
    imgdata.makernotes.fuji.ImageStabilization[1] = get2();
    imgdata.makernotes.fuji.ImageStabilization[2] = get2();
    // pack mode+state into the generic shootinginfo field
    imgdata.shootinginfo.ImageStabilization =
        (imgdata.makernotes.fuji.ImageStabilization[0] << 9) + imgdata.makernotes.fuji.ImageStabilization[1];
    break;
  case 0x1431: // in-camera rating
    imgdata.makernotes.fuji.Rating = get4();
    break;
  case 0x3820: // movie frame rate
    imgdata.makernotes.fuji.FrameRate = get2();
    break;
  case 0x3821: // movie frame width
    imgdata.makernotes.fuji.FrameWidth = get2();
    break;
  case 0x3822: // movie frame height
    imgdata.makernotes.fuji.FrameHeight = get2();
    break;
  }
  return;
}
/*
 * Fill per-body Sony feature data (format, mount, camera type, maker-note
 * table layout offsets) from the Sony ModelID `id`.
 *
 * id == 2 is a generic fixed-lens DSC; all other known ids are >= 256 and
 * index the SonyCamFeatures table at (id - 256). `idx` is unsigned, so an
 * id below 256 wraps around and simply fails the upper bound check.
 *
 * Fix: dropped the tautological `(idx >= 0)` test — idx is a ushort, so
 * the comparison was always true (dead code, -Wtype-limits warning).
 */
void CLASS setSonyBodyFeatures(unsigned id)
{
  ushort idx;
  static const struct
  {
    ushort scf[8];
    /*
    scf[0] camera id
    scf[1] camera format
    scf[2] camera mount: Minolta A, Sony E, fixed,
    scf[3] camera type: DSLR, NEX, SLT, ILCE, ILCA, DSC
    scf[4] lens mount
    scf[5] tag 0x2010 group (0 if not used)
    scf[6] offset of Sony ISO in 0x2010 table, 0xffff if not valid
    scf[7] offset of ImageCount3 in 0x9050 table, 0xffff if not valid
    */
  } SonyCamFeatures[] = {
      {256, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {257, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {258, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {259, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {260, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {261, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {262, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {263, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {264, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {265, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {266, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {267, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {268, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {269, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {270, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {271, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {272, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {273, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {274, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {275, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {276, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {277, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {278, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 0, 0xffff, 0xffff},
      {279, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 0, 0xffff, 0xffff},
      {280, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_SLT, 0, 0, 0xffff, 0xffff},
      {281, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_SLT, 0, 0, 0xffff, 0xffff},
      {282, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {283, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_DSLR, 0, 0, 0xffff, 0xffff},
      {284, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 0, 0xffff, 0xffff},
      {285, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_SLT, 0, 0, 0xffff, 0xffff},
      {286, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_SLT, 0, 2, 0x1218, 0x01bd},
      {287, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_SLT, 0, 2, 0x1218, 0x01bd},
      {288, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 1, 0x113e, 0x01bd},
      {289, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 2, 0x1218, 0x01bd},
      {290, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 2, 0x1218, 0x01bd},
      {291, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_SLT, 0, 3, 0x11f4, 0x01bd},
      {292, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_SLT, 0, 3, 0x11f4, 0x01bd},
      {293, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 3, 0x11f4, 0x01bd},
      {294, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_SLT, 0, 5, 0x1254, 0x01aa},
      {295, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 5, 0x1254, 0x01aa},
      {296, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 5, 0x1254, 0x01aa},
      {297, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 5, 0x1254, 0xffff},
      {298, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 5, 0x1258, 0xffff},
      {299, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 5, 0x1254, 0x01aa},
      {300, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 5, 0x1254, 0x01aa},
      {301, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {302, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 5, 0x1280, 0x01aa},
      {303, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_SLT, 0, 5, 0x1280, 0x01aa},
      {304, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {305, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 5, 0x1280, 0x01aa},
      {306, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 7, 0x0344, 0xffff},
      {307, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_NEX, 0, 5, 0x1254, 0x01aa},
      {308, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 6, 0x113c, 0xffff},
      {309, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 7, 0x0344, 0xffff},
      {310, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 5, 0x1258, 0xffff},
      {311, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 7, 0x0344, 0xffff},
      {312, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 7, 0x0344, 0xffff},
      {313, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 7, 0x0344, 0x01aa},
      {314, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {315, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {316, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {317, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 7, 0x0344, 0xffff},
      {318, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 7, 0x0344, 0xffff},
      {319, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_ILCA, 0, 7, 0x0344, 0x01a0},
      {320, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {321, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {322, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {323, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {324, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {325, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {326, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {327, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {328, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {329, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {330, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {331, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {332, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {333, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {334, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {335, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {336, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {337, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {338, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {339, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 7, 0x0344, 0x01a0},
      {340, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 7, 0x0344, 0xffff},
      {341, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 8, 0x0346, 0xffff},
      {342, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 8, 0x0346, 0xffff},
      {343, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {344, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 8, 0x0346, 0xffff},
      {345, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {346, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 7, 0x0344, 0x01a0},
      {347, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 8, 0x0346, 0x01cb},
      {348, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {349, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {350, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 8, 0x0346, 0x01cb},
      {351, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {352, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {353, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_ILCA, 0, 7, 0x0344, 0x01a0},
      {354, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Minolta_A, LIBRAW_SONY_ILCA, 0, 8, 0x0346, 0x01cd},
      {355, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 8, 0x0346, 0xffff},
      {356, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 8, 0x0346, 0xffff},
      {357, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 8, 0x0346, 0x01cd},
      {358, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 9, 0x0320, 0x019f},
      {359, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {360, LIBRAW_FORMAT_APSC, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 8, 0x0346, 0x01cd},
      {361, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {362, LIBRAW_FORMAT_FF, LIBRAW_MOUNT_Sony_E, LIBRAW_SONY_ILCE, 0, 9, 0x0320, 0x019f},
      {363, 0, 0, 0, 0, 0, 0xffff, 0xffff},
      {364, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 8, 0x0346, 0xffff},
      {365, LIBRAW_FORMAT_1INCH, LIBRAW_MOUNT_FixedLens, LIBRAW_SONY_DSC, LIBRAW_MOUNT_FixedLens, 9, 0x0320, 0xffff},
  };
  imgdata.lens.makernotes.CamID = id;
  if (id == 2) // generic fixed-lens DSC, not covered by the table
  {
    imgdata.lens.makernotes.CameraMount = imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
    imgdata.makernotes.sony.SonyCameraType = LIBRAW_SONY_DSC;
    imgdata.makernotes.sony.group2010 = 0;
    imgdata.makernotes.sony.real_iso_offset = 0xffff;
    imgdata.makernotes.sony.ImageCount3_offset = 0xffff;
    return;
  }
  else
    idx = id - 256; // ushort: ids < 256 wrap and fail the bound test below
  if (idx < sizeof SonyCamFeatures / sizeof *SonyCamFeatures)
  {
    if (!SonyCamFeatures[idx].scf[2]) // placeholder row (unknown body)
      return;
    imgdata.lens.makernotes.CameraFormat = SonyCamFeatures[idx].scf[1];
    imgdata.lens.makernotes.CameraMount = SonyCamFeatures[idx].scf[2];
    imgdata.makernotes.sony.SonyCameraType = SonyCamFeatures[idx].scf[3];
    if (SonyCamFeatures[idx].scf[4])
      imgdata.lens.makernotes.LensMount = SonyCamFeatures[idx].scf[4];
    imgdata.makernotes.sony.group2010 = SonyCamFeatures[idx].scf[5];
    imgdata.makernotes.sony.real_iso_offset = SonyCamFeatures[idx].scf[6];
    imgdata.makernotes.sony.ImageCount3_offset = SonyCamFeatures[idx].scf[7];
  }
  // Parse firmware version from the "... vX.YZ" software string: some
  // bodies moved the ImageCount3 offset between firmware revisions.
  char *sbstr = strstr(software, " v");
  if (sbstr != NULL)
  {
    sbstr += 2;
    imgdata.makernotes.sony.firmware = atof(sbstr);
    if ((id == 306) || (id == 311)) // a7 II / a7R II family
    {
      if (imgdata.makernotes.sony.firmware < 1.2f)
        imgdata.makernotes.sony.ImageCount3_offset = 0x01aa;
      else
        imgdata.makernotes.sony.ImageCount3_offset = 0x01c0;
    }
    else if (id == 312)
    {
      if (imgdata.makernotes.sony.firmware < 2.0f)
        imgdata.makernotes.sony.ImageCount3_offset = 0x01aa;
      else
        imgdata.makernotes.sony.ImageCount3_offset = 0x01c0;
    }
    else if ((id == 318) || (id == 340))
    {
      if (imgdata.makernotes.sony.firmware < 1.2f)
        imgdata.makernotes.sony.ImageCount3_offset = 0x01a0;
      else
        imgdata.makernotes.sony.ImageCount3_offset = 0x01b6;
    }
  }
}
/*
 * Decode Sony "LensType2" (E-mount lens id) from its big-endian byte pair.
 * Values below 0x100 identify an adapter rather than a lens; values of
 * 0x100 and above are the lens id itself.
 */
void CLASS parseSonyLensType2(uchar a, uchar b)
{
  ushort lens_code = (((ushort)a) << 8) | ((ushort)b);
  if (!lens_code)
    return;
  if (lens_code >= 0x100)
  {
    imgdata.lens.makernotes.LensID = lens_code;
  }
  // Adapter code: record it only if no adapter was identified already.
  else if ((imgdata.lens.makernotes.AdapterID != 0x4900) && (imgdata.lens.makernotes.AdapterID != 0xEF00))
  {
    imgdata.lens.makernotes.AdapterID = lens_code;
    if ((lens_code == 1) || (lens_code == 2) || (lens_code == 3) || (lens_code == 6))
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
    else if ((lens_code == 44) || (lens_code == 78) || (lens_code == 239))
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
  }
  // Sigma MC-11 adapter reports lens ids in the 50481..50499 range.
  if ((lens_code >= 50481) && (lens_code < 50500))
  {
    strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
    imgdata.lens.makernotes.AdapterID = 0x4900;
  }
}
#define strnXcat(buf, string) strncat(buf, string, LIM(sizeof(buf) - strbuflen(buf) - 1, 0, sizeof(buf)))
/*
 * Decode the Sony/Minolta lens "features" bitfield into the human-readable
 * prefix ("E"/"FE"/"DT") and suffix (" G", " ZA", " OSS", ...) strings, and
 * infer lens format/mount when they are still unknown.
 *
 * Fix: the guard used `LensMount != LIBRAW_MOUNT_Sigma_X3F`, which made the
 * function return for nearly every lens. The intent (as in upstream LibRaw)
 * is to skip only adapted Canon-EF or Sigma lenses, i.e. `==` on both tests.
 */
void CLASS parseSonyLensFeatures(uchar a, uchar b)
{
  ushort features;
  features = (((ushort)a) << 8) | ((ushort)b);
  // Skip adapted lenses (feature bits describe native Sony/Minolta glass).
  if ((imgdata.lens.makernotes.LensMount == LIBRAW_MOUNT_Canon_EF) ||
      (imgdata.lens.makernotes.LensMount == LIBRAW_MOUNT_Sigma_X3F) || !features)
    return;
  imgdata.lens.makernotes.LensFeatures_pre[0] = 0;
  imgdata.lens.makernotes.LensFeatures_suf[0] = 0;
  // bits 9+8: 11 = E (APS-C E-mount), 10 = FE (full-frame E), 01 = DT (APS-C A)
  if ((features & 0x0200) && (features & 0x0100))
    strcpy(imgdata.lens.makernotes.LensFeatures_pre, "E");
  else if (features & 0x0200)
    strcpy(imgdata.lens.makernotes.LensFeatures_pre, "FE");
  else if (features & 0x0100)
    strcpy(imgdata.lens.makernotes.LensFeatures_pre, "DT");
  // Derive format/mount from the same bits when nothing set them yet.
  if (!imgdata.lens.makernotes.LensFormat && !imgdata.lens.makernotes.LensMount)
  {
    imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FF;
    imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
    if ((features & 0x0200) && (features & 0x0100))
    {
      imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
    }
    else if (features & 0x0200)
    {
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
    }
    else if (features & 0x0100)
    {
      imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_APSC;
    }
  }
  if (features & 0x4000)
    strnXcat(imgdata.lens.makernotes.LensFeatures_pre, " PZ");
  if (features & 0x0008)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " G");
  else if (features & 0x0004)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " ZA");
  if ((features & 0x0020) && (features & 0x0040))
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " Macro");
  else if (features & 0x0020)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " STF");
  else if (features & 0x0040)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " Reflex");
  else if (features & 0x0080)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " Fisheye");
  if (features & 0x0001)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " SSM");
  else if (features & 0x0002)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " SAM");
  if (features & 0x8000)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " OSS");
  if (features & 0x2000)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " LE");
  if (features & 0x0800)
    strnXcat(imgdata.lens.makernotes.LensFeatures_suf, " II");
  // Drop the leading space produced by the first strnXcat append.
  if (imgdata.lens.makernotes.LensFeatures_suf[0] == ' ')
    memmove(imgdata.lens.makernotes.LensFeatures_suf, imgdata.lens.makernotes.LensFeatures_suf + 1,
            strbuflen(imgdata.lens.makernotes.LensFeatures_suf) - 1);
  return;
}
#undef strnXcat
/*
 * Battery temperature from Sony tag 0x0116. The byte offset depends on the
 * body generation; the stored value is degrees Fahrenheit.
 */
void CLASS process_Sony_0x0116(uchar *buf, ushort len, unsigned id)
{
  int fahrenheit;
  const bool first_gen = (id == 257) || (id == 262) || (id == 269) || (id == 270);
  if (first_gen && (len >= 2))
    fahrenheit = buf[1];
  else if ((id >= 273) && (len >= 3))
    fahrenheit = buf[2];
  else
    return; // unknown layout or buffer too short
  imgdata.other.BatteryTemperature = (float)(fahrenheit - 32) / 1.8f;
}
/*
 * Recover the real ISO value from the enciphered tag-0x2010 table, at the
 * per-body offset established by setSonyBodyFeatures(). Only fills in
 * real_ISO when it is still unset.
 */
void CLASS process_Sony_0x2010(uchar *buf, ushort len)
{
  const ushort iso_off = imgdata.makernotes.sony.real_iso_offset;
  if (!imgdata.makernotes.sony.group2010 || (iso_off == 0xffff) || (len < (iso_off + 2)))
    return;
  if (imgdata.other.real_ISO >= 0.1f)
    return; // already known from elsewhere
  uchar deciphered[2];
  deciphered[0] = SonySubstitution[buf[iso_off]];
  deciphered[1] = SonySubstitution[buf[iso_off + 1]];
  imgdata.other.real_ISO = 100.0f * libraw_powf64l(2.0f, (16 - ((float)sget2(deciphered)) / 256.0f));
}
/*
 * Parse Sony enciphered tag 0x9050: apertures, lens mount/format/id,
 * lens feature bits, internal body serial, and the ImageCount3 counter.
 * All bytes pass through the SonySubstitution[] decipher table.
 * Every read is guarded by a length check; on a short buffer the function
 * returns early, keeping whatever was decoded so far.
 */
void CLASS process_Sony_0x9050(uchar *buf, ushort len, unsigned id)
{
  ushort lid;
  uchar s[4];
  int c;
  // Bytes 0/1 (max/min aperture at current focal) exist only on A-mount
  // (non-E, non-fixed-lens) bodies.
  if ((imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_Sony_E) &&
      (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens))
  {
    if (len < 2)
      return;
    if (buf[0])
      // aperture = 2^((raw/8 - 1.06)/2), rounded to one decimal
      imgdata.lens.makernotes.MaxAp4CurFocal =
          my_roundf(libraw_powf64l(2.0f, ((float)SonySubstitution[buf[0]] / 8.0 - 1.06f) / 2.0f) * 10.0f) / 10.0f;
    if (buf[1])
      imgdata.lens.makernotes.MinAp4CurFocal =
          my_roundf(libraw_powf64l(2.0f, ((float)SonySubstitution[buf[1]] / 8.0 - 1.06f) / 2.0f) * 10.0f) / 10.0f;
  }
  if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
  {
    if (len <= 0x106)
      return;
    // Current aperture, little-endian 8.8 fixed point at 0x3c/0x3d.
    if (buf[0x3d] | buf[0x3c])
    {
      lid = SonySubstitution[buf[0x3d]] << 8 | SonySubstitution[buf[0x3c]];
      imgdata.lens.makernotes.CurAp = libraw_powf64l(2.0f, ((float)lid / 256.0f - 16.0f) / 2.0f);
    }
    // Lens mount byte, unless an adapter (Canon EF / Sigma) already set it.
    if (buf[0x105] && (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF) &&
        (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Sigma_X3F))
      imgdata.lens.makernotes.LensMount = SonySubstitution[buf[0x105]];
    if (buf[0x106])
      imgdata.lens.makernotes.LensFormat = SonySubstitution[buf[0x106]];
  }
  if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E)
  {
    if (len <= 0x108)
      return;
    parseSonyLensType2(SonySubstitution[buf[0x0108]], // LensType2 - Sony lens ids
                       SonySubstitution[buf[0x0107]]);
  }
  if (len <= 0x10a)
    return;
  // A-mount LensType at 0x109/0x10a (little-endian); only if no id yet.
  if ((imgdata.lens.makernotes.LensID == -1) && (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Minolta_A) &&
      (buf[0x010a] | buf[0x0109]))
  {
    imgdata.lens.makernotes.LensID = // LensType - Minolta/Sony lens ids
        SonySubstitution[buf[0x010a]] << 8 | SonySubstitution[buf[0x0109]];
    // Ids offset by 0x4900 indicate a Sigma MC-11 adapter; by 0xEF00, a
    // Canon EF adapter. Strip the offset to get the native lens id.
    if ((imgdata.lens.makernotes.LensID > 0x4900) && (imgdata.lens.makernotes.LensID <= 0x5900))
    {
      imgdata.lens.makernotes.AdapterID = 0x4900;
      imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sigma_X3F;
      strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
    }
    else if ((imgdata.lens.makernotes.LensID > 0xEF00) && (imgdata.lens.makernotes.LensID < 0xFFFF) &&
             (imgdata.lens.makernotes.LensID != 0xFF00))
    {
      imgdata.lens.makernotes.AdapterID = 0xEF00;
      imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
    }
  }
  // Lens feature bits live at a body-dependent offset.
  if ((id >= 286) && (id <= 293))
  {
    if (len <= 0x116)
      return;
    // "SLT-A65", "SLT-A77", "NEX-7", "NEX-VG20E",
    // "SLT-A37", "SLT-A57", "NEX-F3", "Lunar"
    parseSonyLensFeatures(SonySubstitution[buf[0x115]], SonySubstitution[buf[0x116]]);
  }
  else if (imgdata.lens.makernotes.CameraMount != LIBRAW_MOUNT_FixedLens)
  {
    if (len <= 0x117)
      return;
    parseSonyLensFeatures(SonySubstitution[buf[0x116]], SonySubstitution[buf[0x117]]);
  }
  // Internal body serial number; width and offset differ per body family.
  if ((id == 347) || (id == 350) || (id == 354) || (id == 357) || (id == 358) || (id == 360) || (id == 362))
  {
    if (len <= 0x8d)
      return;
    unsigned long long b88 = SonySubstitution[buf[0x88]];
    unsigned long long b89 = SonySubstitution[buf[0x89]];
    unsigned long long b8a = SonySubstitution[buf[0x8a]];
    unsigned long long b8b = SonySubstitution[buf[0x8b]];
    unsigned long long b8c = SonySubstitution[buf[0x8c]];
    unsigned long long b8d = SonySubstitution[buf[0x8d]];
    sprintf(imgdata.shootinginfo.InternalBodySerial, "%06llx",
            (b88 << 40) + (b89 << 32) + (b8a << 24) + (b8b << 16) + (b8c << 8) + b8d);
  }
  else if (imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Minolta_A)
  {
    if (len <= 0xf4)
      return;
    unsigned long long bf0 = SonySubstitution[buf[0xf0]];
    unsigned long long bf1 = SonySubstitution[buf[0xf1]];
    unsigned long long bf2 = SonySubstitution[buf[0xf2]];
    unsigned long long bf3 = SonySubstitution[buf[0xf3]];
    unsigned long long bf4 = SonySubstitution[buf[0xf4]];
    sprintf(imgdata.shootinginfo.InternalBodySerial, "%05llx",
            (bf0 << 32) + (bf1 << 24) + (bf2 << 16) + (bf3 << 8) + bf4);
  }
  else if ((imgdata.lens.makernotes.CameraMount == LIBRAW_MOUNT_Sony_E) && (id != 288) && (id != 289) && (id != 290))
  {
    if (len <= 0x7f)
      return;
    unsigned b7c = SonySubstitution[buf[0x7c]];
    unsigned b7d = SonySubstitution[buf[0x7d]];
    unsigned b7e = SonySubstitution[buf[0x7e]];
    unsigned b7f = SonySubstitution[buf[0x7f]];
    sprintf(imgdata.shootinginfo.InternalBodySerial, "%04x", (b7c << 24) + (b7d << 16) + (b7e << 8) + b7f);
  }
  // Shutter-count-like ImageCount3, at the offset set by setSonyBodyFeatures().
  if ((imgdata.makernotes.sony.ImageCount3_offset != 0xffff) &&
      (len >= (imgdata.makernotes.sony.ImageCount3_offset + 4)))
  {
    FORC4 s[c] = SonySubstitution[buf[imgdata.makernotes.sony.ImageCount3_offset + c]];
    imgdata.makernotes.sony.ImageCount3 = sget4(s);
  }
  if (id == 362)
  {
    // 6-byte timestamp at 0x66 — presumably date/time fields; layout not
    // evident from this code, verify against ExifTool if extending.
    for (c = 0; c < 6; c++)
    {
      imgdata.makernotes.sony.TimeStamp[c] = SonySubstitution[buf[0x0066 + c]];
    }
  }
  return;
}
/*
 * Parse Sony enciphered tag 0x9400 (shot/sequence bookkeeping). The first
 * byte selects one of three known layouts ('a', 'b', 'c'); unrecognized
 * layouts are ignored. All bytes pass through SonySubstitution[].
 */
void CLASS process_Sony_0x9400(uchar *buf, ushort len, unsigned id)
{
  uchar s[4];
  int c;
  short bufx = buf[0];
  // Version byte 0x23/0x24/0x26 => 'c' layout.
  if (((bufx == 0x23) || (bufx == 0x24) || (bufx == 0x26)) && (len >= 0x1f))
  { // 0x9400 'c' version
    // Newer bodies store ShotNumberSincePowerUp as a single byte.
    if ((id == 358) || (id == 362) || (id == 365))
    {
      imgdata.makernotes.sony.ShotNumberSincePowerUp = SonySubstitution[buf[0x0a]];
    }
    else
    {
      FORC4 s[c] = SonySubstitution[buf[0x0a + c]];
      imgdata.makernotes.sony.ShotNumberSincePowerUp = sget4(s);
    }
    imgdata.makernotes.sony.Sony0x9400_version = 0xc;
    imgdata.makernotes.sony.Sony0x9400_ReleaseMode2 = SonySubstitution[buf[0x09]];
    FORC4 s[c] = SonySubstitution[buf[0x12 + c]];
    imgdata.makernotes.sony.Sony0x9400_SequenceImageNumber = sget4(s);
    imgdata.makernotes.sony.Sony0x9400_SequenceLength1 = SonySubstitution[buf[0x16]]; // shots
    FORC4 s[c] = SonySubstitution[buf[0x1a + c]];
    imgdata.makernotes.sony.Sony0x9400_SequenceFileNumber = sget4(s);
    imgdata.makernotes.sony.Sony0x9400_SequenceLength2 = SonySubstitution[buf[0x1e]]; // files
  }
  else if ((bufx == 0x0c) && (len >= 0x1f))
  { // 0x9400 'b' version
    imgdata.makernotes.sony.Sony0x9400_version = 0xb;
    FORC4 s[c] = SonySubstitution[buf[0x08 + c]];
    imgdata.makernotes.sony.Sony0x9400_SequenceImageNumber = sget4(s);
    FORC4 s[c] = SonySubstitution[buf[0x0c + c]];
    imgdata.makernotes.sony.Sony0x9400_SequenceFileNumber = sget4(s);
    imgdata.makernotes.sony.Sony0x9400_ReleaseMode2 = SonySubstitution[buf[0x10]];
    imgdata.makernotes.sony.Sony0x9400_SequenceLength1 = SonySubstitution[buf[0x1e]];
  }
  else if ((bufx == 0x0a) && (len >= 0x23))
  { // 0x9400 'a' version
    imgdata.makernotes.sony.Sony0x9400_version = 0xa;
    FORC4 s[c] = SonySubstitution[buf[0x08 + c]];
    imgdata.makernotes.sony.Sony0x9400_SequenceImageNumber = sget4(s);
    FORC4 s[c] = SonySubstitution[buf[0x0c + c]];
    imgdata.makernotes.sony.Sony0x9400_SequenceFileNumber = sget4(s);
    imgdata.makernotes.sony.Sony0x9400_ReleaseMode2 = SonySubstitution[buf[0x10]];
    imgdata.makernotes.sony.Sony0x9400_SequenceLength1 = SonySubstitution[buf[0x22]];
  }
  else
    return;
}
/*
 * Ambient temperature from Sony enciphered tag 0x9402.
 * Skipped on SLT/ILCA bodies (different payload layout); version bytes
 * 0x05/0xff, or buf[2] != 0xff, mark data we do not decode.
 */
void CLASS process_Sony_0x9402(uchar *buf, ushort len)
{
  if ((imgdata.makernotes.sony.SonyCameraType == LIBRAW_SONY_SLT) ||
      (imgdata.makernotes.sony.SonyCameraType == LIBRAW_SONY_ILCA))
    return;
  if (len < 5)
    return;
  const uchar version = buf[0x00];
  if (version == 0x05)
    return;
  if (version == 0xff)
    return;
  if (buf[0x02] != 0xff)
    return;
  imgdata.other.AmbientTemperature = (float)((short)SonySubstitution[buf[0x04]]);
}
/*
 * Sensor temperature from Sony enciphered tag 0x9403.
 * A deciphered marker byte of 0x00 or 0x94 at offset 4 indicates the
 * temperature field is not valid.
 */
void CLASS process_Sony_0x9403(uchar *buf, ushort len)
{
  if (len < 6)
    return;
  const uchar marker = SonySubstitution[buf[4]];
  if ((marker == 0x00) || (marker == 0x94))
    return;
  imgdata.other.SensorTemperature = (float)((short)SonySubstitution[buf[5]]);
}
/*
 * Battery temperature from Sony enciphered tag 0x9406.
 * Only layouts whose first byte is 0x01/0x08/0x1b and whose third byte is
 * 0x08/0x1b are decoded; the stored value is degrees Fahrenheit.
 */
void CLASS process_Sony_0x9406(uchar *buf, ushort len)
{
  if (len < 6)
    return;
  const uchar v0 = buf[0];
  if (!((v0 == 0x01) || (v0 == 0x08) || (v0 == 0x1b)))
    return;
  const uchar v2 = buf[2];
  if (!((v2 == 0x08) || (v2 == 0x1b)))
    return;
  imgdata.other.BatteryTemperature = (float)(SonySubstitution[buf[5]] - 32) / 1.8f;
}
/*
 * E-mount lens information from Sony enciphered tag 0x940c.
 * Only present on ILCE/NEX bodies: byte 8 encodes the lens mount, bytes
 * 9/10 the LensType2 id (handed to parseSonyLensType2 when plausible).
 */
void CLASS process_Sony_0x940c(uchar *buf, ushort len)
{
  const unsigned body_type = imgdata.makernotes.sony.SonyCameraType;
  if ((body_type != LIBRAW_SONY_ILCE) && (body_type != LIBRAW_SONY_NEX))
    return;
  if (len <= 0x000a)
    return;
  // Do not override a mount already identified via a Canon-EF or Sigma adapter.
  if ((imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF) &&
      (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Sigma_X3F))
  {
    const uchar mount_code = SonySubstitution[buf[0x0008]];
    if ((mount_code == 1) || (mount_code == 5))
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
    else if (mount_code == 4)
      imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
  }
  const ushort lens_type2 =
      (((ushort)SonySubstitution[buf[0x000a]]) << 8) | ((ushort)SonySubstitution[buf[0x0009]]);
  if ((lens_type2 > 0) && (lens_type2 < 32784))
    parseSonyLensType2(SonySubstitution[buf[0x000a]], // LensType2 - Sony lens ids
                       SonySubstitution[buf[0x0009]]);
}
/*
 * AF micro-adjustment from Sony enciphered tag 0x940e.
 * Bodies 286/287/294 store the value at 0x17d; ILCA bodies at 0x50.
 * Any non-zero adjustment also sets the AFMicroAdjOn flag.
 */
void CLASS process_Sony_0x940e(uchar *buf, ushort len, unsigned id)
{
  const bool slt_layout = ((id == 286) || (id == 287) || (id == 294)) && (len >= 0x017e);
  if (slt_layout)
  {
    imgdata.makernotes.sony.AFMicroAdjValue = SonySubstitution[buf[0x017d]];
  }
  else if ((imgdata.makernotes.sony.SonyCameraType == LIBRAW_SONY_ILCA) && (len >= 0x0051))
  {
    imgdata.makernotes.sony.AFMicroAdjValue = SonySubstitution[buf[0x0050]];
  }
  else
  {
    return;
  }
  if (imgdata.makernotes.sony.AFMicroAdjValue != 0)
    imgdata.makernotes.sony.AFMicroAdjOn = 1;
}
void CLASS parseSonyMakernotes(unsigned tag, unsigned type, unsigned len, unsigned dng_writer, uchar *&table_buf_0x0116,
ushort &table_buf_0x0116_len, uchar *&table_buf_0x2010, ushort &table_buf_0x2010_len,
uchar *&table_buf_0x9050, ushort &table_buf_0x9050_len, uchar *&table_buf_0x9400,
ushort &table_buf_0x9400_len, uchar *&table_buf_0x9402, ushort &table_buf_0x9402_len,
uchar *&table_buf_0x9403, ushort &table_buf_0x9403_len, uchar *&table_buf_0x9406,
ushort &table_buf_0x9406_len, uchar *&table_buf_0x940c, ushort &table_buf_0x940c_len,
uchar *&table_buf_0x940e, ushort &table_buf_0x940e_len)
{
ushort lid;
uchar *table_buf;
if (tag == 0xb001) // Sony ModelID
{
unique_id = get2();
setSonyBodyFeatures(unique_id);
if (table_buf_0x0116_len)
{
process_Sony_0x0116(table_buf_0x0116, table_buf_0x0116_len, unique_id);
free(table_buf_0x0116);
table_buf_0x0116_len = 0;
}
if (table_buf_0x2010_len)
{
process_Sony_0x2010(table_buf_0x2010, table_buf_0x2010_len);
free(table_buf_0x2010);
table_buf_0x2010_len = 0;
}
if (table_buf_0x9050_len)
{
process_Sony_0x9050(table_buf_0x9050, table_buf_0x9050_len, unique_id);
free(table_buf_0x9050);
table_buf_0x9050_len = 0;
}
if (table_buf_0x9400_len)
{
process_Sony_0x9400(table_buf_0x9400, table_buf_0x9400_len, unique_id);
free(table_buf_0x9400);
table_buf_0x9400_len = 0;
}
if (table_buf_0x9402_len)
{
process_Sony_0x9402(table_buf_0x9402, table_buf_0x9402_len);
free(table_buf_0x9402);
table_buf_0x9402_len = 0;
}
if (table_buf_0x9403_len)
{
process_Sony_0x9403(table_buf_0x9403, table_buf_0x9403_len);
free(table_buf_0x9403);
table_buf_0x9403_len = 0;
}
if (table_buf_0x9406_len)
{
process_Sony_0x9406(table_buf_0x9406, table_buf_0x9406_len);
free(table_buf_0x9406);
table_buf_0x9406_len = 0;
}
if (table_buf_0x940c_len)
{
process_Sony_0x940c(table_buf_0x940c, table_buf_0x940c_len);
free(table_buf_0x940c);
table_buf_0x940c_len = 0;
}
if (table_buf_0x940e_len)
{
process_Sony_0x940e(table_buf_0x940e, table_buf_0x940e_len, unique_id);
free(table_buf_0x940e);
table_buf_0x940e_len = 0;
}
}
else if ((tag == 0x0010) && // CameraInfo
strncasecmp(model, "DSLR-A100", 9) && strncasecmp(model, "NEX-5C", 6) && !strncasecmp(make, "SONY", 4) &&
((len == 368) || // a700
(len == 5478) || // a850, a900
(len == 5506) || // a200, a300, a350
(len == 6118) || // a230, a290, a330, a380, a390
// a450, a500, a550, a560, a580
// a33, a35, a55
// NEX3, NEX5, NEX5C, NEXC3, VG10E
(len == 15360)))
{
table_buf = (uchar *)malloc(len);
fread(table_buf, len, 1, ifp);
if (memcmp(table_buf, "\xff\xff\xff\xff\xff\xff\xff\xff", 8) &&
memcmp(table_buf, "\x00\x00\x00\x00\x00\x00\x00\x00", 8))
{
switch (len)
{
case 368:
case 5478:
// a700, a850, a900: CameraInfo
if ((!dng_writer) ||
(saneSonyCameraInfo(table_buf[0], table_buf[3], table_buf[2], table_buf[5], table_buf[4], table_buf[7])))
{
if (table_buf[0] | table_buf[3])
imgdata.lens.makernotes.MinFocal = bcd2dec(table_buf[0]) * 100 + bcd2dec(table_buf[3]);
if (table_buf[2] | table_buf[5])
imgdata.lens.makernotes.MaxFocal = bcd2dec(table_buf[2]) * 100 + bcd2dec(table_buf[5]);
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[4]) / 10.0f;
if (table_buf[4])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[7]) / 10.0f;
parseSonyLensFeatures(table_buf[1], table_buf[6]);
if (len == 5478)
{
imgdata.makernotes.sony.AFMicroAdjValue = table_buf[304] - 20;
imgdata.makernotes.sony.AFMicroAdjOn = (((table_buf[305] & 0x80) == 0x80) ? 1 : 0);
imgdata.makernotes.sony.AFMicroAdjRegisteredLenses = table_buf[305] & 0x7f;
}
}
break;
default:
// CameraInfo2 & 3
if ((!dng_writer) ||
(saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3], table_buf[4], table_buf[5], table_buf[6])))
{
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal = bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal = bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
}
}
free(table_buf);
}
else if ((!dng_writer) && (tag == 0x0020) && // WBInfoA100, needs 0xb028 processing
!strncasecmp(model, "DSLR-A100", 9))
{
fseek(ifp, 0x49dc, SEEK_CUR);
stmread(imgdata.shootinginfo.InternalBodySerial, 12, ifp);
}
else if (tag == 0x0104)
{
imgdata.other.FlashEC = getreal(type);
}
else if (tag == 0x0105) // Teleconverter
{
imgdata.lens.makernotes.TeleconverterID = get2();
}
else if (tag == 0x0114 && len < 256000) // CameraSettings
{
table_buf = (uchar *)malloc(len);
fread(table_buf, len, 1, ifp);
switch (len)
{
case 280:
case 364:
case 332:
// CameraSettings and CameraSettings2 are big endian
if (table_buf[2] | table_buf[3])
{
lid = (((ushort)table_buf[2]) << 8) | ((ushort)table_buf[3]);
imgdata.lens.makernotes.CurAp = libraw_powf64l(2.0f, ((float)lid / 8.0f - 1.0f) / 2.0f);
}
break;
case 1536:
case 2048:
// CameraSettings3 are little endian
parseSonyLensType2(table_buf[1016], table_buf[1015]);
if (imgdata.lens.makernotes.LensMount != LIBRAW_MOUNT_Canon_EF)
{
switch (table_buf[153])
{
case 16:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Minolta_A;
break;
case 17:
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sony_E;
break;
}
}
break;
}
free(table_buf);
}
else if ((tag == 0x3000) && (len < 256000))
{
uchar *table_buf_0x3000;
table_buf_0x3000 = (uchar *)malloc(len);
fread(table_buf_0x3000, len, 1, ifp);
for (int i = 0; i < 20; i++)
imgdata.makernotes.sony.SonyDateTime[i] = table_buf_0x3000[6 + i];
}
else if (tag == 0x0116 && len < 256000)
{
table_buf_0x0116 = (uchar *)malloc(len);
table_buf_0x0116_len = len;
fread(table_buf_0x0116, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x0116(table_buf_0x0116, table_buf_0x0116_len, imgdata.lens.makernotes.CamID);
free(table_buf_0x0116);
table_buf_0x0116_len = 0;
}
}
else if (tag == 0x2010 && len < 256000)
{
table_buf_0x2010 = (uchar *)malloc(len);
table_buf_0x2010_len = len;
fread(table_buf_0x2010, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x2010(table_buf_0x2010, table_buf_0x2010_len);
free(table_buf_0x2010);
table_buf_0x2010_len = 0;
}
}
else if (tag == 0x201a)
{
imgdata.makernotes.sony.ElectronicFrontCurtainShutter = get4();
}
else if (tag == 0x201b)
{
uchar uc;
fread(&uc, 1, 1, ifp);
imgdata.shootinginfo.FocusMode = (short)uc;
}
else if (tag == 0x202c)
{
imgdata.makernotes.sony.MeteringMode2 = get2();
}
else if (tag == 0x9050 && len < 256000) // little endian
{
table_buf_0x9050 = (uchar *)malloc(len);
table_buf_0x9050_len = len;
fread(table_buf_0x9050, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9050(table_buf_0x9050, table_buf_0x9050_len, imgdata.lens.makernotes.CamID);
free(table_buf_0x9050);
table_buf_0x9050_len = 0;
}
}
else if (tag == 0x9400 && len < 256000)
{
table_buf_0x9400 = (uchar *)malloc(len);
table_buf_0x9400_len = len;
fread(table_buf_0x9400, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9400(table_buf_0x9400, table_buf_0x9400_len, unique_id);
free(table_buf_0x9400);
table_buf_0x9400_len = 0;
}
}
else if (tag == 0x9402 && len < 256000)
{
table_buf_0x9402 = (uchar *)malloc(len);
table_buf_0x9402_len = len;
fread(table_buf_0x9402, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9402(table_buf_0x9402, table_buf_0x9402_len);
free(table_buf_0x9402);
table_buf_0x9402_len = 0;
}
}
else if (tag == 0x9403 && len < 256000)
{
table_buf_0x9403 = (uchar *)malloc(len);
table_buf_0x9403_len = len;
fread(table_buf_0x9403, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9403(table_buf_0x9403, table_buf_0x9403_len);
free(table_buf_0x9403);
table_buf_0x9403_len = 0;
}
}
else if ((tag == 0x9405) && (len < 256000) && (len > 0x64))
{
uchar *table_buf_0x9405;
table_buf_0x9405 = (uchar *)malloc(len);
fread(table_buf_0x9405, len, 1, ifp);
uchar bufx = table_buf_0x9405[0x0];
if (imgdata.other.real_ISO < 0.1f)
{
if ((bufx == 0x25) || (bufx == 0x3a) || (bufx == 0x76) || (bufx == 0x7e) || (bufx == 0x8b) || (bufx == 0x9a) ||
(bufx == 0xb3) || (bufx == 0xe1))
{
uchar s[2];
s[0] = SonySubstitution[table_buf_0x9405[0x04]];
s[1] = SonySubstitution[table_buf_0x9405[0x05]];
imgdata.other.real_ISO = 100.0f * libraw_powf64l(2.0f, (16 - ((float)sget2(s)) / 256.0f));
}
}
free(table_buf_0x9405);
}
else if (tag == 0x9406 && len < 256000)
{
table_buf_0x9406 = (uchar *)malloc(len);
table_buf_0x9406_len = len;
fread(table_buf_0x9406, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x9406(table_buf_0x9406, table_buf_0x9406_len);
free(table_buf_0x9406);
table_buf_0x9406_len = 0;
}
}
else if (tag == 0x940c && len < 256000)
{
table_buf_0x940c = (uchar *)malloc(len);
table_buf_0x940c_len = len;
fread(table_buf_0x940c, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x940c(table_buf_0x940c, table_buf_0x940c_len);
free(table_buf_0x940c);
table_buf_0x940c_len = 0;
}
}
else if (tag == 0x940e && len < 256000)
{
table_buf_0x940e = (uchar *)malloc(len);
table_buf_0x940e_len = len;
fread(table_buf_0x940e, len, 1, ifp);
if (imgdata.lens.makernotes.CamID)
{
process_Sony_0x940e(table_buf_0x940e, table_buf_0x940e_len, imgdata.lens.makernotes.CamID);
free(table_buf_0x940e);
table_buf_0x940e_len = 0;
}
}
else if (((tag == 0xb027) || (tag == 0x010c)) && (imgdata.lens.makernotes.LensID == -1))
{
imgdata.lens.makernotes.LensID = get4();
if ((imgdata.lens.makernotes.LensID > 0x4900) && (imgdata.lens.makernotes.LensID <= 0x5900))
{
imgdata.lens.makernotes.AdapterID = 0x4900;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Sigma_X3F;
strcpy(imgdata.lens.makernotes.Adapter, "MC-11");
}
else if ((imgdata.lens.makernotes.LensID > 0xEF00) && (imgdata.lens.makernotes.LensID < 0xFFFF) &&
(imgdata.lens.makernotes.LensID != 0xFF00))
{
imgdata.lens.makernotes.AdapterID = 0xEF00;
imgdata.lens.makernotes.LensID -= imgdata.lens.makernotes.AdapterID;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Canon_EF;
}
if (tag == 0x010c)
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Minolta_A;
}
else if (tag == 0xb02a && len < 256000) // Sony LensSpec
{
table_buf = (uchar *)malloc(len);
fread(table_buf, len, 1, ifp);
if ((!dng_writer) ||
(saneSonyCameraInfo(table_buf[1], table_buf[2], table_buf[3], table_buf[4], table_buf[5], table_buf[6])))
{
if (table_buf[1] | table_buf[2])
imgdata.lens.makernotes.MinFocal = bcd2dec(table_buf[1]) * 100 + bcd2dec(table_buf[2]);
if (table_buf[3] | table_buf[4])
imgdata.lens.makernotes.MaxFocal = bcd2dec(table_buf[3]) * 100 + bcd2dec(table_buf[4]);
if (table_buf[5])
imgdata.lens.makernotes.MaxAp4MinFocal = bcd2dec(table_buf[5]) / 10.0f;
if (table_buf[6])
imgdata.lens.makernotes.MaxAp4MaxFocal = bcd2dec(table_buf[6]) / 10.0f;
parseSonyLensFeatures(table_buf[0], table_buf[7]);
}
free(table_buf);
}
else if ((tag == 0xb02b) && !imgdata.sizes.raw_crop.cwidth && (len == 2))
{
imgdata.sizes.raw_crop.cheight = get4();
imgdata.sizes.raw_crop.cwidth = get4();
}
}
/*
 * Parse a vendor MakerNote IFD reached via DNG tag 0xc634 (DNGPrivateData).
 *
 * base       - TIFF offset base used to resolve IFD value offsets; may be
 *              re-anchored below depending on the detected MakerNote header.
 * uptag      - parent tag of a nested makernote (OR-ed into the high 16 bits
 *              of each local tag so nested Olympus sub-IFDs get unique ids).
 * dng_writer - which converter produced the DNG (AdobeDNG / CameraDNG);
 *              several vendor quirks depend on it.
 *
 * Side effects: reads from the global input stream `ifp`, temporarily
 * changes the global byte order `order` (restored at `quit:`), and fills
 * fields of the global `imgdata` / `unique_id` / `OlyID` state.
 */
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{
  unsigned ver97 = 0, offset = 0, entries, tag, type, len, save, c;
  unsigned i;
  uchar NikonKey, ci, cj, ck;
  unsigned serial = 0;
  unsigned custom_serial = 0;
  unsigned NikonLensDataVersion = 0;
  unsigned lenNikonLensData = 0;
  unsigned NikonFlashInfoVersion = 0;
  // Canon CameraInfo is buffered until tag 0x10 (ModelID) identifies the body.
  uchar *CanonCameraInfo;
  unsigned lenCanonCameraInfo = 0;
  unsigned typeCanonCameraInfo = 0;
  uchar *table_buf;
  // Sony tag payloads are buffered (pointer + length pairs) because their
  // decoding needs the camera ID, which may arrive in a later tag; the
  // buffers are handed to parseSonyMakernotes() near the end of the loop.
  uchar *table_buf_0x0116;
  ushort table_buf_0x0116_len = 0;
  uchar *table_buf_0x2010;
  ushort table_buf_0x2010_len = 0;
  uchar *table_buf_0x9050;
  ushort table_buf_0x9050_len = 0;
  uchar *table_buf_0x9400;
  ushort table_buf_0x9400_len = 0;
  uchar *table_buf_0x9402;
  ushort table_buf_0x9402_len = 0;
  uchar *table_buf_0x9403;
  ushort table_buf_0x9403_len = 0;
  uchar *table_buf_0x9406;
  ushort table_buf_0x9406_len = 0;
  uchar *table_buf_0x940c;
  ushort table_buf_0x940c_len = 0;
  uchar *table_buf_0x940e;
  ushort table_buf_0x940e_len = 0;
  short morder, sorder = order; // sorder: byte order to restore on exit
  char buf[10];
  INT64 fsize = ifp->size(); // file size, used to reject absurd tag extents
  // The first 10 bytes identify which MakerNote header variant follows.
  fread(buf, 1, 10, ifp);
  /*
  printf("===>>buf: 0x");
  for (int i = 0; i < sizeof buf; i ++) {
  printf("%02x", buf[i]);
  }
  putchar('\n');
  */
  if (!strcmp(buf, "Nikon"))
  {
    // Nikon makernotes embed a complete TIFF header (byte order, magic 42,
    // first-IFD offset) after the "Nikon" signature.
    base = ftell(ifp);
    order = get2();
    if (get2() != 42)
      goto quit;
    offset = get4();
    fseek(ifp, offset - 8, SEEK_CUR);
  }
  else if (!strcmp(buf, "OLYMPUS") || !strcmp(buf, "PENTAX ") ||
           (!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG)))
  {
    // Signature + embedded byte-order mark; Olympus has 2 extra bytes.
    base = ftell(ifp) - 10;
    fseek(ifp, -2, SEEK_CUR);
    order = get2();
    if (buf[0] == 'O')
      get2();
  }
  else if (!strncmp(buf, "SONY", 4) || !strcmp(buf, "Panasonic"))
  {
    goto nf;
  }
  else if (!strncmp(buf, "FUJIFILM", 8))
  {
    base = ftell(ifp) - 10;
  nf: // SONY / Panasonic / FUJIFILM: always little-endian, 2 pad bytes, no TIFF header
    order = 0x4949;
    fseek(ifp, 2, SEEK_CUR);
  }
  else if (!strcmp(buf, "OLYMP") || !strcmp(buf, "LEICA") || !strcmp(buf, "Ricoh") || !strcmp(buf, "EPSON"))
    fseek(ifp, -2, SEEK_CUR); // 8-byte signature: step back over the 2 extra bytes read
  else if (!strcmp(buf, "AOC") || !strcmp(buf, "QVC"))
    fseek(ifp, -4, SEEK_CUR); // 6-byte signature
  else
  {
    // No recognized signature: the makernote is a bare IFD table.
    fseek(ifp, -10, SEEK_CUR);
    if ((!strncmp(make, "SAMSUNG", 7) && (dng_writer == AdobeDNG)))
      base = ftell(ifp);
  }
  entries = get2();
  if (entries > 1000) // sanity limit against corrupt entry counts
    return;
  morder = order; // each entry starts from the makernote's own byte order
  while (entries--)
  {
    order = morder;
    tiff_get(base, &tag, &type, &len, &save);
    INT64 pos = ifp->tell();
    // Skip tags whose declared payload would extend far past EOF (corrupt file).
    if (len > 8 && pos + len > 2 * fsize)
    {
      fseek(ifp, save, SEEK_SET); // Recover tiff-read position!!
      continue;
    }
    tag |= uptag << 16;
    if (len > 100 * 1024 * 1024)
      goto next; // 100Mb tag? No!
    if (!strncmp(make, "Canon", 5))
    {
      if (tag == 0x000d && len < 256000) // camera info
      {
        // Buffered here; processed once tag 0x10 provides the model ID.
        if (type != 4)
        {
          CanonCameraInfo = (uchar *)malloc(MAX(16, len));
          fread(CanonCameraInfo, len, 1, ifp);
        }
        else
        {
          // LONG-typed data: len counts 4-byte units.
          CanonCameraInfo = (uchar *)malloc(MAX(16, len * 4));
          fread(CanonCameraInfo, len, 4, ifp);
        }
        lenCanonCameraInfo = len;
        typeCanonCameraInfo = type;
      }
      else if (tag == 0x10) // Canon ModelID
      {
        unique_id = get4();
        unique_id = setCanonBodyFeatures(unique_id);
        if (lenCanonCameraInfo)
        {
          processCanonCameraInfo(unique_id, CanonCameraInfo, lenCanonCameraInfo, typeCanonCameraInfo);
          free(CanonCameraInfo);
          CanonCameraInfo = 0;
          lenCanonCameraInfo = 0;
        }
      }
      else
        parseCanonMakernotes(tag, type, len);
    }
    else if (!strncmp(make, "FUJI", 4))
      parseFujiMakernotes(tag, type);
    else if (!strncasecmp(make, "LEICA", 5))
    {
      if ((tag == 0x0320) && (type == 9) && (len == 1) && !strncasecmp(make, "Leica Camera AG", 15) &&
          !strncmp(buf, "LEICA", 5) && (buf[5] == 0) && (buf[6] == 0) && (buf[7] == 0))
        imgdata.other.CameraTemperature = getreal(type);
      if (tag == 0x34003402)
        imgdata.other.CameraTemperature = getreal(type);
      if (((tag == 0x035e) || (tag == 0x035f)) && (type == 10) && (len == 9))
      {
        // Two 3x3 DNG forward matrices (0x035e -> slot 0, 0x035f -> slot 1).
        int ind = tag == 0x035e ? 0 : 1;
        for (int j = 0; j < 3; j++)
          FORCC imgdata.color.dng_color[ind].forwardmatrix[j][c] = getreal(type);
        imgdata.color.dng_color[ind].parsedfields |= LIBRAW_DNGFM_FORWARDMATRIX;
      }
      if ((tag == 0x0303) && (type != 4))
      {
        stmread(imgdata.lens.makernotes.Lens, len, ifp);
      }
      if ((tag == 0x3405) || (tag == 0x0310) || (tag == 0x34003405))
      {
        imgdata.lens.makernotes.LensID = get4();
        // Repack: upper bits shifted to a byte boundary, low 2 bits kept
        // (frame-selector position per ExifTool's Leica LensType notes).
        imgdata.lens.makernotes.LensID =
            ((imgdata.lens.makernotes.LensID >> 2) << 8) | (imgdata.lens.makernotes.LensID & 0x3);
        if (imgdata.lens.makernotes.LensID != -1)
        {
          if ((model[0] == 'M') || !strncasecmp(model, "LEICA M", 7))
          {
            imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
            if (imgdata.lens.makernotes.LensID)
              imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
          }
          else if ((model[0] == 'S') || !strncasecmp(model, "LEICA S", 7))
          {
            imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
            if (imgdata.lens.makernotes.Lens[0])
              imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
          }
        }
      }
      else if (((tag == 0x0313) || (tag == 0x34003406)) && (fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
               ((type == 10) || (type == 5)))
      {
        // Current aperture, only if not already set; >126.3 treated as invalid.
        imgdata.lens.makernotes.CurAp = getreal(type);
        if (imgdata.lens.makernotes.CurAp > 126.3)
          imgdata.lens.makernotes.CurAp = 0.0f;
      }
      else if (tag == 0x3400)
      {
        // Nested Leica makernote directory; recurse with uptag 0x3400.
        parse_makernote(base, 0x3400);
      }
    }
    else if (!strncmp(make, "NIKON", 5))
    {
      if (tag == 0x1d) // serial number
        // Read characters until NUL; non-digit characters select the
        // alternate decryption key index (custom_serial) used at tag 0xa7.
        // NOTE(review): `c` is unsigned, so `c != EOF` only matches fgetc's
        // -1 after wraparound; NUL terminates the loop in the normal case.
        while ((c = fgetc(ifp)) && c != EOF)
        {
          if ((!custom_serial) && (!isdigit(c)))
          {
            if ((strbuflen(model) == 3) && (!strcmp(model, "D50")))
            {
              custom_serial = 34;
            }
            else
            {
              custom_serial = 96;
            }
          }
          serial = serial * 10 + (isdigit(c) ? c - '0' : c % 10);
        }
      else if (tag == 0x000a)
      {
        imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
        imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
      }
      else if (tag == 0x0082) // lens attachment
      {
        stmread(imgdata.lens.makernotes.Attachment, len, ifp);
      }
      else if (tag == 0x0083) // lens type
      {
        imgdata.lens.nikon.NikonLensType = fgetc(ifp);
      }
      else if (tag == 0x0084) // lens
      {
        imgdata.lens.makernotes.MinFocal = getreal(type);
        imgdata.lens.makernotes.MaxFocal = getreal(type);
        imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
        imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
      }
      else if (tag == 0x008b) // lens f-stops
      {
        uchar a, b, c;
        a = fgetc(ifp);
        b = fgetc(ifp);
        c = fgetc(ifp);
        if (c)
        {
          // Value stored in 1/12-stop units: a*b/(c/12).
          imgdata.lens.nikon.NikonLensFStops = a * b * (12 / c);
          imgdata.lens.makernotes.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops / 12.0f;
        }
      }
      else if (tag == 0x0093)
      {
        imgdata.makernotes.nikon.NEFCompression = i = get2();
        if ((i == 7) || (i == 9)) // compression modes used by fixed-lens bodies
        {
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
        }
      }
      else if (tag == 0x0097)
      {
        // 4 ASCII digits -> version number (e.g. "0601" -> 601).
        for (i = 0; i < 4; i++)
          ver97 = ver97 * 10 + fgetc(ifp) - '0';
        if (ver97 == 601) // Coolpix A
        {
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
        }
      }
      else if (tag == 0x0098) // contains lens data
      {
        for (i = 0; i < 4; i++)
        {
          NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
        }
        // Payload length depends on the LensData version.
        switch (NikonLensDataVersion)
        {
        case 100:
          lenNikonLensData = 9;
          break;
        case 101:
        case 201: // encrypted, starting from v.201
        case 202:
        case 203:
          lenNikonLensData = 15;
          break;
        case 204:
          lenNikonLensData = 16;
          break;
        case 400:
          lenNikonLensData = 459;
          break;
        case 401:
          lenNikonLensData = 590;
          break;
        case 402:
          lenNikonLensData = 509;
          break;
        case 403:
          lenNikonLensData = 879;
          break;
        }
        if (lenNikonLensData)
        {
          table_buf = (uchar *)malloc(lenNikonLensData);
          fread(table_buf, lenNikonLensData, 1, ifp);
          if ((NikonLensDataVersion < 201) && lenNikonLensData)
          {
            // Unencrypted versions: process immediately.
            processNikonLensData(table_buf, lenNikonLensData);
            free(table_buf);
            lenNikonLensData = 0;
          }
          // v201+ stays buffered (still encrypted) until the key arrives
          // in tag 0xa7 below.
        }
      }
      else if (tag == 0xa7) // shutter count
      {
        NikonKey = fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp);
        if ((NikonLensDataVersion > 200) && lenNikonLensData)
        {
          // XOR-keystream deobfuscation of the buffered LensData, keyed by
          // the serial number (or custom_serial) and NikonKey via the xlat
          // tables — the scheme inherited from dcraw.
          if (custom_serial)
          {
            ci = xlat[0][custom_serial];
          }
          else
          {
            ci = xlat[0][serial & 0xff];
          }
          cj = xlat[1][NikonKey];
          ck = 0x60;
          for (i = 0; i < lenNikonLensData; i++)
            table_buf[i] ^= (cj += ci * ck++);
          processNikonLensData(table_buf, lenNikonLensData);
          lenNikonLensData = 0;
          free(table_buf);
        }
      }
      else if (tag == 0x00a8) // contains flash data
      {
        for (i = 0; i < 4; i++)
        {
          NikonFlashInfoVersion = NikonFlashInfoVersion * 10 + fgetc(ifp) - '0';
        }
      }
      else if (tag == 0x00b0)
      {
        get4(); // ME tag version, 4 symbols
        imgdata.makernotes.nikon.ExposureMode = get4();
        imgdata.makernotes.nikon.nMEshots = get4();
        imgdata.makernotes.nikon.MEgainOn = get4();
      }
      else if (tag == 0x00b9)
      {
        // AF fine tune: on/off flag, index, signed adjustment value.
        uchar uc;
        int8_t sc;
        fread(&uc, 1, 1, ifp);
        imgdata.makernotes.nikon.AFFineTune = uc;
        fread(&uc, 1, 1, ifp);
        imgdata.makernotes.nikon.AFFineTuneIndex = uc;
        fread(&sc, 1, 1, ifp);
        imgdata.makernotes.nikon.AFFineTuneAdj = sc;
      }
      else if (tag == 37 && (!iso_speed || iso_speed == 65535))
      {
        // ISO stored as 12*(log2(ISO/100)+5) in one byte; invert that here.
        unsigned char cc;
        fread(&cc, 1, 1, ifp);
        iso_speed = (int)(100.0 * libraw_powf64l(2.0, (double)(cc) / 12.0 - 5.0));
        break;
      }
    }
    else if (!strncmp(make, "OLYMPUS", 7))
    {
      short nWB, tWB;
      // Early E-system bodies write broken sub-IFD offsets; their oversized
      // tags are skipped below via skip_Oly_broken_tags.
      int SubDirOffsetValid = strncmp(model, "E-300", 5) && strncmp(model, "E-330", 5) && strncmp(model, "E-400", 5) &&
                              strncmp(model, "E-500", 5) && strncmp(model, "E-1", 3);
      if ((tag == 0x2010) || (tag == 0x2020) || (tag == 0x2030) || (tag == 0x2031) || (tag == 0x2040) ||
          (tag == 0x2050) || (tag == 0x3000))
      {
        // Nested Olympus sub-IFD: seek to its offset and recurse, carrying
        // the sub-IFD tag as uptag.
        fseek(ifp, save - 4, SEEK_SET);
        fseek(ifp, base + get4(), SEEK_SET);
        parse_makernote_0xc634(base, tag, dng_writer);
      }
      if (!SubDirOffsetValid && ((len > 4) || (((type == 3) || (type == 8)) && (len > 2)) ||
                                 (((type == 4) || (type == 9)) && (len > 1)) || (type == 5) || (type > 9)))
        goto skip_Oly_broken_tags;
      if ((tag >= 0x20400101) && (tag <= 0x20400111))
      {
        // White balance coefficient tables (ImageProcessing sub-IFD).
        if ((tag == 0x20400101) && (len == 2) && (!strncasecmp(model, "E-410", 5) || !strncasecmp(model, "E-510", 5)))
        {
          // These bodies store only R/B; pre-fill G channels with 0x100.
          int i;
          for (i = 0; i < 64; i++)
          {
            imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = imgdata.color.WB_Coeffs[i][1] =
                imgdata.color.WB_Coeffs[i][3] = 0x100;
          }
          for (i = 64; i < 256; i++)
          {
            imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
          }
        }
        nWB = tag - 0x20400101;
        // Oly_wb_list2 pairs: [WB preset index, color temperature].
        tWB = Oly_wb_list2[nWB << 1];
        ushort CT = Oly_wb_list2[(nWB << 1) | 1];
        int wb[4];
        wb[0] = get2();
        wb[2] = get2();
        if (tWB != 0x100) // 0x100 marks "no preset slot" in the table
        {
          imgdata.color.WB_Coeffs[tWB][0] = wb[0];
          imgdata.color.WB_Coeffs[tWB][2] = wb[2];
        }
        if (CT)
        {
          imgdata.color.WBCT_Coeffs[nWB - 1][0] = CT;
          imgdata.color.WBCT_Coeffs[nWB - 1][1] = wb[0];
          imgdata.color.WBCT_Coeffs[nWB - 1][3] = wb[2];
        }
        if (len == 4) // 4-value form also carries the two G coefficients
        {
          wb[1] = get2();
          wb[3] = get2();
          if (tWB != 0x100)
          {
            imgdata.color.WB_Coeffs[tWB][1] = wb[1];
            imgdata.color.WB_Coeffs[tWB][3] = wb[3];
          }
          if (CT)
          {
            imgdata.color.WBCT_Coeffs[nWB - 1][2] = wb[1];
            imgdata.color.WBCT_Coeffs[nWB - 1][4] = wb[3];
          }
        }
      }
      else if ((tag >= 0x20400112) && (tag <= 0x2040011e))
      {
        // Per-preset green-channel correction.
        nWB = tag - 0x20400112;
        int wbG = get2();
        tWB = Oly_wb_list2[nWB << 1];
        if (nWB)
          imgdata.color.WBCT_Coeffs[nWB - 1][2] = imgdata.color.WBCT_Coeffs[nWB - 1][4] = wbG;
        if (tWB != 0x100)
          imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] = wbG;
      }
      else if (tag == 0x2040011f)
      {
        // Green correction for flash and custom WB presets.
        int wbG = get2();
        if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0])
          imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = wbG;
        FORC4 if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + c][0])
            imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + c][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + c][3] =
            wbG;
      }
      else if (tag == 0x20400121)
      {
        imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0] = get2();
        imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][2] = get2();
        if (len == 4)
        {
          imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = get2();
          imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = get2();
        }
      }
      else if ((tag == 0x30000110) && strcmp(software, "v757-71"))
      {
        // Auto WB (RawDevelopment sub-IFD); "v757-71" firmware is excluded.
        imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][0] = get2();
        imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][2] = get2();
        if (len == 2)
        {
          for (int i = 0; i < 256; i++)
            imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
        }
      }
      else if ((((tag >= 0x30000120) && (tag <= 0x30000124)) || ((tag >= 0x30000130) && (tag <= 0x30000133))) &&
               strcmp(software, "v757-71"))
      {
        // Two tag ranges map onto consecutive entries of Oly_wb_list1.
        int wb_ind;
        if (tag <= 0x30000124)
          wb_ind = tag - 0x30000120;
        else
          wb_ind = tag - 0x30000130 + 5;
        imgdata.color.WB_Coeffs[Oly_wb_list1[wb_ind]][0] = get2();
        imgdata.color.WB_Coeffs[Oly_wb_list1[wb_ind]][2] = get2();
      }
      else
      {
        switch (tag)
        {
        case 0x0207:
        case 0x20100100:
        {
          // Camera model id: up to 7 ASCII chars packed into OlyID as a
          // big-endian integer.
          uchar sOlyID[8];
          fread(sOlyID, MIN(len, 7), 1, ifp);
          sOlyID[7] = 0;
          OlyID = sOlyID[0];
          i = 1;
          while (i < 7 && sOlyID[i])
          {
            OlyID = OlyID << 8 | sOlyID[i];
            i++;
          }
          setOlympusBodyFeatures(OlyID);
        }
        break;
        case 0x1002:
          // Aperture stored as APEX value: Av -> f-number via 2^(Av/2).
          imgdata.lens.makernotes.CurAp = libraw_powf64l(2.0f, getreal(type) / 2);
          break;
        case 0x20100102:
          stmread(imgdata.shootinginfo.InternalBodySerial, len, ifp);
          break;
        case 0x20100201:
          // LensID from 4 bytes; the comma operator intentionally discards
          // the second byte (make, --, model-hi, model-lo layout).
          imgdata.lens.makernotes.LensID = (unsigned long long)fgetc(ifp) << 16 |
                                           (unsigned long long)(fgetc(ifp), fgetc(ifp)) << 8 |
                                           (unsigned long long)fgetc(ifp);
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
          imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
          if (((imgdata.lens.makernotes.LensID < 0x20000) || (imgdata.lens.makernotes.LensID > 0x4ffff)) &&
              (imgdata.lens.makernotes.LensID & 0x10))
          {
            imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
          }
          break;
        case 0x20100202:
          if ((!imgdata.lens.LensSerial[0]))
            stmread(imgdata.lens.LensSerial, len, ifp);
          break;
        case 0x20100203:
          stmread(imgdata.lens.makernotes.Lens, len, ifp);
          break;
        case 0x20100205:
          // Apertures stored in 1/256-EV steps relative to f/1: sqrt(2)^x.
          imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64l(sqrt(2.0f), get2() / 256.0f);
          break;
        case 0x20100206:
          imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64l(sqrt(2.0f), get2() / 256.0f);
          break;
        case 0x20100207:
          imgdata.lens.makernotes.MinFocal = (float)get2();
          break;
        case 0x20100208:
          imgdata.lens.makernotes.MaxFocal = (float)get2();
          // >1000mm is implausible here: treat as prime (max = min).
          if (imgdata.lens.makernotes.MaxFocal > 1000.0f)
            imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
          break;
        case 0x2010020a:
          imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64l(sqrt(2.0f), get2() / 256.0f);
          break;
        case 0x20100301:
          // Teleconverter id: byte0 << 8 | byte2 (middle byte skipped).
          imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
          fgetc(ifp);
          imgdata.lens.makernotes.TeleconverterID = imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
          break;
        case 0x20100303:
          stmread(imgdata.lens.makernotes.Teleconverter, len, ifp);
          break;
        case 0x20100403:
          stmread(imgdata.lens.makernotes.Attachment, len, ifp);
          break;
        case 0x20200306:
        {
          uchar uc;
          fread(&uc, 1, 1, ifp);
          imgdata.makernotes.olympus.AFFineTune = uc;
        }
        break;
        case 0x20200307:
          FORC3 imgdata.makernotes.olympus.AFFineTuneAdj[c] = get2();
          break;
        case 0x20200401:
          imgdata.other.FlashEC = getreal(type);
          break;
        case 0x1007:
          imgdata.other.SensorTemperature = (float)get2();
          break;
        case 0x1008:
          imgdata.other.LensTemperature = (float)get2();
          break;
        case 0x20401306:
        {
          // Camera temperature; values >= 61 are in Fahrenheit and are
          // converted to Celsius. 0 and 100 are sentinel/invalid readings.
          int temp = get2();
          if ((temp != 0) && (temp != 100))
          {
            if (temp < 61)
              imgdata.other.CameraTemperature = (float)temp;
            else
              imgdata.other.CameraTemperature = (float)(temp - 32) / 1.8f;
            if ((OlyID == 0x4434353933ULL) && // TG-5
                (imgdata.other.exifAmbientTemperature > -273.15f))
              imgdata.other.CameraTemperature += imgdata.other.exifAmbientTemperature;
          }
        }
        break;
        case 0x20501500:
          if (OlyID != 0x0ULL)
          {
            // Sensor temperature: raw on E-1/E-M5 or multi-value tags,
            // otherwise a calibration formula for readings above 199.
            short temp = get2();
            if ((OlyID == 0x4434303430ULL) || // E-1
                (OlyID == 0x5330303336ULL) || // E-M5
                (len != 1))
              imgdata.other.SensorTemperature = (float)temp;
            else if ((temp != -32768) && (temp != 0))
            {
              if (temp > 199)
                imgdata.other.SensorTemperature = 86.474958f - 0.120228f * (float)temp;
              else
                imgdata.other.SensorTemperature = (float)temp;
            }
          }
          break;
        }
      }
    skip_Oly_broken_tags:;
    }
    else if (!strncmp(make, "PENTAX", 6) || !strncmp(model, "PENTAX", 6) ||
             (!strncmp(make, "SAMSUNG", 7) && (dng_writer == CameraDNG)))
    {
      if (tag == 0x0005)
      {
        unique_id = get4();
        setPentaxBodyFeatures(unique_id);
      }
      else if (tag == 0x000d)
      {
        imgdata.makernotes.pentax.FocusMode = get2();
      }
      else if (tag == 0x000e)
      {
        imgdata.makernotes.pentax.AFPointSelected = get2();
      }
      else if (tag == 0x000f)
      {
        imgdata.makernotes.pentax.AFPointsInFocus = getint(type);
      }
      else if (tag == 0x0010)
      {
        imgdata.makernotes.pentax.FocusPosition = get2();
      }
      else if (tag == 0x0013)
      {
        imgdata.lens.makernotes.CurAp = (float)get2() / 10.0f;
      }
      else if (tag == 0x0014)
      {
        PentaxISO(get2());
      }
      else if (tag == 0x001d)
      {
        imgdata.lens.makernotes.CurFocal = (float)get4() / 100.0f;
      }
      else if (tag == 0x0034)
      {
        uchar uc;
        FORC4
        {
          fread(&uc, 1, 1, ifp);
          imgdata.makernotes.pentax.DriveMode[c] = uc;
        }
      }
      else if (tag == 0x0038)
      {
        imgdata.sizes.raw_crop.cleft = get2();
        imgdata.sizes.raw_crop.ctop = get2();
      }
      else if (tag == 0x0039)
      {
        imgdata.sizes.raw_crop.cwidth = get2();
        imgdata.sizes.raw_crop.cheight = get2();
      }
      else if (tag == 0x003f)
      {
        // LensID: two bytes, big-endian regardless of stream order.
        imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
      }
      else if (tag == 0x0047)
      {
        imgdata.other.CameraTemperature = (float)fgetc(ifp);
      }
      else if (tag == 0x004d)
      {
        if (type == 9)
          imgdata.other.FlashEC = getreal(type) / 256.0f;
        else
          imgdata.other.FlashEC = (float)((signed short)fgetc(ifp)) / 6.0f;
      }
      else if (tag == 0x0072)
      {
        imgdata.makernotes.pentax.AFAdjustment = get2();
      }
      else if (tag == 0x007e)
      {
        // Stored as negated value; undo the negation for linear_max.
        imgdata.color.linear_max[0] = imgdata.color.linear_max[1] = imgdata.color.linear_max[2] =
            imgdata.color.linear_max[3] = (long)(-1) * get4();
      }
      else if (tag == 0x0207)
      {
        if (len < 65535) // Safety belt
          PentaxLensInfo(imgdata.lens.makernotes.CamID, len);
      }
      else if ((tag >= 0x020d) && (tag <= 0x0214))
      {
        // WB presets; c^(c>>1) maps channel order RGBG -> storage order.
        FORC4 imgdata.color.WB_Coeffs[Pentax_wb_list1[tag - 0x020d]][c ^ (c >> 1)] = get2();
      }
      else if (tag == 0x0221)
      {
        // Color-temperature WB table: count followed by per-entry records.
        int nWB = get2();
        if (nWB <= sizeof(imgdata.color.WBCT_Coeffs) / sizeof(imgdata.color.WBCT_Coeffs[0]))
          for (int i = 0; i < nWB; i++)
          {
            imgdata.color.WBCT_Coeffs[i][0] = (unsigned)0xcfc6 - get2();
            fseek(ifp, 2, SEEK_CUR);
            imgdata.color.WBCT_Coeffs[i][1] = get2();
            imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 0x2000;
            imgdata.color.WBCT_Coeffs[i][3] = get2();
          }
      }
      else if (tag == 0x0215)
      {
        fseek(ifp, 16, SEEK_CUR);
        sprintf(imgdata.shootinginfo.InternalBodySerial, "%d", get4());
      }
      else if (tag == 0x0229)
      {
        stmread(imgdata.shootinginfo.BodySerial, len, ifp);
      }
      else if (tag == 0x022d)
      {
        int wb_ind;
        getc(ifp);
        for (int wb_cnt = 0; wb_cnt < nPentax_wb_list2; wb_cnt++)
        {
          wb_ind = getc(ifp);
          if (wb_ind < nPentax_wb_list2)
            FORC4 imgdata.color.WB_Coeffs[Pentax_wb_list2[wb_ind]][c ^ (c >> 1)] = get2();
        }
      }
      else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
      {
        char LensInfo[20];
        fseek(ifp, 12, SEEK_CUR);
        stread(imgdata.lens.makernotes.Lens, 30, ifp);
        strcat(imgdata.lens.makernotes.Lens, " ");
        stread(LensInfo, 20, ifp);
        strcat(imgdata.lens.makernotes.Lens, LensInfo);
      }
    }
    else if (!strncmp(make, "SAMSUNG", 7) && (dng_writer == AdobeDNG))
    {
      if (tag == 0x0002)
      {
        if (get4() == 0x2000)
        {
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
        }
        else if (!strncmp(model, "NX mini", 7))
        {
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
        }
        else
        {
          imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
        }
      }
      else if (tag == 0x0003)
      {
        imgdata.lens.makernotes.CamID = unique_id = get4();
      }
      else if (tag == 0x0043)
      {
        // Temperature with optional divisor-of-10 scale factor.
        int temp = get4();
        if (temp)
        {
          imgdata.other.CameraTemperature = (float)temp;
          if (get4() == 10)
            imgdata.other.CameraTemperature /= 10.0f;
        }
      }
      else if (tag == 0xa003)
      {
        imgdata.lens.makernotes.LensID = get2();
        if (imgdata.lens.makernotes.LensID)
          imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
      }
      else if (tag == 0xa005)
      {
        stmread(imgdata.lens.InternalLensSerial, len, ifp);
      }
      else if (tag == 0xa019)
      {
        imgdata.lens.makernotes.CurAp = getreal(type);
      }
      else if (tag == 0xa01a)
      {
        imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
        if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
          imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
      }
    }
    else if (!strncasecmp(make, "SONY", 4) || !strncasecmp(make, "Konica", 6) || !strncasecmp(make, "Minolta", 7) ||
             (!strncasecmp(make, "Hasselblad", 10) &&
              (!strncasecmp(model, "Stellar", 7) || !strncasecmp(model, "Lunar", 5) ||
               !strncasecmp(model, "Lusso", 5) || !strncasecmp(model, "HV", 2))))
    {
      // Sony (and rebadged Sony) tags are handled by the shared parser,
      // which owns the deferred per-tag buffers declared above.
      parseSonyMakernotes(tag, type, len, AdobeDNG, table_buf_0x0116, table_buf_0x0116_len, table_buf_0x2010,
                          table_buf_0x2010_len, table_buf_0x9050, table_buf_0x9050_len, table_buf_0x9400,
                          table_buf_0x9400_len, table_buf_0x9402, table_buf_0x9402_len, table_buf_0x9403,
                          table_buf_0x9403_len, table_buf_0x9406, table_buf_0x9406_len, table_buf_0x940c,
                          table_buf_0x940c_len, table_buf_0x940e, table_buf_0x940e_len);
    }
  next:
    fseek(ifp, save, SEEK_SET); // return to the position after this IFD entry
  }
quit:
  order = sorder; // restore the caller's byte order
}
#else
// Stub emitted when the full makernote parser above is compiled out
// (this is the #else branch of the surrounding conditional); keeps call
// sites linking while doing nothing.
void CLASS parse_makernote_0xc634(int base, int uptag, unsigned dng_writer)
{ /*placeholder */
}
#endif
void CLASS parse_makernote(int base, int uptag)
{
unsigned offset = 0, entries, tag, type, len, save, c;
unsigned ver97 = 0, serial = 0, i, wbi = 0, wb[4] = {0, 0, 0, 0};
uchar buf97[324], ci, cj, ck;
short morder, sorder = order;
char buf[10];
unsigned SamsungKey[11];
uchar NikonKey;
#ifdef LIBRAW_LIBRARY_BUILD
unsigned custom_serial = 0;
unsigned NikonLensDataVersion = 0;
unsigned lenNikonLensData = 0;
unsigned NikonFlashInfoVersion = 0;
uchar *CanonCameraInfo;
unsigned lenCanonCameraInfo = 0;
unsigned typeCanonCameraInfo = 0;
uchar *table_buf;
uchar *table_buf_0x0116;
ushort table_buf_0x0116_len = 0;
uchar *table_buf_0x2010;
ushort table_buf_0x2010_len = 0;
uchar *table_buf_0x9050;
ushort table_buf_0x9050_len = 0;
uchar *table_buf_0x9400;
ushort table_buf_0x9400_len = 0;
uchar *table_buf_0x9402;
ushort table_buf_0x9402_len = 0;
uchar *table_buf_0x9403;
ushort table_buf_0x9403_len = 0;
uchar *table_buf_0x9406;
ushort table_buf_0x9406_len = 0;
uchar *table_buf_0x940c;
ushort table_buf_0x940c_len = 0;
uchar *table_buf_0x940e;
ushort table_buf_0x940e_len = 0;
INT64 fsize = ifp->size();
#endif
/*
The MakerNote might have its own TIFF header (possibly with
its own byte-order!), or it might just be a table.
*/
if (!strncmp(make, "Nokia", 5))
return;
fread(buf, 1, 10, ifp);
/*
printf("===>>buf: 0x");
for (int i = 0; i < sizeof buf; i ++) {
printf("%02x", buf[i]);
}
putchar('\n');
*/
if (!strncmp(buf, "KDK", 3) || /* these aren't TIFF tables */
!strncmp(buf, "VER", 3) || !strncmp(buf, "IIII", 4) || !strncmp(buf, "MMMM", 4))
return;
if (!strncmp(buf, "KC", 2) || /* Konica KD-400Z, KD-510Z */
!strncmp(buf, "MLY", 3))
{ /* Minolta DiMAGE G series */
order = 0x4d4d;
while ((i = ftell(ifp)) < data_offset && i < 16384)
{
wb[0] = wb[2];
wb[2] = wb[1];
wb[1] = wb[3];
wb[3] = get2();
if (wb[1] == 256 && wb[3] == 256 && wb[0] > 256 && wb[0] < 640 && wb[2] > 256 && wb[2] < 640)
FORC4 cam_mul[c] = wb[c];
}
goto quit;
}
if (!strcmp(buf, "Nikon"))
{
base = ftell(ifp);
order = get2();
if (get2() != 42)
goto quit;
offset = get4();
fseek(ifp, offset - 8, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMPUS") || !strcmp(buf, "PENTAX "))
{
base = ftell(ifp) - 10;
fseek(ifp, -2, SEEK_CUR);
order = get2();
if (buf[0] == 'O')
get2();
}
else if (!strncmp(buf, "SONY", 4) || !strcmp(buf, "Panasonic"))
{
goto nf;
}
else if (!strncmp(buf, "FUJIFILM", 8))
{
base = ftell(ifp) - 10;
nf:
order = 0x4949;
fseek(ifp, 2, SEEK_CUR);
}
else if (!strcmp(buf, "OLYMP") || !strcmp(buf, "LEICA") || !strcmp(buf, "Ricoh") || !strcmp(buf, "EPSON"))
fseek(ifp, -2, SEEK_CUR);
else if (!strcmp(buf, "AOC") || !strcmp(buf, "QVC"))
fseek(ifp, -4, SEEK_CUR);
else
{
fseek(ifp, -10, SEEK_CUR);
if (!strncmp(make, "SAMSUNG", 7))
base = ftell(ifp);
}
// adjust pos & base for Leica M8/M9/M Mono tags and dir in tag 0x3400
if (!strncasecmp(make, "LEICA", 5))
{
if (!strncmp(model, "M8", 2) || !strncasecmp(model, "Leica M8", 8) || !strncasecmp(model, "LEICA X", 7))
{
base = ftell(ifp) - 8;
}
else if (!strncasecmp(model, "LEICA M (Typ 240)", 17))
{
base = 0;
}
else if (!strncmp(model, "M9", 2) || !strncasecmp(model, "Leica M9", 8) || !strncasecmp(model, "M Monochrom", 11) ||
!strncasecmp(model, "Leica M Monochrom", 11))
{
if (!uptag)
{
base = ftell(ifp) - 10;
fseek(ifp, 8, SEEK_CUR);
}
else if (uptag == 0x3400)
{
fseek(ifp, 10, SEEK_CUR);
base += 10;
}
}
else if (!strncasecmp(model, "LEICA T", 7))
{
base = ftell(ifp) - 8;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_T;
#endif
}
#ifdef LIBRAW_LIBRARY_BUILD
else if (!strncasecmp(model, "LEICA SL", 8))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_SL;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_FF;
}
#endif
}
entries = get2();
if (entries > 1000)
return;
morder = order;
while (entries--)
{
order = morder;
tiff_get(base, &tag, &type, &len, &save);
tag |= uptag << 16;
#ifdef LIBRAW_LIBRARY_BUILD
INT64 _pos = ftell(ifp);
if (len > 8 && _pos + len > 2 * fsize)
{
fseek(ifp, save, SEEK_SET); // Recover tiff-read position!!
continue;
}
if (!strncasecmp(model, "KODAK P880", 10) || !strncasecmp(model, "KODAK P850", 10) ||
!strncasecmp(model, "KODAK P712", 10))
{
if (tag == 0xf90b)
{
imgdata.makernotes.kodak.clipBlack = get2();
}
else if (tag == 0xf90c)
{
imgdata.makernotes.kodak.clipWhite = get2();
}
}
if (!strncmp(make, "Canon", 5))
{
if (tag == 0x000d && len < 256000) // camera info
{
if (type != 4)
{
CanonCameraInfo = (uchar *)malloc(MAX(16, len));
fread(CanonCameraInfo, len, 1, ifp);
}
else
{
CanonCameraInfo = (uchar *)malloc(MAX(16, len * 4));
fread(CanonCameraInfo, len, 4, ifp);
}
lenCanonCameraInfo = len;
typeCanonCameraInfo = type;
}
else if (tag == 0x10) // Canon ModelID
{
unique_id = get4();
unique_id = setCanonBodyFeatures(unique_id);
if (lenCanonCameraInfo)
{
processCanonCameraInfo(unique_id, CanonCameraInfo, lenCanonCameraInfo, typeCanonCameraInfo);
free(CanonCameraInfo);
CanonCameraInfo = 0;
lenCanonCameraInfo = 0;
}
}
else
parseCanonMakernotes(tag, type, len);
}
else if (!strncmp(make, "FUJI", 4))
{
if (tag == 0x0010)
{
char FujiSerial[sizeof(imgdata.shootinginfo.InternalBodySerial)];
char *words[4];
char yy[2], mm[3], dd[3], ystr[16], ynum[16];
int year, nwords, ynum_len;
unsigned c;
stmread(FujiSerial, len, ifp);
nwords = getwords(FujiSerial, words, 4, sizeof(imgdata.shootinginfo.InternalBodySerial));
for (int i = 0; i < nwords; i++)
{
mm[2] = dd[2] = 0;
if (strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) < 18)
if (i == 0)
strncpy(imgdata.shootinginfo.InternalBodySerial, words[0],
sizeof(imgdata.shootinginfo.InternalBodySerial) - 1);
else
{
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
snprintf(tbuf, sizeof(tbuf), "%s %s", imgdata.shootinginfo.InternalBodySerial, words[i]);
strncpy(imgdata.shootinginfo.InternalBodySerial, tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial) - 1);
}
else
{
strncpy(dd, words[i] + strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 14, 2);
strncpy(mm, words[i] + strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 16, 2);
strncpy(yy, words[i] + strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 18, 2);
year = (yy[0] - '0') * 10 + (yy[1] - '0');
if (year < 70)
year += 2000;
else
year += 1900;
ynum_len = (int)strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 18;
strncpy(ynum, words[i], ynum_len);
ynum[ynum_len] = 0;
for (int j = 0; ynum[j] && ynum[j + 1] && sscanf(ynum + j, "%2x", &c); j += 2)
ystr[j / 2] = c;
ystr[ynum_len / 2 + 1] = 0;
strcpy(model2, ystr);
if (i == 0)
{
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
if (nwords == 1)
snprintf(tbuf, sizeof(tbuf), "%s %s %d:%s:%s",
words[0] + strnlen(words[0], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 12, ystr,
year, mm, dd);
else
snprintf(tbuf, sizeof(tbuf), "%s %d:%s:%s %s", ystr, year, mm, dd,
words[0] + strnlen(words[0], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 12);
strncpy(imgdata.shootinginfo.InternalBodySerial, tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial) - 1);
}
else
{
char tbuf[sizeof(imgdata.shootinginfo.InternalBodySerial)];
snprintf(tbuf, sizeof(tbuf), "%s %s %d:%s:%s %s", imgdata.shootinginfo.InternalBodySerial, ystr, year, mm,
dd, words[i] + strnlen(words[i], sizeof(imgdata.shootinginfo.InternalBodySerial) - 1) - 12);
strncpy(imgdata.shootinginfo.InternalBodySerial, tbuf,
sizeof(imgdata.shootinginfo.InternalBodySerial) - 1);
}
}
}
}
else
parseFujiMakernotes(tag, type);
}
else if (!strncasecmp(model, "Hasselblad X1D", 14) || !strncasecmp(model, "Hasselblad H6D", 14) ||
!strncasecmp(model, "Hasselblad A6D", 14))
{
if (tag == 0x0045)
{
imgdata.makernotes.hasselblad.BaseISO = get4();
}
else if (tag == 0x0046)
{
imgdata.makernotes.hasselblad.Gain = getreal(type);
}
}
else if (!strncasecmp(make, "LEICA", 5))
{
if (((tag == 0x035e) || (tag == 0x035f)) && (type == 10) && (len == 9))
{
int ind = tag == 0x035e ? 0 : 1;
for (int j = 0; j < 3; j++)
FORCC imgdata.color.dng_color[ind].forwardmatrix[j][c] = getreal(type);
imgdata.color.dng_color[ind].parsedfields |= LIBRAW_DNGFM_FORWARDMATRIX;
}
if (tag == 0x34003402)
imgdata.other.CameraTemperature = getreal(type);
if ((tag == 0x0320) && (type == 9) && (len == 1) && !strncasecmp(make, "Leica Camera AG", 15) &&
!strncmp(buf, "LEICA", 5) && (buf[5] == 0) && (buf[6] == 0) && (buf[7] == 0))
imgdata.other.CameraTemperature = getreal(type);
if ((tag == 0x0303) && (type != 4))
{
stmread(imgdata.lens.makernotes.Lens, len, ifp);
}
if ((tag == 0x3405) || (tag == 0x0310) || (tag == 0x34003405))
{
imgdata.lens.makernotes.LensID = get4();
imgdata.lens.makernotes.LensID =
((imgdata.lens.makernotes.LensID >> 2) << 8) | (imgdata.lens.makernotes.LensID & 0x3);
if (imgdata.lens.makernotes.LensID != -1)
{
if ((model[0] == 'M') || !strncasecmp(model, "LEICA M", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_M;
}
else if ((model[0] == 'S') || !strncasecmp(model, "LEICA S", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_S;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Leica_S;
}
}
}
else if (((tag == 0x0313) || (tag == 0x34003406)) && (fabs(imgdata.lens.makernotes.CurAp) < 0.17f) &&
((type == 10) || (type == 5)))
{
imgdata.lens.makernotes.CurAp = getreal(type);
if (imgdata.lens.makernotes.CurAp > 126.3)
imgdata.lens.makernotes.CurAp = 0.0f;
}
else if (tag == 0x3400)
{
parse_makernote(base, 0x3400);
}
}
else if (!strncmp(make, "NIKON", 5))
{
if (tag == 0x000a)
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
else if (tag == 0x0012)
{
char a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
imgdata.other.FlashEC = (float)(a * b) / (float)c;
}
else if (tag == 0x003b) // all 1s for regular exposures
{
imgdata.makernotes.nikon.ME_WB[0] = getreal(type);
imgdata.makernotes.nikon.ME_WB[2] = getreal(type);
imgdata.makernotes.nikon.ME_WB[1] = getreal(type);
imgdata.makernotes.nikon.ME_WB[3] = getreal(type);
}
else if (tag == 0x0045)
{
imgdata.sizes.raw_crop.cleft = get2();
imgdata.sizes.raw_crop.ctop = get2();
imgdata.sizes.raw_crop.cwidth = get2();
imgdata.sizes.raw_crop.cheight = get2();
}
else if (tag == 0x0082) // lens attachment
{
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
}
else if (tag == 0x0083) // lens type
{
imgdata.lens.nikon.NikonLensType = fgetc(ifp);
}
else if (tag == 0x0084) // lens
{
imgdata.lens.makernotes.MinFocal = getreal(type);
imgdata.lens.makernotes.MaxFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MinFocal = getreal(type);
imgdata.lens.makernotes.MaxAp4MaxFocal = getreal(type);
}
else if (tag == 0x008b) // lens f-stops
{
uchar a, b, c;
a = fgetc(ifp);
b = fgetc(ifp);
c = fgetc(ifp);
if (c)
{
imgdata.lens.nikon.NikonLensFStops = a * b * (12 / c);
imgdata.lens.makernotes.LensFStops = (float)imgdata.lens.nikon.NikonLensFStops / 12.0f;
}
}
else if (tag == 0x0093) // Nikon compression
{
imgdata.makernotes.nikon.NEFCompression = i = get2();
if ((i == 7) || (i == 9))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0098) // contains lens data
{
for (i = 0; i < 4; i++)
{
NikonLensDataVersion = NikonLensDataVersion * 10 + fgetc(ifp) - '0';
}
switch (NikonLensDataVersion)
{
case 100:
lenNikonLensData = 9;
break;
case 101:
case 201: // encrypted, starting from v.201
case 202:
case 203:
lenNikonLensData = 15;
break;
case 204:
lenNikonLensData = 16;
break;
case 400:
lenNikonLensData = 459;
break;
case 401:
lenNikonLensData = 590;
break;
case 402:
lenNikonLensData = 509;
break;
case 403:
lenNikonLensData = 879;
break;
}
if (lenNikonLensData > 0)
{
table_buf = (uchar *)malloc(lenNikonLensData);
fread(table_buf, lenNikonLensData, 1, ifp);
if ((NikonLensDataVersion < 201) && lenNikonLensData)
{
processNikonLensData(table_buf, lenNikonLensData);
free(table_buf);
lenNikonLensData = 0;
}
}
}
else if (tag == 0x00a0)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x00a8) // contains flash data
{
for (i = 0; i < 4; i++)
{
NikonFlashInfoVersion = NikonFlashInfoVersion * 10 + fgetc(ifp) - '0';
}
}
else if (tag == 0x00b0)
{
get4(); // ME tag version, 4 symbols
imgdata.makernotes.nikon.ExposureMode = get4();
imgdata.makernotes.nikon.nMEshots = get4();
imgdata.makernotes.nikon.MEgainOn = get4();
}
else if (tag == 0x00b9)
{
uchar uc;
int8_t sc;
fread(&uc, 1, 1, ifp);
imgdata.makernotes.nikon.AFFineTune = uc;
fread(&uc, 1, 1, ifp);
imgdata.makernotes.nikon.AFFineTuneIndex = uc;
fread(&sc, 1, 1, ifp);
imgdata.makernotes.nikon.AFFineTuneAdj = sc;
}
}
else if (!strncmp(make, "OLYMPUS", 7))
{
switch (tag)
{
case 0x0404:
case 0x101a:
case 0x20100101:
if (!imgdata.shootinginfo.BodySerial[0])
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
break;
case 0x20100102:
if (!imgdata.shootinginfo.InternalBodySerial[0])
stmread(imgdata.shootinginfo.InternalBodySerial, len, ifp);
break;
case 0x0207:
case 0x20100100:
{
uchar sOlyID[8];
fread(sOlyID, MIN(len, 7), 1, ifp);
sOlyID[7] = 0;
OlyID = sOlyID[0];
i = 1;
while (i < 7 && sOlyID[i])
{
OlyID = OlyID << 8 | sOlyID[i];
i++;
}
setOlympusBodyFeatures(OlyID);
}
break;
case 0x1002:
imgdata.lens.makernotes.CurAp = libraw_powf64l(2.0f, getreal(type) / 2);
break;
case 0x20400612:
case 0x30000612:
imgdata.sizes.raw_crop.cleft = get2();
break;
case 0x20400613:
case 0x30000613:
imgdata.sizes.raw_crop.ctop = get2();
break;
case 0x20400614:
case 0x30000614:
imgdata.sizes.raw_crop.cwidth = get2();
break;
case 0x20400615:
case 0x30000615:
imgdata.sizes.raw_crop.cheight = get2();
break;
case 0x20401112:
imgdata.makernotes.olympus.OlympusCropID = get2();
break;
case 0x20401113:
FORC4 imgdata.makernotes.olympus.OlympusFrame[c] = get2();
break;
case 0x20100201:
{
unsigned long long oly_lensid[3];
oly_lensid[0] = fgetc(ifp);
fgetc(ifp);
oly_lensid[1] = fgetc(ifp);
oly_lensid[2] = fgetc(ifp);
imgdata.lens.makernotes.LensID = (oly_lensid[0] << 16) | (oly_lensid[1] << 8) | oly_lensid[2];
}
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FT;
imgdata.lens.makernotes.LensFormat = LIBRAW_FORMAT_FT;
if (((imgdata.lens.makernotes.LensID < 0x20000) || (imgdata.lens.makernotes.LensID > 0x4ffff)) &&
(imgdata.lens.makernotes.LensID & 0x10))
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_mFT;
}
break;
case 0x20100202:
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0x20100203:
stmread(imgdata.lens.makernotes.Lens, len, ifp);
break;
case 0x20100205:
imgdata.lens.makernotes.MaxAp4MinFocal = libraw_powf64l(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100206:
imgdata.lens.makernotes.MaxAp4MaxFocal = libraw_powf64l(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100207:
imgdata.lens.makernotes.MinFocal = (float)get2();
break;
case 0x20100208:
imgdata.lens.makernotes.MaxFocal = (float)get2();
if (imgdata.lens.makernotes.MaxFocal > 1000.0f)
imgdata.lens.makernotes.MaxFocal = imgdata.lens.makernotes.MinFocal;
break;
case 0x2010020a:
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64l(sqrt(2.0f), get2() / 256.0f);
break;
case 0x20100301:
imgdata.lens.makernotes.TeleconverterID = fgetc(ifp) << 8;
fgetc(ifp);
imgdata.lens.makernotes.TeleconverterID = imgdata.lens.makernotes.TeleconverterID | fgetc(ifp);
break;
case 0x20100303:
stmread(imgdata.lens.makernotes.Teleconverter, len, ifp);
break;
case 0x20100403:
stmread(imgdata.lens.makernotes.Attachment, len, ifp);
break;
case 0x1007:
imgdata.other.SensorTemperature = (float)get2();
break;
case 0x1008:
imgdata.other.LensTemperature = (float)get2();
break;
case 0x20401306:
{
int temp = get2();
if ((temp != 0) && (temp != 100))
{
if (temp < 61)
imgdata.other.CameraTemperature = (float)temp;
else
imgdata.other.CameraTemperature = (float)(temp - 32) / 1.8f;
if ((OlyID == 0x4434353933ULL) && // TG-5
(imgdata.other.exifAmbientTemperature > -273.15f))
imgdata.other.CameraTemperature += imgdata.other.exifAmbientTemperature;
}
}
break;
case 0x20501500:
if (OlyID != 0x0ULL)
{
short temp = get2();
if ((OlyID == 0x4434303430ULL) || // E-1
(OlyID == 0x5330303336ULL) || // E-M5
(len != 1))
imgdata.other.SensorTemperature = (float)temp;
else if ((temp != -32768) && (temp != 0))
{
if (temp > 199)
imgdata.other.SensorTemperature = 86.474958f - 0.120228f * (float)temp;
else
imgdata.other.SensorTemperature = (float)temp;
}
}
break;
}
}
else if ((!strncmp(make, "PENTAX", 6) || !strncmp(make, "RICOH", 5)) && !strncmp(model, "GR", 2))
{
if (tag == 0x0005)
{
char buffer[17];
int count = 0;
fread(buffer, 16, 1, ifp);
buffer[16] = 0;
for (int i = 0; i < 16; i++)
{
// sprintf(imgdata.shootinginfo.InternalBodySerial+2*i, "%02x", buffer[i]);
if ((isspace(buffer[i])) || (buffer[i] == 0x2D) || (isalnum(buffer[i])))
count++;
}
if (count == 16)
{
sprintf(imgdata.shootinginfo.BodySerial, "%8s", buffer + 8);
buffer[8] = 0;
sprintf(imgdata.shootinginfo.InternalBodySerial, "%8s", buffer);
}
else
{
sprintf(imgdata.shootinginfo.BodySerial, "%02x%02x%02x%02x", buffer[4], buffer[5], buffer[6], buffer[7]);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%02x%02x%02x%02x", buffer[8], buffer[9], buffer[10],
buffer[11]);
}
}
else if ((tag == 0x1001) && (type == 3))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
imgdata.lens.makernotes.FocalType = 1;
}
else if ((tag == 0x100b) && (type == 10))
{
imgdata.other.FlashEC = getreal(type);
}
else if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
}
else if (!strncmp(make, "RICOH", 5) && strncmp(model, "PENTAX", 6))
{
if ((tag == 0x0005) && !strncmp(model, "GXR", 3))
{
char buffer[9];
buffer[8] = 0;
fread(buffer, 8, 1, ifp);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%8s", buffer);
}
else if ((tag == 0x100b) && (type == 10))
{
imgdata.other.FlashEC = getreal(type);
}
else if ((tag == 0x1017) && (get2() == 2))
{
strcpy(imgdata.lens.makernotes.Attachment, "Wide-Angle Adapter");
}
else if (tag == 0x1500)
{
imgdata.lens.makernotes.CurFocal = getreal(type);
}
else if ((tag == 0x2001) && !strncmp(model, "GXR", 3))
{
short ntags, cur_tag;
fseek(ifp, 20, SEEK_CUR);
ntags = get2();
cur_tag = get2();
while (cur_tag != 0x002c)
{
fseek(ifp, 10, SEEK_CUR);
cur_tag = get2();
}
fseek(ifp, 6, SEEK_CUR);
fseek(ifp, get4() + 20, SEEK_SET);
stread(imgdata.shootinginfo.BodySerial, 12, ifp);
get2();
imgdata.lens.makernotes.LensID = getc(ifp) - '0';
switch (imgdata.lens.makernotes.LensID)
{
case 1:
case 2:
case 3:
case 5:
case 6:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_RicohModule;
break;
case 8:
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Leica_M;
imgdata.lens.makernotes.CameraFormat = LIBRAW_FORMAT_APSC;
imgdata.lens.makernotes.LensID = -1;
break;
default:
imgdata.lens.makernotes.LensID = -1;
}
fseek(ifp, 17, SEEK_CUR);
stread(imgdata.lens.LensSerial, 12, ifp);
}
}
else if ((!strncmp(make, "PENTAX", 6) || !strncmp(model, "PENTAX", 6) ||
(!strncmp(make, "SAMSUNG", 7) && dng_version)) &&
strncmp(model, "GR", 2))
{
if (tag == 0x0005)
{
unique_id = get4();
setPentaxBodyFeatures(unique_id);
}
else if (tag == 0x000d)
{
imgdata.makernotes.pentax.FocusMode = get2();
}
else if (tag == 0x000e)
{
imgdata.makernotes.pentax.AFPointSelected = get2();
}
else if (tag == 0x000f)
{
imgdata.makernotes.pentax.AFPointsInFocus = getint(type);
}
else if (tag == 0x0010)
{
imgdata.makernotes.pentax.FocusPosition = get2();
}
else if (tag == 0x0013)
{
imgdata.lens.makernotes.CurAp = (float)get2() / 10.0f;
}
else if (tag == 0x0014)
{
PentaxISO(get2());
}
else if (tag == 0x001d)
{
imgdata.lens.makernotes.CurFocal = (float)get4() / 100.0f;
}
else if (tag == 0x0034)
{
uchar uc;
FORC4
{
fread(&uc, 1, 1, ifp);
imgdata.makernotes.pentax.DriveMode[c] = uc;
}
}
else if (tag == 0x0038)
{
imgdata.sizes.raw_crop.cleft = get2();
imgdata.sizes.raw_crop.ctop = get2();
}
else if (tag == 0x0039)
{
imgdata.sizes.raw_crop.cwidth = get2();
imgdata.sizes.raw_crop.cheight = get2();
}
else if (tag == 0x003f)
{
imgdata.lens.makernotes.LensID = fgetc(ifp) << 8 | fgetc(ifp);
}
else if (tag == 0x0047)
{
imgdata.other.CameraTemperature = (float)fgetc(ifp);
}
else if (tag == 0x004d)
{
if (type == 9)
imgdata.other.FlashEC = getreal(type) / 256.0f;
else
imgdata.other.FlashEC = (float)((signed short)fgetc(ifp)) / 6.0f;
}
else if (tag == 0x0072)
{
imgdata.makernotes.pentax.AFAdjustment = get2();
}
else if (tag == 0x007e)
{
imgdata.color.linear_max[0] = imgdata.color.linear_max[1] = imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = (long)(-1) * get4();
}
else if (tag == 0x0207)
{
if (len < 65535) // Safety belt
PentaxLensInfo(imgdata.lens.makernotes.CamID, len);
}
else if ((tag >= 0x020d) && (tag <= 0x0214))
{
FORC4 imgdata.color.WB_Coeffs[Pentax_wb_list1[tag - 0x020d]][c ^ (c >> 1)] = get2();
}
else if (tag == 0x0221)
{
int nWB = get2();
if (nWB <= sizeof(imgdata.color.WBCT_Coeffs) / sizeof(imgdata.color.WBCT_Coeffs[0]))
for (int i = 0; i < nWB; i++)
{
imgdata.color.WBCT_Coeffs[i][0] = (unsigned)0xcfc6 - get2();
fseek(ifp, 2, SEEK_CUR);
imgdata.color.WBCT_Coeffs[i][1] = get2();
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = 0x2000;
imgdata.color.WBCT_Coeffs[i][3] = get2();
}
}
else if (tag == 0x0215)
{
fseek(ifp, 16, SEEK_CUR);
sprintf(imgdata.shootinginfo.InternalBodySerial, "%d", get4());
}
else if (tag == 0x0229)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0x022d)
{
int wb_ind;
getc(ifp);
for (int wb_cnt = 0; wb_cnt < nPentax_wb_list2; wb_cnt++)
{
wb_ind = getc(ifp);
if (wb_ind < nPentax_wb_list2)
FORC4 imgdata.color.WB_Coeffs[Pentax_wb_list2[wb_ind]][c ^ (c >> 1)] = get2();
}
}
else if (tag == 0x0239) // Q-series lens info (LensInfoQ)
{
char LensInfo[20];
fseek(ifp, 2, SEEK_CUR);
stread(imgdata.lens.makernotes.Lens, 30, ifp);
strcat(imgdata.lens.makernotes.Lens, " ");
stread(LensInfo, 20, ifp);
strcat(imgdata.lens.makernotes.Lens, LensInfo);
}
}
else if (!strncmp(make, "SAMSUNG", 7))
{
if (tag == 0x0002)
{
if (get4() == 0x2000)
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (!strncmp(model, "NX mini", 7))
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Samsung_NX_M;
}
else
{
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
}
}
else if (tag == 0x0003)
{
unique_id = imgdata.lens.makernotes.CamID = get4();
}
else if (tag == 0x0043)
{
int temp = get4();
if (temp)
{
imgdata.other.CameraTemperature = (float)temp;
if (get4() == 10)
imgdata.other.CameraTemperature /= 10.0f;
}
}
else if (tag == 0xa002)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
}
else if (tag == 0xa003)
{
imgdata.lens.makernotes.LensID = get2();
if (imgdata.lens.makernotes.LensID)
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Samsung_NX;
}
else if (tag == 0xa005)
{
stmread(imgdata.lens.InternalLensSerial, len, ifp);
}
else if (tag == 0xa019)
{
imgdata.lens.makernotes.CurAp = getreal(type);
}
else if (tag == 0xa01a)
{
imgdata.lens.makernotes.FocalLengthIn35mmFormat = get4() / 10.0f;
if (imgdata.lens.makernotes.FocalLengthIn35mmFormat < 10.0f)
imgdata.lens.makernotes.FocalLengthIn35mmFormat *= 10.0f;
}
}
else if (!strncasecmp(make, "SONY", 4) || !strncasecmp(make, "Konica", 6) || !strncasecmp(make, "Minolta", 7) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) || !strncasecmp(model, "Lunar", 5) ||
!strncasecmp(model, "Lusso", 5) || !strncasecmp(model, "HV", 2))))
{
parseSonyMakernotes(tag, type, len, nonDNG, table_buf_0x0116, table_buf_0x0116_len, table_buf_0x2010,
table_buf_0x2010_len, table_buf_0x9050, table_buf_0x9050_len, table_buf_0x9400,
table_buf_0x9400_len, table_buf_0x9402, table_buf_0x9402_len, table_buf_0x9403,
table_buf_0x9403_len, table_buf_0x9406, table_buf_0x9406_len, table_buf_0x940c,
table_buf_0x940c_len, table_buf_0x940e, table_buf_0x940e_len);
}
fseek(ifp, _pos, SEEK_SET);
#endif
/* dcraw-inherited generic handlers (run for every entry, after the
   vendor-specific LIBRAW_LIBRARY_BUILD section above). */
/* Nikon tag 2 (ISOSpeed): ISO is the second 16-bit word. */
if (tag == 2 && strstr(make, "NIKON") && !iso_speed)
iso_speed = (get2(), get2());
/* Nikon tag 37: single byte encoding ISO as 100 * 2^(b/12 - 5). */
if (tag == 37 && strstr(make, "NIKON") && (!iso_speed || iso_speed == 65535))
{
unsigned char cc;
fread(&cc, 1, 1, ifp);
iso_speed = int(100.0 * libraw_powf64l(2.0f, float(cc) / 12.0 - 5.0));
}
/* Tag 4: packed exposure record (ISO, aperture, shutter, WB index,
   shot order) — values are APEX-style 1/32- and 1/64-step codes;
   0x7fff / 0xffff mark "not recorded". */
if (tag == 4 && len > 26 && len < 35)
{
if ((i = (get4(), get2())) != 0x7fff && (!iso_speed || iso_speed == 65535))
iso_speed = 50 * libraw_powf64l(2.0, i / 32.0 - 4);
#ifdef LIBRAW_LIBRARY_BUILD
get4(); /* library build takes aperture from elsewhere; just skip */
#else
if ((i = (get2(), get2())) != 0x7fff && !aperture)
aperture = libraw_powf64l(2.0, i / 64.0);
#endif
if ((i = get2()) != 0xffff && !shutter)
shutter = libraw_powf64l(2.0, (short)i / -32.0);
wbi = (get2(), get2());
shot_order = (get2(), get2());
}
/* Konica/Minolta: orientation code buried at a fixed offset inside
   tag 4 (offset 140) or tag 0x114 (offset 160); map to dcraw flip. */
if ((tag == 4 || tag == 0x114) && !strncmp(make, "KONICA", 6))
{
fseek(ifp, tag == 4 ? 140 : 160, SEEK_CUR);
switch (get2())
{
case 72: /* horizontal (normal) */
flip = 0;
break;
case 76: /* rotate 90 CW */
flip = 6;
break;
case 82: /* rotate 270 CW */
flip = 5;
break;
}
}
if (tag == 7 && type == 2 && len > 20)
fgets(model2, 64, ifp);
if (tag == 8 && type == 4)
shot_order = get4();
if (tag == 9 && !strncmp(make, "Canon", 5))
fread(artist, 64, 1, ifp);
if (tag == 0xc && len == 4)
FORC3 cam_mul[(c << 1 | c >> 1) & 3] = getreal(type);
if (tag == 0xd && type == 7 && get2() == 0xaaaa)
{
#if 0 /* Canon rotation data is handled by EXIF.Orientation */
for (c = i = 2; (ushort)c != 0xbbbb && i < len; i++)
c = c << 8 | fgetc(ifp);
while ((i += 4) < len - 5)
if (get4() == 257 && (i = len) && (c = (get4(), fgetc(ifp))) < 3)
flip = "065"[c] - '0';
#endif
}
#ifndef LIBRAW_LIBRARY_BUILD
if (tag == 0x10 && type == 4)
unique_id = get4();
#endif
#ifdef LIBRAW_LIBRARY_BUILD
INT64 _pos2 = ftell(ifp);
if (!strncasecmp(make, "Olympus", 7))
{
short nWB, tWB;
if ((tag == 0x20300108) || (tag == 0x20310109))
imgdata.makernotes.olympus.ColorSpace = get2();
if ((tag == 0x20400101) && (len == 2) && (!strncasecmp(model, "E-410", 5) || !strncasecmp(model, "E-510", 5)))
{
int i;
for (i = 0; i < 64; i++)
imgdata.color.WBCT_Coeffs[i][2] = imgdata.color.WBCT_Coeffs[i][4] = imgdata.color.WB_Coeffs[i][1] =
imgdata.color.WB_Coeffs[i][3] = 0x100;
for (i = 64; i < 256; i++)
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
}
if ((tag >= 0x20400101) && (tag <= 0x20400111))
{
nWB = tag - 0x20400101;
tWB = Oly_wb_list2[nWB << 1];
ushort CT = Oly_wb_list2[(nWB << 1) | 1];
int wb[4];
wb[0] = get2();
wb[2] = get2();
if (tWB != 0x100)
{
imgdata.color.WB_Coeffs[tWB][0] = wb[0];
imgdata.color.WB_Coeffs[tWB][2] = wb[2];
}
if (CT)
{
imgdata.color.WBCT_Coeffs[nWB - 1][0] = CT;
imgdata.color.WBCT_Coeffs[nWB - 1][1] = wb[0];
imgdata.color.WBCT_Coeffs[nWB - 1][3] = wb[2];
}
if (len == 4)
{
wb[1] = get2();
wb[3] = get2();
if (tWB != 0x100)
{
imgdata.color.WB_Coeffs[tWB][1] = wb[1];
imgdata.color.WB_Coeffs[tWB][3] = wb[3];
}
if (CT)
{
imgdata.color.WBCT_Coeffs[nWB - 1][2] = wb[1];
imgdata.color.WBCT_Coeffs[nWB - 1][4] = wb[3];
}
}
}
if ((tag >= 0x20400112) && (tag <= 0x2040011e))
{
nWB = tag - 0x20400112;
int wbG = get2();
tWB = Oly_wb_list2[nWB << 1];
if (nWB)
imgdata.color.WBCT_Coeffs[nWB - 1][2] = imgdata.color.WBCT_Coeffs[nWB - 1][4] = wbG;
if (tWB != 0x100)
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] = wbG;
}
if (tag == 0x20400121)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][2] = get2();
if (len == 4)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = get2();
}
}
if (tag == 0x2040011f)
{
int wbG = get2();
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0])
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = wbG;
FORC4 if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + c][0])
imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + c][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + c][3] =
wbG;
}
if ((tag == 0x30000110) && strcmp(software, "v757-71"))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][2] = get2();
if (len == 2)
{
for (int i = 0; i < 256; i++)
imgdata.color.WB_Coeffs[i][1] = imgdata.color.WB_Coeffs[i][3] = 0x100;
}
}
if ((((tag >= 0x30000120) && (tag <= 0x30000124)) || ((tag >= 0x30000130) && (tag <= 0x30000133))) &&
strcmp(software, "v757-71"))
{
int wb_ind;
if (tag <= 0x30000124)
wb_ind = tag - 0x30000120;
else
wb_ind = tag - 0x30000130 + 5;
imgdata.color.WB_Coeffs[Oly_wb_list1[wb_ind]][0] = get2();
imgdata.color.WB_Coeffs[Oly_wb_list1[wb_ind]][2] = get2();
}
if ((tag == 0x20400805) && (len == 2))
{
imgdata.makernotes.olympus.OlympusSensorCalibration[0] = getreal(type);
imgdata.makernotes.olympus.OlympusSensorCalibration[1] = getreal(type);
FORC4 imgdata.color.linear_max[c] = imgdata.makernotes.olympus.OlympusSensorCalibration[0];
}
if (tag == 0x20200306)
{
uchar uc;
fread(&uc, 1, 1, ifp);
imgdata.makernotes.olympus.AFFineTune = uc;
}
if (tag == 0x20200307)
{
FORC3 imgdata.makernotes.olympus.AFFineTuneAdj[c] = get2();
}
if (tag == 0x20200401)
{
imgdata.other.FlashEC = getreal(type);
}
}
fseek(ifp, _pos2, SEEK_SET);
#endif
if (tag == 0x11 && is_raw && !strncmp(make, "NIKON", 5))
{
fseek(ifp, get4() + base, SEEK_SET);
parse_tiff_ifd(base);
}
if (tag == 0x14 && type == 7)
{
if (len == 2560)
{
fseek(ifp, 1248, SEEK_CUR);
goto get2_256;
}
fread(buf, 1, 10, ifp);
if (!strncmp(buf, "NRW ", 4))
{
fseek(ifp, strcmp(buf + 4, "0100") ? 46 : 1546, SEEK_CUR);
cam_mul[0] = get4() << 2;
cam_mul[1] = get4() + get4();
cam_mul[2] = get4() << 2;
}
}
if (tag == 0x15 && type == 2 && is_raw)
fread(model, 64, 1, ifp);
if (strstr(make, "PENTAX"))
{
if (tag == 0x1b)
tag = 0x1018;
if (tag == 0x1c)
tag = 0x1017;
}
if (tag == 0x1d)
{
while ((c = fgetc(ifp)) && c != EOF)
#ifdef LIBRAW_LIBRARY_BUILD
{
if ((!custom_serial) && (!isdigit(c)))
{
if ((strbuflen(model) == 3) && (!strcmp(model, "D50")))
{
custom_serial = 34;
}
else
{
custom_serial = 96;
}
}
#endif
serial = serial * 10 + (isdigit(c) ? c - '0' : c % 10);
#ifdef LIBRAW_LIBRARY_BUILD
}
if (!imgdata.shootinginfo.BodySerial[0])
sprintf(imgdata.shootinginfo.BodySerial, "%d", serial);
#endif
}
if (tag == 0x29 && type == 1)
{ // Canon PowerShot G9
c = wbi < 18 ? "012347800000005896"[wbi] - '0' : 0;
fseek(ifp, 8 + c * 32, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get4();
}
#ifndef LIBRAW_LIBRARY_BUILD
if (tag == 0x3d && type == 3 && len == 4)
FORC4 cblack[c ^ c >> 1] = get2() >> (14 - tiff_bps);
#endif
if (tag == 0x81 && type == 4)
{
data_offset = get4();
fseek(ifp, data_offset + 41, SEEK_SET);
raw_height = get2() * 2;
raw_width = get2();
filters = 0x61616161;
}
if ((tag == 0x81 && type == 7) || (tag == 0x100 && type == 7) || (tag == 0x280 && type == 1))
{
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (tag == 0x88 && type == 4 && (thumb_offset = get4()))
thumb_offset += base;
if (tag == 0x89 && type == 4)
thumb_length = get4();
if (tag == 0x8c || tag == 0x96)
meta_offset = ftell(ifp);
/* Nikon tag 0x97 (ColorBalance): starts with a 4-digit ASCII version;
   the as-shot WB multipliers live at a version-specific offset and in
   a version-specific channel order.  Versions >= 200 are encrypted —
   stash the 324-byte payload in buf97 for later decryption by the
   tag 0xa7 handler (which supplies the key). */
if (tag == 0x97)
{
for (i = 0; i < 4; i++)
ver97 = ver97 * 10 + fgetc(ifp) - '0';
switch (ver97)
{
case 100:
fseek(ifp, 68, SEEK_CUR);
FORC4 cam_mul[(c >> 1) | ((c & 1) << 1)] = get2();
break;
case 102:
fseek(ifp, 6, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
break;
case 103:
fseek(ifp, 16, SEEK_CUR);
FORC4 cam_mul[c] = get2();
}
if (ver97 >= 200)
{
if (ver97 != 205) /* v205 payload starts immediately; others skip 280 bytes */
fseek(ifp, 280, SEEK_CUR);
fread(buf97, 324, 1, ifp);
}
}
if ((tag == 0xa1) && (type == 7) && strncasecmp(make, "Samsung", 7))
{
order = 0x4949;
fseek(ifp, 140, SEEK_CUR);
FORC3 cam_mul[c] = get4();
}
if (tag == 0xa4 && type == 3)
{
fseek(ifp, wbi * 48, SEEK_CUR);
FORC3 cam_mul[c] = get2();
}
/* Nikon tag 0xa7 (shutter count): its first four bytes XOR together
   into NikonKey, the second half of the decryption key (the first
   half comes from the camera serial).  With the key complete, run the
   classic xlat[] keystream (cj += ci * ck++) over the encrypted
   buffers captured earlier: buf97 (tag 0x97 WB data) and, in the
   library build, the tag 0x98 lens data. */
if (tag == 0xa7)
{ // shutter count
NikonKey = fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp) ^ fgetc(ifp);
if ((unsigned)(ver97 - 200) < 17) /* versions 200..216: decryptable WB */
{
ci = xlat[0][serial & 0xff];
cj = xlat[1][NikonKey];
ck = 0x60;
for (i = 0; i < 324; i++)
buf97[i] ^= (cj += ci * ck++);
/* per-version offset of the WB quad inside the decrypted block;
   low bit of the offset also selects the channel swap */
i = "66666>666;6A;:;55"[ver97 - 200] - '0';
FORC4 cam_mul[c ^ (c >> 1) ^ (i & 1)] = sget2(buf97 + (i & -2) + c * 2);
}
#ifdef LIBRAW_LIBRARY_BUILD
if ((NikonLensDataVersion > 200) && lenNikonLensData)
{
/* some bodies (e.g. D50) need a fixed substitute serial byte */
if (custom_serial)
{
ci = xlat[0][custom_serial];
}
else
{
ci = xlat[0][serial & 0xff];
}
cj = xlat[1][NikonKey];
ck = 0x60;
for (i = 0; i < lenNikonLensData; i++)
table_buf[i] ^= (cj += ci * ck++);
processNikonLensData(table_buf, lenNikonLensData);
lenNikonLensData = 0;
free(table_buf);
}
if (ver97 == 601) // Coolpix A
{
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
}
#endif
}
if (tag == 0xb001 && type == 3) // Sony ModelID
{
unique_id = get2();
}
if (tag == 0x200 && len == 3)
shot_order = (get4(), get4());
if (tag == 0x200 && len == 4) // Pentax black level
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x201 && len == 4) // Pentax As Shot WB
FORC4 cam_mul[c ^ (c >> 1)] = get2();
if (tag == 0x220 && type == 7)
meta_offset = ftell(ifp);
if (tag == 0x401 && type == 4 && len == 4)
FORC4 cblack[c ^ c >> 1] = get4();
#ifdef LIBRAW_LIBRARY_BUILD
// not corrected for file bitcount, to be patched in open_datastream
if (tag == 0x03d && strstr(make, "NIKON") && len == 4)
{
FORC4 cblack[c ^ c >> 1] = get2();
i = cblack[3];
FORC3 if (i > cblack[c]) i = cblack[c];
FORC4 cblack[c] -= i;
black += i;
}
#endif
if (tag == 0xe01)
{ /* Nikon Capture Note */
#ifdef LIBRAW_LIBRARY_BUILD
int loopc = 0;
#endif
order = 0x4949;
fseek(ifp, 22, SEEK_CUR);
for (offset = 22; offset + 22 < len; offset += 22 + i)
{
#ifdef LIBRAW_LIBRARY_BUILD
if (loopc++ > 1024)
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
tag = get4();
fseek(ifp, 14, SEEK_CUR);
i = get4() - 4;
if (tag == 0x76a43207)
flip = get2();
else
fseek(ifp, i, SEEK_CUR);
}
}
if (tag == 0xe80 && len == 256 && type == 7)
{
fseek(ifp, 48, SEEK_CUR);
cam_mul[0] = get2() * 508 * 1.078 / 0x10000;
cam_mul[2] = get2() * 382 * 1.173 / 0x10000;
}
if (tag == 0xf00 && type == 7)
{
if (len == 614)
fseek(ifp, 176, SEEK_CUR);
else if (len == 734 || len == 1502)
fseek(ifp, 148, SEEK_CUR);
else
goto next;
goto get2_256;
}
if (((tag == 0x1011 && len == 9) || tag == 0x20400200) && strcmp(software, "v757-71"))
for (i = 0; i < 3; i++)
{
#ifdef LIBRAW_LIBRARY_BUILD
if (!imgdata.makernotes.olympus.ColorSpace)
{
FORC3 cmatrix[i][c] = ((short)get2()) / 256.0;
}
else
{
FORC3 imgdata.color.ccm[i][c] = ((short)get2()) / 256.0;
}
#else
FORC3 cmatrix[i][c] = ((short)get2()) / 256.0;
#endif
}
if ((tag == 0x1012 || tag == 0x20400600) && len == 4)
FORC4 cblack[c ^ c >> 1] = get2();
if (tag == 0x1017 || tag == 0x20400100)
cam_mul[0] = get2() / 256.0;
if (tag == 0x1018 || tag == 0x20400100)
cam_mul[2] = get2() / 256.0;
if (tag == 0x2011 && len == 2)
{
get2_256:
order = 0x4d4d;
cam_mul[0] = get2() / 256.0;
cam_mul[2] = get2() / 256.0;
}
if ((tag | 0x70) == 0x2070 && (type == 4 || type == 13))
fseek(ifp, get4() + base, SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
// IB start
if (tag == 0x2010)
{
INT64 _pos3 = ftell(ifp);
parse_makernote(base, 0x2010);
fseek(ifp, _pos3, SEEK_SET);
}
if (((tag == 0x2020) || (tag == 0x3000) || (tag == 0x2030) || (tag == 0x2031) || (tag == 0x2050)) &&
((type == 7) || (type == 13)) && !strncasecmp(make, "Olympus", 7))
{
INT64 _pos3 = ftell(ifp);
parse_makernote(base, tag);
fseek(ifp, _pos3, SEEK_SET);
}
// IB end
#endif
if ((tag == 0x2020) && ((type == 7) || (type == 13)) && !strncmp(buf, "OLYMP", 5))
parse_thumb_note(base, 257, 258);
if (tag == 0x2040)
parse_makernote(base, 0x2040);
if (tag == 0xb028)
{
fseek(ifp, get4() + base, SEEK_SET);
parse_thumb_note(base, 136, 137);
}
if (tag == 0x4001 && len > 500 && len < 100000)
{
i = len == 582 ? 50 : len == 653 ? 68 : len == 5120 ? 142 : 126;
fseek(ifp, i, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
for (i += 18; i <= len; i += 10)
{
get2();
FORC4 sraw_mul[c ^ (c >> 1)] = get2();
if (sraw_mul[1] == 1170)
break;
}
}
if (!strncasecmp(make, "Samsung", 7))
{
if (tag == 0xa020) // get the full Samsung encryption key
for (i = 0; i < 11; i++)
SamsungKey[i] = get4();
if (tag == 0xa021) // get and decode Samsung cam_mul array
FORC4 cam_mul[c ^ (c >> 1)] = get4() - SamsungKey[c];
#ifdef LIBRAW_LIBRARY_BUILD
if (tag == 0xa022)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get4() - SamsungKey[c + 4];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][1] >> 1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][3] >> 4;
}
}
if (tag == 0xa023)
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][0] = get4() - SamsungKey[8];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] = get4() - SamsungKey[9];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] = get4() - SamsungKey[10];
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][2] = get4() - SamsungKey[0];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] >> 1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Ill_A][3] >> 4;
}
}
if (tag == 0xa024)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][c ^ (c >> 1)] = get4() - SamsungKey[c + 1];
if (imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][0] < (imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] >> 1))
{
imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][1] >> 4;
imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_D65][3] >> 4;
}
}
/*
if (tag == 0xa025) {
i = get4();
imgdata.color.linear_max[0] = imgdata.color.linear_max[1] = imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = i - SamsungKey[0]; printf ("Samsung 0xa025 %d\n", i); }
*/
if (tag == 0xa030 && len == 9)
for (i = 0; i < 3; i++)
FORC3 imgdata.color.ccm[i][c] = (float)((short)((get4() + SamsungKey[i * 3 + c]))) / 256.0;
#endif
if (tag == 0xa031 && len == 9) // get and decode Samsung color matrix
for (i = 0; i < 3; i++)
FORC3 cmatrix[i][c] = (float)((short)((get4() + SamsungKey[i * 3 + c]))) / 256.0;
if (tag == 0xa028)
FORC4 cblack[c ^ (c >> 1)] = get4() - SamsungKey[c];
}
else
{
// Somebody else use 0xa021 and 0xa028?
if (tag == 0xa021)
FORC4 cam_mul[c ^ (c >> 1)] = get4();
if (tag == 0xa028)
FORC4 cam_mul[c ^ (c >> 1)] -= get4();
}
#ifdef LIBRAW_LIBRARY_BUILD
if (tag == 0x4021 && (imgdata.makernotes.canon.multishot[0] = get4()) &&
(imgdata.makernotes.canon.multishot[1] = get4()))
{
if (len >= 4)
{
imgdata.makernotes.canon.multishot[2] = get4();
imgdata.makernotes.canon.multishot[3] = get4();
}
FORC4 cam_mul[c] = 1024;
}
#else
if (tag == 0x4021 && get4() && get4())
FORC4 cam_mul[c] = 1024;
#endif
next:
fseek(ifp, save, SEEK_SET);
}
quit:
order = sorder;
}
/*
Since the TIFF DateTime string has no timezone information,
assume that the camera's clock was set to Universal Time.
*/
/*
 * Read a 19-byte TIFF DateTime string ("YYYY:MM:DD HH:MM:SS") from the
 * input stream and store it in `timestamp` as a time_t.  Since the TIFF
 * DateTime string has no timezone information, the camera's clock is
 * assumed to have been set to Universal Time.
 *
 * reversed: nonzero if the 19 bytes are stored back-to-front in the file.
 * On parse failure (fewer than 6 fields matched) `timestamp` is untouched.
 */
void CLASS get_timestamp(int reversed)
{
  struct tm t;
  char str[20];
  int i;
  time_t ts;
  str[19] = 0;
  if (reversed)
    for (i = 19; i--;)
      str[i] = fgetc(ifp); /* bytes stored last-first */
  else
    fread(str, 19, 1, ifp);
  memset(&t, 0, sizeof t);
  if (sscanf(str, "%d:%d:%d %d:%d:%d", &t.tm_year, &t.tm_mon, &t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec) != 6)
    return;
  t.tm_year -= 1900; /* struct tm counts years from 1900 */
  t.tm_mon -= 1;     /* and months from 0 */
  t.tm_isdst = -1;
  /* Call mktime() once: the original called it twice, which both wasted
     work and re-normalized an already-normalized struct tm. */
  ts = mktime(&t);
  if (ts > 0)
    timestamp = ts;
}
/*
 * Parse an EXIF IFD located at the current file position.
 * base: byte offset added to all in-file pointers read from this IFD.
 * Fills shutter/aperture/ISO/timestamp/lens fields and recurses into the
 * maker note (tag 37500) for most cameras.
 */
void CLASS parse_exif(int base)
{
  unsigned kodak, entries, tag, type, len, save, c;
  double expo, ape;
  /* Old Kodak files store raw dimensions in EXIF tags 40962/40963 */
  kodak = !strncmp(make, "EASTMAN", 7) && tiff_nifds < 3;
  entries = get2();
  /* Sanity guard: some Hasselblad files carry a bogus, huge entry count */
  if (!strncmp(make, "Hasselblad", 10) && (tiff_nifds > 3) && (entries > 512))
    return;
#ifdef LIBRAW_LIBRARY_BUILD
  INT64 fsize = ifp->size();
#endif
  while (entries--)
  {
    tiff_get(base, &tag, &type, &len, &save);
#ifdef LIBRAW_LIBRARY_BUILD
    INT64 savepos = ftell(ifp);
    /* Skip entries whose payload would run absurdly far past EOF */
    if (len > 8 && savepos + len > fsize * 2)
    {
      fseek(ifp, save, SEEK_SET); // Recover tiff-read position!!
      continue;
    }
    /* Give the application's EXIF callback a look at every tag */
    if (callbacks.exif_cb)
    {
      callbacks.exif_cb(callbacks.exifparser_data, tag, type, len, order, ifp);
      fseek(ifp, savepos, SEEK_SET);
    }
#endif
    switch (tag)
    {
#ifdef LIBRAW_LIBRARY_BUILD
    case 0x9400: /* AmbientTemperature */
      imgdata.other.exifAmbientTemperature = getreal(type);
      if ((imgdata.other.CameraTemperature > -273.15f) && (OlyID == 0x4434353933ULL)) // TG-5
        imgdata.other.CameraTemperature += imgdata.other.exifAmbientTemperature;
      break;
    case 0x9401: /* Humidity */
      imgdata.other.exifHumidity = getreal(type);
      break;
    case 0x9402: /* Pressure */
      imgdata.other.exifPressure = getreal(type);
      break;
    case 0x9403: /* WaterDepth */
      imgdata.other.exifWaterDepth = getreal(type);
      break;
    case 0x9404: /* Acceleration */
      imgdata.other.exifAcceleration = getreal(type);
      break;
    case 0x9405: /* CameraElevationAngle */
      imgdata.other.exifCameraElevationAngle = getreal(type);
      break;
    case 0xa405: // FocalLengthIn35mmFormat
      imgdata.lens.FocalLengthIn35mmFormat = get2();
      break;
    case 0xa431: // BodySerialNumber
      stmread(imgdata.shootinginfo.BodySerial, len, ifp);
      break;
    case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard
      imgdata.lens.MinFocal = getreal(type);
      imgdata.lens.MaxFocal = getreal(type);
      imgdata.lens.MaxAp4MinFocal = getreal(type);
      imgdata.lens.MaxAp4MaxFocal = getreal(type);
      break;
    case 0xa435: // LensSerialNumber
      stmread(imgdata.lens.LensSerial, len, ifp);
      break;
    case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard
      imgdata.lens.dng.MinFocal = getreal(type);
      imgdata.lens.dng.MaxFocal = getreal(type);
      imgdata.lens.dng.MaxAp4MinFocal = getreal(type);
      imgdata.lens.dng.MaxAp4MaxFocal = getreal(type);
      break;
    case 0xa433: // LensMake
      stmread(imgdata.lens.LensMake, len, ifp);
      break;
    case 0xa434: // LensModel
      stmread(imgdata.lens.Lens, len, ifp);
      /* Some cameras write "----" when no lens info is available */
      if (!strncmp(imgdata.lens.Lens, "----", 4))
        imgdata.lens.Lens[0] = 0;
      break;
    case 0x9205: /* MaxApertureValue (APEX) -> f-number */
      imgdata.lens.EXIF_MaxAp = libraw_powf64l(2.0f, (getreal(type) / 2.0f));
      break;
#endif
    case 33434: /* 0x829a ExposureTime */
      tiff_ifd[tiff_nifds - 1].t_shutter = shutter = getreal(type);
      break;
    case 33437:
      aperture = getreal(type);
      break; // 0x829d FNumber
    case 34855: /* ISOSpeedRatings */
      iso_speed = get2();
      break;
    case 34865: /* StandardOutputSensitivity: use only if ISO overflowed 16 bits */
      if (iso_speed == 0xffff && !strncasecmp(make, "FUJI", 4))
        iso_speed = getreal(type);
      break;
    case 34866: /* RecommendedExposureIndex: same overflow fallback */
      if (iso_speed == 0xffff && (!strncasecmp(make, "SONY", 4) || !strncasecmp(make, "CANON", 5)))
        iso_speed = getreal(type);
      break;
    case 36867: /* DateTimeOriginal */
    case 36868: /* DateTimeDigitized */
      get_timestamp(0);
      break;
    case 37377: /* ShutterSpeedValue (APEX), used only if not already set */
      if ((expo = -getreal(type)) < 128 && shutter == 0.)
        tiff_ifd[tiff_nifds - 1].t_shutter = shutter = libraw_powf64l(2.0, expo);
      break;
    case 37378: // 0x9202 ApertureValue
      if ((fabs(ape = getreal(type)) < 256.0) && (!aperture))
        aperture = libraw_powf64l(2.0, ape / 2);
      break;
    case 37385: /* Flash */
      flash_used = getreal(type);
      break;
    case 37386: /* FocalLength */
      focal_len = getreal(type);
      break;
    case 37500: // tag 0x927c
#ifdef LIBRAW_LIBRARY_BUILD
      /* Raspberry Pi cameras store WB gains and a color matrix as ASCII
         key=value text in the maker note instead of a TIFF structure */
      if (((make[0] == '\0') && (!strncmp(model, "ov5647", 6))) ||
          ((!strncmp(make, "RaspberryPi", 11)) && (!strncmp(model, "RP_OV5647", 9))) ||
          ((!strncmp(make, "RaspberryPi", 11)) && (!strncmp(model, "RP_imx219", 9))))
      {
        char mn_text[512];
        char *pos;
        char ccms[512];
        ushort l;
        float num;
        fgets(mn_text, MIN(len,511), ifp);
        mn_text[511] = 0;
        pos = strstr(mn_text, "gain_r=");
        if (pos)
          cam_mul[0] = atof(pos + 7);
        pos = strstr(mn_text, "gain_b=");
        if (pos)
          cam_mul[2] = atof(pos + 7);
        if ((cam_mul[0] > 0.001f) && (cam_mul[2] > 0.001f))
          cam_mul[1] = cam_mul[3] = 1.0f;
        else
          cam_mul[0] = cam_mul[2] = 0.0f;
        /* "ccm=" is a comma-separated 4x3 color matrix */
        pos = strstr(mn_text, "ccm=");
        if(pos)
        {
          pos +=4;
          char *pos2 = strstr(pos, " ");
          if(pos2)
          {
            l = pos2 - pos;
            memcpy(ccms, pos, l);
            ccms[l] = '\0';
#if defined WIN32 || defined(__MINGW32__)
            // Win32 strtok is already thread-safe
            pos = strtok(ccms, ",");
#else
            char *last=0;
            pos = strtok_r(ccms, ",",&last);
#endif
            if(pos)
            {
              for (l = 0; l < 4; l++)
              {
                num = 0.0;
                for (c = 0; c < 3; c++)
                {
                  imgdata.color.ccm[l][c] = (float)atoi(pos);
                  num += imgdata.color.ccm[l][c];
#if defined WIN32 || defined(__MINGW32__)
                  pos = strtok(NULL, ",");
#else
                  pos = strtok_r(NULL, ",",&last);
#endif
                  if(!pos) goto end; // broken
                }
                /* normalize each row so its coefficients sum to 1 */
                if (num > 0.01)
                  FORC3 imgdata.color.ccm[l][c] = imgdata.color.ccm[l][c] / num;
              }
            }
          }
        }
      end:;
      }
      else
#endif
        parse_makernote(base, 0);
      break;
    case 40962: /* PixelXDimension: raw width on old Kodaks only */
      if (kodak)
        raw_width = get4();
      break;
    case 40963: /* PixelYDimension */
      if (kodak)
        raw_height = get4();
      break;
    case 41730: /* CFAPattern */
      if (get4() == 0x20002)
        for (exif_cfa = c = 0; c < 8; c += 2)
          exif_cfa |= fgetc(ifp) * 0x01010101U << c;
    }
    fseek(ifp, save, SEEK_SET);
  }
}
#ifdef LIBRAW_LIBRARY_BUILD
/*
 * Parse a GPS IFD into LibRaw's structured imgdata.other.parsed_gps
 * fields (reference letters, degree/minute/second triples, altitude,
 * timestamp and fix status).  base is the TIFF offset base.
 */
void CLASS parse_gps_libraw(int base)
{
  unsigned n_entries, tag, type, len, save, axis;
  n_entries = get2();
  if (n_entries > 200) /* implausible count: refuse to parse */
    return;
  if (n_entries > 0)
    imgdata.other.parsed_gps.gpsparsed = 1;
  while (n_entries--)
  {
    tiff_get(base, &tag, &type, &len, &save);
    if (len > 1024)
    {
      fseek(ifp, save, SEEK_SET); // Recover tiff-read position!!
      continue;                   // no GPS tags are 1k or larger
    }
    if (tag == 1) /* GPSLatitudeRef: 'N' or 'S' */
      imgdata.other.parsed_gps.latref = getc(ifp);
    else if (tag == 3) /* GPSLongitudeRef: 'E' or 'W' */
      imgdata.other.parsed_gps.longref = getc(ifp);
    else if (tag == 5) /* GPSAltitudeRef: 0 above / 1 below sea level */
      imgdata.other.parsed_gps.altref = getc(ifp);
    else if (tag == 2 && len == 3) /* GPSLatitude: deg, min, sec */
      for (axis = 0; axis < 3; axis++)
        imgdata.other.parsed_gps.latitude[axis] = getreal(type);
    else if (tag == 4 && len == 3) /* GPSLongitude: deg, min, sec */
      for (axis = 0; axis < 3; axis++)
        imgdata.other.parsed_gps.longtitude[axis] = getreal(type);
    else if (tag == 7 && len == 3) /* GPSTimeStamp: h, m, s */
      for (axis = 0; axis < 3; axis++)
        imgdata.other.parsed_gps.gpstimestamp[axis] = getreal(type);
    else if (tag == 6) /* GPSAltitude */
      imgdata.other.parsed_gps.altitude = getreal(type);
    else if (tag == 9) /* GPSStatus: 'A' active / 'V' void */
      imgdata.other.parsed_gps.gpsstatus = getc(ifp);
    fseek(ifp, save, SEEK_SET);
  }
}
#endif
/*
 * Parse a GPS IFD into the flat gpsdata[] array used by dcraw:
 * slots 0..17 hold the lat/long/timestamp rationals, 18..19 altitude,
 * 20.. hold strings, and 29..31 the single-character reference letters.
 */
void CLASS parse_gps(int base)
{
  unsigned count, tag, type, len, save, k;
  count = get2();
  while (count--)
  {
    tiff_get(base, &tag, &type, &len, &save);
    if (len > 1024)
    {
      fseek(ifp, save, SEEK_SET); // Recover tiff-read position!!
      continue;                   // no GPS tags are 1k or larger
    }
    if (tag == 1 || tag == 3 || tag == 5)
    {
      /* reference letters (N/S, E/W, altitude ref) */
      gpsdata[29 + tag / 2] = getc(ifp);
    }
    else if (tag == 2 || tag == 4 || tag == 7)
    {
      /* latitude / longitude / timestamp: three rationals = six longs */
      for (k = 0; k < 6; k++)
        gpsdata[tag / 3 * 6 + k] = get4();
    }
    else if (tag == 6)
    {
      /* altitude: one rational */
      for (k = 0; k < 2; k++)
        gpsdata[18 + k] = get4();
    }
    else if (tag == 18 || tag == 29)
    {
      /* GPSMapDatum / GPSDateStamp strings, truncated to 11 chars */
      fgets((char *)(gpsdata + 14 + tag / 3), MIN(len, 12), ifp);
    }
    fseek(ifp, save, SEEK_SET);
  }
}
/*
 * Convert a ROMM(ProPhoto)-to-camera matrix into the sRGB-to-camera
 * matrix `cmatrix` by left-multiplying with the fixed sRGB->ROMM matrix.
 */
void CLASS romm_coeff(float romm_cam[3][3])
{
  static const float rgb_romm[3][3] = /* ROMM == Kodak ProPhoto */
      {{2.034193, -0.727420, -0.306766}, {-0.228811, 1.231729, -0.002922}, {-0.008565, -0.153273, 1.161839}};
  int row, col, k;
  for (row = 0; row < 3; row++)
    for (col = 0; col < 3; col++)
    {
      /* float accumulator keeps rounding identical to in-place summation */
      float sum = 0;
      for (k = 0; k < 3; k++)
        sum += rgb_romm[row][k] * romm_cam[k][col];
      cmatrix[row][col] = sum;
    }
}
/*
 * Parse a Leaf MOS metadata stream starting at `offset`.  The stream is a
 * sequence of "PKTS" records (tag name, payload length, payload); this
 * function recurses into each payload since records may nest.  Extracts
 * model name, thumbnail/ICC-profile locations, color matrix, rotation,
 * neutral white balance, and the CFA layout.
 */
void CLASS parse_mos(int offset)
{
  char data[40];
  int skip, from, i, c, neut[4], planes = 0, frot = 0;
  /* Leaf back model names, indexed by the integer in ShootObj_back_type */
  static const char *mod[] = {"",
                              "DCB2",
                              "Volare",
                              "Cantare",
                              "CMost",
                              "Valeo 6",
                              "Valeo 11",
                              "Valeo 22",
                              "Valeo 11p",
                              "Valeo 17",
                              "",
                              "Aptus 17",
                              "Aptus 22",
                              "Aptus 75",
                              "Aptus 65",
                              "Aptus 54S",
                              "Aptus 65S",
                              "Aptus 75S",
                              "AFi 5",
                              "AFi 6",
                              "AFi 7",
                              "AFi-II 7",
                              "Aptus-II 7",
                              "",
                              "Aptus-II 6",
                              "",
                              "",
                              "Aptus-II 10",
                              "Aptus-II 5",
                              "",
                              "",
                              "",
                              "",
                              "Aptus-II 10R",
                              "Aptus-II 8",
                              "",
                              "Aptus-II 12",
                              "",
                              "AFi-II 12"};
  float romm_cam[3][3];
  fseek(ifp, offset, SEEK_SET);
  while (1)
  {
    /* every record starts with the magic "PKTS" (0x504b5453) */
    if (get4() != 0x504b5453)
      break;
    get4(); /* skip record version/flags word */
    fread(data, 1, 40, ifp); /* 40-byte ASCII tag name */
    skip = get4();           /* payload length */
    from = ftell(ifp);       /* payload start */
// IB start
#ifdef LIBRAW_LIBRARY_BUILD
    if (!strcmp(data, "CameraObj_camera_type"))
    {
      stmread(imgdata.lens.makernotes.body, skip, ifp);
    }
    if (!strcmp(data, "back_serial_number"))
    {
      /* first whitespace-separated word is the serial number */
      char buffer[sizeof(imgdata.shootinginfo.BodySerial)];
      char *words[4];
      int nwords;
      stmread(buffer, skip, ifp);
      nwords = getwords(buffer, words, 4, sizeof(imgdata.shootinginfo.BodySerial));
      strcpy(imgdata.shootinginfo.BodySerial, words[0]);
    }
    if (!strcmp(data, "CaptProf_serial_number"))
    {
      char buffer[sizeof(imgdata.shootinginfo.InternalBodySerial)];
      char *words[4];
      int nwords;
      stmread(buffer, skip, ifp);
      nwords = getwords(buffer, words, 4, sizeof(imgdata.shootinginfo.InternalBodySerial));
      strcpy(imgdata.shootinginfo.InternalBodySerial, words[0]);
    }
#endif
    // IB end
    if (!strcmp(data, "JPEG_preview_data"))
    {
      thumb_offset = from;
      thumb_length = skip;
    }
    if (!strcmp(data, "icc_camera_profile"))
    {
      profile_offset = from;
      profile_length = skip;
    }
    if (!strcmp(data, "ShootObj_back_type"))
    {
      /* integer index into the mod[] model-name table */
      fscanf(ifp, "%d", &i);
      if ((unsigned)i < sizeof mod / sizeof(*mod))
        strcpy(model, mod[i]);
    }
    if (!strcmp(data, "icc_camera_to_tone_matrix"))
    {
      /* nine IEEE floats stored as big-endian words */
      for (i = 0; i < 9; i++)
        ((float *)romm_cam)[i] = int_to_float(get4());
      romm_coeff(romm_cam);
    }
    if (!strcmp(data, "CaptProf_color_matrix"))
    {
      /* nine floats stored as ASCII text */
      for (i = 0; i < 9; i++)
        fscanf(ifp, "%f", (float *)romm_cam + i);
      romm_coeff(romm_cam);
    }
    if (!strcmp(data, "CaptProf_number_of_planes"))
      fscanf(ifp, "%d", &planes);
    if (!strcmp(data, "CaptProf_raw_data_rotation"))
      fscanf(ifp, "%d", &flip);
    if (!strcmp(data, "CaptProf_mosaic_pattern"))
      FORC4
      {
        /* position of the '1' entry determines the CFA rotation */
        fscanf(ifp, "%d", &i);
        if (i == 1)
          frot = c ^ (c >> 1);
      }
    if (!strcmp(data, "ImgProf_rotation_angle"))
    {
      fscanf(ifp, "%d", &i);
      flip = i - flip;
    }
    if (!strcmp(data, "NeutObj_neutrals") && !cam_mul[0])
    {
      /* neutral point: first value divided by each channel value */
      FORC4 fscanf(ifp, "%d", neut + c);
      FORC3 cam_mul[c] = (float)neut[0] / neut[c + 1];
    }
    if (!strcmp(data, "Rows_data"))
      load_flags = get4();
    parse_mos(from); /* recurse into the payload */
    fseek(ifp, skip + from, SEEK_SET);
  }
  /* single-plane backs are mosaiced; pick the Bayer pattern by rotation */
  if (planes)
    filters = (planes == 1) * 0x01010101U * (uchar) "\x94\x61\x16\x49"[(flip / 90 + frot) & 3];
}
/*
 * Read a `len`-entry linearization table from the stream into curve[],
 * pad the remainder of the 0x10000-entry table with the last value, and
 * update `maximum` from the appropriate curve entry.
 */
void CLASS linear_table(unsigned len)
{
  int i;
  if (len < 1) /* nothing to read */
    return;
  if (len > 0x10000) /* curve[] holds at most 64K entries */
    len = 0x10000;
  read_shorts(curve, len);
  for (i = len; i < 0x10000; i++)
    curve[i] = curve[i - 1]; /* extend with the last real sample */
  if (len < 0x1000)
    maximum = curve[0xfff];
  else
    maximum = curve[len - 1];
}
#ifdef LIBRAW_LIBRARY_BUILD
/*
 * Decode one Kodak 0x084x white-balance tag: read three per-channel
 * gains from the stream and store them in WB_Coeffs[wb], normalized
 * against the green channel (green^2 / channel).
 */
void CLASS Kodak_WB_0x08tags(int wb, unsigned type)
{
  float gain[3], g2;
  int i;
  for (i = 0; i < 3; i++)
  {
    float v = getreal(type);
    gain[i] = (v == 0) ? 1 : v; /* guard against division by zero below */
  }
  g2 = gain[1] * gain[1];
  imgdata.color.WB_Coeffs[wb][1] = imgdata.color.WB_Coeffs[wb][3] = gain[1];
  imgdata.color.WB_Coeffs[wb][0] = g2 / gain[0];
  imgdata.color.WB_Coeffs[wb][2] = g2 / gain[2];
}
/* Thanks to Alexey Danilchenko for wb as-shot parsing code */
/*
 * Parse a Kodak makernote IFD (LibRaw build).  Extracts crop, black
 * levels, temperatures, white balance (per-illuminant coefficients and
 * as-shot multipliers), six ROMM color matrices, the linearization
 * table, ISO and image dimensions.  base is the TIFF offset base.
 */
void CLASS parse_kodak_ifd(int base)
{
  unsigned entries, tag, type, len, save;
  int j, c, wbi = -2, romm_camTemp[9], romm_camScale[3];
  float mul[3] = {1, 1, 1}, num;
  /* per-illuminant WB tag numbers, indexed by wbi; -1 = no tag */
  static const int wbtag[] = {64037, 64040, 64039, 64041, -1, -1, 64042};
  // int a_blck = 0;
  entries = get2();
  if (entries > 1024) /* implausible entry count: refuse to parse */
    return;
  INT64 fsize = ifp->size();
  while (entries--)
  {
    tiff_get(base, &tag, &type, &len, &save);
    INT64 savepos = ftell(ifp);
    /* skip entries whose payload would run absurdly far past EOF */
    if (len > 8 && len + savepos > 2 * fsize)
    {
      fseek(ifp, save, SEEK_SET); // Recover tiff-read position!!
      continue;
    }
    /* offer every tag to the application's EXIF callback */
    if (callbacks.exif_cb)
    {
      callbacks.exif_cb(callbacks.exifparser_data, tag | 0x20000, type, len, order, ifp);
      fseek(ifp, savepos, SEEK_SET);
    }
    /* 1003..1006: visible-area crop rectangle */
    if (tag == 1003)
      imgdata.sizes.raw_crop.cleft = get2();
    if (tag == 1004)
      imgdata.sizes.raw_crop.ctop = get2();
    if (tag == 1005)
      imgdata.sizes.raw_crop.cwidth = get2();
    if (tag == 1006)
      imgdata.sizes.raw_crop.cheight = get2();
    if (tag == 1007)
      imgdata.makernotes.kodak.BlackLevelTop = get2();
    if (tag == 1008)
      imgdata.makernotes.kodak.BlackLevelBottom = get2();
    if (tag == 1011)
      imgdata.other.FlashEC = getreal(type);
    if (tag == 1020) /* WB illuminant index */
      wbi = getint(type);
    if (tag == 1021 && len == 72)
    { /* WB set in software */
      fseek(ifp, 40, SEEK_CUR);
      FORC3 cam_mul[c] = 2048.0 / fMAX(1.0f, get2());
      wbi = -2;
    }
    if ((tag == 1030) && (len == 1))
      imgdata.other.CameraTemperature = getreal(type);
    if ((tag == 1043) && (len == 1))
      imgdata.other.SensorTemperature = getreal(type);
    /* EOS D2000C stores black level in two tags; average when both present */
    if ((tag == 0x03ef) && (!strcmp(model, "EOS D2000C")))
      black = get2();
    if ((tag == 0x03f0) && (!strcmp(model, "EOS D2000C")))
    {
      if (black) // already set by tag 0x03ef
        black = (black + get2()) / 2;
      else
        black = get2();
    }
    INT64 _pos2 = ftell(ifp);
    /* 0x0848..0x084d: per-illuminant WB gain triples */
    if (tag == 0x0848)
      Kodak_WB_0x08tags(LIBRAW_WBI_Daylight, type);
    if (tag == 0x0849)
      Kodak_WB_0x08tags(LIBRAW_WBI_Tungsten, type);
    if (tag == 0x084a)
      Kodak_WB_0x08tags(LIBRAW_WBI_Fluorescent, type);
    if (tag == 0x084b)
      Kodak_WB_0x08tags(LIBRAW_WBI_Flash, type);
    if (tag == 0x084c)
      Kodak_WB_0x08tags(LIBRAW_WBI_Custom, type);
    if (tag == 0x084d)
      Kodak_WB_0x08tags(LIBRAW_WBI_Auto, type);
    if (tag == 0x0e93)
      imgdata.color.linear_max[0] = imgdata.color.linear_max[1] = imgdata.color.linear_max[2] =
          imgdata.color.linear_max[3] = get2();
    if (tag == 0x09ce)
      stmread(imgdata.shootinginfo.InternalBodySerial, len, ifp);
    if (tag == 0xfa00)
      stmread(imgdata.shootinginfo.BodySerial, len, ifp);
    /* 0xfa27..0xfa2a: per-illuminant WB coefficients as 32-bit values */
    if (tag == 0xfa27)
    {
      FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c] = get4();
      imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1];
    }
    if (tag == 0xfa28)
    {
      FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][c] = get4();
      imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][1];
    }
    if (tag == 0xfa29)
    {
      FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c] = get4();
      imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1];
    }
    if (tag == 0xfa2a)
    {
      FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c] = get4();
      imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1];
    }
    fseek(ifp, _pos2, SEEK_SET); /* rewind: WB tags above share the position saved in _pos2 */
    /* 0x07e4..0x07e9 / 0xfb01..0xfb06: six ROMM camera matrices, stored
       either as nine rationals (type 10) or as nine scaled ints (type 9,
       accepted only when each row's scale sum is large enough) */
    if (((tag == 0x07e4) || (tag == 0xfb01)) && (len == 9))
    { /* daylight matrix; also feeds cmatrix via romm_coeff() */
      short validM = 0;
      if (type == 10)
      {
        for (j = 0; j < 9; j++)
        {
          ((float *)imgdata.makernotes.kodak.romm_camDaylight)[j] = getreal(type);
        }
        validM = 1;
      }
      else if (type == 9)
      {
        FORC3
        {
          romm_camScale[c] = 0;
          for (j = 0; j < 3; j++)
          {
            romm_camTemp[c * 3 + j] = get4();
            romm_camScale[c] += romm_camTemp[c * 3 + j];
          }
        }
        if ((romm_camScale[0] > 0x1fff) && (romm_camScale[1] > 0x1fff) && (romm_camScale[2] > 0x1fff))
        {
          FORC3 for (j = 0; j < 3; j++)
          {
            ((float *)imgdata.makernotes.kodak.romm_camDaylight)[c * 3 + j] =
                ((float)romm_camTemp[c * 3 + j]) / ((float)romm_camScale[c]);
          }
          validM = 1;
        }
      }
      if (validM)
      {
        romm_coeff(imgdata.makernotes.kodak.romm_camDaylight);
      }
    }
    if (((tag == 0x07e5) || (tag == 0xfb02)) && (len == 9))
    { /* tungsten matrix */
      if (type == 10)
      {
        for (j = 0; j < 9; j++)
        {
          ((float *)imgdata.makernotes.kodak.romm_camTungsten)[j] = getreal(type);
        }
      }
      else if (type == 9)
      {
        FORC3
        {
          romm_camScale[c] = 0;
          for (j = 0; j < 3; j++)
          {
            romm_camTemp[c * 3 + j] = get4();
            romm_camScale[c] += romm_camTemp[c * 3 + j];
          }
        }
        if ((romm_camScale[0] > 0x1fff) && (romm_camScale[1] > 0x1fff) && (romm_camScale[2] > 0x1fff))
        {
          FORC3 for (j = 0; j < 3; j++)
          {
            ((float *)imgdata.makernotes.kodak.romm_camTungsten)[c * 3 + j] =
                ((float)romm_camTemp[c * 3 + j]) / ((float)romm_camScale[c]);
          }
        }
      }
    }
    if (((tag == 0x07e6) || (tag == 0xfb03)) && (len == 9))
    { /* fluorescent matrix */
      if (type == 10)
      {
        for (j = 0; j < 9; j++)
        {
          ((float *)imgdata.makernotes.kodak.romm_camFluorescent)[j] = getreal(type);
        }
      }
      else if (type == 9)
      {
        FORC3
        {
          romm_camScale[c] = 0;
          for (j = 0; j < 3; j++)
          {
            romm_camTemp[c * 3 + j] = get4();
            romm_camScale[c] += romm_camTemp[c * 3 + j];
          }
        }
        if ((romm_camScale[0] > 0x1fff) && (romm_camScale[1] > 0x1fff) && (romm_camScale[2] > 0x1fff))
        {
          FORC3 for (j = 0; j < 3; j++)
          {
            ((float *)imgdata.makernotes.kodak.romm_camFluorescent)[c * 3 + j] =
                ((float)romm_camTemp[c * 3 + j]) / ((float)romm_camScale[c]);
          }
        }
      }
    }
    if (((tag == 0x07e7) || (tag == 0xfb04)) && (len == 9))
    { /* flash matrix */
      if (type == 10)
      {
        for (j = 0; j < 9; j++)
        {
          ((float *)imgdata.makernotes.kodak.romm_camFlash)[j] = getreal(type);
        }
      }
      else if (type == 9)
      {
        FORC3
        {
          romm_camScale[c] = 0;
          for (j = 0; j < 3; j++)
          {
            romm_camTemp[c * 3 + j] = get4();
            romm_camScale[c] += romm_camTemp[c * 3 + j];
          }
        }
        if ((romm_camScale[0] > 0x1fff) && (romm_camScale[1] > 0x1fff) && (romm_camScale[2] > 0x1fff))
        {
          FORC3 for (j = 0; j < 3; j++)
          {
            ((float *)imgdata.makernotes.kodak.romm_camFlash)[c * 3 + j] =
                ((float)romm_camTemp[c * 3 + j]) / ((float)romm_camScale[c]);
          }
        }
      }
    }
    if (((tag == 0x07e8) || (tag == 0xfb05)) && (len == 9))
    { /* custom matrix */
      if (type == 10)
      {
        for (j = 0; j < 9; j++)
        {
          ((float *)imgdata.makernotes.kodak.romm_camCustom)[j] = getreal(type);
        }
      }
      else if (type == 9)
      {
        FORC3
        {
          romm_camScale[c] = 0;
          for (j = 0; j < 3; j++)
          {
            romm_camTemp[c * 3 + j] = get4();
            romm_camScale[c] += romm_camTemp[c * 3 + j];
          }
        }
        if ((romm_camScale[0] > 0x1fff) && (romm_camScale[1] > 0x1fff) && (romm_camScale[2] > 0x1fff))
        {
          FORC3 for (j = 0; j < 3; j++)
          {
            ((float *)imgdata.makernotes.kodak.romm_camCustom)[c * 3 + j] =
                ((float)romm_camTemp[c * 3 + j]) / ((float)romm_camScale[c]);
          }
        }
      }
    }
    if (((tag == 0x07e9) || (tag == 0xfb06)) && (len == 9))
    { /* auto matrix */
      if (type == 10)
      {
        for (j = 0; j < 9; j++)
        {
          ((float *)imgdata.makernotes.kodak.romm_camAuto)[j] = getreal(type);
        }
      }
      else if (type == 9)
      {
        FORC3
        {
          romm_camScale[c] = 0;
          for (j = 0; j < 3; j++)
          {
            romm_camTemp[c * 3 + j] = get4();
            romm_camScale[c] += romm_camTemp[c * 3 + j];
          }
        }
        if ((romm_camScale[0] > 0x1fff) && (romm_camScale[1] > 0x1fff) && (romm_camScale[2] > 0x1fff))
        {
          FORC3 for (j = 0; j < 3; j++)
          {
            ((float *)imgdata.makernotes.kodak.romm_camAuto)[c * 3 + j] =
                ((float)romm_camTemp[c * 3 + j]) / ((float)romm_camScale[c]);
          }
        }
      }
    }
    if (tag == 2120 + wbi || (wbi < 0 && tag == 2125)) /* use Auto WB if illuminant index is not set */
    {
      FORC3 mul[c] = (num = getreal(type)) == 0 ? 1 : num;
      FORC3 cam_mul[c] = mul[1] / mul[c]; /* normalise against green */
    }
    if (tag == 2317)
      linear_table(len);
    if (tag == 0x903)
      iso_speed = getreal(type);
    // if (tag == 6020) iso_speed = getint(type);
    if (tag == 64013) /* WB index stored as a raw byte */
      wbi = fgetc(ifp);
    if ((unsigned)wbi < 7 && tag == wbtag[wbi])
      FORC3 cam_mul[c] = get4();
    if (tag == 0xfa13)
      width = getint(type);
    if (tag == 0xfa14)
      height = (getint(type) + 1) & -2; /* force even height */
    /*
      height = getint(type);
      if (tag == 0xfa16)
      raw_width = get2();
      if (tag == 0xfa17)
      raw_height = get2();
    */
    if (tag == 0xfa18)
    {
      imgdata.makernotes.kodak.offset_left = getint(8);
      if (type != 8)
        imgdata.makernotes.kodak.offset_left += 1;
    }
    if (tag == 0xfa19)
    {
      imgdata.makernotes.kodak.offset_top = getint(8);
      if (type != 8)
        imgdata.makernotes.kodak.offset_top += 1;
    }
    /* 0xfa31..0xfa3f: alternative crop rectangle tags */
    if (tag == 0xfa31)
      imgdata.sizes.raw_crop.cwidth = get2();
    if (tag == 0xfa32)
      imgdata.sizes.raw_crop.cheight = get2();
    if (tag == 0xfa3e)
      imgdata.sizes.raw_crop.cleft = get2();
    if (tag == 0xfa3f)
      imgdata.sizes.raw_crop.ctop = get2();
    fseek(ifp, save, SEEK_SET);
  }
}
#else
/*
 * Minimal Kodak makernote IFD parser (non-library build): extracts white
 * balance multipliers, the linearization table, ISO and image size.
 * base is the TIFF offset base.
 */
void CLASS parse_kodak_ifd(int base)
{
  unsigned entries, tag, type, len, save;
  int j, chan, wb_idx = -2, wb_temp = 6500;
  float gmul[3] = {1, 1, 1}, acc;
  /* per-illuminant WB tag numbers, indexed by wb_idx; -1 = no tag */
  static const int wbtag[] = {64037, 64040, 64039, 64041, -1, -1, 64042};
  entries = get2();
  if (entries > 1024) /* implausible entry count: refuse to parse */
    return;
  while (entries--)
  {
    tiff_get(base, &tag, &type, &len, &save);
    if (tag == 1020) /* WB illuminant index */
      wb_idx = getint(type);
    if (tag == 1021 && len == 72)
    { /* WB set in software */
      fseek(ifp, 40, SEEK_CUR);
      for (chan = 0; chan < 3; chan++)
        cam_mul[chan] = 2048.0 / fMAX(1.0, get2());
      wb_idx = -2;
    }
    if (tag == 2118) /* WB color temperature */
      wb_temp = getint(type);
    if (tag == 2120 + wb_idx && wb_idx >= 0)
      for (chan = 0; chan < 3; chan++)
        cam_mul[chan] = 2048.0 / fMAX(1.0, getreal(type));
    if (tag == 2130 + wb_idx)
      for (chan = 0; chan < 3; chan++)
        gmul[chan] = getreal(type);
    if (tag == 2140 + wb_idx && wb_idx >= 0)
      for (chan = 0; chan < 3; chan++)
      {
        /* cubic polynomial in (temperature/100) per channel */
        for (acc = j = 0; j < 4; j++)
          acc += getreal(type) * pow(wb_temp / 100.0, j);
        cam_mul[chan] = 2048 / fMAX(1.0, (acc * gmul[chan]));
      }
    if (tag == 2317)
      linear_table(len);
    if (tag == 6020)
      iso_speed = getint(type);
    if (tag == 64013) /* WB index stored as a raw byte */
      wb_idx = fgetc(ifp);
    if ((unsigned)wb_idx < 7 && tag == wbtag[wb_idx])
      for (chan = 0; chan < 3; chan++)
        cam_mul[chan] = get4();
    if (tag == 64019)
      width = getint(type);
    if (tag == 64020)
      height = (getint(type) + 1) & -2; /* force even height */
    fseek(ifp, save, SEEK_SET);
  }
}
#endif
//@end COMMON
void CLASS parse_minolta(int base);
int CLASS parse_tiff(int base);
//@out COMMON
int CLASS parse_tiff_ifd(int base)
{
unsigned entries, tag, type, len, plen = 16, save;
int ifd, use_cm = 0, cfa, i, j, c, ima_len = 0;
char *cbuf, *cp;
uchar cfa_pat[16], cfa_pc[] = {0, 1, 2, 3}, tab[256];
double fm[3][4], cc[4][4], cm[4][3], cam_xyz[4][3], num;
double ab[] = {1, 1, 1, 1}, asn[] = {0, 0, 0, 0}, xyz[] = {1, 1, 1};
unsigned sony_curve[] = {0, 0, 0, 0, 0, 4095};
unsigned *buf, sony_offset = 0, sony_length = 0, sony_key = 0;
struct jhead jh;
int pana_raw = 0;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *sfp;
#endif
if (tiff_nifds >= sizeof tiff_ifd / sizeof tiff_ifd[0])
return 1;
ifd = tiff_nifds++;
for (j = 0; j < 4; j++)
for (i = 0; i < 4; i++)
cc[j][i] = i == j;
entries = get2();
if (entries > 512)
return 1;
#ifdef LIBRAW_LIBRARY_BUILD
INT64 fsize = ifp->size();
#endif
while (entries--)
{
tiff_get(base, &tag, &type, &len, &save);
#ifdef LIBRAW_LIBRARY_BUILD
INT64 savepos = ftell(ifp);
if (len > 8 && savepos + len > 2 * fsize)
{
fseek(ifp, save, SEEK_SET); // Recover tiff-read position!!
continue;
}
if (callbacks.exif_cb)
{
callbacks.exif_cb(callbacks.exifparser_data, tag | (pana_raw ? 0x30000 : ((ifd + 1) << 20)), type, len, order,
ifp);
fseek(ifp, savepos, SEEK_SET);
}
#endif
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncasecmp(make, "SONY", 4) ||
(!strncasecmp(make, "Hasselblad", 10) &&
(!strncasecmp(model, "Stellar", 7) || !strncasecmp(model, "Lunar", 5) || !strncasecmp(model, "HV", 2))))
{
switch (tag)
{
case 0x7300: // SR2 black level
for (int i = 0; i < 4 && i < len; i++)
cblack[i] = get2();
break;
case 0x7302:
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c < 2)] = get2();
break;
case 0x7312:
{
int i, lc[4];
FORC4 lc[c] = get2();
i = (lc[1] == 1024 && lc[2] == 1024) << 1;
SWAP(lc[i], lc[i + 1]);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c] = lc[c];
}
break;
case 0x7480:
case 0x7820:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1];
break;
case 0x7481:
case 0x7821:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][1];
break;
case 0x7482:
case 0x7822:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1];
break;
case 0x7483:
case 0x7823:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1];
break;
case 0x7484:
case 0x7824:
imgdata.color.WBCT_Coeffs[0][0] = 4500;
FORC3 imgdata.color.WBCT_Coeffs[0][c + 1] = get2();
imgdata.color.WBCT_Coeffs[0][4] = imgdata.color.WBCT_Coeffs[0][2];
break;
case 0x7486:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][1];
break;
case 0x7825:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1];
break;
case 0x7826:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][1];
break;
case 0x7827:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][1];
break;
case 0x7828:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][1];
break;
case 0x7829:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][1];
break;
case 0x782a:
imgdata.color.WBCT_Coeffs[1][0] = 8500;
FORC3 imgdata.color.WBCT_Coeffs[1][c + 1] = get2();
imgdata.color.WBCT_Coeffs[1][4] = imgdata.color.WBCT_Coeffs[1][2];
break;
case 0x782b:
imgdata.color.WBCT_Coeffs[2][0] = 6000;
FORC3 imgdata.color.WBCT_Coeffs[2][c + 1] = get2();
imgdata.color.WBCT_Coeffs[2][4] = imgdata.color.WBCT_Coeffs[2][2];
break;
case 0x782c:
imgdata.color.WBCT_Coeffs[3][0] = 3200;
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][c] = imgdata.color.WBCT_Coeffs[3][c + 1] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][3] = imgdata.color.WBCT_Coeffs[3][4] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][1];
break;
case 0x782d:
imgdata.color.WBCT_Coeffs[4][0] = 2500;
FORC3 imgdata.color.WBCT_Coeffs[4][c + 1] = get2();
imgdata.color.WBCT_Coeffs[4][4] = imgdata.color.WBCT_Coeffs[4][2];
break;
case 0x787f:
if (len == 3)
{
FORC3 imgdata.color.linear_max[c] = get2();
imgdata.color.linear_max[3] = imgdata.color.linear_max[1];
}
else if (len == 1)
{
imgdata.color.linear_max[0] = imgdata.color.linear_max[1] = imgdata.color.linear_max[2] =
imgdata.color.linear_max[3] = getreal(type); // Is non-short possible here??
}
break;
}
}
#endif
switch (tag)
{
case 1:
if (len == 4)
pana_raw = get4();
break;
case 5:
width = get2();
break;
case 6:
height = get2();
break;
case 7:
width += get2();
break;
case 9:
if ((i = get2()))
filters = i;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 10:
if (pana_raw && len == 1 && type == 3)
{
pana_bpp = get2();
}
break;
#endif
case 14:
case 15:
case 16:
#ifdef LIBRAW_LIBRARY_BUILD
if (pana_raw)
{
imgdata.color.linear_max[tag - 14] = get2();
if (tag == 15)
imgdata.color.linear_max[3] = imgdata.color.linear_max[1];
}
#endif
break;
case 17:
case 18:
if (type == 3 && len == 1)
cam_mul[(tag - 17) * 2] = get2() / 256.0;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 19:
if (pana_raw)
{
ushort nWB, cnt, tWB;
nWB = get2();
if (nWB > 0x100)
break;
for (cnt = 0; cnt < nWB; cnt++)
{
tWB = get2();
if (tWB < 0x100)
{
imgdata.color.WB_Coeffs[tWB][0] = get2();
imgdata.color.WB_Coeffs[tWB][2] = get2();
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] = 0x100;
}
else
get4();
}
}
break;
case 0x0120:
if (pana_raw)
{
unsigned sorder = order;
unsigned long sbase = base;
base = ftell(ifp);
order = get2();
fseek(ifp, 2, SEEK_CUR);
fseek(ifp, get4()-8, SEEK_CUR);
parse_tiff_ifd (base);
base = sbase;
order = sorder;
}
break;
case 0x2009:
if ((pana_encoding == 4) || (pana_encoding == 5))
{
int n = MIN (8, len);
int permut[8] = {3, 2, 1, 0, 3+4, 2+4, 1+4, 0+4};
imgdata.makernotes.panasonic.BlackLevelDim = len;
for (int i=0; i < n; i++)
{
imgdata.makernotes.panasonic.BlackLevel[permut[i]] =
(float) (get2()) / (float) (powf(2.f, 14.f-pana_bpp));
}
}
break;
#endif
case 23:
if (type == 3)
iso_speed = get2();
break;
case 28:
case 29:
case 30:
#ifdef LIBRAW_LIBRARY_BUILD
if (pana_raw && len == 1 && type == 3)
{
pana_black[tag - 28] = get2();
}
else
#endif
{
cblack[tag - 28] = get2();
cblack[3] = cblack[1];
}
break;
case 36:
case 37:
case 38:
cam_mul[tag - 36] = get2();
break;
case 39:
#ifdef LIBRAW_LIBRARY_BUILD
if (pana_raw)
{
ushort nWB, cnt, tWB;
nWB = get2();
if (nWB > 0x100)
break;
for (cnt = 0; cnt < nWB; cnt++)
{
tWB = get2();
if (tWB < 0x100)
{
imgdata.color.WB_Coeffs[tWB][0] = get2();
imgdata.color.WB_Coeffs[tWB][1] = imgdata.color.WB_Coeffs[tWB][3] = get2();
imgdata.color.WB_Coeffs[tWB][2] = get2();
}
else
fseek(ifp, 6, SEEK_CUR);
}
}
break;
#endif
if (len < 50 || cam_mul[0])
break;
fseek(ifp, 12, SEEK_CUR);
FORC3 cam_mul[c] = get2();
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 45:
if (pana_raw && len == 1 && type == 3)
{
pana_encoding = get2();
}
break;
#endif
case 46:
if (type != 7 || fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8)
break;
thumb_offset = ftell(ifp) - 2;
thumb_length = len;
break;
case 61440: /* Fuji HS10 table */
fseek(ifp, get4() + base, SEEK_SET);
parse_tiff_ifd(base);
break;
case 2:
case 256:
case 61441: /* ImageWidth */
tiff_ifd[ifd].t_width = getint(type);
break;
case 3:
case 257:
case 61442: /* ImageHeight */
tiff_ifd[ifd].t_height = getint(type);
break;
case 258: /* BitsPerSample */
case 61443:
tiff_ifd[ifd].samples = len & 7;
tiff_ifd[ifd].bps = getint(type);
if (tiff_bps < tiff_ifd[ifd].bps)
tiff_bps = tiff_ifd[ifd].bps;
break;
case 61446:
raw_height = 0;
if (tiff_ifd[ifd].bps > 12)
break;
load_raw = &CLASS packed_load_raw;
load_flags = get4() ? 24 : 80;
break;
case 259: /* Compression */
tiff_ifd[ifd].comp = getint(type);
break;
case 262: /* PhotometricInterpretation */
tiff_ifd[ifd].phint = get2();
break;
case 270: /* ImageDescription */
fread(desc, 512, 1, ifp);
break;
case 271: /* Make */
fgets(make, 64, ifp);
break;
case 272: /* Model */
fgets(model, 64, ifp);
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 278:
tiff_ifd[ifd].rows_per_strip = getint(type);
break;
#endif
case 280: /* Panasonic RW2 offset */
if (type != 4)
break;
load_raw = &CLASS panasonic_load_raw;
load_flags = 0x2008;
case 273: /* StripOffset */
#ifdef LIBRAW_LIBRARY_BUILD
if (len > 1 && len < 16384)
{
off_t sav = ftell(ifp);
tiff_ifd[ifd].strip_offsets = (int *)calloc(len, sizeof(int));
tiff_ifd[ifd].strip_offsets_count = len;
for (int i = 0; i < len; i++)
tiff_ifd[ifd].strip_offsets[i] = get4() + base;
fseek(ifp, sav, SEEK_SET); // restore position
}
/* fallback */
#endif
case 513: /* JpegIFOffset */
case 61447:
tiff_ifd[ifd].offset = get4() + base;
if (!tiff_ifd[ifd].bps && tiff_ifd[ifd].offset > 0)
{
fseek(ifp, tiff_ifd[ifd].offset, SEEK_SET);
if (ljpeg_start(&jh, 1))
{
tiff_ifd[ifd].comp = 6;
tiff_ifd[ifd].t_width = jh.wide;
tiff_ifd[ifd].t_height = jh.high;
tiff_ifd[ifd].bps = jh.bits;
tiff_ifd[ifd].samples = jh.clrs;
if (!(jh.sraw || (jh.clrs & 1)))
tiff_ifd[ifd].t_width *= jh.clrs;
if ((tiff_ifd[ifd].t_width > 4 * tiff_ifd[ifd].t_height) & ~jh.clrs)
{
tiff_ifd[ifd].t_width /= 2;
tiff_ifd[ifd].t_height *= 2;
}
i = order;
parse_tiff(tiff_ifd[ifd].offset + 12);
order = i;
}
}
break;
case 274: /* Orientation */
tiff_ifd[ifd].t_flip = "50132467"[get2() & 7] - '0';
break;
case 277: /* SamplesPerPixel */
tiff_ifd[ifd].samples = getint(type) & 7;
break;
case 279: /* StripByteCounts */
#ifdef LIBRAW_LIBRARY_BUILD
if (len > 1 && len < 16384)
{
off_t sav = ftell(ifp);
tiff_ifd[ifd].strip_byte_counts = (int *)calloc(len, sizeof(int));
tiff_ifd[ifd].strip_byte_counts_count = len;
for (int i = 0; i < len; i++)
tiff_ifd[ifd].strip_byte_counts[i] = get4();
fseek(ifp, sav, SEEK_SET); // restore position
}
/* fallback */
#endif
case 514:
case 61448:
tiff_ifd[ifd].bytes = get4();
break;
case 61454: // FujiFilm "As Shot"
FORC3 cam_mul[(4 - c) % 3] = getint(type);
break;
case 305:
case 11: /* Software */
if ((pana_raw) && (tag == 11) && (type == 3))
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.makernotes.panasonic.Compression = get2();
#endif
break;
}
fgets(software, 64, ifp);
if (!strncmp(software, "Adobe", 5) || !strncmp(software, "dcraw", 5) || !strncmp(software, "UFRaw", 5) ||
!strncmp(software, "Bibble", 6) || !strcmp(software, "Digital Photo Professional"))
is_raw = 0;
break;
case 306: /* DateTime */
get_timestamp(0);
break;
case 315: /* Artist */
fread(artist, 64, 1, ifp);
break;
case 317:
tiff_ifd[ifd].predictor = getint(type);
break;
case 322: /* TileWidth */
tiff_ifd[ifd].t_tile_width = getint(type);
break;
case 323: /* TileLength */
tiff_ifd[ifd].t_tile_length = getint(type);
break;
case 324: /* TileOffsets */
tiff_ifd[ifd].offset = len > 1 ? ftell(ifp) : get4();
if (len == 1)
tiff_ifd[ifd].t_tile_width = tiff_ifd[ifd].t_tile_length = 0;
if (len == 4)
{
load_raw = &CLASS sinar_4shot_load_raw;
is_raw = 5;
}
break;
case 325:
tiff_ifd[ifd].bytes = len > 1 ? ftell(ifp) : get4();
break;
case 330: /* SubIFDs */
if (!strcmp(model, "DSLR-A100") && tiff_ifd[ifd].t_width == 3872)
{
load_raw = &CLASS sony_arw_load_raw;
data_offset = get4() + base;
ifd++;
#ifdef LIBRAW_LIBRARY_BUILD
if (ifd >= sizeof tiff_ifd / sizeof tiff_ifd[0])
throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
break;
}
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncmp(make, "Hasselblad", 10) && libraw_internal_data.unpacker_data.hasselblad_parser_flag)
{
fseek(ifp, ftell(ifp) + 4, SEEK_SET);
fseek(ifp, get4() + base, SEEK_SET);
parse_tiff_ifd(base);
break;
}
#endif
if (len > 1000)
len = 1000; /* 1000 SubIFDs is enough */
while (len--)
{
i = ftell(ifp);
fseek(ifp, get4() + base, SEEK_SET);
if (parse_tiff_ifd(base))
break;
fseek(ifp, i + 4, SEEK_SET);
}
break;
case 339:
tiff_ifd[ifd].sample_format = getint(type);
break;
case 400:
strcpy(make, "Sarnoff");
maximum = 0xfff;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 700:
if ((type == 1 || type == 2 || type == 6 || type == 7) && len > 1 && len < 5100000)
{
xmpdata = (char *)malloc(xmplen = len + 1);
fread(xmpdata, len, 1, ifp);
xmpdata[len] = 0;
}
break;
#endif
case 28688:
FORC4 sony_curve[c + 1] = get2() >> 2 & 0xfff;
for (i = 0; i < 5; i++)
for (j = sony_curve[i] + 1; j <= sony_curve[i + 1]; j++)
curve[j] = curve[j - 1] + (1 << i);
break;
case 29184:
sony_offset = get4();
break;
case 29185:
sony_length = get4();
break;
case 29217:
sony_key = get4();
break;
case 29264:
parse_minolta(ftell(ifp));
raw_width = 0;
break;
case 29443:
FORC4 cam_mul[c ^ (c < 2)] = get2();
break;
case 29459:
FORC4 cam_mul[c] = get2();
i = (cam_mul[1] == 1024 && cam_mul[2] == 1024) << 1;
SWAP(cam_mul[i], cam_mul[i + 1])
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 30720: // Sony matrix, Sony_SR2SubIFD_0x7800
for (i = 0; i < 3; i++)
{
float num = 0.0;
for (c = 0; c < 3; c++)
{
imgdata.color.ccm[i][c] = (float)((short)get2());
num += imgdata.color.ccm[i][c];
}
if (num > 0.01)
FORC3 imgdata.color.ccm[i][c] = imgdata.color.ccm[i][c] / num;
}
break;
#endif
case 29456: // Sony black level, Sony_SR2SubIFD_0x7310, no more needs to be divided by 4
FORC4 cblack[c ^ c >> 1] = get2();
i = cblack[3];
FORC3 if (i > cblack[c]) i = cblack[c];
FORC4 cblack[c] -= i;
black = i;
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf(stderr, _("...Sony black: %u cblack: %u %u %u %u\n"), black, cblack[0], cblack[1], cblack[2],
cblack[3]);
#endif
break;
case 33405: /* Model2 */
fgets(model2, 64, ifp);
break;
case 33421: /* CFARepeatPatternDim */
if (get2() == 6 && get2() == 6)
filters = 9;
break;
case 33422: /* CFAPattern */
if (filters == 9)
{
FORC(36)((char *)xtrans)[c] = fgetc(ifp) & 3;
break;
}
case 64777: /* Kodak P-series */
if (len == 36)
{
filters = 9;
colors = 3;
FORC(36) xtrans[0][c] = fgetc(ifp) & 3;
}
else if (len > 0)
{
if ((plen = len) > 16)
plen = 16;
fread(cfa_pat, 1, plen, ifp);
for (colors = cfa = i = 0; i < plen && colors < 4; i++)
{
if(cfa_pat[i] > 31) continue; // Skip wrong data
colors += !(cfa & (1 << cfa_pat[i]));
cfa |= 1 << cfa_pat[i];
}
if (cfa == 070)
memcpy(cfa_pc, "\003\004\005", 3); /* CMY */
if (cfa == 072)
memcpy(cfa_pc, "\005\003\004\001", 4); /* GMCY */
goto guess_cfa_pc;
}
break;
case 33424:
case 65024:
fseek(ifp, get4() + base, SEEK_SET);
parse_kodak_ifd(base);
break;
case 33434: /* ExposureTime */
tiff_ifd[ifd].t_shutter = shutter = getreal(type);
break;
case 33437: /* FNumber */
aperture = getreal(type);
break;
#ifdef LIBRAW_LIBRARY_BUILD
// IB start
case 0x9400:
imgdata.other.exifAmbientTemperature = getreal(type);
if ((imgdata.other.CameraTemperature > -273.15f) && (OlyID == 0x4434353933ULL)) // TG-5
imgdata.other.CameraTemperature += imgdata.other.exifAmbientTemperature;
break;
case 0x9401:
imgdata.other.exifHumidity = getreal(type);
break;
case 0x9402:
imgdata.other.exifPressure = getreal(type);
break;
case 0x9403:
imgdata.other.exifWaterDepth = getreal(type);
break;
case 0x9404:
imgdata.other.exifAcceleration = getreal(type);
break;
case 0x9405:
imgdata.other.exifCameraElevationAngle = getreal(type);
break;
case 0xa405: // FocalLengthIn35mmFormat
imgdata.lens.FocalLengthIn35mmFormat = get2();
break;
case 0xa431: // BodySerialNumber
case 0xc62f:
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
break;
case 0xa432: // LensInfo, 42034dec, Lens Specification per EXIF standard
imgdata.lens.MinFocal = getreal(type);
imgdata.lens.MaxFocal = getreal(type);
imgdata.lens.MaxAp4MinFocal = getreal(type);
imgdata.lens.MaxAp4MaxFocal = getreal(type);
break;
case 0xa435: // LensSerialNumber
stmread(imgdata.lens.LensSerial, len, ifp);
break;
case 0xc630: // DNG LensInfo, Lens Specification per EXIF standard
imgdata.lens.MinFocal = getreal(type);
imgdata.lens.MaxFocal = getreal(type);
imgdata.lens.MaxAp4MinFocal = getreal(type);
imgdata.lens.MaxAp4MaxFocal = getreal(type);
break;
case 0xa433: // LensMake
stmread(imgdata.lens.LensMake, len, ifp);
break;
case 0xa434: // LensModel
stmread(imgdata.lens.Lens, len, ifp);
if (!strncmp(imgdata.lens.Lens, "----", 4))
imgdata.lens.Lens[0] = 0;
break;
case 0x9205:
imgdata.lens.EXIF_MaxAp = libraw_powf64l(2.0f, (getreal(type) / 2.0f));
break;
// IB end
#endif
case 34306: /* Leaf white balance */
FORC4
{
int q = get2();
if(q > 0) cam_mul[c ^ 1] = 4096.0 / q;
}
break;
case 34307: /* Leaf CatchLight color matrix */
fread(software, 1, 7, ifp);
if (strncmp(software, "MATRIX", 6))
break;
colors = 4;
for (raw_color = i = 0; i < 3; i++)
{
FORC4 fscanf(ifp, "%f", &rgb_cam[i][c ^ 1]);
if (!use_camera_wb)
continue;
num = 0;
FORC4 num += rgb_cam[i][c];
FORC4 rgb_cam[i][c] /= MAX(1, num);
}
break;
case 34310: /* Leaf metadata */
parse_mos(ftell(ifp));
case 34303:
strcpy(make, "Leaf");
break;
case 34665: /* EXIF tag */
fseek(ifp, get4() + base, SEEK_SET);
parse_exif(base);
break;
case 34853: /* GPSInfo tag */
{
unsigned pos;
fseek(ifp, pos = (get4() + base), SEEK_SET);
parse_gps(base);
#ifdef LIBRAW_LIBRARY_BUILD
fseek(ifp, pos, SEEK_SET);
parse_gps_libraw(base);
#endif
}
break;
case 34675: /* InterColorProfile */
case 50831: /* AsShotICCProfile */
profile_offset = ftell(ifp);
profile_length = len;
break;
case 37122: /* CompressedBitsPerPixel */
kodak_cbpp = get4();
break;
case 37386: /* FocalLength */
focal_len = getreal(type);
break;
case 37393: /* ImageNumber */
shot_order = getint(type);
break;
case 37400: /* old Kodak KDC tag */
for (raw_color = i = 0; i < 3; i++)
{
getreal(type);
FORC3 rgb_cam[i][c] = getreal(type);
}
break;
case 40976:
strip_offset = get4();
switch (tiff_ifd[ifd].comp)
{
case 32770:
load_raw = &CLASS samsung_load_raw;
break;
case 32772:
load_raw = &CLASS samsung2_load_raw;
break;
case 32773:
load_raw = &CLASS samsung3_load_raw;
break;
}
break;
case 46275: /* Imacon tags */
strcpy(make, "Imacon");
data_offset = ftell(ifp);
ima_len = len;
break;
case 46279:
if (!ima_len)
break;
fseek(ifp, 38, SEEK_CUR);
case 46274:
fseek(ifp, 40, SEEK_CUR);
raw_width = get4();
raw_height = get4();
left_margin = get4() & 7;
width = raw_width - left_margin - (get4() & 7);
top_margin = get4() & 7;
height = raw_height - top_margin - (get4() & 7);
if (raw_width == 7262 && ima_len == 234317952)
{
height = 5412;
width = 7216;
left_margin = 7;
filters = 0;
}
else if (raw_width == 7262)
{
height = 5444;
width = 7244;
left_margin = 7;
}
fseek(ifp, 52, SEEK_CUR);
FORC3 cam_mul[c] = getreal(11);
fseek(ifp, 114, SEEK_CUR);
flip = (get2() >> 7) * 90;
if (width * height * 6 == ima_len)
{
if (flip % 180 == 90)
SWAP(width, height);
raw_width = width;
raw_height = height;
left_margin = top_margin = filters = flip = 0;
}
sprintf(model, "Ixpress %d-Mp", height * width / 1000000);
load_raw = &CLASS imacon_full_load_raw;
if (filters)
{
if (left_margin & 1)
filters = 0x61616161;
load_raw = &CLASS unpacked_load_raw;
}
maximum = 0xffff;
break;
case 50454: /* Sinar tag */
case 50455:
if (len < 1 || len > 2560000 || !(cbuf = (char *)malloc(len)))
break;
#ifndef LIBRAW_LIBRARY_BUILD
fread(cbuf, 1, len, ifp);
#else
if (fread(cbuf, 1, len, ifp) != len)
throw LIBRAW_EXCEPTION_IO_CORRUPT; // cbuf to be free'ed in recycle
#endif
cbuf[len - 1] = 0;
for (cp = cbuf - 1; cp && cp < cbuf + len; cp = strchr(cp, '\n'))
if (!strncmp(++cp, "Neutral ", 8))
sscanf(cp + 8, "%f %f %f", cam_mul, cam_mul + 1, cam_mul + 2);
free(cbuf);
break;
case 50458:
if (!make[0])
strcpy(make, "Hasselblad");
break;
case 50459: /* Hasselblad tag */
#ifdef LIBRAW_LIBRARY_BUILD
libraw_internal_data.unpacker_data.hasselblad_parser_flag = 1;
#endif
i = order;
j = ftell(ifp);
c = tiff_nifds;
order = get2();
fseek(ifp, j + (get2(), get4()), SEEK_SET);
parse_tiff_ifd(j);
maximum = 0xffff;
tiff_nifds = c;
order = i;
break;
case 50706: /* DNGVersion */
FORC4 dng_version = (dng_version << 8) + fgetc(ifp);
if (!make[0])
strcpy(make, "DNG");
is_raw = 1;
break;
case 50708: /* UniqueCameraModel */
#ifdef LIBRAW_LIBRARY_BUILD
stmread(imgdata.color.UniqueCameraModel, len, ifp);
imgdata.color.UniqueCameraModel[sizeof(imgdata.color.UniqueCameraModel) - 1] = 0;
#endif
if (model[0])
break;
#ifndef LIBRAW_LIBRARY_BUILD
fgets(make, 64, ifp);
#else
strncpy(make, imgdata.color.UniqueCameraModel, MIN(len, sizeof(imgdata.color.UniqueCameraModel)));
#endif
if ((cp = strchr(make, ' ')))
{
strcpy(model, cp + 1);
*cp = 0;
}
break;
case 50710: /* CFAPlaneColor */
if (filters == 9)
break;
if (len > 4)
len = 4;
colors = len;
fread(cfa_pc, 1, colors, ifp);
guess_cfa_pc:
FORCC tab[cfa_pc[c]] = c;
cdesc[c] = 0;
for (i = 16; i--;)
filters = filters << 2 | tab[cfa_pat[i % plen]];
filters -= !filters;
break;
case 50711: /* CFALayout */
if (get2() == 2)
fuji_width = 1;
break;
case 291:
case 50712: /* LinearizationTable */
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_LINTABLE;
tiff_ifd[ifd].lineartable_offset = ftell(ifp);
tiff_ifd[ifd].lineartable_len = len;
#endif
linear_table(len);
break;
case 50713: /* BlackLevelRepeatDim */
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_BLACK;
tiff_ifd[ifd].dng_levels.dng_cblack[4] =
#endif
cblack[4] = get2();
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.dng_cblack[5] =
#endif
cblack[5] = get2();
if (cblack[4] * cblack[5] > (sizeof(cblack) / sizeof(cblack[0]) - 6))
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.dng_cblack[4] = tiff_ifd[ifd].dng_levels.dng_cblack[5] =
#endif
cblack[4] = cblack[5] = 1;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 0xf00d:
if (strcmp(model, "X-A3") &&
strcmp(model, "X-A10") &&
strcmp(model, "X-A5") &&
strcmp(model, "X-A20"))
{
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][(4 - c) % 3] = getint(type);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][1];
}
break;
case 0xf00c:
if (strcmp(model, "X-A3") &&
strcmp(model, "X-A10") &&
strcmp(model, "X-A5") &&
strcmp(model, "X-A20"))
{
unsigned fwb[4];
FORC4 fwb[c] = get4();
if (fwb[3] < 0x100)
{
imgdata.color.WB_Coeffs[fwb[3]][0] = fwb[1];
imgdata.color.WB_Coeffs[fwb[3]][1] = imgdata.color.WB_Coeffs[fwb[3]][3] = fwb[0];
imgdata.color.WB_Coeffs[fwb[3]][2] = fwb[2];
if ((fwb[3] == 17) && (libraw_internal_data.unpacker_data.lenRAFData > 3) &&
(libraw_internal_data.unpacker_data.lenRAFData < 10240000))
{
INT64 f_save = ftell(ifp);
ushort *rafdata = (ushort *)malloc(sizeof(ushort) * libraw_internal_data.unpacker_data.lenRAFData);
fseek(ifp, libraw_internal_data.unpacker_data.posRAFData, SEEK_SET);
fread(rafdata, sizeof(ushort), libraw_internal_data.unpacker_data.lenRAFData, ifp);
fseek(ifp, f_save, SEEK_SET);
int fj, found = 0;
for (int fi = 0; fi < (libraw_internal_data.unpacker_data.lenRAFData - 3); fi++)
{
if ((fwb[0] == rafdata[fi]) && (fwb[1] == rafdata[fi + 1]) && (fwb[2] == rafdata[fi + 2]))
{
if (rafdata[fi - 15] != fwb[0])
continue;
for (int wb_ind = 0, ofst = fi - 15; wb_ind < nFuji_wb_list1; wb_ind++, ofst += 3)
{
imgdata.color.WB_Coeffs[Fuji_wb_list1[wb_ind]][1] =
imgdata.color.WB_Coeffs[Fuji_wb_list1[wb_ind]][3] = rafdata[ofst];
imgdata.color.WB_Coeffs[Fuji_wb_list1[wb_ind]][0] = rafdata[ofst + 1];
imgdata.color.WB_Coeffs[Fuji_wb_list1[wb_ind]][2] = rafdata[ofst + 2];
}
fi += 0x60;
for (fj = fi; fj < (fi + 15); fj += 3)
if (rafdata[fj] != rafdata[fi])
{
found = 1;
break;
}
if (found)
{
fj = fj - 93;
for (int iCCT = 0; iCCT < 31; iCCT++)
{
imgdata.color.WBCT_Coeffs[iCCT][0] = FujiCCT_K[iCCT];
imgdata.color.WBCT_Coeffs[iCCT][1] = rafdata[iCCT * 3 + 1 + fj];
imgdata.color.WBCT_Coeffs[iCCT][2] = imgdata.color.WBCT_Coeffs[iCCT][4] = rafdata[iCCT * 3 + fj];
imgdata.color.WBCT_Coeffs[iCCT][3] = rafdata[iCCT * 3 + 2 + fj];
}
}
free(rafdata);
break;
}
}
}
}
FORC4 fwb[c] = get4();
if (fwb[3] < 0x100)
{
imgdata.color.WB_Coeffs[fwb[3]][0] = fwb[1];
imgdata.color.WB_Coeffs[fwb[3]][1] = imgdata.color.WB_Coeffs[fwb[3]][3] = fwb[0];
imgdata.color.WB_Coeffs[fwb[3]][2] = fwb[2];
}
}
break;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
case 50709:
stmread(imgdata.color.LocalizedCameraModel, len, ifp);
break;
#endif
case 61450:
cblack[4] = cblack[5] = MIN(sqrt((double)len), 64);
case 50714: /* BlackLevel */
#ifdef LIBRAW_LIBRARY_BUILD
if (tiff_ifd[ifd].samples > 1 && tiff_ifd[ifd].samples == len) // LinearDNG, per-channel black
{
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_BLACK;
for (i = 0; i < colors && i < 4 && i < len; i++)
tiff_ifd[ifd].dng_levels.dng_cblack[i] = cblack[i] = getreal(type) + 0.5;
tiff_ifd[ifd].dng_levels.dng_black = black = 0;
}
else
#endif
if ((cblack[4] * cblack[5] < 2) && len == 1)
{
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_BLACK;
tiff_ifd[ifd].dng_levels.dng_black =
#endif
black = getreal(type);
}
else if (cblack[4] * cblack[5] <= len)
{
FORC(cblack[4] * cblack[5])
cblack[6 + c] = getreal(type);
black = 0;
FORC4
cblack[c] = 0;
#ifdef LIBRAW_LIBRARY_BUILD
if (tag == 50714)
{
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_BLACK;
FORC(cblack[4] * cblack[5])
tiff_ifd[ifd].dng_levels.dng_cblack[6 + c] = cblack[6 + c];
tiff_ifd[ifd].dng_levels.dng_black = 0;
FORC4
tiff_ifd[ifd].dng_levels.dng_cblack[c] = 0;
}
#endif
}
break;
case 50715: /* BlackLevelDeltaH */
case 50716: /* BlackLevelDeltaV */
for (num = i = 0; i < len && i < 65536; i++)
num += getreal(type);
if(len>0)
{
black += num / len + 0.5;
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.dng_black += num / len + 0.5;
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_BLACK;
#endif
}
break;
case 50717: /* WhiteLevel */
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_WHITE;
tiff_ifd[ifd].dng_levels.dng_whitelevel[0] =
#endif
maximum = getint(type);
#ifdef LIBRAW_LIBRARY_BUILD
if (tiff_ifd[ifd].samples > 1) // Linear DNG case
for (i = 1; i < colors && i < 4 && i < len; i++)
tiff_ifd[ifd].dng_levels.dng_whitelevel[i] = getint(type);
#endif
break;
case 50718: /* DefaultScale */
{
float q1 = getreal(type);
float q2 = getreal(type);
if(q1 > 0.00001f && q2 > 0.00001f)
{
pixel_aspect = q1/q2;
if (pixel_aspect > 0.995 && pixel_aspect < 1.005)
pixel_aspect = 1.0;
}
}
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 50719: /* DefaultCropOrigin */
if (len == 2)
{
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_CROPORIGIN;
tiff_ifd[ifd].dng_levels.default_crop[0] = getreal(type);
tiff_ifd[ifd].dng_levels.default_crop[1] = getreal(type);
if (!strncasecmp(make, "SONY", 4))
{
imgdata.sizes.raw_crop.cleft = tiff_ifd[ifd].dng_levels.default_crop[0];
imgdata.sizes.raw_crop.ctop = tiff_ifd[ifd].dng_levels.default_crop[1];
}
}
break;
case 50720: /* DefaultCropSize */
if (len == 2)
{
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_CROPSIZE;
tiff_ifd[ifd].dng_levels.default_crop[2] = getreal(type);
tiff_ifd[ifd].dng_levels.default_crop[3] = getreal(type);
if (!strncasecmp(make, "SONY", 4))
{
imgdata.sizes.raw_crop.cwidth = tiff_ifd[ifd].dng_levels.default_crop[2];
imgdata.sizes.raw_crop.cheight = tiff_ifd[ifd].dng_levels.default_crop[3];
}
}
break;
case 0x74c7:
if ((len == 2) && !strncasecmp(make, "SONY", 4))
{
imgdata.makernotes.sony.raw_crop.cleft = get4();
imgdata.makernotes.sony.raw_crop.ctop = get4();
}
break;
case 0x74c8:
if ((len == 2) && !strncasecmp(make, "SONY", 4))
{
imgdata.makernotes.sony.raw_crop.cwidth = get4();
imgdata.makernotes.sony.raw_crop.cheight = get4();
}
break;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
case 50778:
tiff_ifd[ifd].dng_color[0].illuminant = get2();
tiff_ifd[ifd].dng_color[0].parsedfields |= LIBRAW_DNGFM_ILLUMINANT;
break;
case 50779:
tiff_ifd[ifd].dng_color[1].illuminant = get2();
tiff_ifd[ifd].dng_color[1].parsedfields |= LIBRAW_DNGFM_ILLUMINANT;
break;
#endif
case 50721: /* ColorMatrix1 */
case 50722: /* ColorMatrix2 */
#ifdef LIBRAW_LIBRARY_BUILD
i = tag == 50721 ? 0 : 1;
tiff_ifd[ifd].dng_color[i].parsedfields |= LIBRAW_DNGFM_COLORMATRIX;
#endif
FORCC for (j = 0; j < 3; j++)
{
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_color[i].colormatrix[c][j] =
#endif
cm[c][j] = getreal(type);
}
use_cm = 1;
break;
case 0xc714: /* ForwardMatrix1 */
case 0xc715: /* ForwardMatrix2 */
#ifdef LIBRAW_LIBRARY_BUILD
i = tag == 0xc714 ? 0 : 1;
tiff_ifd[ifd].dng_color[i].parsedfields |= LIBRAW_DNGFM_FORWARDMATRIX;
#endif
for (j = 0; j < 3; j++)
FORCC
{
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_color[i].forwardmatrix[j][c] =
#endif
fm[j][c] = getreal(type);
}
break;
case 50723: /* CameraCalibration1 */
case 50724: /* CameraCalibration2 */
#ifdef LIBRAW_LIBRARY_BUILD
j = tag == 50723 ? 0 : 1;
tiff_ifd[ifd].dng_color[j].parsedfields |= LIBRAW_DNGFM_CALIBRATION;
#endif
for (i = 0; i < colors; i++)
FORCC
{
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_color[j].calibration[i][c] =
#endif
cc[i][c] = getreal(type);
}
break;
case 50727: /* AnalogBalance */
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_ANALOGBALANCE;
#endif
FORCC
{
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.analogbalance[c] =
#endif
ab[c] = getreal(type);
}
break;
case 50728: /* AsShotNeutral */
FORCC asn[c] = getreal(type);
break;
case 50729: /* AsShotWhiteXY */
xyz[0] = getreal(type);
xyz[1] = getreal(type);
xyz[2] = 1 - xyz[0] - xyz[1];
FORC3 xyz[c] /= d65_white[c];
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 50730: /* DNG: Baseline Exposure */
baseline_exposure = getreal(type);
break;
#endif
// IB start
case 50740: /* tag 0xc634 : DNG Adobe, DNG Pentax, Sony SR2, DNG Private */
#ifdef LIBRAW_LIBRARY_BUILD
{
char mbuf[64];
unsigned short makernote_found = 0;
INT64 curr_pos, start_pos = ftell(ifp);
unsigned MakN_order, m_sorder = order;
unsigned MakN_length;
unsigned pos_in_original_raw;
fread(mbuf, 1, 6, ifp);
if (!strcmp(mbuf, "Adobe"))
{
order = 0x4d4d; // Adobe header is always in "MM" / big endian
curr_pos = start_pos + 6;
while (curr_pos + 8 - start_pos <= len)
{
fread(mbuf, 1, 4, ifp);
curr_pos += 8;
if (!strncmp(mbuf, "MakN", 4))
{
makernote_found = 1;
MakN_length = get4();
MakN_order = get2();
pos_in_original_raw = get4();
order = MakN_order;
INT64 save_pos = ifp->tell();
parse_makernote_0xc634(curr_pos + 6 - pos_in_original_raw, 0, AdobeDNG);
curr_pos = save_pos + MakN_length - 6;
fseek(ifp, curr_pos, SEEK_SET);
fread(mbuf, 1, 4, ifp);
curr_pos += 8;
if (!strncmp(mbuf, "SR2 ", 4))
{
order = 0x4d4d;
MakN_length = get4();
MakN_order = get2();
pos_in_original_raw = get4();
order = MakN_order;
unsigned *buf_SR2;
uchar *cbuf_SR2;
unsigned icbuf_SR2;
unsigned entries, tag, type, len, save;
int ival;
unsigned SR2SubIFDOffset = 0;
unsigned SR2SubIFDLength = 0;
unsigned SR2SubIFDKey = 0;
int base = curr_pos + 6 - pos_in_original_raw;
entries = get2();
while (entries--)
{
tiff_get(base, &tag, &type, &len, &save);
if (tag == 0x7200)
{
SR2SubIFDOffset = get4();
}
else if (tag == 0x7201)
{
SR2SubIFDLength = get4();
}
else if (tag == 0x7221)
{
SR2SubIFDKey = get4();
}
fseek(ifp, save, SEEK_SET);
}
if (SR2SubIFDLength && (SR2SubIFDLength < 10240000) && (buf_SR2 = (unsigned *)malloc(SR2SubIFDLength+1024))) // 1024b for safety
{
fseek(ifp, SR2SubIFDOffset + base, SEEK_SET);
fread(buf_SR2, SR2SubIFDLength, 1, ifp);
sony_decrypt(buf_SR2, SR2SubIFDLength / 4, 1, SR2SubIFDKey);
cbuf_SR2 = (uchar *)buf_SR2;
entries = sget2(cbuf_SR2);
icbuf_SR2 = 2;
while (entries--)
{
tag = sget2(cbuf_SR2 + icbuf_SR2);
icbuf_SR2 += 2;
type = sget2(cbuf_SR2 + icbuf_SR2);
icbuf_SR2 += 2;
len = sget4(cbuf_SR2 + icbuf_SR2);
icbuf_SR2 += 4;
if (len * ("11124811248484"[type < 14 ? type : 0] - '0') > 4)
{
ival = sget4(cbuf_SR2 + icbuf_SR2) - SR2SubIFDOffset;
}
else
{
ival = icbuf_SR2;
}
if(ival > SR2SubIFDLength) // points out of orig. buffer size
break; // END processing. Generally we should check against SR2SubIFDLength minus 6 of 8, depending on tag, but we allocated extra 1024b for buffer, so this does not matter
icbuf_SR2 += 4;
switch (tag)
{
case 0x7302:
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c < 2)] = sget2(cbuf_SR2 + ival + 2 * c);
break;
case 0x7312:
{
int i, lc[4];
FORC4 lc[c] = sget2(cbuf_SR2 + ival + 2 * c);
i = (lc[1] == 1024 && lc[2] == 1024) << 1;
SWAP(lc[i], lc[i + 1]);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c] = lc[c];
}
break;
case 0x7480:
case 0x7820:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1];
break;
case 0x7481:
case 0x7821:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][1];
break;
case 0x7482:
case 0x7822:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1];
break;
case 0x7483:
case 0x7823:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1];
break;
case 0x7484:
case 0x7824:
imgdata.color.WBCT_Coeffs[0][0] = 4500;
FORC3 imgdata.color.WBCT_Coeffs[0][c + 1] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WBCT_Coeffs[0][4] = imgdata.color.WBCT_Coeffs[0][2];
break;
case 0x7486:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Fluorescent][1];
break;
case 0x7825:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1];
break;
case 0x7826:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][1];
break;
case 0x7827:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][1];
break;
case 0x7828:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][1];
break;
case 0x7829:
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][3] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][1];
break;
case 0x782a:
imgdata.color.WBCT_Coeffs[1][0] = 8500;
FORC3 imgdata.color.WBCT_Coeffs[1][c + 1] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WBCT_Coeffs[1][4] = imgdata.color.WBCT_Coeffs[1][2];
break;
case 0x782b:
imgdata.color.WBCT_Coeffs[2][0] = 6000;
FORC3 imgdata.color.WBCT_Coeffs[2][c + 1] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WBCT_Coeffs[2][4] = imgdata.color.WBCT_Coeffs[2][2];
break;
case 0x782c:
imgdata.color.WBCT_Coeffs[3][0] = 3200;
FORC3 imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][c] = imgdata.color.WBCT_Coeffs[3][c + 1] =
sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][3] = imgdata.color.WBCT_Coeffs[3][4] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_StudioTungsten][1];
break;
case 0x782d:
imgdata.color.WBCT_Coeffs[4][0] = 2500;
FORC3 imgdata.color.WBCT_Coeffs[4][c + 1] = sget2(cbuf_SR2 + ival + 2 * c);
imgdata.color.WBCT_Coeffs[4][4] = imgdata.color.WBCT_Coeffs[4][2];
break;
}
}
free(buf_SR2);
}
} /* SR2 processed */
break;
}
}
}
else
{
fread(mbuf + 6, 1, 2, ifp);
if (!strcmp(mbuf, "PENTAX ") || !strcmp(mbuf, "SAMSUNG"))
{
makernote_found = 1;
fseek(ifp, start_pos, SEEK_SET);
parse_makernote_0xc634(base, 0, CameraDNG);
}
}
fseek(ifp, start_pos, SEEK_SET);
order = m_sorder;
}
// IB end
#endif
if (dng_version)
break;
parse_minolta(j = get4() + base);
fseek(ifp, j, SEEK_SET);
parse_tiff_ifd(base);
break;
case 50752:
read_shorts(cr2_slice, 3);
break;
case 50829: /* ActiveArea */
top_margin = getint(type);
left_margin = getint(type);
height = getint(type) - top_margin;
width = getint(type) - left_margin;
break;
case 50830: /* MaskedAreas */
for (i = 0; i < len && i < 32; i++)
((int *)mask)[i] = getint(type);
black = 0;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 50970: /* PreviewColorSpace */
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_PREVIEWCS;
tiff_ifd[ifd].dng_levels.preview_colorspace = getint(type);
break;
#endif
case 51009: /* OpcodeList2 */
#ifdef LIBRAW_LIBRARY_BUILD
tiff_ifd[ifd].dng_levels.parsedfields |= LIBRAW_DNGFM_OPCODE2;
tiff_ifd[ifd].opcode2_offset =
#endif
meta_offset = ftell(ifp);
break;
case 64772: /* Kodak P-series */
if (len < 13)
break;
fseek(ifp, 16, SEEK_CUR);
data_offset = get4();
fseek(ifp, 28, SEEK_CUR);
data_offset += get4();
load_raw = &CLASS packed_load_raw;
break;
case 65026:
if (type == 2)
fgets(model2, 64, ifp);
}
fseek(ifp, save, SEEK_SET);
}
if (sony_length && sony_length < 10240000 && (buf = (unsigned *)malloc(sony_length)))
{
fseek(ifp, sony_offset, SEEK_SET);
fread(buf, sony_length, 1, ifp);
sony_decrypt(buf, sony_length / 4, 1, sony_key);
#ifndef LIBRAW_LIBRARY_BUILD
sfp = ifp;
if ((ifp = tmpfile()))
{
fwrite(buf, sony_length, 1, ifp);
fseek(ifp, 0, SEEK_SET);
parse_tiff_ifd(-sony_offset);
fclose(ifp);
}
ifp = sfp;
#else
if (!ifp->tempbuffer_open(buf, sony_length))
{
parse_tiff_ifd(-sony_offset);
ifp->tempbuffer_close();
}
#endif
free(buf);
}
for (i = 0; i < colors; i++)
FORCC cc[i][c] *= ab[i];
if (use_cm)
{
FORCC for (i = 0; i < 3; i++) for (cam_xyz[c][i] = j = 0; j < colors; j++) cam_xyz[c][i] +=
cc[c][j] * cm[j][i] * xyz[i];
cam_xyz_coeff(cmatrix, cam_xyz);
}
if (asn[0])
{
cam_mul[3] = 0;
FORCC
if(fabs(asn[c])>0.0001)
cam_mul[c] = 1 / asn[c];
}
if (!use_cm)
FORCC if(fabs(cc[c][c])>0.0001) pre_mul[c] /= cc[c][c];
return 0;
}
/*
 * Walk a TIFF structure starting at file offset `base`.
 *
 * Reads the byte-order mark ("II" little-endian / "MM" big-endian) into the
 * global `order`, skips the 16-bit magic word, then follows the chain of IFD
 * offsets, handing each directory to parse_tiff_ifd().
 *
 * Returns 0 if the header is not a valid TIFF byte-order mark, 1 otherwise.
 */
int CLASS parse_tiff(int base)
{
  int dir_offset;
  fseek(ifp, base, SEEK_SET);
  order = get2();
  if (order != 0x4949 && order != 0x4d4d)
    return 0;        /* neither "II" nor "MM": not a TIFF header */
  get2();            /* skip the magic number (42) */
  /* Each IFD ends with the offset of the next one; 0 terminates the chain. */
  for (dir_offset = get4(); dir_offset; dir_offset = get4())
  {
    fseek(ifp, dir_offset + base, SEEK_SET);
    if (parse_tiff_ifd(base))
      break;         /* parser requested an early stop */
  }
  return 1;
}
void CLASS apply_tiff()
{
int max_samp = 0, ties = 0, raw = -1, thm = -1, i;
unsigned long long ns, os;
struct jhead jh;
thumb_misc = 16;
if (thumb_offset)
{
fseek(ifp, thumb_offset, SEEK_SET);
if (ljpeg_start(&jh, 1))
{
if ((unsigned)jh.bits < 17 && (unsigned)jh.wide < 0x10000 && (unsigned)jh.high < 0x10000)
{
thumb_misc = jh.bits;
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
}
for (i = tiff_nifds; i--;)
{
if (tiff_ifd[i].t_shutter)
shutter = tiff_ifd[i].t_shutter;
tiff_ifd[i].t_shutter = shutter;
}
for (i = 0; i < tiff_nifds; i++)
{
if( tiff_ifd[i].t_width < 1 || tiff_ifd[i].t_width > 65535
|| tiff_ifd[i].t_height < 1 || tiff_ifd[i].t_height > 65535)
continue; /* wrong image dimensions */
if (max_samp < tiff_ifd[i].samples)
max_samp = tiff_ifd[i].samples;
if (max_samp > 3)
max_samp = 3;
os = raw_width * raw_height;
ns = tiff_ifd[i].t_width * tiff_ifd[i].t_height;
if (tiff_bps)
{
os *= tiff_bps;
ns *= tiff_ifd[i].bps;
}
if ((tiff_ifd[i].comp != 6 || tiff_ifd[i].samples != 3) &&
unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 && (unsigned)tiff_ifd[i].bps < 33 &&
(unsigned)tiff_ifd[i].samples < 13 && ns && ((ns > os && (ties = 1)) || (ns == os && shot_select == ties++)))
{
raw_width = tiff_ifd[i].t_width;
raw_height = tiff_ifd[i].t_height;
tiff_bps = tiff_ifd[i].bps;
tiff_compress = tiff_ifd[i].comp;
data_offset = tiff_ifd[i].offset;
#ifdef LIBRAW_LIBRARY_BUILD
data_size = tiff_ifd[i].bytes;
#endif
tiff_flip = tiff_ifd[i].t_flip;
tiff_samples = tiff_ifd[i].samples;
tile_width = tiff_ifd[i].t_tile_width;
tile_length = tiff_ifd[i].t_tile_length;
shutter = tiff_ifd[i].t_shutter;
raw = i;
}
}
if (is_raw == 1 && ties)
is_raw = ties;
if (!tile_width)
tile_width = INT_MAX;
if (!tile_length)
tile_length = INT_MAX;
for (i = tiff_nifds; i--;)
if (tiff_ifd[i].t_flip)
tiff_flip = tiff_ifd[i].t_flip;
if (raw >= 0 && !load_raw)
switch (tiff_compress)
{
case 32767:
#ifdef LIBRAW_LIBRARY_BUILD
if (!dng_version && INT64(tiff_ifd[raw].bytes) == INT64(raw_width) * INT64(raw_height))
#else
if (tiff_ifd[raw].bytes == raw_width * raw_height)
#endif
{
tiff_bps = 14;
load_raw = &CLASS sony_arw2_load_raw;
break;
}
#ifdef LIBRAW_LIBRARY_BUILD
if (!dng_version && !strncasecmp(make, "Sony", 4) && INT64(tiff_ifd[raw].bytes) == INT64(raw_width) * INT64(raw_height) * 2ULL)
#else
if (!strncasecmp(make, "Sony", 4) && tiff_ifd[raw].bytes == raw_width * raw_height * 2)
#endif
{
tiff_bps = 14;
load_raw = &CLASS unpacked_load_raw;
break;
}
#ifdef LIBRAW_LIBRARY_BUILD
if (INT64(tiff_ifd[raw].bytes) * 8ULL != INT64(raw_width) * INT64(raw_height) * INT64(tiff_bps))
#else
if (tiff_ifd[raw].bytes * 8 != raw_width * raw_height * tiff_bps)
#endif
{
raw_height += 8;
load_raw = &CLASS sony_arw_load_raw;
break;
}
load_flags = 79;
case 32769:
load_flags++;
case 32770:
case 32773:
goto slr;
case 0:
case 1:
#ifdef LIBRAW_LIBRARY_BUILD
// Sony 14-bit uncompressed
if (!dng_version && !strncasecmp(make, "Sony", 4) && INT64(tiff_ifd[raw].bytes) == INT64(raw_width) * INT64(raw_height) * 2ULL)
{
tiff_bps = 14;
load_raw = &CLASS unpacked_load_raw;
break;
}
if (!dng_version && !strncasecmp(make, "Sony", 4) && tiff_ifd[raw].samples == 4 &&
INT64(tiff_ifd[raw].bytes) == INT64(raw_width) * INT64(raw_height) * 8ULL) // Sony ARQ
{
tiff_bps = 14;
tiff_samples = 4;
load_raw = &CLASS sony_arq_load_raw;
filters = 0;
strcpy(cdesc, "RGBG");
break;
}
if (!strncasecmp(make, "Nikon", 5) && !strncmp(software, "Nikon Scan", 10))
{
load_raw = &CLASS nikon_coolscan_load_raw;
raw_color = 1;
filters = 0;
break;
}
if (!strncmp(make, "OLYMPUS", 7) && INT64(tiff_ifd[raw].bytes) * 2ULL == INT64(raw_width) * INT64(raw_height) * 3ULL)
#else
if (!strncmp(make, "OLYMPUS", 7) && tiff_ifd[raw].bytes * 2 == raw_width * raw_height * 3)
#endif
load_flags = 24;
#ifdef LIBRAW_LIBRARY_BUILD
if (!dng_version && INT64(tiff_ifd[raw].bytes) * 5ULL == INT64(raw_width) * INT64(raw_height) * 8ULL)
#else
if (tiff_ifd[raw].bytes * 5 == raw_width * raw_height * 8)
#endif
{
load_flags = 81;
tiff_bps = 12;
}
slr:
switch (tiff_bps)
{
case 8:
load_raw = &CLASS eight_bit_load_raw;
break;
case 12:
if (tiff_ifd[raw].phint == 2)
load_flags = 6;
load_raw = &CLASS packed_load_raw;
break;
case 14:
load_flags = 0;
case 16:
load_raw = &CLASS unpacked_load_raw;
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncmp(make, "OLYMPUS", 7) && INT64(tiff_ifd[raw].bytes) * 7ULL > INT64(raw_width) * INT64(raw_height))
#else
if (!strncmp(make, "OLYMPUS", 7) && tiff_ifd[raw].bytes * 7 > raw_width * raw_height)
#endif
load_raw = &CLASS olympus_load_raw;
}
break;
case 6:
case 7:
case 99:
load_raw = &CLASS lossless_jpeg_load_raw;
break;
case 262:
load_raw = &CLASS kodak_262_load_raw;
break;
case 34713:
#ifdef LIBRAW_LIBRARY_BUILD
if ((INT64(raw_width) + 9ULL) / 10ULL * 16ULL * INT64(raw_height) == INT64(tiff_ifd[raw].bytes))
#else
if ((raw_width + 9) / 10 * 16 * raw_height == tiff_ifd[raw].bytes)
#endif
{
load_raw = &CLASS packed_load_raw;
load_flags = 1;
}
#ifdef LIBRAW_LIBRARY_BUILD
else if (INT64(raw_width) * INT64(raw_height) * 3ULL == INT64(tiff_ifd[raw].bytes) * 2ULL)
#else
else if (raw_width * raw_height * 3 == tiff_ifd[raw].bytes * 2)
#endif
{
load_raw = &CLASS packed_load_raw;
if (model[0] == 'N')
load_flags = 80;
}
#ifdef LIBRAW_LIBRARY_BUILD
else if (INT64(raw_width) * INT64(raw_height) * 3ULL == INT64(tiff_ifd[raw].bytes))
#else
else if (raw_width * raw_height * 3 == tiff_ifd[raw].bytes)
#endif
{
load_raw = &CLASS nikon_yuv_load_raw;
gamma_curve(1 / 2.4, 12.92, 1, 4095);
memset(cblack, 0, sizeof cblack);
filters = 0;
}
#ifdef LIBRAW_LIBRARY_BUILD
else if (INT64(raw_width) * INT64(raw_height) * 2ULL == INT64(tiff_ifd[raw].bytes))
#else
else if (raw_width * raw_height * 2 == tiff_ifd[raw].bytes)
#endif
{
load_raw = &CLASS unpacked_load_raw;
load_flags = 4;
order = 0x4d4d;
}
else
#ifdef LIBRAW_LIBRARY_BUILD
if (INT64(raw_width) * INT64(raw_height) * 3ULL == INT64(tiff_ifd[raw].bytes) * 2ULL)
{
load_raw = &CLASS packed_load_raw;
load_flags = 80;
}
else if (tiff_ifd[raw].rows_per_strip && tiff_ifd[raw].strip_offsets_count &&
tiff_ifd[raw].strip_offsets_count == tiff_ifd[raw].strip_byte_counts_count)
{
int fit = 1;
for (int i = 0; i < tiff_ifd[raw].strip_byte_counts_count - 1; i++) // all but last
if (INT64(tiff_ifd[raw].strip_byte_counts[i]) * 2ULL != INT64(tiff_ifd[raw].rows_per_strip) * INT64(raw_width) * 3ULL)
{
fit = 0;
break;
}
if (fit)
load_raw = &CLASS nikon_load_striped_packed_raw;
else
load_raw = &CLASS nikon_load_raw; // fallback
}
else
#endif
load_raw = &CLASS nikon_load_raw;
break;
case 65535:
load_raw = &CLASS pentax_load_raw;
break;
case 65000:
switch (tiff_ifd[raw].phint)
{
case 2:
load_raw = &CLASS kodak_rgb_load_raw;
filters = 0;
break;
case 6:
load_raw = &CLASS kodak_ycbcr_load_raw;
filters = 0;
break;
case 32803:
load_raw = &CLASS kodak_65000_load_raw;
}
case 32867:
case 34892:
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 8:
break;
#endif
default:
is_raw = 0;
}
if (!dng_version)
if (((tiff_samples == 3 && tiff_ifd[raw].bytes && tiff_bps != 14 && (tiff_compress & -16) != 32768) ||
(tiff_bps == 8 && strncmp(make, "Phase", 5) && strncmp(make, "Leaf", 4) && !strcasestr(make, "Kodak") &&
!strstr(model2, "DEBUG RAW"))) &&
strncmp(software, "Nikon Scan", 10))
is_raw = 0;
for (i = 0; i < tiff_nifds; i++)
if (i != raw &&
(tiff_ifd[i].samples == max_samp || (tiff_ifd[i].comp == 7 && tiff_ifd[i].samples == 1)) /* Allow 1-bps JPEGs */
&& tiff_ifd[i].bps > 0 && tiff_ifd[i].bps < 33 && tiff_ifd[i].phint != 32803 && tiff_ifd[i].phint != 34892 &&
unsigned(tiff_ifd[i].t_width | tiff_ifd[i].t_height) < 0x10000 &&
tiff_ifd[i].t_width * tiff_ifd[i].t_height / (SQR(tiff_ifd[i].bps) + 1) >
thumb_width * thumb_height / (SQR(thumb_misc) + 1) &&
tiff_ifd[i].comp != 34892)
{
thumb_width = tiff_ifd[i].t_width;
thumb_height = tiff_ifd[i].t_height;
thumb_offset = tiff_ifd[i].offset;
thumb_length = tiff_ifd[i].bytes;
thumb_misc = tiff_ifd[i].bps;
thm = i;
}
if (thm >= 0)
{
thumb_misc |= tiff_ifd[thm].samples << 5;
switch (tiff_ifd[thm].comp)
{
case 0:
write_thumb = &CLASS layer_thumb;
break;
case 1:
if (tiff_ifd[thm].bps <= 8)
write_thumb = &CLASS ppm_thumb;
else if (!strncmp(make, "Imacon", 6))
write_thumb = &CLASS ppm16_thumb;
else
thumb_load_raw = &CLASS kodak_thumb_load_raw;
break;
case 65000:
thumb_load_raw = tiff_ifd[thm].phint == 6 ? &CLASS kodak_ycbcr_load_raw : &CLASS kodak_rgb_load_raw;
}
}
}
void CLASS parse_minolta(int base)
{
int save, tag, len, offset, high = 0, wide = 0, i, c;
short sorder = order;
fseek(ifp, base, SEEK_SET);
if (fgetc(ifp) || fgetc(ifp) - 'M' || fgetc(ifp) - 'R')
return;
order = fgetc(ifp) * 0x101;
offset = base + get4() + 8;
#ifdef LIBRAW_LIBRARY_BUILD
if(offset>ifp->size()-8) // At least 8 bytes for tag/len
offset = ifp->size()-8;
#endif
while ((save = ftell(ifp)) < offset)
{
for (tag = i = 0; i < 4; i++)
tag = tag << 8 | fgetc(ifp);
len = get4();
if(len < 0)
return; // just ignore wrong len?? or raise bad file exception?
switch (tag)
{
case 0x505244: /* PRD */
fseek(ifp, 8, SEEK_CUR);
high = get2();
wide = get2();
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 0x524946: /* RIF */
if (!strncasecmp(model, "DSLR-A100", 9))
{
fseek(ifp, 8, SEEK_CUR);
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][2] = get2();
get4();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][0] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][2] = get2();
imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Daylight][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Cloudy][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][1] = imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][1] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][1] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_W][3] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][1] =
imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][3] = 0x100;
}
break;
#endif
case 0x574247: /* WBG */
get4();
i = strcmp(model, "DiMAGE A200") ? 0 : 3;
FORC4 cam_mul[c ^ (c >> 1) ^ i] = get2();
break;
case 0x545457: /* TTW */
parse_tiff(ftell(ifp));
data_offset = offset;
}
fseek(ifp, save + len + 8, SEEK_SET);
}
raw_height = high;
raw_width = wide;
order = sorder;
}
/*
Many cameras have a "debug mode" that writes JPEG and raw
at the same time. The raw file has no header, so try to
to open the matching JPEG file and read its metadata.
*/
void CLASS parse_external_jpeg()
{
const char *file, *ext;
char *jname, *jfile, *jext;
#ifndef LIBRAW_LIBRARY_BUILD
FILE *save = ifp;
#else
#if defined(_WIN32) && !defined(__MINGW32__) && defined(_MSC_VER) && (_MSC_VER > 1310)
if (ifp->wfname())
{
std::wstring rawfile(ifp->wfname());
rawfile.replace(rawfile.length() - 3, 3, L"JPG");
if (!ifp->subfile_open(rawfile.c_str()))
{
parse_tiff(12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
return;
}
#endif
if (!ifp->fname())
{
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
return;
}
#endif
ext = strrchr(ifname, '.');
file = strrchr(ifname, '/');
if (!file)
file = strrchr(ifname, '\\');
#ifndef LIBRAW_LIBRARY_BUILD
if (!file)
file = ifname - 1;
#else
if (!file)
file = (char *)ifname - 1;
#endif
file++;
if (!ext || strlen(ext) != 4 || ext - file != 8)
return;
jname = (char *)malloc(strlen(ifname) + 1);
merror(jname, "parse_external_jpeg()");
strcpy(jname, ifname);
jfile = file - ifname + jname;
jext = ext - ifname + jname;
if (strcasecmp(ext, ".jpg"))
{
strcpy(jext, isupper(ext[1]) ? ".JPG" : ".jpg");
if (isdigit(*file))
{
memcpy(jfile, file + 4, 4);
memcpy(jfile + 4, file, 4);
}
}
else
while (isdigit(*--jext))
{
if (*jext != '9')
{
(*jext)++;
break;
}
*jext = '0';
}
#ifndef LIBRAW_LIBRARY_BUILD
if (strcmp(jname, ifname))
{
if ((ifp = fopen(jname, "rb")))
{
#ifdef DCRAW_VERBOSE
if (verbose)
fprintf(stderr, _("Reading metadata from %s ...\n"), jname);
#endif
parse_tiff(12);
thumb_offset = 0;
is_raw = 1;
fclose(ifp);
}
}
#else
if (strcmp(jname, ifname))
{
if (!ifp->subfile_open(jname))
{
parse_tiff(12);
thumb_offset = 0;
is_raw = 1;
ifp->subfile_close();
}
else
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
}
#endif
if (!timestamp)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_METADATA;
#endif
#ifdef DCRAW_VERBOSE
fprintf(stderr, _("Failed to read metadata from %s\n"), jname);
#endif
}
free(jname);
#ifndef LIBRAW_LIBRARY_BUILD
ifp = save;
#endif
}
/*
CIFF block 0x1030 contains an 8x8 white sample.
Load this into white[][] for use in scale_colors().
*/
void CLASS ciff_block_1030()
{
static const ushort key[] = {0x410, 0x45f3};
int i, bpp, row, col, vbits = 0;
unsigned long bitbuf = 0;
if ((get2(), get4()) != 0x80008 || !get4())
return;
bpp = get2();
if (bpp != 10 && bpp != 12)
return;
for (i = row = 0; row < 8; row++)
for (col = 0; col < 8; col++)
{
if (vbits < bpp)
{
bitbuf = bitbuf << 16 | (get2() ^ key[i++ & 1]);
vbits += 16;
}
white[row][col] = bitbuf >> (vbits -= bpp) & ~(-1 << bpp);
}
}
/*
Parse a CIFF file, better known as Canon CRW format.
*/
void CLASS parse_ciff(int offset, int length, int depth)
{
int tboff, nrecs, c, type, len, save, wbi = -1;
ushort key[] = {0x410, 0x45f3};
fseek(ifp, offset + length - 4, SEEK_SET);
tboff = get4() + offset;
fseek(ifp, tboff, SEEK_SET);
nrecs = get2();
if ((nrecs | depth) > 127)
return;
while (nrecs--)
{
type = get2();
len = get4();
save = ftell(ifp) + 4;
fseek(ifp, offset + get4(), SEEK_SET);
if ((((type >> 8) + 8) | 8) == 0x38)
{
parse_ciff(ftell(ifp), len, depth + 1); /* Parse a sub-table */
}
#ifdef LIBRAW_LIBRARY_BUILD
if (type == 0x3004)
parse_ciff(ftell(ifp), len, depth + 1);
#endif
if (type == 0x0810)
fread(artist, 64, 1, ifp);
if (type == 0x080a)
{
fread(make, 64, 1, ifp);
fseek(ifp, strbuflen(make) - 63, SEEK_CUR);
fread(model, 64, 1, ifp);
}
if (type == 0x1810)
{
width = get4();
height = get4();
pixel_aspect = int_to_float(get4());
flip = get4();
}
if (type == 0x1835) /* Get the decoder table */
tiff_compress = get4();
if (type == 0x2007)
{
thumb_offset = ftell(ifp);
thumb_length = len;
}
if (type == 0x1818)
{
shutter = libraw_powf64l(2.0f, -int_to_float((get4(), get4())));
aperture = libraw_powf64l(2.0f, int_to_float(get4()) / 2);
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CurAp = aperture;
#endif
}
if (type == 0x102a)
{
// iso_speed = pow (2.0, (get4(),get2())/32.0 - 4) * 50;
iso_speed = libraw_powf64l(2.0f, ((get2(), get2()) + get2()) / 32.0f - 5.0f) * 100.0f;
#ifdef LIBRAW_LIBRARY_BUILD
aperture = _CanonConvertAperture((get2(), get2()));
imgdata.lens.makernotes.CurAp = aperture;
#else
aperture = libraw_powf64l(2.0, (get2(), (short)get2()) / 64.0);
#endif
shutter = libraw_powf64l(2.0, -((short)get2()) / 32.0);
wbi = (get2(), get2());
if (wbi > 17)
wbi = 0;
fseek(ifp, 32, SEEK_CUR);
if (shutter > 1e6)
shutter = get2() / 10.0;
}
if (type == 0x102c)
{
if (get2() > 512)
{ /* Pro90, G1 */
fseek(ifp, 118, SEEK_CUR);
FORC4 cam_mul[c ^ 2] = get2();
}
else
{ /* G2, S30, S40 */
fseek(ifp, 98, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2();
}
}
#ifdef LIBRAW_LIBRARY_BUILD
if (type == 0x10a9)
{
INT64 o = ftell(ifp);
fseek(ifp, (0x1 << 1), SEEK_CUR);
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ (c >> 1)] = get2();
Canon_WBpresets(0, 0);
fseek(ifp, o, SEEK_SET);
}
if (type == 0x102d)
{
INT64 o = ftell(ifp);
Canon_CameraSettings();
fseek(ifp, o, SEEK_SET);
}
if (type == 0x580b)
{
if (strcmp(model, "Canon EOS D30"))
sprintf(imgdata.shootinginfo.BodySerial, "%d", len);
else
sprintf(imgdata.shootinginfo.BodySerial, "%0x-%05d", len >> 16, len & 0xffff);
}
#endif
if (type == 0x0032)
{
if (len == 768)
{ /* EOS D30 */
fseek(ifp, 72, SEEK_CUR);
FORC4
{
ushort q = get2();
cam_mul[c ^ (c >> 1)] = 1024.0/ MAX(1,q);
}
if (!wbi)
cam_mul[0] = -1; /* use my auto white balance */
}
else if (!cam_mul[0])
{
if (get2() == key[0]) /* Pro1, G6, S60, S70 */
c = (strstr(model, "Pro1") ? "012346000000000000" : "01345:000000006008")[LIM(0, wbi, 17)] - '0' + 2;
else
{ /* G3, G5, S45, S50 */
c = "023457000000006000"[LIM(0, wbi, 17)] - '0';
key[0] = key[1] = 0;
}
fseek(ifp, 78 + c * 8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1) ^ 1] = get2() ^ key[c & 1];
if (!wbi)
cam_mul[0] = -1;
}
}
if (type == 0x10a9)
{ /* D60, 10D, 300D, and clones */
if (len > 66)
wbi = "0134567028"[LIM(0, wbi, 9)] - '0';
fseek(ifp, 2 + wbi * 8, SEEK_CUR);
FORC4 cam_mul[c ^ (c >> 1)] = get2();
}
if (type == 0x1030 && wbi >= 0 && (0x18040 >> wbi & 1))
ciff_block_1030(); /* all that don't have 0x10a9 */
if (type == 0x1031)
{
raw_width = (get2(), get2());
raw_height = get2();
}
if (type == 0x501c)
{
iso_speed = len & 0xffff;
}
if (type == 0x5029)
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CurFocal = len >> 16;
imgdata.lens.makernotes.FocalType = len & 0xffff;
if (imgdata.lens.makernotes.FocalType == 2)
{
imgdata.lens.makernotes.CanonFocalUnits = 32;
if (imgdata.lens.makernotes.CanonFocalUnits > 1)
imgdata.lens.makernotes.CurFocal /= (float)imgdata.lens.makernotes.CanonFocalUnits;
}
focal_len = imgdata.lens.makernotes.CurFocal;
#else
focal_len = len >> 16;
if ((len & 0xffff) == 2)
focal_len /= 32;
#endif
}
if (type == 0x5813)
flash_used = int_to_float(len);
if (type == 0x5814)
canon_ev = int_to_float(len);
if (type == 0x5817)
shot_order = len;
if (type == 0x5834)
{
unique_id = len;
#ifdef LIBRAW_LIBRARY_BUILD
unique_id = setCanonBodyFeatures(unique_id);
#endif
}
if (type == 0x580e)
timestamp = len;
if (type == 0x180e)
timestamp = get4();
#ifdef LOCALTIME
if ((type | 0x4000) == 0x580e)
timestamp = mktime(gmtime(×tamp));
#endif
fseek(ifp, save, SEEK_SET);
}
}
void CLASS parse_rollei()
{
char line[128], *val;
struct tm t;
fseek(ifp, 0, SEEK_SET);
memset(&t, 0, sizeof t);
do
{
fgets(line, 128, ifp);
if ((val = strchr(line, '=')))
*val++ = 0;
else
val = line + strbuflen(line);
if (!strcmp(line, "DAT"))
sscanf(val, "%d.%d.%d", &t.tm_mday, &t.tm_mon, &t.tm_year);
if (!strcmp(line, "TIM"))
sscanf(val, "%d:%d:%d", &t.tm_hour, &t.tm_min, &t.tm_sec);
if (!strcmp(line, "HDR"))
thumb_offset = atoi(val);
if (!strcmp(line, "X "))
raw_width = atoi(val);
if (!strcmp(line, "Y "))
raw_height = atoi(val);
if (!strcmp(line, "TX "))
thumb_width = atoi(val);
if (!strcmp(line, "TY "))
thumb_height = atoi(val);
} while (strncmp(line, "EOHD", 4));
data_offset = thumb_offset + thumb_width * thumb_height * 2;
t.tm_year -= 1900;
t.tm_mon -= 1;
if (mktime(&t) > 0)
timestamp = mktime(&t);
strcpy(make, "Rollei");
strcpy(model, "d530flex");
write_thumb = &CLASS rollei_thumb;
}
void CLASS parse_sinar_ia()
{
int entries, off;
char str[8], *cp;
order = 0x4949;
fseek(ifp, 4, SEEK_SET);
entries = get4();
fseek(ifp, get4(), SEEK_SET);
while (entries--)
{
off = get4();
get4();
fread(str, 8, 1, ifp);
if (!strcmp(str, "META"))
meta_offset = off;
if (!strcmp(str, "THUMB"))
thumb_offset = off;
if (!strcmp(str, "RAW0"))
data_offset = off;
}
fseek(ifp, meta_offset + 20, SEEK_SET);
fread(make, 64, 1, ifp);
make[63] = 0;
if ((cp = strchr(make, ' ')))
{
strcpy(model, cp + 1);
*cp = 0;
}
raw_width = get2();
raw_height = get2();
load_raw = &CLASS unpacked_load_raw;
thumb_width = (get4(), get2());
thumb_height = get2();
write_thumb = &CLASS ppm_thumb;
maximum = 0x3fff;
}
void CLASS parse_phase_one(int base)
{
unsigned entries, tag, type, len, data, save, i, c;
float romm_cam[3][3];
char *cp;
memset(&ph1, 0, sizeof ph1);
fseek(ifp, base, SEEK_SET);
order = get4() & 0xffff;
if (get4() >> 8 != 0x526177)
return; /* "Raw" */
fseek(ifp, get4() + base, SEEK_SET);
entries = get4();
get4();
while (entries--)
{
tag = get4();
type = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek(ifp, base + data, SEEK_SET);
switch (tag)
{
#ifdef LIBRAW_LIBRARY_BUILD
case 0x0102:
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
if ((imgdata.shootinginfo.BodySerial[0] == 0x4c) && (imgdata.shootinginfo.BodySerial[1] == 0x49))
{
unique_id =
(((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[2] & 0x3f)) - 0x41;
}
else
{
unique_id =
(((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[1] & 0x3f)) - 0x41;
}
setPhaseOneFeatures(unique_id);
break;
case 0x0211:
imgdata.other.SensorTemperature2 = int_to_float(data);
break;
case 0x0401:
if (type == 4)
imgdata.lens.makernotes.CurAp = libraw_powf64l(2.0f, (int_to_float(data) / 2.0f));
else
imgdata.lens.makernotes.CurAp = libraw_powf64l(2.0f, (getreal(type) / 2.0f));
break;
case 0x0403:
if (type == 4)
imgdata.lens.makernotes.CurFocal = int_to_float(data);
else
imgdata.lens.makernotes.CurFocal = getreal(type);
break;
case 0x0410:
stmread(imgdata.lens.makernotes.body, len, ifp);
break;
case 0x0412:
stmread(imgdata.lens.makernotes.Lens, len, ifp);
break;
case 0x0414:
if (type == 4)
{
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64l(2.0f, (int_to_float(data) / 2.0f));
}
else
{
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64l(2.0f, (getreal(type) / 2.0f));
}
break;
case 0x0415:
if (type == 4)
{
imgdata.lens.makernotes.MinAp4CurFocal = libraw_powf64l(2.0f, (int_to_float(data) / 2.0f));
}
else
{
imgdata.lens.makernotes.MinAp4CurFocal = libraw_powf64l(2.0f, (getreal(type) / 2.0f));
}
break;
case 0x0416:
if (type == 4)
{
imgdata.lens.makernotes.MinFocal = int_to_float(data);
}
else
{
imgdata.lens.makernotes.MinFocal = getreal(type);
}
if (imgdata.lens.makernotes.MinFocal > 1000.0f)
{
imgdata.lens.makernotes.MinFocal = 0.0f;
}
break;
case 0x0417:
if (type == 4)
{
imgdata.lens.makernotes.MaxFocal = int_to_float(data);
}
else
{
imgdata.lens.makernotes.MaxFocal = getreal(type);
}
break;
#endif
case 0x100:
flip = "0653"[data & 3] - '0';
break;
case 0x106:
for (i = 0; i < 9; i++)
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.color.P1_color[0].romm_cam[i] =
#endif
((float *)romm_cam)[i] = getreal(11);
romm_coeff(romm_cam);
break;
case 0x107:
FORC3 cam_mul[c] = getreal(11);
break;
case 0x108:
raw_width = data;
break;
case 0x109:
raw_height = data;
break;
case 0x10a:
left_margin = data;
break;
case 0x10b:
top_margin = data;
break;
case 0x10c:
width = data;
break;
case 0x10d:
height = data;
break;
case 0x10e:
ph1.format = data;
break;
case 0x10f:
data_offset = data + base;
break;
case 0x110:
meta_offset = data + base;
meta_length = len;
break;
case 0x112:
ph1.key_off = save - 4;
break;
case 0x210:
ph1.tag_210 = int_to_float(data);
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.other.SensorTemperature = ph1.tag_210;
#endif
break;
case 0x21a:
ph1.tag_21a = data;
break;
case 0x21c:
strip_offset = data + base;
break;
case 0x21d:
ph1.t_black = data;
break;
case 0x222:
ph1.split_col = data;
break;
case 0x223:
ph1.black_col = data + base;
break;
case 0x224:
ph1.split_row = data;
break;
case 0x225:
ph1.black_row = data + base;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 0x226:
for (i = 0; i < 9; i++)
imgdata.color.P1_color[1].romm_cam[i] = getreal(11);
break;
#endif
case 0x301:
model[63] = 0;
fread(model, 1, 63, ifp);
if ((cp = strstr(model, " camera")))
*cp = 0;
}
fseek(ifp, save, SEEK_SET);
}
#ifdef LIBRAW_LIBRARY_BUILD
if (!imgdata.lens.makernotes.body[0] && !imgdata.shootinginfo.BodySerial[0])
{
fseek(ifp, meta_offset, SEEK_SET);
order = get2();
fseek(ifp, 6, SEEK_CUR);
fseek(ifp, meta_offset + get4(), SEEK_SET);
entries = get4();
get4();
while (entries--)
{
tag = get4();
len = get4();
data = get4();
save = ftell(ifp);
fseek(ifp, meta_offset + data, SEEK_SET);
if (tag == 0x0407)
{
stmread(imgdata.shootinginfo.BodySerial, len, ifp);
if ((imgdata.shootinginfo.BodySerial[0] == 0x4c) && (imgdata.shootinginfo.BodySerial[1] == 0x49))
{
unique_id =
(((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[2] & 0x3f)) - 0x41;
}
else
{
unique_id =
(((imgdata.shootinginfo.BodySerial[0] & 0x3f) << 5) | (imgdata.shootinginfo.BodySerial[1] & 0x3f)) - 0x41;
}
setPhaseOneFeatures(unique_id);
}
fseek(ifp, save, SEEK_SET);
}
}
#endif
load_raw = ph1.format < 3 ? &CLASS phase_one_load_raw : &CLASS phase_one_load_raw_c;
maximum = 0xffff;
strcpy(make, "Phase One");
if (model[0])
return;
switch (raw_height)
{
case 2060:
strcpy(model, "LightPhase");
break;
case 2682:
strcpy(model, "H 10");
break;
case 4128:
strcpy(model, "H 20");
break;
case 5488:
strcpy(model, "H 25");
break;
}
}
void CLASS parse_fuji(int offset)
{
unsigned entries, tag, len, save, c;
fseek(ifp, offset, SEEK_SET);
entries = get4();
if (entries > 255)
return;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_PARSEFUJI_PROCESSED;
#endif
while (entries--)
{
tag = get2();
len = get2();
save = ftell(ifp);
if (tag == 0x100)
{
raw_height = get2();
raw_width = get2();
}
else if (tag == 0x121)
{
height = get2();
if ((width = get2()) == 4284)
width += 3;
}
else if (tag == 0x130)
{
fuji_layout = fgetc(ifp) >> 7;
fuji_width = !(fgetc(ifp) & 8);
}
else if (tag == 0x131)
{
filters = 9;
FORC(36)
{
int q = fgetc(ifp);
xtrans_abs[0][35 - c] = MAX(0, MIN(q, 2)); /* & 3;*/
}
}
else if (tag == 0x2ff0)
{
FORC4 cam_mul[c ^ 1] = get2();
// IB start
#ifdef LIBRAW_LIBRARY_BUILD
}
else if (tag == 0x110)
{
imgdata.sizes.raw_crop.ctop = get2();
imgdata.sizes.raw_crop.cleft = get2();
}
else if (tag == 0x111)
{
imgdata.sizes.raw_crop.cheight = get2();
imgdata.sizes.raw_crop.cwidth = get2();
}
else if ((tag == 0x122) && !strcmp(model, "DBP for GX680"))
{
int k = get2();
int l = get2(); /* margins? */
int m = get2(); /* margins? */
int n = get2();
// printf ("==>>0x122: height= %d l= %d m= %d width= %d\n", k, l, m, n);
}
else if (tag == 0x9650)
{
short a = (short)get2();
float b = fMAX(1.0f, get2());
imgdata.makernotes.fuji.FujiExpoMidPointShift = a / b;
}
else if (tag == 0x2f00)
{
int nWBs = get4();
nWBs = MIN(nWBs, 6);
for (int wb_ind = 0; wb_ind < nWBs; wb_ind++)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Custom1 + wb_ind][c ^ 1] = get2();
fseek(ifp, 8, SEEK_CUR);
}
}
else if (tag == 0x2000)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Auto][c ^ 1] = get2();
}
else if (tag == 0x2100)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FineWeather][c ^ 1] = get2();
}
else if (tag == 0x2200)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Shade][c ^ 1] = get2();
}
else if (tag == 0x2300)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_D][c ^ 1] = get2();
}
else if (tag == 0x2301)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_N][c ^ 1] = get2();
}
else if (tag == 0x2302)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_WW][c ^ 1] = get2();
}
else if (tag == 0x2310)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_FL_L][c ^ 1] = get2();
}
else if (tag == 0x2400)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Tungsten][c ^ 1] = get2();
}
else if (tag == 0x2410)
{
FORC4 imgdata.color.WB_Coeffs[LIBRAW_WBI_Flash][c ^ 1] = get2();
#endif
// IB end
}
else if (tag == 0xc000)
/* 0xc000 tag versions, second ushort; valid if the first ushort is 0
X100F 0x0259
X100T 0x0153
X-E2 0x014f 0x024f depends on firmware
X-A1 0x014e
XQ2 0x0150
XQ1 0x0150
X100S 0x0149 0x0249 depends on firmware
X30 0x0152
X20 0x0146
X-T10 0x0154
X-T2 0x0258
X-M1 0x014d
X-E2s 0x0355
X-A2 0x014e
X-T20 0x025b
GFX 50S 0x025a
X-T1 0x0151 0x0251 0x0351 depends on firmware
X70 0x0155
X-Pro2 0x0255
*/
{
c = order;
order = 0x4949;
if ((tag = get4()) > 10000)
tag = get4();
if (tag > 10000)
tag = get4();
width = tag;
height = get4();
#ifdef LIBRAW_LIBRARY_BUILD
if (!strcmp(model, "X-A3") ||
!strcmp(model, "X-A10") ||
!strcmp(model, "X-A5") ||
!strcmp(model, "X-A20"))
{
int wb[4];
int nWB, tWB, pWB;
int iCCT = 0;
int cnt;
fseek(ifp, save + 0x200, SEEK_SET);
for (int wb_ind = 0; wb_ind < 42; wb_ind++)
{
nWB = get4();
tWB = get4();
wb[0] = get4() << 1;
wb[1] = get4();
wb[3] = get4();
wb[2] = get4() << 1;
if (tWB && (iCCT < 255))
{
imgdata.color.WBCT_Coeffs[iCCT][0] = tWB;
for (cnt = 0; cnt < 4; cnt++)
imgdata.color.WBCT_Coeffs[iCCT][cnt + 1] = wb[cnt];
iCCT++;
}
if (nWB != 70)
{
for (pWB = 1; pWB < nFuji_wb_list2; pWB += 2)
{
if (Fuji_wb_list2[pWB] == nWB)
{
for (cnt = 0; cnt < 4; cnt++)
imgdata.color.WB_Coeffs[Fuji_wb_list2[pWB - 1]][cnt] = wb[cnt];
break;
}
}
}
}
}
else
{
libraw_internal_data.unpacker_data.posRAFData = save;
libraw_internal_data.unpacker_data.lenRAFData = (len >> 1);
}
#endif
order = c;
}
fseek(ifp, save + len, SEEK_SET);
}
height <<= fuji_layout;
width >>= fuji_layout;
}
int CLASS parse_jpeg(int offset)
{
int len, save, hlen, mark;
fseek(ifp, offset, SEEK_SET);
if (fgetc(ifp) != 0xff || fgetc(ifp) != 0xd8)
return 0;
while (fgetc(ifp) == 0xff && (mark = fgetc(ifp)) != 0xda)
{
order = 0x4d4d;
len = get2() - 2;
save = ftell(ifp);
if (mark == 0xc0 || mark == 0xc3 || mark == 0xc9)
{
fgetc(ifp);
raw_height = get2();
raw_width = get2();
}
order = get2();
hlen = get4();
if (get4() == 0x48454150
#ifdef LIBRAW_LIBRARY_BUILD
&& (save + hlen) >= 0 && (save + hlen) <= ifp->size()
#endif
) /* "HEAP" */
{
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff(save + hlen, len - hlen, 0);
}
if (parse_tiff(save + 6))
apply_tiff();
fseek(ifp, save + len, SEEK_SET);
}
return 1;
}
void CLASS parse_riff()
{
unsigned i, size, end;
char tag[4], date[64], month[64];
static const char mon[12][4] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
struct tm t;
order = 0x4949;
fread(tag, 4, 1, ifp);
size = get4();
end = ftell(ifp) + size;
if (!memcmp(tag, "RIFF", 4) || !memcmp(tag, "LIST", 4))
{
int maxloop = 1000;
get4();
while (ftell(ifp) + 7 < end && !feof(ifp) && maxloop--)
parse_riff();
}
else if (!memcmp(tag, "nctg", 4))
{
while (ftell(ifp) + 7 < end)
{
i = get2();
size = get2();
if ((i + 1) >> 1 == 10 && size == 20)
get_timestamp(0);
else
fseek(ifp, size, SEEK_CUR);
}
}
else if (!memcmp(tag, "IDIT", 4) && size < 64)
{
fread(date, 64, 1, ifp);
date[size] = 0;
memset(&t, 0, sizeof t);
if (sscanf(date, "%*s %s %d %d:%d:%d %d", month, &t.tm_mday, &t.tm_hour, &t.tm_min, &t.tm_sec, &t.tm_year) == 6)
{
for (i = 0; i < 12 && strcasecmp(mon[i], month); i++)
;
t.tm_mon = i;
t.tm_year -= 1900;
if (mktime(&t) > 0)
timestamp = mktime(&t);
}
}
else
fseek(ifp, size, SEEK_CUR);
}
void CLASS parse_qt(int end)
{
unsigned save, size;
char tag[4];
order = 0x4d4d;
while (ftell(ifp) + 7 < end)
{
save = ftell(ifp);
if ((size = get4()) < 8)
return;
if ((int)size < 0) return; // 2+GB is too much
if (save + size < save) return; // 32bit overflow
fread(tag, 4, 1, ifp);
if (!memcmp(tag, "moov", 4) || !memcmp(tag, "udta", 4) || !memcmp(tag, "CNTH", 4))
parse_qt(save + size);
if (!memcmp(tag, "CNDA", 4))
parse_jpeg(ftell(ifp));
fseek(ifp, save + size, SEEK_SET);
}
}
void CLASS parse_smal(int offset, int fsize)
{
int ver;
fseek(ifp, offset + 2, SEEK_SET);
order = 0x4949;
ver = fgetc(ifp);
if (ver == 6)
fseek(ifp, 5, SEEK_CUR);
if (get4() != fsize)
return;
if (ver > 6)
data_offset = get4();
raw_height = height = get2();
raw_width = width = get2();
strcpy(make, "SMaL");
sprintf(model, "v%d %dx%d", ver, width, height);
if (ver == 6)
load_raw = &CLASS smal_v6_load_raw;
if (ver == 9)
load_raw = &CLASS smal_v9_load_raw;
}
void CLASS parse_cine()
{
unsigned off_head, off_setup, off_image, i;
order = 0x4949;
fseek(ifp, 4, SEEK_SET);
is_raw = get2() == 2;
fseek(ifp, 14, SEEK_CUR);
is_raw *= get4();
off_head = get4();
off_setup = get4();
off_image = get4();
timestamp = get4();
if ((i = get4()))
timestamp = i;
fseek(ifp, off_head + 4, SEEK_SET);
raw_width = get4();
raw_height = get4();
switch (get2(), get2())
{
case 8:
load_raw = &CLASS eight_bit_load_raw;
break;
case 16:
load_raw = &CLASS unpacked_load_raw;
}
fseek(ifp, off_setup + 792, SEEK_SET);
strcpy(make, "CINE");
sprintf(model, "%d", get4());
fseek(ifp, 12, SEEK_CUR);
switch ((i = get4()) & 0xffffff)
{
case 3:
filters = 0x94949494;
break;
case 4:
filters = 0x49494949;
break;
default:
is_raw = 0;
}
fseek(ifp, 72, SEEK_CUR);
switch ((get4() + 3600) % 360)
{
case 270:
flip = 4;
break;
case 180:
flip = 1;
break;
case 90:
flip = 7;
break;
case 0:
flip = 2;
}
cam_mul[0] = getreal(11);
cam_mul[2] = getreal(11);
maximum = ~((~0u) << get4());
fseek(ifp, 668, SEEK_CUR);
shutter = get4() / 1000000000.0;
fseek(ifp, off_image, SEEK_SET);
if (shot_select < is_raw)
fseek(ifp, shot_select * 8, SEEK_CUR);
data_offset = (INT64)get4() + 8;
data_offset += (INT64)get4() << 32;
}
void CLASS parse_redcine()
{
unsigned i, len, rdvo;
order = 0x4d4d;
is_raw = 0;
fseek(ifp, 52, SEEK_SET);
width = get4();
height = get4();
fseek(ifp, 0, SEEK_END);
fseek(ifp, -(i = ftello(ifp) & 511), SEEK_CUR);
if (get4() != i || get4() != 0x52454f42)
{
#ifdef DCRAW_VERBOSE
fprintf(stderr, _("%s: Tail is missing, parsing from head...\n"), ifname);
#endif
fseek(ifp, 0, SEEK_SET);
while ((len = get4()) != EOF)
{
if (get4() == 0x52454456)
if (is_raw++ == shot_select)
data_offset = ftello(ifp) - 8;
fseek(ifp, len - 8, SEEK_CUR);
}
}
else
{
rdvo = get4();
fseek(ifp, 12, SEEK_CUR);
is_raw = get4();
fseeko(ifp, rdvo + 8 + shot_select * 4, SEEK_SET);
data_offset = get4();
}
}
//@end COMMON
char *CLASS foveon_gets(int offset, char *str, int len)
{
int i;
fseek(ifp, offset, SEEK_SET);
for (i = 0; i < len - 1; i++)
if ((str[i] = get2()) == 0)
break;
str[i] = 0;
return str;
}
/*
  Parse the section directory of a Sigma/Foveon X3F file.

  The last 4 bytes of the file point at a directory whose magic is
  "SECd" (0x64434553).  Each entry gives the offset, length and tag of
  one section; every section must begin with "SEC " combined with the
  tag's last byte.  Recognized sections:
    IMAG/IMA2 (0x47414d49/0x32414d49) - raw image data, possibly
              doubling as an embedded JPEG thumbnail
    CAMF      (0x464d4143)            - camera metadata blob
    PROP      (0x504f5250)            - name/value property strings
              (make, model, ISO, exposure, lens data, ...)

  Fixes vs. the previous revision:
  - restored "&timestamp" in the LOCALTIME branch (the text had been
    mangled into an invalid multiplication-sign token);
  - moved "#endif" of the LIBRAW_LIBRARY_BUILD block before the
    for-loop's closing brace, which was otherwise unbalanced whenever
    that macro is defined (non-LIBRAW token stream is unchanged).
*/
void CLASS parse_foveon()
{
  int entries, img = 0, off, len, tag, save, i, wide, high, pent, poff[256][2];
  char name[64], value[64];
  order = 0x4949; /* Little-endian */
  fseek(ifp, 36, SEEK_SET);
  flip = get4();
  fseek(ifp, -4, SEEK_END);
  fseek(ifp, get4(), SEEK_SET); /* last word of the file -> directory */
  if (get4() != 0x64434553)
    return; /* SECd */
  entries = (get4(), get4()); /* skip a word, keep the entry count */
  while (entries--)
  {
    off = get4();
    len = get4();
    tag = get4();
    save = ftell(ifp); /* position of the next directory entry */
    fseek(ifp, off, SEEK_SET);
    if (get4() != (0x20434553 | (tag << 24))) /* "SEC" + tag's last byte */
      return;
    switch (tag)
    {
    case 0x47414d49: /* IMAG */
    case 0x32414d49: /* IMA2 */
      fseek(ifp, 8, SEEK_CUR);
      pent = get4(); /* pixel format code */
      wide = get4();
      high = get4();
      if (wide > raw_width && high > raw_height)
      {
        switch (pent)
        {
        case 5:
          load_flags = 1;
          /* intentional fall-through: format 5 is the SD loader with
             load_flags set */
        case 6:
          load_raw = &CLASS foveon_sd_load_raw;
          break;
        case 30:
          load_raw = &CLASS foveon_dp_load_raw;
          break;
        default:
          load_raw = 0;
        }
        raw_width = wide;
        raw_height = high;
        data_offset = off + 28;
        is_foveon = 1;
      }
      /* A JPEG SOI marker (ff d8) right after the 28-byte section
         header means this image can serve as the thumbnail. */
      fseek(ifp, off + 28, SEEK_SET);
      if (fgetc(ifp) == 0xff && fgetc(ifp) == 0xd8 && thumb_length < len - 28)
      {
        thumb_offset = off + 28;
        thumb_length = len - 28;
        write_thumb = &CLASS jpeg_thumb;
      }
      if (++img == 2 && !thumb_length) /* no JPEG: use 2nd image as thumb */
      {
        thumb_offset = off + 24;
        thumb_width = wide;
        thumb_height = high;
        write_thumb = &CLASS foveon_thumb;
      }
      break;
    case 0x464d4143: /* CAMF */
      meta_offset = off + 8;
      meta_length = len - 28;
      break;
    case 0x504f5250: /* PROP */
      pent = (get4(), get4()); /* skip a word, keep the property count */
      fseek(ifp, 12, SEEK_CUR);
      off += pent * 8 + 24; /* base of the string data */
      if ((unsigned)pent > 256)
        pent = 256; /* clamp to the capacity of poff[] */
      /* Each property is a pair of offsets (name, value) into the
         two-byte-character string area; units are characters, hence *2. */
      for (i = 0; i < pent * 2; i++)
        ((int *)poff)[i] = off + get4() * 2;
      for (i = 0; i < pent; i++)
      {
        foveon_gets(poff[i][0], name, 64);
        foveon_gets(poff[i][1], value, 64);
        if (!strcmp(name, "ISO"))
          iso_speed = atoi(value);
        if (!strcmp(name, "CAMMANUF"))
          strcpy(make, value);
        if (!strcmp(name, "CAMMODEL"))
          strcpy(model, value);
        if (!strcmp(name, "WB_DESC"))
          strcpy(model2, value);
        if (!strcmp(name, "TIME"))
          timestamp = atoi(value);
        if (!strcmp(name, "EXPTIME"))
          shutter = atoi(value) / 1000000.0; /* microseconds -> seconds */
        if (!strcmp(name, "APERTURE"))
          aperture = atof(value);
        if (!strcmp(name, "FLENGTH"))
          focal_len = atof(value);
#ifdef LIBRAW_LIBRARY_BUILD
        if (!strcmp(name, "CAMSERIAL"))
          strcpy(imgdata.shootinginfo.BodySerial, value);
        if (!strcmp(name, "FLEQ35MM"))
          imgdata.lens.makernotes.FocalLengthIn35mmFormat = atof(value);
        if (!strcmp(name, "IMAGERTEMP"))
          imgdata.other.SensorTemperature = atof(value);
        if (!strcmp(name, "LENSARANGE"))
        {
          /* "min max" aperture pair; a single number means min == max */
          char *sp;
          imgdata.lens.makernotes.MaxAp4CurFocal = imgdata.lens.makernotes.MinAp4CurFocal = atof(value);
          sp = strrchr(value, ' ');
          if (sp)
          {
            imgdata.lens.makernotes.MinAp4CurFocal = atof(sp);
            if (imgdata.lens.makernotes.MaxAp4CurFocal > imgdata.lens.makernotes.MinAp4CurFocal)
              my_swap(float, imgdata.lens.makernotes.MaxAp4CurFocal, imgdata.lens.makernotes.MinAp4CurFocal);
          }
        }
        if (!strcmp(name, "LENSFRANGE"))
        {
          /* "min max" focal-length pair; 0.17f tolerance absorbs
             rounding before deciding the pair is reversed */
          char *sp;
          imgdata.lens.makernotes.MinFocal = imgdata.lens.makernotes.MaxFocal = atof(value);
          sp = strrchr(value, ' ');
          if (sp)
          {
            imgdata.lens.makernotes.MaxFocal = atof(sp);
            if ((imgdata.lens.makernotes.MaxFocal + 0.17f) < imgdata.lens.makernotes.MinFocal)
              my_swap(float, imgdata.lens.makernotes.MaxFocal, imgdata.lens.makernotes.MinFocal);
          }
        }
        if (!strcmp(name, "LENSMODEL"))
        {
          char *sp;
          imgdata.lens.makernotes.LensID = strtol(value, &sp, 16); // atoi(value);
          if (imgdata.lens.makernotes.LensID)
            imgdata.lens.makernotes.LensMount = Sigma_X3F;
        }
#endif
      }
#ifdef LOCALTIME
      /* TIME property is UTC; reinterpret it as local time */
      timestamp = mktime(gmtime(&timestamp));
#endif
    }
    fseek(ifp, save, SEEK_SET); /* resume at the next directory entry */
  }
}
//@out COMMON
/*
All matrices are from Adobe DNG Converter unless otherwise noted.
*/
void CLASS adobe_coeff(const char *t_make, const char *t_model
#ifdef LIBRAW_LIBRARY_BUILD
,
int internal_only
#endif
)
{
// clang-format off
static const struct
{
const char *prefix;
int t_black, t_maximum, trans[12];
} table[] = {
{ "AgfaPhoto DC-833m", 0, 0, /* DJC */
{ 11438,-3762,-1115,-2409,9914,2497,-1227,2295,5300 } },
{ "Apple QuickTake", 0, 0, /* DJC */
{ 21392,-5653,-3353,2406,8010,-415,7166,1427,2078 } },
{"Broadcom RPi IMX219", 66, 0x3ff,
{ 5302,1083,-728,-5320,14112,1699,-863,2371,5136 } }, /* LibRaw */
{ "Broadcom RPi OV5647", 16, 0x3ff,
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } }, /* DJC */
{ "Canon EOS D2000", 0, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Canon EOS D6000", 0, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Canon EOS D30", 0, 0, /* updated */
{ 9900,-2771,-1324,-7072,14229,3140,-2790,3344,8861 } },
{ "Canon EOS D60", 0, 0xfa0, /* updated */
{ 6211,-1358,-896,-8557,15766,3012,-3001,3507,8567 } },
{ "Canon EOS 5DS", 0, 0x3c96,
{ 6250,-711,-808,-5153,12794,2636,-1249,2198,5610 } },
{ "Canon EOS 5D Mark IV", 0, 0,
{ 6446,-366,-864,-4436,12204,2513,-952,2496,6348 } },
{ "Canon EOS 5D Mark III", 0, 0x3c80,
{ 6722,-635,-963,-4287,12460,2028,-908,2162,5668 } },
{ "Canon EOS 5D Mark II", 0, 0x3cf0,
{ 4716,603,-830,-7798,15474,2480,-1496,1937,6651 } },
{ "Canon EOS 5D", 0, 0xe6c,
{ 6347,-479,-972,-8297,15954,2480,-1968,2131,7649 } },
{ "Canon EOS 6D Mark II", 0, 0x38de,
{ 6875,-970,-932,-4691,12459,2501,-874,1953,5809 } },
{ "Canon EOS 6D", 0, 0x3c82,
{7034, -804, -1014, -4420, 12564, 2058, -851, 1994, 5758 } },
{ "Canon EOS 77D", 0, 0,
{ 7377,-742,-998,-4235,11981,2549,-673,1918,5538 } },
{ "Canon EOS 7D Mark II", 0, 0x3510,
{ 7268,-1082,-969,-4186,11839,2663,-825,2029,5839 } },
{ "Canon EOS 7D", 0, 0x3510,
{ 6844,-996,-856,-3876,11761,2396,-593,1772,6198 } },
{ "Canon EOS 800D", 0, 0,
{ 6970,-512,-968,-4425,12161,2553,-739,1982,5601 } },
{ "Canon EOS 80D", 0, 0,
{ 7457,-671,-937,-4849,12495,2643,-1213,2354,5492 } },
{ "Canon EOS 10D", 0, 0xfa0, /* updated */
{ 8250,-2044,-1127,-8092,15606,2664,-2893,3453,8348 } },
{ "Canon EOS 200D", 0, 0,
{ 7377,-742,-998,-4235,11981,2549,-673,1918,5538 } },
{ "Canon EOS 20Da", 0, 0,
{ 14155,-5065,-1382,-6550,14633,2039,-1623,1824,6561 } },
{ "Canon EOS 20D", 0, 0xfff,
{ 6599,-537,-891,-8071,15783,2424,-1983,2234,7462 } },
{ "Canon EOS 30D", 0, 0,
{ 6257,-303,-1000,-7880,15621,2396,-1714,1904,7046 } },
{ "Canon EOS 40D", 0, 0x3f60,
{ 6071,-747,-856,-7653,15365,2441,-2025,2553,7315 } },
{ "Canon EOS 50D", 0, 0x3d93,
{ 4920,616,-593,-6493,13964,2784,-1774,3178,7005 } },
{ "Canon EOS 60Da", 0, 0x2ff7, /* added */
{ 17492,-7240,-2023,-1791,10323,1701,-186,1329,5406 } },
{ "Canon EOS 60D", 0, 0x2ff7,
{ 6719,-994,-925,-4408,12426,2211,-887,2129,6051 } },
{ "Canon EOS 70D", 0, 0x3bc7,
{ 7034,-804,-1014,-4420,12564,2058,-851,1994,5758 } },
{ "Canon EOS 100D", 0, 0x350f,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 300D", 0, 0xfa0, /* updated */
{ 8250,-2044,-1127,-8092,15606,2664,-2893,3453,8348 } },
{ "Canon EOS 350D", 0, 0xfff,
{ 6018,-617,-965,-8645,15881,2975,-1530,1719,7642 } },
{ "Canon EOS 400D", 0, 0xe8e,
{ 7054,-1501,-990,-8156,15544,2812,-1278,1414,7796 } },
{ "Canon EOS 450D", 0, 0x390d,
{ 5784,-262,-821,-7539,15064,2672,-1982,2681,7427 } },
{ "Canon EOS 500D", 0, 0x3479,
{ 4763,712,-646,-6821,14399,2640,-1921,3276,6561 } },
{ "Canon EOS 550D", 0, 0x3dd7,
{ 6941,-1164,-857,-3825,11597,2534,-416,1540,6039 } },
{ "Canon EOS 600D", 0, 0x3510,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 650D", 0, 0x354d,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 750D", 0, 0x3c00,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS 760D", 0, 0x3c00,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS 700D", 0, 0x3c00,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS 1000D", 0, 0xe43,
{ 6771,-1139,-977,-7818,15123,2928,-1244,1437,7533 } },
{ "Canon EOS 1100D", 0, 0x3510,
{ 6444,-904,-893,-4563,12308,2535,-903,2016,6728 } },
{ "Canon EOS 1200D", 0, 0x37c2,
{ 6461,-907,-882,-4300,12184,2378,-819,1944,5931 } },
{ "Canon EOS 1300D", 0, 0x37c2,
{ 6939,-1016,-866,-4428,12473,2177,-1175,2178,6162 } },
{ "Canon EOS M6", 0, 0,
{ 8532,-701,-1167,-4095,11879,2508,-797,2424,7010 } },
{ "Canon EOS M5", 0, 0,
{ 8532,-701,-1167,-4095,11879,2508,-797,2424,7010 } },
{ "Canon EOS M3", 0, 0,
{ 6362,-823,-847,-4426,12109,2616,-743,1857,5635 } },
{ "Canon EOS M2", 0, 0, /* added */
{ 6400,-480,-888,-5294,13416,2047,-1296,2203,6137 } },
{ "Canon EOS M100", 0, 0,
{ 8532,-701,-1167,-4095,11879,2508,-797,2424,7010 } },
{ "Canon EOS M10", 0, 0,
{ 6400,-480,-888,-5294,13416,2047,-1296,2203,6137 } },
{ "Canon EOS M", 0, 0,
{ 6602,-841,-939,-4472,12458,2247,-975,2039,6148 } },
{ "Canon EOS-1Ds Mark III", 0, 0x3bb0,
{ 5859,-211,-930,-8255,16017,2353,-1732,1887,7448 } },
{ "Canon EOS-1Ds Mark II", 0, 0xe80,
{ 6517,-602,-867,-8180,15926,2378,-1618,1771,7633 } },
{ "Canon EOS-1D Mark IV", 0, 0x3bb0,
{ 6014,-220,-795,-4109,12014,2361,-561,1824,5787 } },
{ "Canon EOS-1D Mark III", 0, 0x3bb0,
{ 6291,-540,-976,-8350,16145,2311,-1714,1858,7326 } },
{ "Canon EOS-1D Mark II N", 0, 0xe80,
{ 6240,-466,-822,-8180,15825,2500,-1801,1938,8042 } },
{ "Canon EOS-1D Mark II", 0, 0xe80,
{ 6264,-582,-724,-8312,15948,2504,-1744,1919,8664 } },
{ "Canon EOS-1DS", 0, 0xe20, /* updated */
{ 3925,4060,-1739,-8973,16552,2545,-3287,3945,8243 } },
{ "Canon EOS-1D C", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D X Mark II", 0, 0x3c4e, /* updated */
{ 7596,-978,-967,-4808,12571,2503,-1398,2567,5752 } },
{ "Canon EOS-1D X", 0, 0x3c4e,
{ 6847,-614,-1014,-4669,12737,2139,-1197,2488,6846 } },
{ "Canon EOS-1D", 0, 0xe20,
{ 6806,-179,-1020,-8097,16415,1687,-3267,4236,7690 } },
{ "Canon EOS C500", 853, 0, /* DJC */
{ 17851,-10604,922,-7425,16662,763,-3660,3636,22278 } },
{"Canon PowerShot 600", 0, 0, /* added */
{ -3822,10019,1311,4085,-157,3386,-5341,10829,4812,-1969,10969,1126 } },
{ "Canon PowerShot A530", 0, 0,
{ 0 } }, /* don't want the A5 matrix */
{ "Canon PowerShot A50", 0, 0,
{ -5300,9846,1776,3436,684,3939,-5540,9879,6200,-1404,11175,217 } },
{ "Canon PowerShot A5", 0, 0,
{ -4801,9475,1952,2926,1611,4094,-5259,10164,5947,-1554,10883,547 } },
{ "Canon PowerShot G10", 0, 0,
{ 11093,-3906,-1028,-5047,12492,2879,-1003,1750,5561 } },
{ "Canon PowerShot G11", 0, 0,
{ 12177,-4817,-1069,-1612,9864,2049,-98,850,4471 } },
{ "Canon PowerShot G12", 0, 0,
{ 13244,-5501,-1248,-1508,9858,1935,-270,1083,4366 } },
{ "Canon PowerShot G15", 0, 0,
{ 7474,-2301,-567,-4056,11456,2975,-222,716,4181 } },
{ "Canon PowerShot G16", 0, 0, /* updated */
{ 8020,-2687,-682,-3704,11879,2052,-965,1921,5556 } },
{ "Canon PowerShot G1 X Mark III", 0, 0,
{ 8532,-701,-1167,-4095,11879,2508,-797,2424,7010 } },
{ "Canon PowerShot G1 X Mark II", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1 X", 0, 0,
{ 7378,-1255,-1043,-4088,12251,2048,-876,1946,5805 } },
{ "Canon PowerShot G1", 0, 0, /* updated */
{ -5686,10300,2223,4725,-1157,4383,-6128,10783,6163,-2688,12093,604 } },
{ "Canon PowerShot G2", 0, 0, /* updated */
{ 9194,-2787,-1059,-8098,15657,2608,-2610,3064,7867 } },
{ "Canon PowerShot G3 X", 0, 0,
{ 9701,-3857,-921,-3149,11537,1817,-786,1817,5147 } },
{ "Canon PowerShot G3", 0, 0, /* updated */
{ 9326,-2882,-1084,-7940,15447,2677,-2620,3090,7740 } },
{ "Canon PowerShot G5 X",0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G5", 0, 0, /* updated */
{ 9869,-2972,-942,-7314,15098,2369,-1898,2536,7282 } },
{ "Canon PowerShot G6", 0, 0,
{ 9877,-3775,-871,-7613,14807,3072,-1448,1305,7485 } },
{ "Canon PowerShot G7 X Mark II", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G7 X", 0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G9 X Mark II", 0, 0,
{ 10056,-4131,-944,-2576,11143,1625,-238,1294,5179 } },
{ "Canon PowerShot G9 X",0, 0,
{ 9602,-3823,-937,-2984,11495,1675,-407,1415,5049 } },
{ "Canon PowerShot G9", 0, 0,
{ 7368,-2141,-598,-5621,13254,2625,-1418,1696,5743 } },
{ "Canon PowerShot Pro1", 0, 0,
{ 10062,-3522,-999,-7643,15117,2730,-765,817,7323 } },
{ "Canon PowerShot Pro70", 34, 0, /* updated */
{ -5106,10695,1576,3820,53,4566,-6497,10736,6701,-3336,11887,1394 } },
{ "Canon PowerShot Pro90", 0, 0, /* updated */
{ -5912,10768,2288,4612,-989,4333,-6153,10897,5944,-2907,12288,624 } },
{ "Canon PowerShot S30", 0, 0, /* updated */
{ 10744,-3813,-1142,-7962,15966,2075,-2492,2805,7744 } },
{ "Canon PowerShot S40", 0, 0, /* updated */
{ 8606,-2573,-949,-8237,15489,2974,-2649,3076,9100 } },
{ "Canon PowerShot S45", 0, 0, /* updated */
{ 8251,-2410,-964,-8047,15430,2823,-2380,2824,8119 } },
{ "Canon PowerShot S50", 0, 0, /* updated */
{ 8979,-2658,-871,-7721,15500,2357,-1773,2366,6634 } },
{ "Canon PowerShot S60", 0, 0,
{ 8795,-2482,-797,-7804,15403,2573,-1422,1996,7082 } },
{ "Canon PowerShot S70", 0, 0,
{ 9976,-3810,-832,-7115,14463,2906,-901,989,7889 } },
{ "Canon PowerShot S90", 0, 0,
{ 12374,-5016,-1049,-1677,9902,2078,-83,852,4683 } },
{ "Canon PowerShot S95", 0, 0,
{ 13440,-5896,-1279,-1236,9598,1931,-180,1001,4651 } },
{ "Canon PowerShot S120", 0, 0,
{ 6961,-1685,-695,-4625,12945,1836,-1114,2152,5518 } },
{ "Canon PowerShot S110", 0, 0,
{ 8039,-2643,-654,-3783,11230,2930,-206,690,4194 } },
{ "Canon PowerShot S100", 0, 0,
{ 7968,-2565,-636,-2873,10697,2513,180,667,4211 } },
{ "Canon PowerShot SX1 IS", 0, 0,
{ 6578,-259,-502,-5974,13030,3309,-308,1058,4970 } },
{ "Canon PowerShot SX50 HS", 0, 0,
{ 12432,-4753,-1247,-2110,10691,1629,-412,1623,4926 } },
{ "Canon PowerShot SX60 HS", 0, 0,
{ 13161,-5451,-1344,-1989,10654,1531,-47,1271,4955 } },
{ "Canon PowerShot A3300", 0, 0, /* DJC */
{ 10826,-3654,-1023,-3215,11310,1906,0,999,4960 } },
{ "Canon PowerShot A470", 0, 0, /* DJC */
{ 12513,-4407,-1242,-2680,10276,2405,-878,2215,4734 } },
{ "Canon PowerShot A610", 0, 0, /* DJC */
{ 15591,-6402,-1592,-5365,13198,2168,-1300,1824,5075 } },
{ "Canon PowerShot A620", 0, 0, /* DJC */
{ 15265,-6193,-1558,-4125,12116,2010,-888,1639,5220 } },
{ "Canon PowerShot A630", 0, 0, /* DJC */
{ 14201,-5308,-1757,-6087,14472,1617,-2191,3105,5348 } },
{ "Canon PowerShot A640", 0, 0, /* DJC */
{ 13124,-5329,-1390,-3602,11658,1944,-1612,2863,4885 } },
{ "Canon PowerShot A650", 0, 0, /* DJC */
{ 9427,-3036,-959,-2581,10671,1911,-1039,1982,4430 } },
{ "Canon PowerShot A720", 0, 0, /* DJC */
{ 14573,-5482,-1546,-1266,9799,1468,-1040,1912,3810 } },
{ "Canon PowerShot D10", 127, 0, /* DJC */
{ 14052,-5229,-1156,-1325,9420,2252,-498,1957,4116 } },
{ "Canon PowerShot S3 IS", 0, 0, /* DJC */
{ 14062,-5199,-1446,-4712,12470,2243,-1286,2028,4836 } },
{ "Canon PowerShot SX110 IS", 0, 0, /* DJC */
{ 14134,-5576,-1527,-1991,10719,1273,-1158,1929,3581 } },
{ "Canon PowerShot SX220", 0, 0, /* DJC */
{ 13898,-5076,-1447,-1405,10109,1297,-244,1860,3687 } },
{ "Canon IXUS 160", 0, 0, /* DJC */
{ 11657,-3781,-1136,-3544,11262,2283,-160,1219,4700 } },
{ "Casio EX-F1", 0, 0, /* added */
{ 9084,-2016,-848,-6711,14351,2570,-1059,1725,6135 } },
{ "Casio EX-FH100", 0, 0, /* added */
{ 12771,-4179,-1558,-2149,10938,1375,-453,1751,4494 } },
{ "Casio EX-S20", 0, 0, /* DJC */
{ 11634,-3924,-1128,-4968,12954,2015,-1588,2648,7206 } },
{ "Casio EX-Z750", 0, 0, /* DJC */
{ 10819,-3873,-1099,-4903,13730,1175,-1755,3751,4632 } },
{ "Casio EX-Z10", 128, 0xfff, /* DJC */
{ 9790,-3338,-603,-2321,10222,2099,-344,1273,4799 } },
{ "CINE 650", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE 660", 0, 0,
{ 3390,480,-500,-800,3610,340,-550,2336,1192 } },
{ "CINE", 0, 0,
{ 20183,-4295,-423,-3940,15330,3985,-280,4870,9800 } },
{ "Contax N Digital", 0, 0xf1e,
{ 7777,1285,-1053,-9280,16543,2916,-3677,5679,7060 } },
{ "DXO ONE", 0, 0,
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Epson R-D1", 0, 0,
{ 6827,-1878,-732,-8429,16012,2564,-704,592,7145 } },
{ "Fujifilm E550", 0, 0, /* updated */
{ 11044,-3888,-1120,-7248,15167,2208,-1531,2276,8069 } },
{ "Fujifilm E900", 0, 0,
{ 9183,-2526,-1078,-7461,15071,2574,-2022,2440,8639 } },
{ "Fujifilm F5", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F6", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F77", 0, 0xfe9,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm F7", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm F810", 0, 0, /* added */
{ 11044,-3888,-1120,-7248,15167,2208,-1531,2276,8069 } },
{ "Fujifilm F8", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm S100FS", 514, 0,
{ 11521,-4355,-1065,-6524,13767,3058,-1466,1984,6045 } },
{ "Fujifilm S1", 0, 0,
{ 12297,-4882,-1202,-2106,10691,1623,-88,1312,4790 } },
{ "Fujifilm S20Pro", 0, 0,
{ 10004,-3219,-1201,-7036,15047,2107,-1863,2565,7736 } },
{ "Fujifilm S20", 512, 0x3fff,
{ 11401,-4498,-1312,-5088,12751,2613,-838,1568,5941 } },
{ "Fujifilm S2Pro", 128, 0, /* updated */
{ 12741,-4916,-1420,-8510,16791,1715,-1767,2302,7771 } },
{ "Fujifilm S3Pro", 0, 0,
{ 11807,-4612,-1294,-8927,16968,1988,-2120,2741,8006 } },
{ "Fujifilm S5Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm S5000", 0, 0,
{ 8754,-2732,-1019,-7204,15069,2276,-1702,2334,6982 } },
{ "Fujifilm S5100", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5500", 0, 0,
{ 11940,-4431,-1255,-6766,14428,2542,-993,1165,7421 } },
{ "Fujifilm S5200", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S5600", 0, 0,
{ 9636,-2804,-988,-7442,15040,2589,-1803,2311,8621 } },
{ "Fujifilm S6", 0, 0,
{ 12628,-4887,-1401,-6861,14996,1962,-2198,2782,7091 } },
{ "Fujifilm S7000", 0, 0,
{ 10190,-3506,-1312,-7153,15051,2238,-2003,2399,7505 } },
{ "Fujifilm S9000", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9500", 0, 0,
{ 10491,-3423,-1145,-7385,15027,2538,-1809,2275,8692 } },
{ "Fujifilm S9100", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm S9600", 0, 0,
{ 12343,-4515,-1285,-7165,14899,2435,-1895,2496,8800 } },
{ "Fujifilm SL1000", 0, 0,
{ 11705,-4262,-1107,-2282,10791,1709,-555,1713,4945 } },
{ "Fujifilm IS-1", 0, 0,
{ 21461,-10807,-1441,-2332,10599,1999,289,875,7703 } },
{ "Fujifilm IS Pro", 0, 0,
{ 12300,-5110,-1304,-9117,17143,1998,-1947,2448,8100 } },
{ "Fujifilm HS10 HS11", 0, 0xf68,
{ 12440,-3954,-1183,-1123,9674,1708,-83,1614,4086 } },
{ "Fujifilm HS2", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS3", 0, 0,
{ 13690,-5358,-1474,-3369,11600,1998,-132,1554,4395 } },
{ "Fujifilm HS50EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm F900EXR", 0, 0,
{ 12085,-4727,-953,-3257,11489,2002,-511,2046,4592 } },
{ "Fujifilm X100S", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100F", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X100T", 0, 0,
{ 10592,-4262,-1008,-3514,11355,2465,-870,2025,6386 } },
{ "Fujifilm X100", 0, 0,
{ 12161,-4457,-1069,-5034,12874,2400,-795,1724,6904 } },
{ "Fujifilm X10", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X20", 0, 0,
{ 11768,-4971,-1133,-4904,12927,2183,-480,1723,4605 } },
{ "Fujifilm X30", 0, 0,
{ 12328,-5256,-1144,-4469,12927,1675,-87,1291,4351 } },
{ "Fujifilm X70", 0, 0,
{ 10450,-4329,-878,-3217,11105,2421,-752,1758,6519 } },
{ "Fujifilm X-Pro1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-Pro2", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X-A10", 0, 0,
{ 11540,-4999,-991,-2949,10963,2278,-382,1049,5605} },
{ "Fujifilm X-A20", 0, 0, /* temp */
{ 11540,-4999,-991,-2949,10963,2278,-382,1049,5605} },
{ "Fujifilm X-A1", 0, 0,
{ 11086,-4555,-839,-3512,11310,2517,-815,1341,5940 } },
{ "Fujifilm X-A2", 0, 0,
{ 10763,-4560,-917,-3346,11311,2322,-475,1135,5843 } },
{ "Fujifilm X-A3", 0, 0,
{ 12407,-5222,-1086,-2971,11116,2120,-294,1029,5284 } },
{ "Fujifilm X-A5", 0, 0, /* temp */
{ 12407,-5222,-1086,-2971,11116,2120,-294,1029,5284 } },
{ "Fujifilm X-E1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-E2S", 0, 0,
{ 11562,-5118,-961,-3022,11007,2311,-525,1569,6097 } },
{ "Fujifilm X-E2", 0, 0,
{ 8458,-2451,-855,-4597,12447,2407,-1475,2482,6526 } },
{ "Fujifilm X-E3", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm XF1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-M1", 0, 0,
{ 10413,-3996,-993,-3721,11640,2361,-733,1540,6011 } },
{ "Fujifilm X-S1", 0, 0,
{ 13509,-6199,-1254,-4430,12733,1865,-331,1441,5022 } },
{ "Fujifilm X-T20", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X-T2", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm X-T10", 0, 0, /* updated */
{ 8458,-2451,-855,-4597,12447,2407,-1475,2482,6526 } },
{ "Fujifilm X-T1", 0, 0,
{ 8458,-2451,-855,-4597,12447,2407,-1475,2482,6526 } },
{ "Fujifilm X-H1", 0, 0,
{ 11434,-4948,-1210,-3746,12042,1903,-666,1479,5235 } },
{ "Fujifilm XQ1", 0, 0,
{ 9252,-2704,-1064,-5893,14265,1717,-1101,2341,4349 } },
{ "Fujifilm XQ2", 0, 0,
{ 9252,-2704,-1064,-5893,14265,1717,-1101,2341,4349 } },
{ "Fujifilm GFX 50S", 0, 0,
{ 11756,-4754,-874,-3056,11045,2305,-381,1457,6006 } },
{ "GITUP GIT2P", 4160, 0,
{ 8489, -2583,-1036,-8051,15583,2643,-1307,1407,7354 } },
{ "GITUP GIT2", 3200, 0,
{ 8489, -2583,-1036,-8051,15583,2643,-1307,1407,7354 } },
{ "Hasselblad HV", 0, 0, /* added */
{ 6344,-1612,-461,-4862,12476,2680,-864,1785,6898 } },
{ "Hasselblad Lunar", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Hasselblad Lusso", 0, 0, /* added */
{ 4912,-540,-201,-6129,13513,2906,-1563,2151,7182 } },
{ "Hasselblad Stellar", -800, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{ "Hasselblad 500 mech.", 0, 0, /* added */
{ 8519,-3260,-280,-5081,13459,1738,-1449,2960,7809 } },
{ "Hasselblad CFV", 0, 0,
{ 8519,-3260,-280,-5081,13459,1738,-1449,2960,7809 } },
{ "Hasselblad H-16MP", 0, 0, /* LibRaw */
{ 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } },
{ "Hasselblad H-22MP", 0, 0, /* LibRaw */
{ 17765,-5322,-1734,-6168,13354,2135,-264,2524,7440 } },
{ "Hasselblad H-31MP",0, 0, /* LibRaw */
{ 14480,-5448,-1686,-3534,13123,2260,384,2952,7232 } },
{ "Hasselblad 39-Coated", 0, 0, /* added */
{ 3857,452,-46,-6008,14477,1596,-2627,4481,5718 } },
{ "Hasselblad H-39MP",0, 0,
{ 3857,452,-46,-6008,14477,1596,-2627,4481,5718 } },
{ "Hasselblad H2D-39", 0, 0, /* added */
{ 3894,-110,287,-4672,12610,2295,-2092,4100,6196 } },
{ "Hasselblad H3D-50", 0, 0,
{ 3857,452,-46,-6008,14477,1596,-2627,4481,5718 } },
{ "Hasselblad H3D", 0, 0, /* added */
{ 3857,452,-46,-6008,14477,1596,-2627,4481,5718 } },
{ "Hasselblad H4D-40",0, 0, /* LibRaw */
{ 6325,-860,-957,-6559,15945,266,167,770,5936 } },
{ "Hasselblad H4D-50",0, 0, /* LibRaw */
{ 15283,-6272,-465,-2030,16031,478,-2379,390,7965 } },
{ "Hasselblad H4D-60",0, 0,
{ 9662,-684,-279,-4903,12293,2950,-344,1669,6024 } },
{ "Hasselblad H5D-50c",0, 0,
{ 4932,-835,141,-4878,11868,3437,-1138,1961,7067 } },
{ "Hasselblad H5D-50",0, 0,
{ 5656,-659,-346,-3923,12306,1791,-1602,3509,5442 } },
{ "Hasselblad H6D-100c",0, 0,
{ 5110,-1357,-308,-5573,12835,3077,-1279,2025,7010 } },
{ "Hasselblad X1D",0, 0,
{ 4932,-835,141,-4878,11868,3437,-1138,1961,7067 } },
{ "HTC One A9", 64, 1023, /* this is CM1 transposed */
{ 101, -20, -2, -11, 145, 41, -24, 1, 56 } },
{ "Imacon Ixpress", 0, 0, /* DJC */
{ 7025,-1415,-704,-5188,13765,1424,-1248,2742,6038 } },
{ "Kodak NC2000", 0, 0,
{ 13891,-6055,-803,-465,9919,642,2121,82,1291 } },
{ "Kodak DCS315C", -8, 0,
{ 17523,-4827,-2510,756,8546,-137,6113,1649,2250 } },
{ "Kodak DCS330C", -8, 0,
{ 20620,-7572,-2801,-103,10073,-396,3551,-233,2220 } },
{ "Kodak DCS420", 0, 0,
{ 10868,-1852,-644,-1537,11083,484,2343,628,2216 } },
{ "Kodak DCS460", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS1", 0, 0,
{ 10592,-2206,-967,-1944,11685,230,2206,670,1273 } },
{ "Kodak EOSDCS3B", 0, 0,
{ 9898,-2700,-940,-2478,12219,206,1985,634,1031 } },
{ "Kodak DCS520C", -178, 0,
{ 24542,-10860,-3401,-1490,11370,-297,2858,-605,3225 } },
{ "Kodak DCS560C", -177, 0,
{ 20482,-7172,-3125,-1033,10410,-285,2542,226,3136 } },
{ "Kodak DCS620C", -177, 0,
{ 23617,-10175,-3149,-2054,11749,-272,2586,-489,3453 } },
{ "Kodak DCS620X", -176, 0,
{ 13095,-6231,154,12221,-21,-2137,895,4602,2258 } },
{ "Kodak DCS660C", -173, 0,
{ 18244,-6351,-2739,-791,11193,-521,3711,-129,2802 } },
{ "Kodak DCS720X", 0, 0,
{ 11775,-5884,950,9556,1846,-1286,-1019,6221,2728 } },
{ "Kodak DCS760C", 0, 0,
{ 16623,-6309,-1411,-4344,13923,323,2285,274,2926 } },
{ "Kodak DCS Pro SLR", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14nx", 0, 0,
{ 5494,2393,-232,-6427,13850,2846,-1876,3997,5445 } },
{ "Kodak DCS Pro 14", 0, 0,
{ 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } },
{ "Photo Control Camerz ZDS 14", 0, 0,
{ 7791,3128,-776,-8588,16458,2039,-2455,4006,6198 } },
{ "Kodak ProBack645", 0, 0,
{ 16414,-6060,-1470,-3555,13037,473,2545,122,4948 } },
{ "Kodak ProBack", 0, 0,
{ 21179,-8316,-2918,-915,11019,-165,3477,-180,4210 } },
{ "Kodak P712", 0, 0,
{ 9658,-3314,-823,-5163,12695,2768,-1342,1843,6044 } },
{ "Kodak P850", 0, 0xf7c,
{ 10511,-3836,-1102,-6946,14587,2558,-1481,1792,6246 } },
{ "Kodak P880", 0, 0xfff,
{ 12805,-4662,-1376,-7480,15267,2360,-1626,2194,7904 } },
{ "Kodak EasyShare Z980", 0, 0,
{ 11313,-3559,-1101,-3893,11891,2257,-1214,2398,4908 } },
{ "Kodak EasyShare Z981", 0, 0,
{ 12729,-4717,-1188,-1367,9187,2582,274,860,4411 } },
{ "Kodak EasyShare Z990", 0, 0xfed,
{ 11749,-4048,-1309,-1867,10572,1489,-138,1449,4522 } },
{ "Kodak EASYSHARE Z1015", 0, 0xef1,
{ 11265,-4286,-992,-4694,12343,2647,-1090,1523,5447 } },
{ "Leaf C-Most", 0, 0, /* updated */
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Valeo 6", 0, 0,
{ 3952,2189,449,-6701,14585,2275,-4536,7349,6536 } },
{ "Leaf Aptus 54S", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Leaf Aptus-II 8", 0, 0, /* added */
{ 7361,1257,-163,-6929,14061,3176,-1839,3454,5603 } },
{ "Leaf AFi-II 7", 0, 0, /* added */
{ 7691,-108,-339,-6185,13627,2833,-2046,3899,5952 } },
{ "Leaf Aptus-II 5", 0, 0, /* added */
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Aptus 65", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf AFi 65S", 0, 0, /* added */
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Aptus 75", 0, 0,
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf AFi 75S", 0, 0, /* added */
{ 7914,1414,-1190,-8777,16582,2280,-2811,4605,5562 } },
{ "Leaf Credo 40", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Leaf Credo 50", 0, 0,
{ 3984,0,0,0,10000,0,0,0,7666 } },
{ "Leaf Credo 60", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Leaf Credo 80", 0, 0,
{ 6294,686,-712,-5435, 13417,2211,-1006,2435,5042 } },
{ "Leaf", 0, 0,
{ 8236,1746,-1314,-8251,15953,2428,-3673,5786,5771 } },
{ "Leica M10", 0, 0, /* added */
{ 9090,-3342,-740,-4006,13456,493,-569,2266,6871 } },
{ "Leica M9", 0, 0, /* added */
{ 6687,-1751,-291,-3556,11373,2492,-548,2204,7146 } },
{ "Leica M8", 0, 0, /* added */
{ 7675,-2196,-305,-5860,14119,1856,-2425,4006,6578 } },
{ "Leica M (Typ 240)", 0, 0, /* added */
{ 7199,-2140,-712,-4005,13327,649,-810,2521,6673 } },
{ "Leica M (Typ 262)", 0, 0,
{ 7199,-2140,-712,-4005,13327,649,-810,2521,6673 } },
{ "Leica SL (Typ 601)", 0, 0,
{ 11865,-4523,-1441,-5423,14458,935,-1587,2687,4830} },
{ "Leica S2", 0, 0, /* added */
{ 5627,-721,-447,-4423,12456,2192,-1048,2948,7379 } },
{"Leica S-E (Typ 006)", 0, 0, /* added */
{ 5749,-1072,-382,-4274,12432,2048,-1166,3104,7105 } },
{"Leica S (Typ 006)", 0, 0, /* added */
{ 5749,-1072,-382,-4274,12432,2048,-1166,3104,7105 } },
{ "Leica S (Typ 007)", 0, 0,
{ 6063,-2234,-231,-5210,13787,1500,-1043,2866,6997 } },
{ "Leica Q (Typ 116)", 0, 0, /* updated */
{ 10068,-4043,-1068,-5319,14268,1044,-765,1701,6522 } },
{ "Leica T (Typ 701)", 0, 0, /* added */
{ 6295 ,-1679 ,-475 ,-5586 ,13046 ,2837 ,-1410 ,1889 ,7075 } },
{ "Leica X2", 0, 0, /* added */
{ 8336,-2853,-699,-4425,11989,2760,-954,1625,6396 } },
{ "Leica X1", 0, 0, /* added */
{ 9055,-2611,-666,-4906,12652,2519,-555,1384,7417 } },
{ "Leica X", 0, 0, /* X(113), X-U(113), XV, X Vario(107) */ /* updated */
{ 9062,-3198,-828,-4065,11772,2603,-761,1468,6458 } },
{ "Mamiya M31", 0, 0, /* added */
{ 4516 ,-244 ,-36 ,-7020 ,14976 ,2174 ,-3206 ,4670 ,7087 } },
{ "Mamiya M22", 0, 0, /* added */
{ 2905 ,732 ,-237 ,-8135 ,16626 ,1476 ,-3038 ,4253 ,7517 } },
{ "Mamiya M18", 0, 0, /* added */
{ 6516 ,-2050 ,-507 ,-8217 ,16703 ,1479 ,-3492 ,4741 ,8489 } },
{ "Mamiya ZD", 0, 0,
{ 7645,2579,-1363,-8689,16717,2015,-3712,5941,5961 } },
{ "Micron 2010", 110, 0, /* DJC */
{ 16695,-3761,-2151,155,9682,163,3433,951,4904 } },
{ "Minolta DiMAGE 5", 0, 0xf7d, /* updated */
{ 9117,-3063,-973,-7949,15763,2306,-2752,3136,8093 } },
{ "Minolta DiMAGE 7Hi", 0, 0xf7d, /* updated */
{ 11555,-4064,-1256,-7903,15633,2409,-2811,3320,7358 } },
{ "Minolta DiMAGE 7i", 0, 0xf7d, /* added */
{ 11050,-3791,-1199,-7875,15585,2434,-2797,3359,7560 } },
{ "Minolta DiMAGE 7", 0, 0xf7d, /* updated */
{ 9258,-2879,-1008,-8076,15847,2351,-2806,3280,7821 } },
{ "Minolta DiMAGE A1", 0, 0xf8b, /* updated */
{ 9274,-2548,-1167,-8220,16324,1943,-2273,2721,8340 } },
{ "Minolta DiMAGE A200", 0, 0,
{ 8560,-2487,-986,-8112,15535,2771,-1209,1324,7743 } },
{ "Minolta DiMAGE A2", 0, 0xf8f,
{ 9097,-2726,-1053,-8073,15506,2762,-966,981,7763 } },
{ "Minolta DiMAGE Z2", 0, 0, /* DJC */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Minolta DYNAX 5", 0, 0xffb,
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta Maxxum 5D", 0, 0xffb, /* added */
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta ALPHA-5 DIGITAL", 0, 0xffb, /* added */
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta ALPHA SWEET DIGITAL", 0, 0xffb, /* added */
{ 10284,-3283,-1086,-7957,15762,2316,-829,882,6644 } },
{ "Minolta DYNAX 7", 0, 0xffb,
{ 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } },
{ "Minolta Maxxum 7D", 0, 0xffb, /* added */
{ 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } },
{ "Minolta ALPHA-7 DIGITAL", 0, 0xffb, /* added */
{ 10239,-3104,-1099,-8037,15727,2451,-927,925,6871 } },
{ "Motorola PIXL", 0, 0, /* DJC */
{ 8898,-989,-1033,-3292,11619,1674,-661,3178,5216 } },
{ "Nikon D100", 0, 0,
{ 5902,-933,-782,-8983,16719,2354,-1402,1455,6464 } },
{ "Nikon D1H", 0, 0, /* updated */
{ 7659,-2238,-935,-8942,16969,2004,-2701,3051,8690 } },
{ "Nikon D1X", 0, 0,
{ 7702,-2245,-975,-9114,17242,1875,-2679,3055,8521 } },
{ "Nikon D1", 0, 0, /* multiplied by 2.218750, 1.0, 1.148438 */
{ 16772,-4726,-2141,-7611,15713,1972,-2846,3494,9521 } },
{ "Nikon D200", 0, 0xfbc,
{ 8367,-2248,-763,-8758,16447,2422,-1527,1550,8053 } },
{ "Nikon D2H", 0, 0,
{ 5733,-911,-629,-7967,15987,2055,-3050,4013,7048 } },
{ "Nikon D2X", 0, 0, /* updated */
{ 10231,-2768,-1254,-8302,15900,2551,-797,681,7148 } },
{ "Nikon D3000", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D3100", 0, 0,
{ 7911,-2167,-813,-5327,13150,2408,-1288,2483,7968 } },
{ "Nikon D3200", 0, 0xfb9,
{ 7013,-1408,-635,-5268,12902,2640,-1470,2801,7379 } },
{ "Nikon D3300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D3400", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D300", 0, 0,
{ 9030,-1992,-715,-8465,16302,2255,-2689,3217,8069 } },
{ "Nikon D3X", 0, 0,
{ 7171,-1986,-648,-8085,15555,2718,-2170,2512,7457 } },
{ "Nikon D3S", 0, 0,
{ 8828,-2406,-694,-4874,12603,2541,-660,1509,7587 } },
{ "Nikon D3", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D40X", 0, 0,
{ 8819,-2543,-911,-9025,16928,2151,-1329,1213,8449 } },
{ "Nikon D40", 0, 0,
{ 6992,-1668,-806,-8138,15748,2543,-874,850,7897 } },
{ "Nikon D4S", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D4", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon Df", 0, 0,
{ 8598,-2848,-857,-5618,13606,2195,-1002,1773,7137 } },
{ "Nikon D5000", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2433,2826,8064 } },
{ "Nikon D5100", 0, 0x3de6,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D5200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D5300", 0, 0,
{ 6988,-1384,-714,-5631,13410,2447,-1485,2204,7318 } },
{ "Nikon D5500", 0, 0,
{ 8821,-2938,-785,-4178,12142,2287,-824,1651,6860 } },
{ "Nikon D5600", 0, 0,
{ 8821,-2938,-785,-4178,12142,2287,-824,1651,6860 } },
{ "Nikon D500", 0, 0,
{ 8813,-3210,-1036,-4703,12868,2021,-1054,1940,6129 } },
{ "Nikon D50", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D5", 0, 0,
{ 9200,-3522,-992,-5755,13803,2117,-753,1486,6338 } },
{ "Nikon D600", 0, 0x3e07,
{ 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } },
{ "Nikon D610",0, 0, /* updated */
{ 8178,-2245,-609,-4857,12394,2776,-1207,2086,7298 } },
{ "Nikon D60", 0, 0,
{ 8736,-2458,-935,-9075,16894,2251,-1354,1242,8263 } },
{ "Nikon D7000", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon D7100", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D7200", 0, 0,
{ 8322,-3112,-1047,-6367,14342,2179,-988,1638,6394 } },
{ "Nikon D7500", 0, 0,
{ 8813,-3210,-1036,-4703,12868,2021,-1054,1940,6129 } },
{ "Nikon D750", -600, 0,
{ 9020,-2890,-715,-4535,12436,2348,-934,1919,7086 } },
{ "Nikon D700", 0, 0,
{ 8139,-2171,-663,-8747,16541,2295,-1925,2008,8093 } },
{ "Nikon D70", 0, 0,
{ 7732,-2422,-789,-8238,15884,2498,-859,783,7330 } },
{ "Nikon D850", 0, 0,
{ 10405,-3755,-1270,-5461,13787,1793,-1040,2015,6785 } },
{ "Nikon D810A", 0, 0,
{ 11973,-5685,-888,-1965,10326,1901,-115,1123,7169 } },
{ "Nikon D810", 0, 0,
{ 9369,-3195,-791,-4488,12430,2301,-893,1796,6872 } },
{ "Nikon D800", 0, 0,
{ 7866,-2108,-555,-4869,12483,2681,-1176,2069,7501 } },
{ "Nikon D80", 0, 0,
{ 8629,-2410,-883,-9055,16940,2171,-1490,1363,8520 } },
{ "Nikon D90", 0, 0xf00,
{ 7309,-1403,-519,-8474,16008,2622,-2434,2826,8064 } },
{ "Nikon E700", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E800", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E950", 0, 0x3dd, /* DJC */
{ -3746,10611,1665,9621,-1734,2114,-2389,7082,3064,3406,6116,-244 } },
{ "Nikon E995", 0, 0, /* copied from E5000 */
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E2100", 0, 0, /* copied from Z2, new white balance */
{ 13142,-4152,-1596,-4655,12374,2282,-1769,2696,6711 } },
{ "Nikon E2500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E3200", 0, 0, /* DJC */
{ 9846,-2085,-1019,-3278,11109,2170,-774,2134,5745 } },
{ "Nikon E4300", 0, 0, /* copied from Minolta DiMAGE Z2 */
{ 11280,-3564,-1370,-4655,12374,2282,-1423,2168,5396 } },
{ "Nikon E4500", 0, 0,
{ -5547,11762,2189,5814,-558,3342,-4924,9840,5949,688,9083,96 } },
{ "Nikon E5000", 0, 0, /* updated */
{ -6678,12805,2248,5725,-499,3375,-5903,10713,6034,-270,9976,134 } },
{ "Nikon E5400", 0, 0, /* updated */
{ 9349,-2988,-1001,-7918,15766,2266,-2097,2680,6839 } },
{ "Nikon E5700", 0, 0, /* updated */
{ -6475,12496,2428,5409,-16,3180,-5965,10912,5866,-177,9918,248 } },
{ "Nikon E8400", 0, 0,
{ 7842,-2320,-992,-8154,15718,2599,-1098,1342,7560 } },
{ "Nikon E8700", 0, 0,
{ 8489,-2583,-1036,-8051,15583,2643,-1307,1407,7354 } },
{ "Nikon E8800", 0, 0,
{ 7971,-2314,-913,-8451,15762,2894,-1442,1520,7610 } },
{ "Nikon COOLPIX A", 0, 0,
{ 8198,-2239,-724,-4871,12389,2798,-1043,2050,7181 } },
{ "Nikon COOLPIX B700", 0, 0,
{ 14387,-6014,-1299,-1357,9975,1616,467,1047,4744 } },
{ "Nikon COOLPIX P330", -200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P340", -200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX Kalon", 0, 0, /* added */
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX Deneb", 0, 0, /* added */
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P6000", 0, 0,
{ 9698,-3367,-914,-4706,12584,2368,-837,968,5801 } },
{ "Nikon COOLPIX P7000", 0, 0,
{ 11432,-3679,-1111,-3169,11239,2202,-791,1380,4455 } },
{ "Nikon COOLPIX P7100", 0, 0,
{ 11053,-4269,-1024,-1976,10182,2088,-526,1263,4469 } },
{ "Nikon COOLPIX P7700", -3200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon COOLPIX P7800", -3200, 0,
{ 10321,-3920,-931,-2750,11146,1824,-442,1545,5539 } },
{ "Nikon 1 V3", -200, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 J4", 0, 0,
{ 5958,-1559,-571,-4021,11453,2939,-634,1548,5087 } },
{ "Nikon 1 J5", 0, 0,
{ 7520,-2518,-645,-3844,12102,1945,-913,2249,6835 } },
{ "Nikon 1 S2", -200, 0,
{ 6612,-1342,-618,-3338,11055,2623,-174,1792,5075 } },
{ "Nikon 1 V2", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 J3", 0, 0, /* updated */
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 AW1", 0, 0,
{ 6588,-1305,-693,-3277,10987,2634,-355,2016,5106 } },
{ "Nikon 1 ", 0, 0, /* J1, J2, S1, V1 */
{ 8994,-2667,-865,-4594,12324,2552,-699,1786,6260 } },
{ "Olympus AIR-A01", 0, 0xfe1,
{ 8992,-3093,-639,-2563,10721,2122,-437,1270,5473 } },
{ "Olympus C5050", 0, 0, /* updated */
{ 10633,-3234,-1285,-7460,15570,1967,-1917,2510,6299 } },
{ "Olympus C5060", 0, 0,
{ 10445,-3362,-1307,-7662,15690,2058,-1135,1176,7602 } },
{ "Olympus C7070", 0, 0,
{ 10252,-3531,-1095,-7114,14850,2436,-1451,1723,6365 } },
{ "Olympus C70", 0, 0,
{ 10793,-3791,-1146,-7498,15177,2488,-1390,1577,7321 } },
{ "Olympus C80", 0, 0,
{ 8606,-2509,-1014,-8238,15714,2703,-942,979,7760 } },
{ "Olympus E-10", 0, 0xffc, /* updated */
{ 12970,-4703,-1433,-7466,15843,1644,-2191,2451,6668 } },
{ "Olympus E-1", 0, 0,
{ 11846,-4767,-945,-7027,15878,1089,-2699,4122,8311 } },
{ "Olympus E-20", 0, 0xffc, /* updated */
{ 13414,-4950,-1517,-7166,15293,1960,-2325,2664,7212 } },
{ "Olympus E-300", 0, 0,
{ 7828,-1761,-348,-5788,14071,1830,-2853,4518,6557 } },
{ "Olympus E-330", 0, 0,
{ 8961,-2473,-1084,-7979,15990,2067,-2319,3035,8249 } },
{ "Olympus E-30", 0, 0xfbc,
{ 8144,-1861,-1111,-7763,15894,1929,-1865,2542,7607 } },
{ "Olympus E-3", 0, 0xf99,
{ 9487,-2875,-1115,-7533,15606,2010,-1618,2100,7389 } },
{ "Olympus E-400", 0, 0,
{ 6169,-1483,-21,-7107,14761,2536,-2904,3580,8568 } },
{ "Olympus E-410", 0, 0xf6a,
{ 8856,-2582,-1026,-7761,15766,2082,-2009,2575,7469 } },
{ "Olympus E-420", 0, 0xfd7,
{ 8746,-2425,-1095,-7594,15612,2073,-1780,2309,7416 } },
{ "Olympus E-450", 0, 0xfd2,
{ 8745,-2425,-1095,-7594,15613,2073,-1780,2309,7416 } },
{ "Olympus E-500", 0, 0,
{ 8136,-1968,-299,-5481,13742,1871,-2556,4205,6630 } },
{ "Olympus E-510", 0, 0xf6a,
{ 8785,-2529,-1033,-7639,15624,2112,-1783,2300,7817 } },
{ "Olympus E-520", 0, 0xfd2,
{ 8344,-2322,-1020,-7596,15635,2048,-1748,2269,7287 } },
{ "Olympus E-5", 0, 0xeec,
{ 11200,-3783,-1325,-4576,12593,2206,-695,1742,7504 } },
{ "Olympus E-600", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-620", 0, 0xfaf,
{ 8453,-2198,-1092,-7609,15681,2008,-1725,2337,7824 } },
{ "Olympus E-P1", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P2", 0, 0xffd,
{ 8343,-2050,-1021,-7715,15705,2103,-1831,2380,8235 } },
{ "Olympus E-P3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-P5", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL1s", 0, 0,
{ 11409,-3872,-1393,-4572,12757,2003,-709,1810,7415 } },
{ "Olympus E-PL1", 0, 0,
{ 11408,-4289,-1215,-4286,12385,2118,-387,1467,7787 } },
{ "Olympus E-PL2", 0, 0xcf3,
{ 15030,-5552,-1806,-3987,12387,1767,-592,1670,7023 } },
{ "Olympus E-PL3", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PL5", 0, 0xfcb,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL6", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PL7", 0, 0,
{ 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } },
{ "Olympus E-PL8", 0, 0,
{ 9197,-3190,-659,-2606,10830,2039,-458,1250,5458 } },
{ "Olympus E-PL9", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-PM1", 0, 0,
{ 7575,-2159,-571,-3722,11341,2725,-1434,2819,6271 } },
{ "Olympus E-PM2", 0, 0,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M10", 0, 0, /* Same for E-M10MarkII, E-M10MarkIII */
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus E-M1MarkII", 0, 0,
{ 9383,-3170,-763,-2457,10702,2020,-384,1236,5552 } },
{ "Olympus E-M1", 0, 0,
{ 7687,-1984,-606,-4327,11928,2721,-1381,2339,6452 } },
{ "Olympus E-M5MarkII", 0, 0,
{ 9422,-3258,-711,-2655,10898,2015,-512,1354,5512 } },
{ "Olympus E-M5", 0, 0xfe1,
{ 8380,-2630,-639,-2887,10725,2496,-627,1427,5438 } },
{ "Olympus PEN-F",0, 0,
{ 9476,-3182,-765,-2613,10958,1893,-449,1315,5268 } },
{ "Olympus SP350", 0, 0,
{ 12078,-4836,-1069,-6671,14306,2578,-786,939,7418 } },
{ "Olympus SP3", 0, 0,
{ 11766,-4445,-1067,-6901,14421,2707,-1029,1217,7572 } },
{ "Olympus SP500UZ", 0, 0xfff,
{ 9493,-3415,-666,-5211,12334,3260,-1548,2262,6482 } },
{ "Olympus SP510UZ", 0, 0xffe,
{ 10593,-3607,-1010,-5881,13127,3084,-1200,1805,6721 } },
{ "Olympus SP550UZ", 0, 0xffe,
{ 11597,-4006,-1049,-5432,12799,2957,-1029,1750,6516 } },
{ "Olympus SP560UZ", 0, 0xff9,
{ 10915,-3677,-982,-5587,12986,2911,-1168,1968,6223 } },
{ "Olympus SP565UZ", 0, 0, /* added */
{ 11856,-4469,-1159,-4814,12368,2756,-993,1779,5589 } },
{ "Olympus SP570UZ", 0, 0,
{ 11522,-4044,-1146,-4736,12172,2904,-988,1829,6039 } },
{ "Olympus SH-2", 0, 0,
{ 10156,-3425,-1077,-2611,11177,1624,-385,1592,5080 } },
{ "Olympus SH-3", 0, 0, /* Alias of SH-2 */
{ 10156,-3425,-1077,-2611,11177,1624,-385,1592,5080 } },
{ "Olympus STYLUS1",0, 0, /* updated */
{ 8360,-2420,-880,-3928,12353,1739,-1381,2416,5173 } },
{ "Olympus TG-4", 0, 0,
{ 11426,-4159,-1126,-2066,10678,1593,-120,1327,4998 } },
{ "Olympus TG-5", 0, 0,
{ 10899,-3833,-1082,-2112,10736,1575,-267,1452,5269 } },
{ "Olympus XZ-10", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "Olympus XZ-1", 0, 0,
{ 10901,-4095,-1074,-1141,9208,2293,-62,1417,5158 } },
{ "Olympus XZ-2", 0, 0,
{ 9777,-3483,-925,-2886,11297,1800,-602,1663,5134 } },
{ "OmniVision", 16, 0x3ff,
{ 12782,-4059,-379,-478,9066,1413,1340,1513,5176 } }, /* DJC */
{ "Pentax *ist DL2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DL", 0, 0,
{ 10829,-2838,-1115,-8339,15817,2696,-837,680,11939 } },
{ "Pentax *ist DS2", 0, 0,
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Pentax *ist DS", 0, 0,
{ 10371,-2333,-1206,-8688,16231,2602,-1230,1116,11282 } },
{ "Pentax *ist D", 0, 0,
{ 9651,-2059,-1189,-8881,16512,2487,-1460,1345,10687 } },
{ "Pentax GR", 0, 0, /* added */
{ 5329,-1459,-390,-5407,12930,2768,-1119,1772,6046 } },
{ "Pentax K-01", 0, 0, /* added */
{ 8134,-2728,-645,-4365,11987,2694,-838,1509,6498 } },
{ "Pentax K10D", 0, 0, /* updated */
{ 9679,-2965,-811,-8622,16514,2182,-975,883,9793 } },
{ "Pentax K1", 0, 0,
{ 11095,-3157,-1324,-8377,15834,2720,-1108,947,11688 } },
{ "Pentax K20D", 0, 0,
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Pentax K200D", 0, 0,
{ 9186,-2678,-907,-8693,16517,2260,-1129,1094,8524 } },
{ "Pentax K2000", 0, 0, /* updated */
{ 9730,-2989,-970,-8527,16258,2381,-1060,970,8362 } },
{ "Pentax K-m", 0, 0, /* updated */
{ 9730,-2989,-970,-8527,16258,2381,-1060,970,8362 } },
{ "Pentax KP", 0, 0,
{ 7825,-2160,-1403,-4841,13555,1349,-1559,2449,5814 } },
{ "Pentax K-x", 0, 0,
{ 8843,-2837,-625,-5025,12644,2668,-411,1234,7410 } },
{ "Pentax K-r", 0, 0,
{ 9895,-3077,-850,-5304,13035,2521,-883,1768,6936 } },
{ "Pentax K-1", 0, 0, /* updated */
{ 8596,-2981,-639,-4202,12046,2431,-685,1424,6122 } },
{ "Pentax K-30", 0, 0, /* updated */
{ 8134,-2728,-645,-4365,11987,2694,-838,1509,6498 } },
{ "Pentax K-3 II", 0, 0, /* updated */
{ 7415,-2052,-721,-5186,12788,2682,-1446,2157,6773 } },
{ "Pentax K-3", 0, 0,
{ 7415,-2052,-721,-5186,12788,2682,-1446,2157,6773 } },
{ "Pentax K-5 II", 0, 0,
{ 8170,-2725,-639,-4440,12017,2744,-771,1465,6599 } },
{ "Pentax K-500", 0, 0, /* added */
{ 8109,-2740,-608,-4593,12175,2731,-1006,1515,6545 } },
{ "Pentax K-50", 0, 0, /* added */
{ 8109,-2740,-608,-4593,12175,2731,-1006,1515,6545 } },
{ "Pentax K-5", 0, 0,
{ 8713,-2833,-743,-4342,11900,2772,-722,1543,6247 } },
{ "Pentax K-70", 0, 0,
{ 8766,-3149,-747,-3976,11943,2292,-517,1259,5552 } },
{ "Pentax K-7", 0, 0,
{ 9142,-2947,-678,-8648,16967,1663,-2224,2898,8615 } },
{ "Pentax KP", 0, 0, /* temp */
{ 8626,-2607,-1155,-3995,12301,1881,-1039,1822,6925 } },
{ "Pentax K-S1", 0, 0,
{ 8512,-3211,-787,-4167,11966,2487,-638,1288,6054 } },
{ "Pentax K-S2", 0, 0,
{ 8662,-3280,-798,-3928,11771,2444,-586,1232,6054 } },
{ "Pentax Q-S1", 0, 0,
{ 12995,-5593,-1107,-1879,10139,2027,-64,1233,4919 } },
{ "Pentax Q7", 0, 0, /* added */
{ 10901,-3938,-1025,-2743,11210,1738,-823,1805,5344 } },
{ "Pentax Q10", 0, 0, /* updated */
{ 11562,-4183,-1172,-2357,10919,1641,-582,1726,5112 } },
{ "Pentax Q", 0, 0, /* added */
{ 11731,-4169,-1267,-2015,10727,1473,-217,1492,4870 } },
{ "Pentax MX-1", 0, 0, /* updated */
{ 9296,-3146,-888,-2860,11287,1783,-618,1698,5151 } },
{ "Pentax 645D", 0, 0x3e00,
{ 10646,-3593,-1158,-3329,11699,1831,-667,2874,6287 } },
{ "Pentax 645Z", 0, 0, /* updated */
{ 9519,-3591,-664,-4074,11725,2671,-624,1501,6653 } },
{ "Panasonic DMC-CM10", -15, 0,
{ 8770,-3194,-820,-2871,11281,1803,-513,1552,4434 } },
{ "Panasonic DMC-CM1", -15, 0,
{ 8770,-3194,-820,-2871,11281,1803,-513,1552,4434 } },
{ "Panasonic DC-FZ82", -15, 0, /* markets: FZ80 FZ82 */
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DC-FZ80", -15, 0, /* markets: FZ80 FZ82 */
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DMC-FZ8", 0, 0xf7f,
{ 8986,-2755,-802,-6341,13575,3077,-1476,2144,6379 } },
{ "Panasonic DMC-FZ18", 0, 0,
{ 9932,-3060,-935,-5809,13331,2753,-1267,2155,5575 } },
{ "Panasonic DMC-FZ28", -15, 0xf96,
{ 10109,-3488,-993,-5412,12812,2916,-1305,2140,5543 } },
{ "Panasonic DMC-FZ300", -15, 0xfff,
{ 8378,-2798,-769,-3068,11410,1877,-538,1792,4623 } },
{ "Panasonic DMC-FZ330", -15, 0xfff,
{ 8378,-2798,-769,-3068,11410,1877,-538,1792,4623 } },
{ "Panasonic DMC-FZ30", 0, 0xf94,
{ 10976,-4029,-1141,-7918,15491,2600,-1670,2071,8246 } },
{ "Panasonic DMC-FZ3", -15, 0,
{ 9938,-2780,-890,-4604,12393,2480,-1117,2304,4620 } },
{ "Panasonic DMC-FZ4", -15, 0, /* 40,42,45 */
{ 13639,-5535,-1371,-1698,9633,2430,316,1152,4108 } },
{ "Panasonic DMC-FZ50", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-FZ7", -15, 0,
{ 11532,-4324,-1066,-2375,10847,1749,-564,1699,4351 } },
{ "Leica V-LUX1", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Leica V-LUX 1", 0, 0,
{ 7906,-2709,-594,-6231,13351,3220,-1922,2631,6537 } },
{ "Panasonic DMC-L10", -15, 0xf96,
{ 8025,-1942,-1050,-7920,15904,2100,-2456,3005,7039 } },
{ "Panasonic DMC-L1", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Leica DIGILUX3", 0, 0xf7f, /* added */
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Leica DIGILUX 3", 0, 0xf7f,
{ 8054,-1885,-1025,-8349,16367,2040,-2805,3542,7629 } },
{ "Panasonic DMC-LC1", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Leica DIGILUX2", 0, 0, /* added */
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Leica DIGILUX 2", 0, 0,
{ 11340,-4069,-1275,-7555,15266,2448,-2960,3426,7685 } },
{ "Panasonic DMC-LX100", -15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Leica D-LUX (Typ 109)", -15, 0,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Panasonic DMC-LF1", -15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Leica C (Typ 112)", -15, 0,
{ 9379,-3267,-816,-3227,11560,1881,-926,1928,5340 } },
{ "Panasonic DMC-LX9", -15, 0, /* markets: LX9 LX10 LX15 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-LX10", -15, 0, /* markets: LX9 LX10 LX15 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-LX15", -15, 0, /* markets: LX9 LX10 LX15 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-LX1", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Leica D-Lux (Typ 109)", 0, 0xf7f,
{ 8844,-3538,-768,-3709,11762,2200,-698,1792,5220 } },
{ "Leica D-LUX2", 0, 0xf7f,
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Leica D-LUX 2", 0, 0xf7f, /* added */
{ 10704,-4187,-1230,-8314,15952,2501,-920,945,8927 } },
{ "Panasonic DMC-LX2", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Leica D-LUX3", 0, 0,
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Leica D-LUX 3", 0, 0, /* added */
{ 8048,-2810,-623,-6450,13519,3272,-1700,2146,7049 } },
{ "Panasonic DMC-LX3", -15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Leica D-LUX 4", -15, 0,
{ 8128,-2668,-655,-6134,13307,3161,-1782,2568,6083 } },
{ "Panasonic DMC-LX5", -15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Leica D-LUX 5", -15, 0,
{ 10909,-4295,-948,-1333,9306,2399,22,1738,4582 } },
{ "Panasonic DMC-LX7", -15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Leica D-LUX 6", -15, 0,
{ 10148,-3743,-991,-2837,11366,1659,-701,1893,4899 } },
{ "Panasonic DMC-FZ1000", -15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Leica V-LUX (Typ 114)", 15, 0,
{ 7830,-2696,-763,-3325,11667,1866,-641,1712,4824 } },
{ "Panasonic DMC-FZ100", -15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Leica V-LUX 2", -15, 0xfff,
{ 16197,-6146,-1761,-2393,10765,1869,366,2238,5248 } },
{ "Panasonic DMC-FZ150", -15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Leica V-LUX 3", -15, 0xfff,
{ 11904,-4541,-1189,-2355,10899,1662,-296,1586,4289 } },
{ "Panasonic DMC-FZ2000", -15, 0, /* markets: DMC-FZ2000, DMC-FZ2500 ,FZH1 */
{ 7386,-2443,-743,-3437,11864,1757,-608,1660,4766 } },
{ "Panasonic DMC-FZ2500", -15, 0,
{ 7386,-2443,-743,-3437,11864,1757,-608,1660,4766 } },
{ "Panasonic DMC-FZH1", -15, 0,
{ 7386,-2443,-743,-3437,11864,1757,-608,1660,4766 } },
{ "Panasonic DMC-FZ200", -15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Leica V-LUX 4", -15, 0xfff,
{ 8112,-2563,-740,-3730,11784,2197,-941,2075,4933 } },
{ "Panasonic DMC-FX150", -15, 0xfff,
{ 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } },
{ "Panasonic DMC-FX180", -15, 0xfff, /* added */
{ 9082,-2907,-925,-6119,13377,3058,-1797,2641,5609 } },
{ "Panasonic DMC-G10", 0, 0,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G1", -15, 0xf94,
{ 8199,-2065,-1056,-8124,16156,2033,-2458,3022,7220 } },
{ "Panasonic DMC-G2", -15, 0xf3c,
{ 10113,-3400,-1114,-4765,12683,2317,-377,1437,6710 } },
{ "Panasonic DMC-G3", -15, 0xfff,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DMC-G5", -15, 0xfff,
{ 7798,-2562,-740,-3879,11584,2613,-1055,2248,5434 } },
{ "Panasonic DMC-G6", -15, 0xfff,
{ 8294,-2891,-651,-3869,11590,2595,-1183,2267,5352 } },
{ "Panasonic DMC-G7", -15, 0xfff,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-G8", -15, 0xfff, /* markets: DMC-G8, DMC-G80, DMC-G81, DMC-G85 */
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DC-G9", -15, 0,
{ 7685,-2375,-634,-3687,11700,2249,-748,1546,5111 } },
{ "Panasonic DMC-GF1", -15, 0xf92,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF2", -15, 0xfff,
{ 7888,-1902,-1011,-8106,16085,2099,-2353,2866,7330 } },
{ "Panasonic DMC-GF3", -15, 0xfff,
{ 9051,-2468,-1204,-5212,13276,2121,-1197,2510,6890 } },
{ "Panasonic DMC-GF5", -15, 0xfff,
{ 8228,-2945,-660,-3938,11792,2430,-1094,2278,5793 } },
{ "Panasonic DMC-GF6", -15, 0,
{ 8130,-2801,-946,-3520,11289,2552,-1314,2511,5791 } },
{ "Panasonic DMC-GF7", -15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GF8", -15, 0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GH1", -15, 0xf92,
{ 6299,-1466,-532,-6535,13852,2969,-2331,3112,5984 } },
{ "Panasonic DMC-GH2", -15, 0xf95,
{ 7780,-2410,-806,-3913,11724,2484,-1018,2390,5298 } },
{ "Panasonic DMC-GH3", -15, 0,
{ 6559,-1752,-491,-3672,11407,2586,-962,1875,5130 } },
{ "Panasonic DMC-GH4", -15, 0,
{ 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } },
{ "Panasonic AG-GH4", -15, 0, /* added */
{ 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } },
{"Panasonic DC-GH5s", -15, 0,
{ 6929,-2355,-708,-4192,12534,1828,-1097,1989,5195 } },
{ "Panasonic DC-GH5", -15, 0,
{ 7641,-2336,-605,-3218,11299,2187,-485,1338,5121 } },
{ "Yuneec CGO4", -15, 0,
{ 7122,-2108,-512,-3155,11201,2231,-541,1423,5045 } },
{ "Panasonic DMC-GM1", -15, 0,
{ 6770,-1895,-744,-5232,13145,2303,-1664,2691,5703 } },
{ "Panasonic DMC-GM5", -15, 0,
{ 8238,-3244,-679,-3921,11814,2384,-836,2022,5852 } },
{ "Panasonic DMC-GX1", -15, 0,
{ 6763,-1919,-863,-3868,11515,2684,-1216,2387,5879 } },
{ "Panasonic DC-GF10", -15, 0, /* temp, markets: GF10, GF90 */
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DC-GF90", -15, 0, /* temp, markets: GF10, GF90 */
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DC-GX850", -15, 0, /* markets: GX850 GX800 GF9 */
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DC-GX800", -15, 0, /* markets: GX850 GX800 GF9 */
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DC-GF9", -15, 0, /* markets: GX850 GX800 GF9 */
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GX85", -15, 0, /* markets: GX85 GX80 GX7MK2 */
{ 7771,-3020,-629,-4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX80", -15, 0, /* markets: GX85 GX80 GX7MK2 */
{ 7771,-3020,-629,-4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX7MK2", -15, 0, /* markets: GX85 GX80 GX7MK2 */
{ 7771,-3020,-629,-4029,11950,2345,-821,1977,6119 } },
{ "Panasonic DMC-GX7", -15,0,
{ 7610,-2780,-576,-4614,12195,2733,-1375,2393,6490 } },
{ "Panasonic DMC-GX8", -15,0,
{ 7564,-2263,-606,-3148,11239,2177,-540,1435,4853 } },
{ "Panasonic DC-GX9", -15, 0, /* temp */
{ 7685,-2375,-634,-3687,11700,2249,-748,1546,5111 } },
{ "Panasonic DMC-TZ6", -15, 0, /* markets: ZS40 TZ60 TZ61 */
{ 8607,-2822,-808,-3755,11930,2049,-820,2060,5224 } },
{ "Panasonic DMC-TZ8", -15, 0, /* markets: ZS60 TZ80 TZ81 TZ82 TZ85 */
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DC-TZ90", -15, 0, /* markets: ZS70 TZ90 TZ91 TZ92 T93 */
{ 9052,-3117,-883,-3045,11346,1927,-205,1520,4730 } },
{ "Panasonic DC-TZ91", -15, 0, /* markets: ZS70 TZ90 TZ91 TZ92 T93 */
{ 9052,-3117,-883,-3045,11346,1927,-205,1520,4730 } },
{ "Panasonic DC-TZ92", -15, 0, /* markets: ZS70 TZ90 TZ91 TZ92 T93 */
{ 9052,-3117,-883,-3045,11346,1927,-205,1520,4730 } },
{ "Panasonic DC-T93", -15, 0, /* markets: ZS70 TZ90 TZ91 TZ92 T93 */
{ 9052,-3117,-883,-3045,11346,1927,-205,1520,4730 } },
{ "Panasonic DMC-ZS4", -15, 0, /* markets: ZS40 TZ60 TZ61 */
{ 8607,-2822,-808,-3755,11930,2049,-820,2060,5224 } },
{ "Panasonic DMC-TZ7", -15, 0, /* markets: ZS50 TZ70 TZ71 */
{ 8802,-3135,-789,-3151,11468,1904,-550,1745,4810 } },
{ "Panasonic DMC-ZS5", -15, 0, /* markets: ZS50 TZ70 TZ71 */
{ 8802,-3135,-789,-3151,11468,1904,-550,1745,4810 } },
{ "Panasonic DMC-ZS6", -15, 0, /* markets: ZS60 TZ80 TZ81 TZ85 */
{ 8550,-2908,-842,-3195,11529,1881,-338,1603,4631 } },
{ "Panasonic DC-ZS70", -15, 0, /* markets: ZS70 TZ90 TZ91 TZ92 T93 */
{ 9052,-3117,-883,-3045,11346,1927,-205,1520,4730 } },
{ "Panasonic DMC-ZS100", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-ZS110", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TZ100", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TZ101", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TZ110", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DMC-TX1", -15, 0, /* markets: ZS100 ZS110 TZ100 TZ101 TZ110 TX1 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DC-ZS200", -15, 0, /* temp, markets: ZS200 TZ200 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Panasonic DC-TZ200", -15, 0, /* temp, markets: ZS200 TZ200 */
{ 7790,-2736,-755,-3452,11870,1769,-628,1647,4898 } },
{ "Phase One H 20", 0, 0, /* DJC */
{ 1313,1855,-109,-6715,15908,808,-327,1840,6020 } },
{ "Phase One H20", 0, 0, /* DJC */
{ 1313,1855,-109,-6715,15908,808,-327,1840,6020 } },
{ "Phase One H 25", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One H25", 0, 0, /* added */
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One IQ280", 0, 0, /* added */
{ 6294,686,-712,-5435,13417,2211,-1006,2435,5042 } },
{ "Phase One IQ260", 0, 0, /* added */
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One IQ250",0, 0,
// {3984,0,0,0,10000,0,0,0,7666}},
{10325,845,-604,-4113,13385,481,-1791,4163,6924}}, /* emb */
{ "Phase One IQ180", 0, 0, /* added */
{ 6294,686,-712,-5435,13417,2211,-1006,2435,5042 } },
{ "Phase One IQ160", 0, 0, /* added */
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One IQ150", 0, 0, /* added */
{10325,845,-604,-4113,13385,481,-1791,4163,6924}}, /* temp */ /* emb */
// { 3984,0,0,0,10000,0,0,0,7666 } },
{ "Phase One IQ140", 0, 0, /* added */
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P65", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P 65", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P45", 0, 0, /* added */
{ 5053,-24,-117,-5685,14077,1703,-2619,4491,5850 } },
{ "Phase One P 45", 0, 0, /* added */
{ 5053,-24,-117,-5685,14077,1703,-2619,4491,5850 } },
{ "Phase One P40", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P 40", 0, 0,
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One P30", 0, 0, /* added */
{ 4516,-244,-36,-7020,14976,2174,-3206,4670,7087 } },
{ "Phase One P 30", 0, 0, /* added */
{ 4516,-244,-36,-7020,14976,2174,-3206,4670,7087 } },
{ "Phase One P25", 0, 0, /* added */
{ 2905,732,-237,-8135,16626,1476,-3038,4253,7517 } },
{ "Phase One P 25", 0, 0, /* added */
{ 2905,732,-237,-8135,16626,1476,-3038,4253,7517 } },
{ "Phase One P21", 0, 0, /* added */
{ 6516,-2050,-507,-8217,16703,1479,-3492,4741,8489 } },
{ "Phase One P 21", 0, 0, /* added */
{ 6516,-2050,-507,-8217,16703,1479,-3492,4741,8489 } },
{ "Phase One P20", 0, 0, /* added */
{ 2905,732,-237,-8135,16626,1476,-3038,4253,7517 } },
{ "Phase One P20", 0, 0, /* added */
{ 2905,732,-237,-8135,16626,1476,-3038,4253,7517 } },
{ "Phase One P 2", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One P2", 0, 0,
{ 2905,732,-237,-8134,16626,1476,-3038,4253,7517 } },
{ "Phase One IQ3 100MP", 0, 0, /* added */
// {2423,0,0,0,9901,0,0,0,7989}},
{ 10999,354,-742,-4590,13342,937,-1060,2166,8120} }, /* emb */
{ "Phase One IQ3 80MP", 0, 0, /* added */
{ 6294,686,-712,-5435,13417,2211,-1006,2435,5042 } },
{ "Phase One IQ3 60MP", 0, 0, /* added */
{ 8035,435,-962,-6001,13872,2320,-1159,3065,5434 } },
{ "Phase One IQ3 50MP", 0, 0, /* added */
// { 3984,0,0,0,10000,0,0,0,7666 } },
{10058,1079,-587,-4135,12903,944,-916,2726,7480}}, /* emb */
{ "Photron BC2-HD", 0, 0, /* DJC */
{ 14603,-4122,-528,-1810,9794,2017,-297,2763,5936 } },
{ "Polaroid x530", 0, 0,
{ 13458,-2556,-510,-5444,15081,205,0,0,12120 } },
{ "Red One", 704, 0xffff, /* DJC */
{ 21014,-7891,-2613,-3056,12201,856,-2203,5125,8042 } },
{ "Ricoh S10 24-72mm F2.5-4.4 VC", 0, 0, /* added */
{ 10531,-4043,-878,-2038,10270,2052,-107,895,4577 } },
{ "Ricoh GR A12 50mm F2.5 MACRO", 0, 0, /* added */
{ 8849,-2560,-689,-5092,12831,2520,-507,1280,7104 } },
{ "Ricoh GR DIGITAL 3", 0, 0, /* added */
{ 8170,-2496,-655,-5147,13056,2312,-1367,1859,5265 } },
{ "Ricoh GR DIGITAL 4", 0, 0, /* added */
{ 8771,-3151,-837,-3097,11015,2389,-703,1343,4924 } },
{ "Ricoh GR II", 0, 0,
{ 4630,-834,-423,-4977,12805,2417,-638,1467,6115 } },
{ "Ricoh GR", 0, 0,
{ 3708,-543,-160,-5381,12254,3556,-1471,1929,8234 } },
{ "Ricoh GX200", 0, 0, /* added */
{ 8040,-2368,-626,-4659,12543,2363,-1125,1581,5660 } },
{ "Ricoh RICOH GX200", 0, 0, /* added */
{ 8040,-2368,-626,-4659,12543,2363,-1125,1581,5660 } },
{ "Ricoh GXR MOUNT A12", 0, 0, /* added */
{ 7834,-2182,-739,-5453,13409,2241,-952,2005,6620 } },
{ "Ricoh GXR A16", 0, 0, /* added */
{ 7837,-2538,-730,-4370,12184,2461,-868,1648,5830 } },
{ "Ricoh GXR A12", 0, 0, /* added */
{ 10228,-3159,-933,-5304,13158,2371,-943,1873,6685 } },
{ "Samsung EK-GN100", 0, 0, /* added */ /* Galaxy NX */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung EK-GN110", 0, 0, /* added */ /* Galaxy NX */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung EK-GN120", 0, 0, /* Galaxy NX */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung EK-KN120", 0, 0, /* added */ /* Galaxy NX */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung EX1", 0, 0x3e00,
{ 8898,-2498,-994,-3144,11328,2066,-760,1381,4576 } },
{ "Samsung EX2F", 0, 0x7ff,
{ 10648,-3897,-1055,-2022,10573,1668,-492,1611,4742 } },
{ "Samsung Galaxy S7 Edge", 0, 0, /* added */
{ 9927,-3704,-1024,-3935,12758,1257,-389,1512,4993 } },
{ "Samsung Galaxy S7", 0, 0, /* added */
{ 9927,-3704,-1024,-3935,12758,1257,-389,1512,4993 } },
{ "Samsung Galaxy NX", 0, 0, /* added */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX U", 0, 0, /* added */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX mini", 0, 0,
{ 5222,-1196,-550,-6540,14649,2009,-1666,2819,5657 } },
{ "Samsung NX3300", 0, 0,
{ 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } },
{ "Samsung NX3000", 0, 0,
{ 8060,-2933,-761,-4504,12890,1762,-630,1489,5227 } },
{ "Samsung NX30", 0, 0, /* used for NX30/NX300/NX300M */
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2000", 0, 0,
{ 7557,-2522,-739,-4679,12949,1894,-840,1777,5311 } },
{ "Samsung NX2", 0, 0xfff, /* used for NX20/NX200/NX210 */
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1000", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX1100", 0, 0,
{ 6933,-2268,-753,-4921,13387,1647,-803,1641,6096 } },
{ "Samsung NX11", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX10", 0, 0, /* used for NX10/NX100 */
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX500", 0, 0,
{ 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } },
{ "Samsung NX5", 0, 0,
{ 10332,-3234,-1168,-6111,14639,1520,-1352,2647,8331 } },
{ "Samsung NX1", 0, 0,
{ 10686,-4042,-1052,-3595,13238,276,-464,1259,5931 } },
{ "Samsung NXF1", 0, 0, /* added */
{ 5222,-1196,-550,-6540,14649,2009,-1666,2819,5657 } },
{ "Samsung WB2000", 0, 0xfff,
{ 12093,-3557,-1155,-1000,9534,1733,-22,1787,4576 } },
{ "Samsung GX10", 0, 0, /* added */ /* Pentax K10D */
{ 9679,-2965,-811,-8622,16514,2182,-975,883,9793 } },
{ "Samsung GX-10", 0, 0, /* added */ /* Pentax K10D */
{ 9679,-2965,-811,-8622,16514,2182,-975,883,9793 } },
{ "Samsung GX-1", 0, 0, /* used for GX-1L/GX-1S */
{ 10504,-2438,-1189,-8603,16207,2531,-1022,863,12242 } },
{ "Samsung GX20", 0, 0, /* copied from Pentax K20D */
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Samsung GX-20", 0, 0, /* added */ /* copied from Pentax K20D */
{ 9427,-2714,-868,-7493,16092,1373,-2199,3264,7180 } },
{ "Samsung S85", 0, 0, /* DJC */
{ 11885,-3968,-1473,-4214,12299,1916,-835,1655,5549 } },
// Foveon: LibRaw color data
{ "Sigma dp0 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma dp1 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma dp2 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma dp3 Quattro", 2047, 0,
{ 13801,-3390,-1016,5535,3802,877,1848,4245,3730 } },
{ "Sigma sd Quattro H", 256, 0,
{ 1295,108,-311, 256,828,-65,-28,750,254 } }, /* temp */
{ "Sigma sd Quattro", 2047, 0,
{ 1295,108,-311, 256,828,-65,-28,750,254 } }, /* temp */
{ "Sigma SD9", 15, 4095, /* updated */
{ 13564,-2537,-751,-5465,15154,194,-67,116,10425 } },
{ "Sigma SD10", 15, 16383, /* updated */
{ 6787,-1682,575,-3091,8357,160,217,-369,12314 } },
{ "Sigma SD14", 15, 16383, /* updated */
{ 13589,-2509,-739,-5440,15104,193,-61,105,10554 } },
{ "Sigma SD15", 15, 4095, /* updated */
{ 13556,-2537,-730,-5462,15144,195,-61,106,10577 } },
// Merills + SD1
{ "Sigma SD1", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP1 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP2 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
{ "Sigma DP3 Merrill", 31, 4095, /* LibRaw */
{ 5133,-1895,-353,4978,744,144,3837,3069,2777 } },
// Sigma DP (non-Merill Versions)
{ "Sigma DP1X", 0, 4095, /* updated */
{ 13704,-2452,-857,-5413,15073,186,-89,151,9820 } },
{ "Sigma DP1", 0, 4095, /* updated */
{ 12774,-2591,-394,-5333,14676,207,15,-21,12127 } },
{ "Sigma DP", 0, 4095, /* LibRaw */
// { 7401,-1169,-567,2059,3769,1510,664,3367,5328 } },
{ 13100,-3638,-847,6855,2369,580,2723,3218,3251 } },
{ "Sinar", 0, 0, /* DJC */
{ 16442,-2956,-2422,-2877,12128,750,-1136,6066,4559 } },
{ "Sony DSC-F828", 0, 0,
{ 7924,-1910,-777,-8226,15459,2998,-1517,2199,6818,-7242,11401,3481 } },
{ "Sony DSC-R1", 0, 0,
{ 8512,-2641,-694,-8042,15670,2526,-1821,2117,7414 } },
{ "Sony DSC-V3", 0, 0,
{ 7511,-2571,-692,-7894,15088,3060,-948,1111,8128 } },
{"Sony DSC-RX100M5", -800, 0,
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Sony DSC-RX100M", -800, 0, /* used for M2/M3/M4 */
{ 6596,-2079,-562,-4782,13016,1933,-970,1581,5181 } },
{ "Sony DSC-RX100", 0, 0,
{ 8651,-2754,-1057,-3464,12207,1373,-568,1398,4434 } },
{"Sony DSC-RX10M4", -800, 0,
{ 7699,-2566,-629,-2967,11270,1928,-378,1286,4807 } },
{ "Sony DSC-RX10",0, 0, /* same for M2/M3 */
{ 6679,-1825,-745,-5047,13256,1953,-1580,2422,5183 } },
{ "Sony DSC-RX1RM2", 0, 0,
{ 6629,-1900,-483,-4618,12349,2550,-622,1381,6514 } },
{ "Sony DSC-RX1R", 0, 0, /* updated */
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{ "Sony DSC-RX1", 0, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{"Sony DSC-RX0", -800, 0, /* temp */
{ 9396,-3507,-843,-2497,11111,1572,-343,1355,5089 } },
{ "Sony DSLR-A100", 0, 0xfeb,
{ 9437,-2811,-774,-8405,16215,2290,-710,596,7181 } },
{ "Sony DSLR-A290", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A2", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A300", 0, 0,
{ 9847,-3091,-928,-8485,16345,2225,-715,595,7103 } },
{ "Sony DSLR-A330", 0, 0,
{ 9847,-3091,-929,-8485,16346,2225,-714,595,7103 } },
{ "Sony DSLR-A350", 0, 0xffc,
{ 6038,-1484,-578,-9146,16746,2513,-875,746,7217 } },
{ "Sony DSLR-A380", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A390", 0, 0,
{ 6038,-1484,-579,-9145,16746,2512,-875,746,7218 } },
{ "Sony DSLR-A450", 0, 0xfeb,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A580", 0, 16596,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony DSLR-A500", 0, 16596,
{ 6046,-1127,-278,-5574,13076,2786,-691,1419,7625 } },
{ "Sony DSLR-A550", 0, 16596,
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A5", 0, 0xfeb, /* Is there any cameras not covered above? */
{ 4950,-580,-103,-5228,12542,3029,-709,1435,7371 } },
{ "Sony DSLR-A700", 0, 0,
{ 5775,-805,-359,-8574,16295,2391,-1943,2341,7249 } },
{ "Sony DSLR-A850", 0, 0,
{ 5413,-1162,-365,-5665,13098,2866,-608,1179,8440 } },
{ "Sony DSLR-A900", 0, 0,
{ 5209,-1072,-397,-8845,16120,2919,-1618,1803,8654 } },
{ "Sony ILCA-68", 0, 0,
{ 6435,-1903,-536,-4722,12449,2550,-663,1363,6517 } },
{ "Sony ILCA-77M2", 0, 0,
{ 5991,-1732,-443,-4100,11989,2381,-704,1467,5992 } },
{ "Sony ILCA-99M2", 0, 0,
{ 6660,-1918,-471,-4613,12398,2485,-649,1433,6447 } },
{ "Sony ILCE-9", 0, 0,
{ 6389,-1703,-378,-4562,12265,2587,-670,1489,6550 } },
{ "Sony ILCE-7M2", 0, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE-7SM2", 0, 0,
{ 5838,-1430,-246,-3497,11477,2297,-748,1885,5778 } },
{ "Sony ILCE-7S", 0, 0,
{ 5838,-1430,-246,-3497,11477,2297,-748,1885,5778 } },
{ "Sony ILCE-7RM3", 0, 0,
{ 6640,-1847,-503,-5238,13010,2474,-993,1673,6527 } },
{ "Sony ILCE-7RM2", 0, 0,
{ 6629,-1900,-483,-4618,12349,2550,-622,1381,6514 } },
{ "Sony ILCE-7R", 0, 0,
{ 4913,-541,-202,-6130,13513,2906,-1564,2151,7183 } },
{ "Sony ILCE-7", 0, 0,
{ 5271,-712,-347,-6153,13653,2763,-1601,2366,7242 } },
{ "Sony ILCE-6300", 0, 0,
{ 5973,-1695,-419,-3826,11797,2293,-639,1398,5789 } },
{ "Sony ILCE-6500", 0, 0,
{ 5973,-1695,-419,-3826,11797,2293,-639,1398,5789 } },
{ "Sony ILCE", 0, 0, /* 3000, 5000, 5100, 6000, and QX1 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony MODEL-NAME", 0, 0, /* added */
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony NEX-5N", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony NEX-5R", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-5T", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3N", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-3", 0, 0,
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-5", 0, 0,
{ 6549,-1550,-436,-4880,12435,2753,-854,1868,6976 } },
{ "Sony NEX-6", 0, 0,
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-7", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony NEX-VG30", 0, 0, /* added */
{ 6129,-1545,-418,-4930,12490,2743,-977,1693,6615 } },
{ "Sony NEX-VG900", 0, 0, /* added */
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
{ "Sony NEX", 0, 0, /* NEX-C3, NEX-F3, NEX-VG20 */
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A33", 0, 0,
{ 6069,-1221,-366,-5221,12779,2734,-1024,2066,6834 } },
{ "Sony SLT-A35", 0, 0,
{ 5986,-1618,-415,-4557,11820,3120,-681,1404,6971 } },
{ "Sony SLT-A37", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A55", 0, 0,
{ 5932,-1492,-411,-4813,12285,2856,-741,1524,6739 } },
{ "Sony SLT-A57", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A58", 0, 0,
{ 5991,-1456,-455,-4764,12135,2980,-707,1425,6701 } },
{ "Sony SLT-A65", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A77", 0, 0,
{ 5491,-1192,-363,-4951,12342,2948,-911,1722,7192 } },
{ "Sony SLT-A99", 0, 0,
{ 6344,-1612,-462,-4863,12477,2681,-865,1786,6899 } },
};
// clang-format on
double cam_xyz[4][3];
char name[130];
int i, j;
if (colors > 4 || colors < 1)
return;
int bl4 = (cblack[0] + cblack[1] + cblack[2] + cblack[3]) / 4, bl64 = 0;
if (cblack[4] * cblack[5] > 0)
{
for (unsigned c = 0; c < 4096 && c < cblack[4] * cblack[5]; c++)
bl64 += cblack[c + 6];
bl64 /= cblack[4] * cblack[5];
}
int rblack = black + bl4 + bl64;
sprintf(name, "%s %s", t_make, t_model);
for (i = 0; i < sizeof table / sizeof *table; i++)
if (!strncasecmp(name, table[i].prefix, strlen(table[i].prefix)))
{
if (!dng_version)
{
if (table[i].t_black > 0)
{
black = (ushort)table[i].t_black;
memset(cblack, 0, sizeof(cblack));
}
else if (table[i].t_black < 0 && rblack == 0)
{
black = (ushort)(-table[i].t_black);
memset(cblack, 0, sizeof(cblack));
}
if (table[i].t_maximum)
maximum = (ushort)table[i].t_maximum;
}
if (table[i].trans[0])
{
for (raw_color = j = 0; j < 12; j++)
#ifdef LIBRAW_LIBRARY_BUILD
if (internal_only)
imgdata.color.cam_xyz[0][j] = table[i].trans[j] / 10000.0;
else
imgdata.color.cam_xyz[0][j] =
#endif
((double *)cam_xyz)[j] = table[i].trans[j] / 10000.0;
#ifdef LIBRAW_LIBRARY_BUILD
if (!internal_only)
#endif
cam_xyz_coeff(rgb_cam, cam_xyz);
}
break;
}
}
void CLASS simple_coeff(int index)
{
  /*
    Install one of a few fixed camera-to-RGB conversion matrices and
    mark the color data as already converted (raw_color = 0).
    `index` selects a row of the table below; each row holds
    3 x colors coefficients in row-major order.
  */
  static const float table[][12] = {/* index 0 -- all Foveon cameras */
                                    {1.4032, -0.2231, -0.1016, -0.5263, 1.4816, 0.017, -0.0112, 0.0183, 0.9113},
                                    /* index 1 -- Kodak DC20 and DC25 */
                                    {2.25, 0.75, -1.75, -0.25, -0.25, 0.75, 0.75, -0.25, -0.25, -1.75, 0.75, 2.25},
                                    /* index 2 -- Logitech Fotoman Pixtura */
                                    {1.893, -0.418, -0.476, -0.495, 1.773, -0.278, -1.017, -0.655, 2.672},
                                    /* index 3 -- Nikon E880, E900, and E990 */
                                    {-1.936280, 1.800443, -1.448486, 2.584324, 1.405365, -0.524955, -0.289090, 0.408680,
                                     -1.204965, 1.082304, 2.941367, -1.818705}};
  raw_color = 0; /* rgb_cam is now authoritative; skip the usual matrix derivation */
  for (int row = 0; row < 3; row++)
    for (int col = 0; col < colors; col++)
      rgb_cam[row][col] = table[index][row * colors + col];
}
short CLASS guess_byte_order(int words)
{
  /*
    Guess whether the 16-bit samples in the stream are big- or
    little-endian.  Reads `words` 16-bit values from ifp and, for each
    one, compares it against the word read two positions earlier under
    both byte orders; the order yielding the smaller sum of squared
    first differences (the smoother-looking signal) is assumed correct.
    Returns 0x4d4d ("MM", big-endian) or 0x4949 ("II", little-endian).
    NOTE(review): fread results are unchecked — at EOF the comparison
    runs on stale buffer bytes, as in the original.
  */
  uchar test[4][2]; /* ring buffer holding the last 4 raw words */
  int t = 2, msb;
  double diff, sum[2] = {0, 0};
  fread(test[0], 2, 2, ifp); /* prime slots 0 and 1 */
  /* Fix: test "words-- > 0" instead of bare "words--".  The original
     truthiness test never terminates on its own when words < 2 (the
     counter goes negative and keeps decrementing until signed
     wraparound, which is undefined behavior). */
  for (words -= 2; words-- > 0;)
  {
    fread(test[t], 2, 1, ifp);
    for (msb = 0; msb < 2; msb++)
    {
      /* msb==0: first byte is the high byte; msb==1: second byte is */
      diff = (test[t ^ 2][msb] << 8 | test[t ^ 2][!msb]) - (test[t][msb] << 8 | test[t][!msb]);
      sum[msb] += diff * diff;
    }
    t = (t + 1) & 3; /* advance ring buffer; t^2 is the word two reads back */
  }
  return sum[0] < sum[1] ? 0x4d4d : 0x4949;
}
float CLASS find_green(int bps, int bite, int off0, int off1)
{
  /*
    Estimate the log-ratio of the two green channels by decoding one
    row of raw samples from each of two file offsets (off0/off1) and
    summing absolute diagonal differences between them.
    bps  - bits per sample; bite - bits fetched per refill step
    off0 - file offset of the first row; off1 - of the second row
    Returns 100 * log(sum0/sum1), or 0 if the row is too wide for the
    on-stack decode buffer.
  */
  UINT64 bitbuf = 0;
  int vbits, col, i, c;
  ushort img[2][2064];
  double sum[] = {0, 0};
  if(width > 2064) return 0.f; // too wide
  FORC(2)
  {
    fseek(ifp, c ? off1 : off0, SEEK_SET);
    /* bit-packed read: keep at least bps valid bits in bitbuf, refilling
       `bite` bits at a time from the stream */
    for (vbits = col = 0; col < width; col++)
    {
      for (vbits -= bps; vbits < 0; vbits += bite)
      {
        bitbuf <<= bite;
        for (i = 0; i < bite; i += 8)
          /* Fix: cast BEFORE the shift.  The original shifted the signed
             int result of fgetc(); with i == 24 and a byte >= 0x80 that
             overflows into the sign bit — undefined behavior. */
          bitbuf |= (unsigned)fgetc(ifp) << i;
      }
      /* extract the bps-bit sample ending vbits bits from the bottom */
      img[c][col] = bitbuf << (64 - bps - vbits) >> (64 - bps);
    }
  }
  /* compare each pixel with its diagonal neighbor in the other row;
     even/odd columns feed the two green-channel accumulators */
  FORC(width - 1)
  {
    sum[c & 1] += ABS(img[0][c] - img[1][c + 1]);
    sum[~c & 1] += ABS(img[1][c] - img[0][c + 1]);
  }
  return 100 * log(sum[0] / sum[1]);
}
#ifdef LIBRAW_LIBRARY_BUILD
static void remove_trailing_spaces(char *string, size_t len)
{
  /*
    Force-terminate `string` at its buffer end and strip any trailing
    whitespace in place.  `len` is the total buffer size, not the
    string length.  Buffers of size < 3 are only NUL-terminated.
  */
  if (len < 1)
    return; // not needed, b/c sizeof of make/model is 64
  string[len - 1] = 0;
  if (len < 3)
    return; // also not needed
  size_t end = strnlen(string, len - 1);
  while (end > 0 && isspace((unsigned char)string[end - 1]))
    string[--end] = 0;
}
void CLASS initdata()
{
  /*
    Reset all per-file decoding state to defaults/"unknown" so a new
    file can be parsed from a clean slate.  Mirrors the reset sequence
    at the top of identify().
  */
  tiff_flip = UINT_MAX; /* unknown */
  flip = UINT_MAX;
  filters = UINT_MAX;
  raw_height = 0;
  raw_width = 0;
  fuji_width = 0;
  fuji_layout = 0;
  cr2_slice[0] = 0;
  maximum = 0;
  height = 0;
  width = 0;
  top_margin = 0;
  left_margin = 0;
  cdesc[0] = 0;
  desc[0] = 0;
  artist[0] = 0;
  make[0] = 0;
  model[0] = 0;
  model2[0] = 0;
  iso_speed = 0;
  shutter = 0;
  aperture = 0;
  focal_len = 0;
  unique_id = 0;
  tiff_nifds = 0;
  memset(tiff_ifd, 0, sizeof tiff_ifd);
  for (int n = 0; n < LIBRAW_IFD_MAXCOUNT; n++)
  {
    /* 0xffff marks the DNG illuminant as "not set" */
    tiff_ifd[n].dng_color[0].illuminant = 0xffff;
    tiff_ifd[n].dng_color[1].illuminant = 0xffff;
    for (int ch = 0; ch < 4; ch++)
      tiff_ifd[n].dng_levels.analogbalance[ch] = 1.0f;
  }
  /* identity tone curve until the file says otherwise */
  for (int v = 0; v < 0x10000; v++)
    curve[v] = v;
  memset(gpsdata, 0, sizeof gpsdata);
  memset(cblack, 0, sizeof cblack);
  memset(white, 0, sizeof white);
  memset(mask, 0, sizeof mask);
  thumb_offset = 0;
  thumb_length = 0;
  thumb_width = 0;
  thumb_height = 0;
  load_raw = 0;
  thumb_load_raw = 0;
  write_thumb = &CLASS jpeg_thumb;
  data_offset = 0;
  meta_offset = 0;
  meta_length = 0;
  tiff_bps = 0;
  tiff_compress = 0;
  kodak_cbpp = 0;
  zero_after_ff = 0;
  dng_version = 0;
  load_flags = 0;
  timestamp = 0;
  shot_order = 0;
  tiff_samples = 0;
  black = 0;
  is_foveon = 0;
  mix_green = 0;
  profile_length = 0;
  data_error = 0;
  zero_is_bad = 0;
  pixel_aspect = 1;
  is_raw = 1;
  raw_color = 1;
  tile_width = 0;
  tile_length = 0;
}
#endif
/*
Identify which camera created this file, and set global variables
accordingly.
*/
void CLASS identify()
{
static const short pana[][6] = {
{3130, 1743, 4, 0, -6, 0}, {3130, 2055, 4, 0, -6, 0}, {3130, 2319, 4, 0, -6, 0},
{3170, 2103, 18, 0, -42, 20}, {3170, 2367, 18, 13, -42, -21}, {3177, 2367, 0, 0, -1, 0},
{3304, 2458, 0, 0, -1, 0}, {3330, 2463, 9, 0, -5, 0}, {3330, 2479, 9, 0, -17, 4},
{3370, 1899, 15, 0, -44, 20}, {3370, 2235, 15, 0, -44, 20}, {3370, 2511, 15, 10, -44, -21},
{3690, 2751, 3, 0, -8, -3}, {3710, 2751, 0, 0, -3, 0}, {3724, 2450, 0, 0, 0, -2},
{3770, 2487, 17, 0, -44, 19}, {3770, 2799, 17, 15, -44, -19}, {3880, 2170, 6, 0, -6, 0},
{4060, 3018, 0, 0, 0, -2}, {4290, 2391, 3, 0, -8, -1}, {4330, 2439, 17, 15, -44, -19},
{4508, 2962, 0, 0, -3, -4}, {4508, 3330, 0, 0, -3, -6},
};
static const ushort canon[][11] = {
{1944, 1416, 0, 0, 48, 0},
{2144, 1560, 4, 8, 52, 2, 0, 0, 0, 25},
{2224, 1456, 48, 6, 0, 2},
{2376, 1728, 12, 6, 52, 2},
{2672, 1968, 12, 6, 44, 2},
{3152, 2068, 64, 12, 0, 0, 16},
{3160, 2344, 44, 12, 4, 4},
{3344, 2484, 4, 6, 52, 6},
{3516, 2328, 42, 14, 0, 0},
{3596, 2360, 74, 12, 0, 0},
{3744, 2784, 52, 12, 8, 12},
{3944, 2622, 30, 18, 6, 2},
{3948, 2622, 42, 18, 0, 2},
{3984, 2622, 76, 20, 0, 2, 14},
{4104, 3048, 48, 12, 24, 12},
{4116, 2178, 4, 2, 0, 0},
{4152, 2772, 192, 12, 0, 0},
{4160, 3124, 104, 11, 8, 65},
{4176, 3062, 96, 17, 8, 0, 0, 16, 0, 7, 0x49},
{4192, 3062, 96, 17, 24, 0, 0, 16, 0, 0, 0x49},
{4312, 2876, 22, 18, 0, 2},
{4352, 2874, 62, 18, 0, 0},
{4476, 2954, 90, 34, 0, 0},
{4480, 3348, 12, 10, 36, 12, 0, 0, 0, 18, 0x49},
{4480, 3366, 80, 50, 0, 0},
{4496, 3366, 80, 50, 12, 0},
{4768, 3516, 96, 16, 0, 0, 0, 16},
{4832, 3204, 62, 26, 0, 0},
{4832, 3228, 62, 51, 0, 0},
{5108, 3349, 98, 13, 0, 0},
{5120, 3318, 142, 45, 62, 0},
{5280, 3528, 72, 52, 0, 0}, /* EOS M */
{5344, 3516, 142, 51, 0, 0},
{5344, 3584, 126, 100, 0, 2},
{5360, 3516, 158, 51, 0, 0},
{5568, 3708, 72, 38, 0, 0},
{5632, 3710, 96, 17, 0, 0, 0, 16, 0, 0, 0x49},
{5712, 3774, 62, 20, 10, 2},
{5792, 3804, 158, 51, 0, 0},
{5920, 3950, 122, 80, 2, 0},
{6096, 4056, 72, 34, 0, 0}, /* EOS M3 */
{6288, 4056, 266, 36, 0, 0}, /* EOS 80D */
{6384, 4224, 120, 44, 0, 0}, /* 6D II */
{6880, 4544, 136, 42, 0, 0}, /* EOS 5D4 */
{8896, 5920, 160, 64, 0, 0},
};
static const struct
{
ushort id;
char t_model[20];
} unique[] =
{
{0x001, "EOS-1D"},
{0x167, "EOS-1DS"},
{0x168, "EOS 10D"},
{0x169, "EOS-1D Mark III"},
{0x170, "EOS 300D"},
{0x174, "EOS-1D Mark II"},
{0x175, "EOS 20D"},
{0x176, "EOS 450D"},
{0x188, "EOS-1Ds Mark II"},
{0x189, "EOS 350D"},
{0x190, "EOS 40D"},
{0x213, "EOS 5D"},
{0x215, "EOS-1Ds Mark III"},
{0x218, "EOS 5D Mark II"},
{0x232, "EOS-1D Mark II N"},
{0x234, "EOS 30D"},
{0x236, "EOS 400D"},
{0x250, "EOS 7D"},
{0x252, "EOS 500D"},
{0x254, "EOS 1000D"},
{0x261, "EOS 50D"},
{0x269, "EOS-1D X"},
{0x270, "EOS 550D"},
{0x281, "EOS-1D Mark IV"},
{0x285, "EOS 5D Mark III"},
{0x286, "EOS 600D"},
{0x287, "EOS 60D"},
{0x288, "EOS 1100D"},
{0x289, "EOS 7D Mark II"},
{0x301, "EOS 650D"},
{0x302, "EOS 6D"},
{0x324, "EOS-1D C"},
{0x325, "EOS 70D"},
{0x326, "EOS 700D"},
{0x327, "EOS 1200D"},
{0x328, "EOS-1D X Mark II"},
{0x331, "EOS M"},
{0x335, "EOS M2"},
{0x374, "EOS M3"}, /* temp */
{0x384, "EOS M10"}, /* temp */
{0x394, "EOS M5"}, /* temp */
{0x398, "EOS M100"}, /* temp */
{0x346, "EOS 100D"},
{0x347, "EOS 760D"},
{0x349, "EOS 5D Mark IV"},
{0x350, "EOS 80D"},
{0x382, "EOS 5DS"},
{0x393, "EOS 750D"},
{0x401, "EOS 5DS R"},
{0x404, "EOS 1300D"},
{0x405, "EOS 800D"},
{0x406, "EOS 6D Mark II"},
{0x407, "EOS M6"},
{0x408, "EOS 77D"},
{0x417, "EOS 200D"},
},
sonique[] = {
{0x002, "DSC-R1"}, {0x100, "DSLR-A100"}, {0x101, "DSLR-A900"}, {0x102, "DSLR-A700"},
{0x103, "DSLR-A200"}, {0x104, "DSLR-A350"}, {0x105, "DSLR-A300"}, {0x106, "DSLR-A900"},
{0x107, "DSLR-A380"}, {0x108, "DSLR-A330"}, {0x109, "DSLR-A230"}, {0x10a, "DSLR-A290"},
{0x10d, "DSLR-A850"}, {0x10e, "DSLR-A850"}, {0x111, "DSLR-A550"}, {0x112, "DSLR-A500"},
{0x113, "DSLR-A450"}, {0x116, "NEX-5"}, {0x117, "NEX-3"}, {0x118, "SLT-A33"},
{0x119, "SLT-A55V"}, {0x11a, "DSLR-A560"}, {0x11b, "DSLR-A580"}, {0x11c, "NEX-C3"},
{0x11d, "SLT-A35"}, {0x11e, "SLT-A65V"}, {0x11f, "SLT-A77V"}, {0x120, "NEX-5N"},
{0x121, "NEX-7"}, {0x122, "NEX-VG20E"}, {0x123, "SLT-A37"}, {0x124, "SLT-A57"},
{0x125, "NEX-F3"}, {0x126, "SLT-A99V"}, {0x127, "NEX-6"}, {0x128, "NEX-5R"},
{0x129, "DSC-RX100"}, {0x12a, "DSC-RX1"}, {0x12b, "NEX-VG900"}, {0x12c, "NEX-VG30E"},
{0x12e, "ILCE-3000"}, {0x12f, "SLT-A58"}, {0x131, "NEX-3N"}, {0x132, "ILCE-7"},
{0x133, "NEX-5T"}, {0x134, "DSC-RX100M2"}, {0x135, "DSC-RX10"}, {0x136, "DSC-RX1R"},
{0x137, "ILCE-7R"}, {0x138, "ILCE-6000"}, {0x139, "ILCE-5000"}, {0x13d, "DSC-RX100M3"},
{0x13e, "ILCE-7S"}, {0x13f, "ILCA-77M2"}, {0x153, "ILCE-5100"}, {0x154, "ILCE-7M2"},
{0x155, "DSC-RX100M4"}, {0x156, "DSC-RX10M2"}, {0x158, "DSC-RX1RM2"}, {0x15a, "ILCE-QX1"},
{0x15b, "ILCE-7RM2"}, {0x15e, "ILCE-7SM2"}, {0x161, "ILCA-68"}, {0x162, "ILCA-99M2"},
{0x163, "DSC-RX10M3"}, {0x164, "DSC-RX100M5"}, {0x165, "ILCE-6300"}, {0x166, "ILCE-9"},
{0x168, "ILCE-6500"}, {0x16a, "ILCE-7RM3"}, {0x16c, "DSC-RX0"}, {0x16d, "DSC-RX10M4"},
};
#ifdef LIBRAW_LIBRARY_BUILD
static const libraw_custom_camera_t const_table[]
#else
static const struct
{
unsigned fsize;
ushort rw, rh;
uchar lm, tm, rm, bm, lf, cf, max, flags;
char t_make[10], t_model[20];
ushort offset;
} table[]
#endif
= {
{786432, 1024, 768, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-080C"},
{1447680, 1392, 1040, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-145C"},
{1920000, 1600, 1200, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-201C"},
{5067304, 2588, 1958, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-510C"},
{5067316, 2588, 1958, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-510C", 12},
{10134608, 2588, 1958, 0, 0, 0, 0, 9, 0x94, 0, 0, "AVT", "F-510C"},
{10134620, 2588, 1958, 0, 0, 0, 0, 9, 0x94, 0, 0, "AVT", "F-510C", 12},
{16157136, 3272, 2469, 0, 0, 0, 0, 9, 0x94, 0, 0, "AVT", "F-810C"},
{15980544, 3264, 2448, 0, 0, 0, 0, 8, 0x61, 0, 1, "AgfaPhoto", "DC-833m"},
{9631728, 2532, 1902, 0, 0, 0, 0, 96, 0x61, 0, 0, "Alcatel", "5035D"},
{31850496, 4608, 3456, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", "GIT2 4:3"},
{23887872, 4608, 2592, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", "GIT2 16:9"},
{32257024, 4624, 3488, 8, 2, 16, 2, 0, 0x94, 0, 0, "GITUP", "GIT2P 4:3"},
// Android Raw dumps id start
// File Size in bytes Horizontal Res Vertical Flag then bayer order eg 0x16 bbgr 0x94 rggb
{1540857, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "Samsung", "S3"},
{2658304, 1212, 1096, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3FrontMipi"},
{2842624, 1296, 1096, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3FrontQCOM"},
{2969600, 1976, 1200, 0, 0, 0, 0, 1, 0x16, 0, 0, "Xiaomi", "MI3wMipi"},
{3170304, 1976, 1200, 0, 0, 0, 0, 1, 0x16, 0, 0, "Xiaomi", "MI3wQCOM"},
{3763584, 1584, 1184, 0, 0, 0, 0, 96, 0x61, 0, 0, "I_Mobile", "I_StyleQ6"},
{5107712, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "UltraPixel1"},
{5382640, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "UltraPixel2"},
{5664912, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "4688"},
{5664912, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "4688"},
{5364240, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "4688"},
{6299648, 2592, 1944, 0, 0, 0, 0, 1, 0x16, 0, 0, "OmniVisi", "OV5648"},
{6721536, 2592, 1944, 0, 0, 0, 0, 0, 0x16, 0, 0, "OmniVisi", "OV56482"},
{6746112, 2592, 1944, 0, 0, 0, 0, 0, 0x16, 0, 0, "HTC", "OneSV"},
{9631728, 2532, 1902, 0, 0, 0, 0, 96, 0x61, 0, 0, "Sony", "5mp"},
{9830400, 2560, 1920, 0, 0, 0, 0, 96, 0x61, 0, 0, "NGM", "ForwardArt"},
{10186752, 3264, 2448, 0, 0, 0, 0, 1, 0x94, 0, 0, "Sony", "IMX219-mipi 8mp"},
{10223360, 2608, 1944, 0, 0, 0, 0, 96, 0x16, 0, 0, "Sony", "IMX"},
{10782464, 3282, 2448, 0, 0, 0, 0, 0, 0x16, 0, 0, "HTC", "MyTouch4GSlide"},
{10788864, 3282, 2448, 0, 0, 0, 0, 0, 0x16, 0, 0, "Xperia", "L"},
{15967488, 3264, 2446, 0, 0, 0, 0, 96, 0x16, 0, 0, "OmniVison", "OV8850"},
{16224256, 4208, 3082, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3MipiL"},
{16424960, 4208, 3120, 0, 0, 0, 0, 1, 0x16, 0, 0, "IMX135", "MipiL"},
{17326080, 4164, 3120, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3LQCom"},
{17522688, 4212, 3120, 0, 0, 0, 0, 0, 0x16, 0, 0, "Sony", "IMX135-QCOM"},
{19906560, 4608, 3456, 0, 0, 0, 0, 1, 0x16, 0, 0, "Gione", "E7mipi"},
{19976192, 5312, 2988, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G4"},
{20389888, 4632, 3480, 0, 0, 0, 0, 1, 0x16, 0, 0, "Xiaomi", "RedmiNote3Pro"},
{20500480, 4656, 3496, 0, 0, 0, 0, 1, 0x94, 0, 0, "Sony", "IMX298-mipi 16mp"},
{21233664, 4608, 3456, 0, 0, 0, 0, 1, 0x16, 0, 0, "Gione", "E7qcom"},
{26023936, 4192, 3104, 0, 0, 0, 0, 96, 0x94, 0, 0, "THL", "5000"},
{26257920, 4208, 3120, 0, 0, 0, 0, 96, 0x94, 0, 0, "Sony", "IMX214"},
{26357760, 4224, 3120, 0, 0, 0, 0, 96, 0x61, 0, 0, "OV", "13860"},
{41312256, 5248, 3936, 0, 0, 0, 0, 96, 0x61, 0, 0, "Meizu", "MX4"},
{42923008, 5344, 4016, 0, 0, 0, 0, 96, 0x61, 0, 0, "Sony", "IMX230"},
// Android Raw dumps id end
{20137344, 3664, 2748, 0, 0, 0, 0, 0x40, 0x49, 0, 0, "Aptina", "MT9J003", 0xffff},
{2868726, 1384, 1036, 0, 0, 0, 0, 64, 0x49, 0, 8, "Baumer", "TXG14", 1078},
{5298000, 2400, 1766, 12, 12, 44, 2, 40, 0x94, 0, 2, "Canon", "PowerShot SD300"},
{6553440, 2664, 1968, 4, 4, 44, 4, 40, 0x94, 0, 2, "Canon", "PowerShot A460"},
{6573120, 2672, 1968, 12, 8, 44, 0, 40, 0x94, 0, 2, "Canon", "PowerShot A610"},
{6653280, 2672, 1992, 10, 6, 42, 2, 40, 0x94, 0, 2, "Canon", "PowerShot A530"},
{7710960, 2888, 2136, 44, 8, 4, 0, 40, 0x94, 0, 2, "Canon", "PowerShot S3 IS"},
{9219600, 3152, 2340, 36, 12, 4, 0, 40, 0x94, 0, 2, "Canon", "PowerShot A620"},
{9243240, 3152, 2346, 12, 7, 44, 13, 40, 0x49, 0, 2, "Canon", "PowerShot A470"},
{10341600, 3336, 2480, 6, 5, 32, 3, 40, 0x94, 0, 2, "Canon", "PowerShot A720 IS"},
{10383120, 3344, 2484, 12, 6, 44, 6, 40, 0x94, 0, 2, "Canon", "PowerShot A630"},
{12945240, 3736, 2772, 12, 6, 52, 6, 40, 0x94, 0, 2, "Canon", "PowerShot A640"},
{15636240, 4104, 3048, 48, 12, 24, 12, 40, 0x94, 0, 2, "Canon", "PowerShot A650"},
{15467760, 3720, 2772, 6, 12, 30, 0, 40, 0x94, 0, 2, "Canon", "PowerShot SX110 IS"},
{15534576, 3728, 2778, 12, 9, 44, 9, 40, 0x94, 0, 2, "Canon", "PowerShot SX120 IS"},
{18653760, 4080, 3048, 24, 12, 24, 12, 40, 0x94, 0, 2, "Canon", "PowerShot SX20 IS"},
{18763488, 4104, 3048, 10, 22, 82, 22, 8, 0x49, 0, 0, "Canon", "PowerShot D10"},
{19131120, 4168, 3060, 92, 16, 4, 1, 40, 0x94, 0, 2, "Canon", "PowerShot SX220 HS"},
{21936096, 4464, 3276, 25, 10, 73, 12, 40, 0x16, 0, 2, "Canon", "PowerShot SX30 IS"},
{24724224, 4704, 3504, 8, 16, 56, 8, 40, 0x49, 0, 2, "Canon", "PowerShot A3300 IS"},
{30858240, 5248, 3920, 8, 16, 56, 16, 40, 0x94, 0, 2, "Canon", "IXUS 160"},
{1976352, 1632, 1211, 0, 2, 0, 1, 0, 0x94, 0, 1, "Casio", "QV-2000UX"},
{3217760, 2080, 1547, 0, 0, 10, 1, 0, 0x94, 0, 1, "Casio", "QV-3*00EX"},
{6218368, 2585, 1924, 0, 0, 9, 0, 0, 0x94, 0, 1, "Casio", "QV-5700"},
{7816704, 2867, 2181, 0, 0, 34, 36, 0, 0x16, 0, 1, "Casio", "EX-Z60"},
{2937856, 1621, 1208, 0, 0, 1, 0, 0, 0x94, 7, 13, "Casio", "EX-S20"},
{4948608, 2090, 1578, 0, 0, 32, 34, 0, 0x94, 7, 1, "Casio", "EX-S100"},
{6054400, 2346, 1720, 2, 0, 32, 0, 0, 0x94, 7, 1, "Casio", "QV-R41"},
{7426656, 2568, 1928, 0, 0, 0, 0, 0, 0x94, 0, 1, "Casio", "EX-P505"},
{7530816, 2602, 1929, 0, 0, 22, 0, 0, 0x94, 7, 1, "Casio", "QV-R51"},
{7542528, 2602, 1932, 0, 0, 32, 0, 0, 0x94, 7, 1, "Casio", "EX-Z50"},
{7562048, 2602, 1937, 0, 0, 25, 0, 0, 0x16, 7, 1, "Casio", "EX-Z500"},
{7753344, 2602, 1986, 0, 0, 32, 26, 0, 0x94, 7, 1, "Casio", "EX-Z55"},
{9313536, 2858, 2172, 0, 0, 14, 30, 0, 0x94, 7, 1, "Casio", "EX-P600"},
{10834368, 3114, 2319, 0, 0, 27, 0, 0, 0x94, 0, 1, "Casio", "EX-Z750"},
{10843712, 3114, 2321, 0, 0, 25, 0, 0, 0x94, 0, 1, "Casio", "EX-Z75"},
{10979200, 3114, 2350, 0, 0, 32, 32, 0, 0x94, 7, 1, "Casio", "EX-P700"},
{12310144, 3285, 2498, 0, 0, 6, 30, 0, 0x94, 0, 1, "Casio", "EX-Z850"},
{12489984, 3328, 2502, 0, 0, 47, 35, 0, 0x94, 0, 1, "Casio", "EX-Z8"},
{15499264, 3754, 2752, 0, 0, 82, 0, 0, 0x94, 0, 1, "Casio", "EX-Z1050"},
{18702336, 4096, 3044, 0, 0, 24, 0, 80, 0x94, 7, 1, "Casio", "EX-ZR100"},
{7684000, 2260, 1700, 0, 0, 0, 0, 13, 0x94, 0, 1, "Casio", "QV-4000"},
{787456, 1024, 769, 0, 1, 0, 0, 0, 0x49, 0, 0, "Creative", "PC-CAM 600"},
{28829184, 4384, 3288, 0, 0, 0, 0, 36, 0x61, 0, 0, "DJI"},
{15151104, 4608, 3288, 0, 0, 0, 0, 0, 0x94, 0, 0, "Matrix"},
{3840000, 1600, 1200, 0, 0, 0, 0, 65, 0x49, 0, 0, "Foculus", "531C"},
{307200, 640, 480, 0, 0, 0, 0, 0, 0x94, 0, 0, "Generic"},
{62464, 256, 244, 1, 1, 6, 1, 0, 0x8d, 0, 0, "Kodak", "DC20"},
{124928, 512, 244, 1, 1, 10, 1, 0, 0x8d, 0, 0, "Kodak", "DC20"},
{1652736, 1536, 1076, 0, 52, 0, 0, 0, 0x61, 0, 0, "Kodak", "DCS200"},
{4159302, 2338, 1779, 1, 33, 1, 2, 0, 0x94, 0, 0, "Kodak", "C330"},
{4162462, 2338, 1779, 1, 33, 1, 2, 0, 0x94, 0, 0, "Kodak", "C330", 3160},
{2247168, 1232, 912, 0, 0, 16, 0, 0, 0x00, 0, 0, "Kodak", "C330"},
{3370752, 1232, 912, 0, 0, 16, 0, 0, 0x00, 0, 0, "Kodak", "C330"},
{6163328, 2864, 2152, 0, 0, 0, 0, 0, 0x94, 0, 0, "Kodak", "C603"},
{6166488, 2864, 2152, 0, 0, 0, 0, 0, 0x94, 0, 0, "Kodak", "C603", 3160},
{460800, 640, 480, 0, 0, 0, 0, 0, 0x00, 0, 0, "Kodak", "C603"},
{9116448, 2848, 2134, 0, 0, 0, 0, 0, 0x00, 0, 0, "Kodak", "C603"},
{12241200, 4040, 3030, 2, 0, 0, 13, 0, 0x49, 0, 0, "Kodak", "12MP"},
{12272756, 4040, 3030, 2, 0, 0, 13, 0, 0x49, 0, 0, "Kodak", "12MP", 31556},
{18000000, 4000, 3000, 0, 0, 0, 0, 0, 0x00, 0, 0, "Kodak", "12MP"},
{614400, 640, 480, 0, 3, 0, 0, 64, 0x94, 0, 0, "Kodak", "KAI-0340"},
{15360000, 3200, 2400, 0, 0, 0, 0, 96, 0x16, 0, 0, "Lenovo", "A820"},
{3884928, 1608, 1207, 0, 0, 0, 0, 96, 0x16, 0, 0, "Micron", "2010", 3212},
{1138688, 1534, 986, 0, 0, 0, 0, 0, 0x61, 0, 0, "Minolta", "RD175", 513},
{1581060, 1305, 969, 0, 0, 18, 6, 6, 0x1e, 4, 1, "Nikon", "E900"},
{2465792, 1638, 1204, 0, 0, 22, 1, 6, 0x4b, 5, 1, "Nikon", "E950"},
{2940928, 1616, 1213, 0, 0, 0, 7, 30, 0x94, 0, 1, "Nikon", "E2100"},
{4771840, 2064, 1541, 0, 0, 0, 1, 6, 0xe1, 0, 1, "Nikon", "E990"},
{4775936, 2064, 1542, 0, 0, 0, 0, 30, 0x94, 0, 1, "Nikon", "E3700"},
{5865472, 2288, 1709, 0, 0, 0, 1, 6, 0xb4, 0, 1, "Nikon", "E4500"},
{5869568, 2288, 1710, 0, 0, 0, 0, 6, 0x16, 0, 1, "Nikon", "E4300"},
{7438336, 2576, 1925, 0, 0, 0, 1, 6, 0xb4, 0, 1, "Nikon", "E5000"},
{8998912, 2832, 2118, 0, 0, 0, 0, 30, 0x94, 7, 1, "Nikon", "COOLPIX S6"},
{5939200, 2304, 1718, 0, 0, 0, 0, 30, 0x16, 0, 0, "Olympus", "C770UZ"},
{3178560, 2064, 1540, 0, 0, 0, 0, 0, 0x94, 0, 1, "Pentax", "Optio S"},
{4841984, 2090, 1544, 0, 0, 22, 0, 0, 0x94, 7, 1, "Pentax", "Optio S"},
{6114240, 2346, 1737, 0, 0, 22, 0, 0, 0x94, 7, 1, "Pentax", "Optio S4"},
{10702848, 3072, 2322, 0, 0, 0, 21, 30, 0x94, 0, 1, "Pentax", "Optio 750Z"},
{4147200, 1920, 1080, 0, 0, 0, 0, 0, 0x49, 0, 0, "Photron", "BC2-HD"},
{4151666, 1920, 1080, 0, 0, 0, 0, 0, 0x49, 0, 0, "Photron", "BC2-HD", 8},
{13248000, 2208, 3000, 0, 0, 0, 0, 13, 0x61, 0, 0, "Pixelink", "A782"},
{6291456, 2048, 1536, 0, 0, 0, 0, 96, 0x61, 0, 0, "RoverShot", "3320AF"},
{311696, 644, 484, 0, 0, 0, 0, 0, 0x16, 0, 8, "ST Micro", "STV680 VGA"},
{16098048, 3288, 2448, 0, 0, 24, 0, 9, 0x94, 0, 1, "Samsung", "S85"},
{16215552, 3312, 2448, 0, 0, 48, 0, 9, 0x94, 0, 1, "Samsung", "S85"},
{20487168, 3648, 2808, 0, 0, 0, 0, 13, 0x94, 5, 1, "Samsung", "WB550"},
{24000000, 4000, 3000, 0, 0, 0, 0, 13, 0x94, 5, 1, "Samsung", "WB550"},
{12582980, 3072, 2048, 0, 0, 0, 0, 33, 0x61, 0, 0, "Sinar", "", 68},
{33292868, 4080, 4080, 0, 0, 0, 0, 33, 0x61, 0, 0, "Sinar", "", 68},
{44390468, 4080, 5440, 0, 0, 0, 0, 33, 0x61, 0, 0, "Sinar", "", 68},
{1409024, 1376, 1024, 0, 0, 1, 0, 0, 0x49, 0, 0, "Sony", "XCD-SX910CR"},
{2818048, 1376, 1024, 0, 0, 1, 0, 97, 0x49, 0, 0, "Sony", "XCD-SX910CR"},
};
#ifdef LIBRAW_LIBRARY_BUILD
libraw_custom_camera_t table[64 + sizeof(const_table) / sizeof(const_table[0])];
#endif
static const char *corp[] = {"AgfaPhoto", "Canon", "Casio", "Epson", "Fujifilm", "Mamiya", "Minolta",
"Motorola", "Kodak", "Konica", "Leica", "Nikon", "Nokia", "Olympus",
"Pentax", "Phase One", "Ricoh", "Samsung", "Sigma", "Sinar", "Sony"};
#ifdef LIBRAW_LIBRARY_BUILD
char head[64], *cp;
#else
char head[32], *cp;
#endif
int hlen, flen, fsize, zero_fsize = 1, i, c;
struct jhead jh;
#ifdef LIBRAW_LIBRARY_BUILD
unsigned camera_count = parse_custom_cameras(64, table, imgdata.params.custom_camera_strings);
for (int q = 0; q < sizeof(const_table) / sizeof(const_table[0]); q++)
memmove(&table[q + camera_count], &const_table[q], sizeof(const_table[0]));
camera_count += sizeof(const_table) / sizeof(const_table[0]);
#endif
tiff_flip = flip = filters = UINT_MAX; /* unknown */
raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0;
maximum = height = width = top_margin = left_margin = 0;
cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0;
iso_speed = shutter = aperture = focal_len = unique_id = 0;
tiff_nifds = 0;
memset(tiff_ifd, 0, sizeof tiff_ifd);
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.other.CameraTemperature = imgdata.other.SensorTemperature = imgdata.other.SensorTemperature2 =
imgdata.other.LensTemperature = imgdata.other.AmbientTemperature = imgdata.other.BatteryTemperature =
imgdata.other.exifAmbientTemperature = -1000.0f;
for (i = 0; i < LIBRAW_IFD_MAXCOUNT; i++)
{
tiff_ifd[i].dng_color[0].illuminant = tiff_ifd[i].dng_color[1].illuminant = 0xffff;
for (int c = 0; c < 4; c++)
tiff_ifd[i].dng_levels.analogbalance[c] = 1.0f;
}
#endif
memset(gpsdata, 0, sizeof gpsdata);
memset(cblack, 0, sizeof cblack);
memset(white, 0, sizeof white);
memset(mask, 0, sizeof mask);
thumb_offset = thumb_length = thumb_width = thumb_height = 0;
load_raw = thumb_load_raw = 0;
write_thumb = &CLASS jpeg_thumb;
data_offset = meta_offset = meta_length = tiff_bps = tiff_compress = 0;
kodak_cbpp = zero_after_ff = dng_version = load_flags = 0;
timestamp = shot_order = tiff_samples = black = is_foveon = 0;
mix_green = profile_length = data_error = zero_is_bad = 0;
pixel_aspect = is_raw = raw_color = 1;
tile_width = tile_length = 0;
for (i = 0; i < 4; i++)
{
cam_mul[i] = i == 1;
pre_mul[i] = i < 3;
FORC3 cmatrix[c][i] = 0;
FORC3 rgb_cam[c][i] = c == i;
}
colors = 3;
for (i = 0; i < 0x10000; i++)
curve[i] = i;
order = get2();
hlen = get4();
fseek(ifp, 0, SEEK_SET);
#ifdef LIBRAW_LIBRARY_BUILD
if(fread(head, 1, 64, ifp) < 64) throw LIBRAW_EXCEPTION_IO_CORRUPT;
libraw_internal_data.unpacker_data.lenRAFData = libraw_internal_data.unpacker_data.posRAFData = 0;
#else
fread(head, 1, 32, ifp);
#endif
fseek(ifp, 0, SEEK_END);
flen = fsize = ftell(ifp);
if ((cp = (char *)memmem(head, 32, (char *)"MMMM", 4)) || (cp = (char *)memmem(head, 32, (char *)"IIII", 4)))
{
parse_phase_one(cp - head);
if (cp - head && parse_tiff(0))
apply_tiff();
}
else if (order == 0x4949 || order == 0x4d4d)
{
if (!memcmp(head + 6, "HEAPCCDR", 8))
{
data_offset = hlen;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
#endif
parse_ciff(hlen, flen - hlen, 0);
load_raw = &CLASS canon_load_raw;
}
else if (parse_tiff(0))
apply_tiff();
}
else if (!memcmp(head, "\xff\xd8\xff\xe1", 4) && !memcmp(head + 6, "Exif", 4))
{
fseek(ifp, 4, SEEK_SET);
data_offset = 4 + get2();
fseek(ifp, data_offset, SEEK_SET);
if (fgetc(ifp) != 0xff)
parse_tiff(12);
thumb_offset = 0;
}
else if (!memcmp(head + 25, "ARECOYK", 7))
{
strcpy(make, "Contax");
strcpy(model, "N Digital");
fseek(ifp, 33, SEEK_SET);
get_timestamp(1);
fseek(ifp, 52, SEEK_SET);
switch (get4())
{
case 7:
iso_speed = 25;
break;
case 8:
iso_speed = 32;
break;
case 9:
iso_speed = 40;
break;
case 10:
iso_speed = 50;
break;
case 11:
iso_speed = 64;
break;
case 12:
iso_speed = 80;
break;
case 13:
iso_speed = 100;
break;
case 14:
iso_speed = 125;
break;
case 15:
iso_speed = 160;
break;
case 16:
iso_speed = 200;
break;
case 17:
iso_speed = 250;
break;
case 18:
iso_speed = 320;
break;
case 19:
iso_speed = 400;
break;
}
shutter = libraw_powf64l(2.0f, (((float)get4()) / 8.0f)) / 16000.0f;
FORC4 cam_mul[c ^ (c >> 1)] = get4();
fseek(ifp, 88, SEEK_SET);
aperture = libraw_powf64l(2.0f, ((float)get4()) / 16.0f);
fseek(ifp, 112, SEEK_SET);
focal_len = get4();
#ifdef LIBRAW_LIBRARY_BUILD
fseek(ifp, 104, SEEK_SET);
imgdata.lens.makernotes.MaxAp4CurFocal = libraw_powf64l(2.0f, ((float)get4()) / 16.0f);
fseek(ifp, 124, SEEK_SET);
stmread(imgdata.lens.makernotes.Lens, 32, ifp);
imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_Contax_N;
if (imgdata.lens.makernotes.Lens[0])
imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_Contax_N;
#endif
}
else if (!strcmp(head, "PXN"))
{
strcpy(make, "Logitech");
strcpy(model, "Fotoman Pixtura");
}
else if (!strcmp(head, "qktk"))
{
strcpy(make, "Apple");
strcpy(model, "QuickTake 100");
load_raw = &CLASS quicktake_100_load_raw;
}
else if (!strcmp(head, "qktn"))
{
strcpy(make, "Apple");
strcpy(model, "QuickTake 150");
load_raw = &CLASS kodak_radc_load_raw;
}
else if (!memcmp(head, "FUJIFILM", 8))
{
#ifdef LIBRAW_LIBRARY_BUILD
strncpy(model, head + 0x1c,0x20);
model[0x20]=0;
memcpy(model2, head + 0x3c, 4);
model2[4] = 0;
#endif
fseek(ifp, 84, SEEK_SET);
thumb_offset = get4();
thumb_length = get4();
fseek(ifp, 92, SEEK_SET);
parse_fuji(get4());
if (thumb_offset > 120)
{
fseek(ifp, 120, SEEK_SET);
is_raw += (i = get4()) ? 1 : 0;
if (is_raw == 2 && shot_select)
parse_fuji(i);
}
load_raw = &CLASS unpacked_load_raw;
fseek(ifp, 100 + 28 * (shot_select > 0), SEEK_SET);
parse_tiff(data_offset = get4());
parse_tiff(thumb_offset + 12);
apply_tiff();
}
else if (!memcmp(head, "RIFF", 4))
{
fseek(ifp, 0, SEEK_SET);
parse_riff();
}
else if (!memcmp(head + 4, "ftypqt ", 9))
{
fseek(ifp, 0, SEEK_SET);
parse_qt(fsize);
is_raw = 0;
}
else if (!memcmp(head, "\0\001\0\001\0@", 6))
{
fseek(ifp, 6, SEEK_SET);
fread(make, 1, 8, ifp);
fread(model, 1, 8, ifp);
fread(model2, 1, 16, ifp);
data_offset = get2();
get2();
raw_width = get2();
raw_height = get2();
load_raw = &CLASS nokia_load_raw;
filters = 0x61616161;
}
else if (!memcmp(head, "NOKIARAW", 8))
{
  /* Nokia "NOKIARAW" container: little-endian, geometry and payload
     location at fixed offsets starting at byte 300. */
  strcpy(make, "NOKIA");
  order = 0x4949;
  fseek(ifp, 300, SEEK_SET);
  data_offset = get4();
  i = get4(); // bytes count
  width = get2();
  height = get2();
#ifdef LIBRAW_LIBRARY_BUILD
  // Data integrity check: dimensions must be sane and the byte count must
  // correspond to 8..16 bits per pixel, otherwise reject as corrupt.
  if (width < 1 || width > 16000 || height < 1 || height > 16000 || i < (width * height) || i > (2 * width * height))
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
#endif
  /* Derive bits-per-sample from payload size. Only 8 and 10 bpp have
     loaders; other values leave load_raw unset and the file is rejected
     by the "early reject" sanity check further below. */
  switch (tiff_bps = i * 8 / (width * height))
  {
  case 8:
    load_raw = &CLASS eight_bit_load_raw;
    break;
  case 10:
    load_raw = &CLASS nokia_load_raw;
    break;
  case 0:
    /* NOTE(review): this throw is not guarded by LIBRAW_LIBRARY_BUILD,
       unlike the check above — confirm it is reachable/intended in the
       plain dcraw build. (width*height==0 is already excluded above in
       the library build.) */
    throw LIBRAW_EXCEPTION_IO_CORRUPT;
    break;
  }
  /* Rows actually present in the payload minus the nominal height become
     the top margin; mask row marker and 0x61616161 (GBRG-type) CFA. */
  raw_height = height + (top_margin = i / (width * tiff_bps / 8) - height);
  mask[0][3] = 1;
  filters = 0x61616161;
}
else if (!memcmp(head, "ARRI", 4))
{
order = 0x4949;
fseek(ifp, 20, SEEK_SET);
width = get4();
height = get4();
strcpy(make, "ARRI");
fseek(ifp, 668, SEEK_SET);
fread(model, 1, 64, ifp);
data_offset = 4096;
load_raw = &CLASS packed_load_raw;
load_flags = 88;
filters = 0x61616161;
}
else if (!memcmp(head, "XPDS", 4))
{
order = 0x4949;
fseek(ifp, 0x800, SEEK_SET);
fread(make, 1, 41, ifp);
raw_height = get2();
raw_width = get2();
fseek(ifp, 56, SEEK_CUR);
fread(model, 1, 30, ifp);
data_offset = 0x10000;
load_raw = &CLASS canon_rmf_load_raw;
gamma_curve(0, 12.25, 1, 1023);
}
else if (!memcmp(head + 4, "RED1", 4))
{
strcpy(make, "Red");
strcpy(model, "One");
parse_redcine();
load_raw = &CLASS redcine_load_raw;
gamma_curve(1 / 2.4, 12.92, 1, 4095);
filters = 0x49494949;
}
else if (!memcmp(head, "DSC-Image", 9))
parse_rollei();
else if (!memcmp(head, "PWAD", 4))
parse_sinar_ia();
else if (!memcmp(head, "\0MRM", 4))
parse_minolta(0);
else if (!memcmp(head, "FOVb", 4))
{
#ifdef LIBRAW_LIBRARY_BUILD
/* no foveon support for dcraw build from libraw source */
parse_x3f();
#endif
}
else if (!memcmp(head, "CI", 2))
parse_cine();
/* If no header-based detection succeeded, fall back to matching the exact
   file size against the built-in camera table. */
if (make[0] == 0)
#ifdef LIBRAW_LIBRARY_BUILD
  for (zero_fsize = i = 0; i < camera_count; i++)
#else
  for (zero_fsize = i = 0; i < sizeof table / sizeof *table; i++)
#endif
    if (fsize == table[i].fsize)
    {
      strcpy(make, table[i].t_make);
#ifdef LIBRAW_LIBRARY_BUILD
      if (!strncmp(make, "Canon", 5))
      {
        imgdata.lens.makernotes.CameraMount = LIBRAW_MOUNT_FixedLens;
        imgdata.lens.makernotes.LensMount = LIBRAW_MOUNT_FixedLens;
      }
#endif
      strcpy(model, table[i].t_model);
      /* Table flags: bit0 = external JPEG carries metadata,
         bit1 = zero pixel values are bad, bits2+ = orientation flip. */
      flip = table[i].flags >> 2;
      zero_is_bad = table[i].flags & 2;
      if (table[i].flags & 1)
        parse_external_jpeg();
      data_offset = table[i].offset == 0xffff ? 0 : table[i].offset;
      raw_width = table[i].rw;
      raw_height = table[i].rh;
      left_margin = table[i].lm;
      top_margin = table[i].tm;
      width = raw_width - left_margin - table[i].rm;
      height = raw_height - top_margin - table[i].bm;
      filters = 0x1010101U * table[i].cf;
      /* 3 colors unless the CFA pattern actually uses a 4th color plane. */
      colors = 4 - !((filters & filters >> 1) & 0x5555);
      load_flags = table[i].lf;
      /* Bits per sample inferred from payload size vs. raw dimensions. */
      switch (tiff_bps = (fsize - data_offset) * 8 / (raw_width * raw_height))
      {
      case 6:
        load_raw = &CLASS minolta_rd175_load_raw;
        break;
      case 8:
        load_raw = &CLASS eight_bit_load_raw;
        break;
      case 10:
        if ((fsize - data_offset) / raw_height * 3 >= raw_width * 4)
        {
          load_raw = &CLASS android_loose_load_raw;
          break;
        }
        else if (load_flags & 1)
        {
          load_raw = &CLASS android_tight_load_raw;
          break;
        }
        /* FALLTHROUGH: plain 10-bit packed data uses the 12-bit packed
           loader path below (intentional — do not add a break). */
      case 12:
        load_flags |= 128;
        load_raw = &CLASS packed_load_raw;
        break;
      case 16:
        /* load_flags bit0 selects big-endian-ish byte order; bits1-3 and
           bits4+ encode bps corrections consumed (and cleared) here. */
        order = 0x4949 | 0x404 * (load_flags & 1);
        tiff_bps -= load_flags >> 4;
        tiff_bps -= load_flags = load_flags >> 1 & 7;
        load_raw = table[i].offset == 0xffff ? &CLASS unpacked_load_raw_reversed : &CLASS unpacked_load_raw;
      }
      maximum = (1 << tiff_bps) - (1 << table[i].max);
      break;
    }
/* presumably zero_fsize is set by a table entry with fsize==0 matching any
   size — TODO confirm against the full table-scan code; if set, the size is
   considered non-diagnostic and cleared. */
if (zero_fsize)
  fsize = 0;
if (make[0] == 0)
parse_smal(0, flen);
if (make[0] == 0)
{
parse_jpeg(0);
fseek(ifp, 0, SEEK_END);
int sz = ftell(ifp);
#ifdef LIBRAW_LIBRARY_BUILD
if (!strncmp(model, "RP_imx219", 9) && sz >= 0x9cb600 && !fseek(ifp, -0x9cb600, SEEK_END) &&
fread(head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4))
{
strcpy(make, "Broadcom");
strcpy(model, "RPi IMX219");
if (raw_height > raw_width)
flip = 5;
data_offset = ftell(ifp) + 0x8000 - 0x20;
parse_broadcom();
black = 66;
maximum = 0x3ff;
load_raw = &CLASS broadcom_load_raw;
thumb_offset = 0;
thumb_length = sz - 0x9cb600 - 1;
}
else if (!(strncmp(model, "ov5647", 6) && strncmp(model, "RP_OV5647", 9)) && sz >= 0x61b800 &&
!fseek(ifp, -0x61b800, SEEK_END) && fread(head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4))
{
strcpy(make, "Broadcom");
if (!strncmp(model, "ov5647", 6))
strcpy(model, "RPi OV5647 v.1");
else
strcpy(model, "RPi OV5647 v.2");
if (raw_height > raw_width)
flip = 5;
data_offset = ftell(ifp) + 0x8000 - 0x20;
parse_broadcom();
black = 16;
maximum = 0x3ff;
load_raw = &CLASS broadcom_load_raw;
thumb_offset = 0;
thumb_length = sz - 0x61b800 - 1;
#else
if (!(strncmp(model, "ov", 2) && strncmp(model, "RP_OV", 5)) && sz >= 6404096 && !fseek(ifp, -6404096, SEEK_END) &&
fread(head, 1, 32, ifp) && !strcmp(head, "BRCMn"))
{
strcpy(make, "OmniVision");
data_offset = ftell(ifp) + 0x8000 - 32;
width = raw_width;
raw_width = 2611;
load_raw = &CLASS nokia_load_raw;
filters = 0x16161616;
#endif
}
else
is_raw = 0;
}
#ifdef LIBRAW_LIBRARY_BUILD
// make sure strings are terminated
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
#endif
for (i = 0; i < sizeof corp / sizeof *corp; i++)
if (strcasestr(make, corp[i])) /* Simplify company names */
strcpy(make, corp[i]);
if ((!strncmp(make, "Kodak", 5) || !strncmp(make, "Leica", 5)) &&
((cp = strcasestr(model, " DIGITAL CAMERA")) || (cp = strstr(model, "FILE VERSION"))))
*cp = 0;
if (!strncasecmp(model, "PENTAX", 6))
strcpy(make, "Pentax");
#ifdef LIBRAW_LIBRARY_BUILD
remove_trailing_spaces(make, sizeof(make));
remove_trailing_spaces(model, sizeof(model));
#else
cp = make + strlen(make); /* Remove trailing spaces */
while (*--cp == ' ')
*cp = 0;
cp = model + strlen(model);
while (*--cp == ' ')
*cp = 0;
#endif
i = strbuflen(make); /* Remove make from model */
if (!strncasecmp(model, make, i) && model[i++] == ' ')
memmove(model, model + i, 64 - i);
if (!strncmp(model, "FinePix ", 8))
memmove(model, model + 8,strlen(model)-7);
if (!strncmp(model, "Digital Camera ", 15))
memmove(model, model + 15,strlen(model)-14);
desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0;
if (!is_raw)
goto notraw;
if (!height)
height = raw_height;
if (!width)
width = raw_width;
if (height == 2624 && width == 3936) /* Pentax K10D and Samsung GX10 */
{
height = 2616;
width = 3896;
}
if (height == 3136 && width == 4864) /* Pentax K20D and Samsung GX20 */
{
height = 3124;
width = 4688;
filters = 0x16161616;
}
if (width == 4352 && (!strcmp(model, "K-r") || !strcmp(model, "K-x")))
{
width = 4309;
filters = 0x16161616;
}
if (width >= 4960 && !strncmp(model, "K-5", 3))
{
left_margin = 10;
width = 4950;
filters = 0x16161616;
}
if (width == 6080 && !strcmp(model, "K-70"))
{
height = 4016;
top_margin = 32;
width = 6020;
left_margin = 60;
}
if (width == 4736 && !strcmp(model, "K-7"))
{
height = 3122;
width = 4684;
filters = 0x16161616;
top_margin = 2;
}
if (width == 6080 && !strcmp(model, "K-3 II")) /* moved back */
{
left_margin = 4;
width = 6040;
}
if (width == 6112 && !strcmp(model, "KP"))
{
/* From DNG, maybe too strict */
left_margin = 54;
top_margin = 28;
width = 6028;
height = raw_height - top_margin;
}
if (width == 6080 && !strcmp(model, "K-3"))
{
left_margin = 4;
width = 6040;
}
if (width == 7424 && !strcmp(model, "645D"))
{
height = 5502;
width = 7328;
filters = 0x61616161;
top_margin = 29;
left_margin = 48;
}
if (height == 3014 && width == 4096) /* Ricoh GX200 */
width = 4014;
if (dng_version)
{
if (filters == UINT_MAX)
filters = 0;
if (filters)
is_raw *= tiff_samples;
else
colors = tiff_samples;
switch (tiff_compress)
{
case 0: /* Compression not set, assuming uncompressed */
case 1:
load_raw = &CLASS packed_dng_load_raw;
break;
case 7:
load_raw = &CLASS lossless_dng_load_raw;
break;
#ifdef LIBRAW_LIBRARY_BUILD
case 8:
load_raw = &CLASS deflate_dng_load_raw;
break;
#endif
case 34892:
load_raw = &CLASS lossy_dng_load_raw;
break;
default:
load_raw = 0;
}
if (!strncmp(make, "Canon", 5) && unique_id)
{
for (i = 0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
strcpy(model, unique[i].t_model);
break;
}
}
if (!strncasecmp(make, "Sony", 4) && unique_id)
{
for (i = 0; i < sizeof sonique / sizeof *sonique; i++)
if (unique_id == sonique[i].id)
{
strcpy(model, sonique[i].t_model);
break;
}
}
goto dng_skip;
}
if (!strncmp(make, "Canon", 5) && !fsize && tiff_bps != 15)
{
if (!load_raw)
load_raw = &CLASS lossless_jpeg_load_raw;
for (i = 0; i < sizeof canon / sizeof *canon; i++)
if (raw_width == canon[i][0] && raw_height == canon[i][1])
{
width = raw_width - (left_margin = canon[i][2]);
height = raw_height - (top_margin = canon[i][3]);
width -= canon[i][4];
height -= canon[i][5];
mask[0][1] = canon[i][6];
mask[0][3] = -canon[i][7];
mask[1][1] = canon[i][8];
mask[1][3] = -canon[i][9];
if (canon[i][10])
filters = canon[i][10] * 0x01010101U;
}
if ((unique_id | 0x20000) == 0x2720000)
{
left_margin = 8;
top_margin = 16;
}
}
if (!strncmp(make, "Canon", 5) && unique_id)
{
for (i = 0; i < sizeof unique / sizeof *unique; i++)
if (unique_id == 0x80000000 + unique[i].id)
{
adobe_coeff("Canon", unique[i].t_model);
strcpy(model, unique[i].t_model);
}
}
if (!strncasecmp(make, "Sony", 4) && unique_id)
{
for (i = 0; i < sizeof sonique / sizeof *sonique; i++)
if (unique_id == sonique[i].id)
{
adobe_coeff("Sony", sonique[i].t_model);
strcpy(model, sonique[i].t_model);
}
}
if (!strncmp(make, "Nikon", 5))
{
if (!load_raw)
load_raw = &CLASS packed_load_raw;
if (model[0] == 'E')
load_flags |= !data_offset << 2 | 2;
}
/* Set parameters based on camera name (for non-DNG files). */
if (!strcmp(model, "KAI-0340") && find_green(16, 16, 3840, 5120) < 25)
{
height = 480;
top_margin = filters = 0;
strcpy(model, "C603");
}
#ifndef LIBRAW_LIBRARY_BUILD
if (!strcmp(make, "Sony") && raw_width > 3888 && !black && !cblack[0])
black = 128 << (tiff_bps - 12);
#else
/* Always 512 for arw2_load_raw */
if (!strcmp(make, "Sony") && raw_width > 3888 && !black && !cblack[0])
black = (load_raw == &LibRaw::sony_arw2_load_raw) ? 512 : (128 << (tiff_bps - 12));
#endif
if (is_foveon)
{
if (height * 2 < width)
pixel_aspect = 0.5;
if (height > width)
pixel_aspect = 2;
filters = 0;
}
else if (!strncmp(make, "Pentax", 6) && !strncmp(model, "K-1", 3))
{
top_margin = 18;
height = raw_height - top_margin;
if (raw_width == 7392)
{
left_margin = 6;
width = 7376;
}
}
else if (!strncmp(make, "Canon", 5) && tiff_bps == 15)
{
switch (width)
{
case 3344:
width -= 66;
case 3872:
width -= 6;
}
if (height > width)
{
SWAP(height, width);
SWAP(raw_height, raw_width);
}
if (width == 7200 && height == 3888)
{
raw_width = width = 6480;
raw_height = height = 4320;
}
filters = 0;
tiff_samples = colors = 3;
load_raw = &CLASS canon_sraw_load_raw;
}
else if (!strcmp(model, "PowerShot 600"))
{
height = 613;
width = 854;
raw_width = 896;
colors = 4;
filters = 0xe1e4e1e4;
load_raw = &CLASS canon_600_load_raw;
}
else if (!strcmp(model, "PowerShot A5") || !strcmp(model, "PowerShot A5 Zoom"))
{
height = 773;
width = 960;
raw_width = 992;
pixel_aspect = 256 / 235.0;
filters = 0x1e4e1e4e;
goto canon_a5;
}
else if (!strcmp(model, "PowerShot A50"))
{
height = 968;
width = 1290;
raw_width = 1320;
filters = 0x1b4e4b1e;
goto canon_a5;
}
else if (!strcmp(model, "PowerShot Pro70"))
{
height = 1024;
width = 1552;
filters = 0x1e4b4e1b;
canon_a5:
colors = 4;
tiff_bps = 10;
load_raw = &CLASS packed_load_raw;
load_flags = 40;
}
else if (!strcmp(model, "PowerShot Pro90 IS") || !strcmp(model, "PowerShot G1"))
{
colors = 4;
filters = 0xb4b4b4b4;
}
else if (!strcmp(model, "PowerShot A610"))
{
if (canon_s2is())
strcpy(model + 10, "S2 IS");
}
else if (!strcmp(model, "PowerShot SX220 HS"))
{
mask[1][3] = -4;
top_margin = 16;
left_margin = 92;
}
else if (!strcmp(model, "PowerShot S120"))
{
raw_width = 4192;
raw_height = 3062;
width = 4022;
height = 3016;
mask[0][0] = top_margin = 31;
mask[0][2] = top_margin + height;
left_margin = 120;
mask[0][1] = 23;
mask[0][3] = 72;
}
else if (!strcmp(model, "PowerShot G16"))
{
mask[0][0] = 0;
mask[0][2] = 80;
mask[0][1] = 0;
mask[0][3] = 16;
top_margin = 29;
left_margin = 120;
width = raw_width - left_margin - 48;
height = raw_height - top_margin - 14;
}
else if (!strcmp(model, "PowerShot SX50 HS"))
{
top_margin = 17;
}
else if (!strcmp(model, "EOS D2000C"))
{
filters = 0x61616161;
if (!black)
black = curve[200];
}
else if (!strcmp(model, "D1"))
{
cam_mul[0] *= 256 / 527.0;
cam_mul[2] *= 256 / 317.0;
}
else if (!strcmp(model, "D1X"))
{
width -= 4;
pixel_aspect = 0.5;
}
else if (!strcmp(model, "D40X") || !strcmp(model, "D60") || !strcmp(model, "D80") || !strcmp(model, "D3000"))
{
height -= 3;
width -= 4;
}
else if (!strcmp(model, "D3") || !strcmp(model, "D3S") || !strcmp(model, "D700"))
{
width -= 4;
left_margin = 2;
}
else if (!strcmp(model, "D3100"))
{
width -= 28;
left_margin = 6;
}
else if (!strcmp(model, "D5000") || !strcmp(model, "D90"))
{
width -= 42;
}
else if (!strcmp(model, "D5100") || !strcmp(model, "D7000") || !strcmp(model, "COOLPIX A"))
{
width -= 44;
}
else if (!strcmp(model, "D3200") || !strncmp(model, "D6", 2) || !strncmp(model, "D800", 4))
{
width -= 46;
}
else if (!strcmp(model, "D4") || !strcmp(model, "Df"))
{
width -= 52;
left_margin = 2;
}
else if (!strcmp(model, "D500"))
{
// Empty - to avoid width-1 below
}
else if (!strncmp(model, "D40", 3) || !strncmp(model, "D50", 3) || !strncmp(model, "D70", 3))
{
width--;
}
else if (!strcmp(model, "D100"))
{
if (load_flags)
raw_width = (width += 3) + 3;
}
else if (!strcmp(model, "D200"))
{
left_margin = 1;
width -= 4;
filters = 0x94949494;
}
else if (!strncmp(model, "D2H", 3))
{
left_margin = 6;
width -= 14;
}
else if (!strncmp(model, "D2X", 3))
{
if (width == 3264)
width -= 32;
else
width -= 8;
}
else if (!strncmp(model, "D300", 4))
{
width -= 32;
}
else if (!strncmp(make, "Nikon", 5) && raw_width == 4032)
{
if (!strcmp(model, "COOLPIX P7700"))
{
adobe_coeff("Nikon", "COOLPIX P7700");
maximum = 65504;
load_flags = 0;
}
else if (!strcmp(model, "COOLPIX P7800"))
{
adobe_coeff("Nikon", "COOLPIX P7800");
maximum = 65504;
load_flags = 0;
}
else if (!strcmp(model, "COOLPIX P340"))
load_flags = 0;
}
else if (!strncmp(model, "COOLPIX P", 9) && raw_width != 4032)
{
load_flags = 24;
filters = 0x94949494;
if (model[9] == '7' && (iso_speed >= 400 || iso_speed == 0) && !strstr(software, "V1.2"))
black = 255;
}
else if (!strncmp(model, "COOLPIX B700", 12))
{
load_flags = 24;
black = 200;
}
else if (!strncmp(model, "1 ", 2))
{
height -= 2;
}
else if (fsize == 1581060)
{
simple_coeff(3);
pre_mul[0] = 1.2085;
pre_mul[1] = 1.0943;
pre_mul[3] = 1.1103;
}
else if (fsize == 3178560)
{
cam_mul[0] *= 4;
cam_mul[2] *= 4;
}
else if (fsize == 4771840)
{
if (!timestamp && nikon_e995())
strcpy(model, "E995");
if (strcmp(model, "E995"))
{
filters = 0xb4b4b4b4;
simple_coeff(3);
pre_mul[0] = 1.196;
pre_mul[1] = 1.246;
pre_mul[2] = 1.018;
}
}
else if (fsize == 2940928)
{
if (!timestamp && !nikon_e2100())
strcpy(model, "E2500");
if (!strcmp(model, "E2500"))
{
height -= 2;
load_flags = 6;
colors = 4;
filters = 0x4b4b4b4b;
}
}
else if (fsize == 4775936)
{
if (!timestamp)
nikon_3700();
if (model[0] == 'E' && atoi(model + 1) < 3700)
filters = 0x49494949;
if (!strcmp(model, "Optio 33WR"))
{
flip = 1;
filters = 0x16161616;
}
if (make[0] == 'O')
{
i = find_green(12, 32, 1188864, 3576832);
c = find_green(12, 32, 2383920, 2387016);
if (abs(i) < abs(c))
{
SWAP(i, c);
load_flags = 24;
}
if (i < 0)
filters = 0x61616161;
}
}
else if (fsize == 5869568)
{
if (!timestamp && minolta_z2())
{
strcpy(make, "Minolta");
strcpy(model, "DiMAGE Z2");
}
load_flags = 6 + 24 * (make[0] == 'M');
}
else if (fsize == 6291456)
{
fseek(ifp, 0x300000, SEEK_SET);
if ((order = guess_byte_order(0x10000)) == 0x4d4d)
{
height -= (top_margin = 16);
width -= (left_margin = 28);
maximum = 0xf5c0;
strcpy(make, "ISG");
model[0] = 0;
}
}
else if (!strncmp(make, "Fujifilm", 8))
{
if (!strcmp(model, "X-A3") || !strcmp(model, "X-A10")
|| !strcmp(model, "X-A5") || !strcmp(model, "X-A20"))
{
left_margin = 0;
top_margin = 0;
width = raw_width;
height = raw_height;
}
if (!strcmp(model + 7, "S2Pro"))
{
strcpy(model, "S2Pro");
height = 2144;
width = 2880;
flip = 6;
}
else if (load_raw != &CLASS packed_load_raw && strncmp(model, "X-", 2) && filters >=1000) // Bayer and not X-models
maximum = (is_raw == 2 && shot_select) ? 0x2f00 : 0x3e00;
top_margin = (raw_height - height) >> 2 << 1;
left_margin = (raw_width - width) >> 2 << 1;
if (width == 2848 || width == 3664)
filters = 0x16161616;
if (width == 4032 || width == 4952)
left_margin = 0;
if (width == 3328 && (width -= 66))
left_margin = 34;
if (width == 4936)
left_margin = 4;
if (width == 6032)
left_margin = 0;
if (!strcmp(model, "HS50EXR") || !strcmp(model, "F900EXR"))
{
width += 2;
left_margin = 0;
filters = 0x16161616;
}
if (!strcmp(model, "GFX 50S"))
{
left_margin = 0;
top_margin = 0;
}
if (!strcmp(model, "S5500"))
{
height -= (top_margin = 6);
}
if (fuji_layout)
raw_width *= is_raw;
if (filters == 9)
FORC(36)((char *)xtrans)[c] = xtrans_abs[(c / 6 + top_margin) % 6][(c + left_margin) % 6];
}
else if (!strcmp(model, "KD-400Z"))
{
height = 1712;
width = 2312;
raw_width = 2336;
goto konica_400z;
}
else if (!strcmp(model, "KD-510Z"))
{
goto konica_510z;
}
else if (!strncasecmp(make, "Minolta", 7))
{
if (!load_raw && (maximum = 0xfff))
load_raw = &CLASS unpacked_load_raw;
if (!strncmp(model, "DiMAGE A", 8))
{
if (!strcmp(model, "DiMAGE A200"))
filters = 0x49494949;
tiff_bps = 12;
load_raw = &CLASS packed_load_raw;
}
else if (!strncmp(model, "ALPHA", 5) || !strncmp(model, "DYNAX", 5) || !strncmp(model, "MAXXUM", 6))
{
sprintf(model + 20, "DYNAX %-10s", model + 6 + (model[0] == 'M'));
adobe_coeff(make, model + 20);
load_raw = &CLASS packed_load_raw;
}
else if (!strncmp(model, "DiMAGE G", 8))
{
if (model[8] == '4')
{
height = 1716;
width = 2304;
}
else if (model[8] == '5')
{
konica_510z:
height = 1956;
width = 2607;
raw_width = 2624;
}
else if (model[8] == '6')
{
height = 2136;
width = 2848;
}
data_offset += 14;
filters = 0x61616161;
konica_400z:
load_raw = &CLASS unpacked_load_raw;
maximum = 0x3df;
order = 0x4d4d;
}
}
else if (!strcmp(model, "*ist D"))
{
load_raw = &CLASS unpacked_load_raw;
data_error = -1;
}
else if (!strcmp(model, "*ist DS"))
{
height -= 2;
}
else if (!strncmp(make, "Samsung", 7) && raw_width == 4704)
{
height -= top_margin = 8;
width -= 2 * (left_margin = 8);
load_flags = 32;
}
else if (!strncmp(make, "Samsung", 7) && !strcmp(model, "NX3000"))
{
top_margin = 38;
left_margin = 92;
width = 5456;
height = 3634;
filters = 0x61616161;
colors = 3;
}
else if (!strncmp(make, "Samsung", 7) && raw_height == 3714)
{
height -= top_margin = 18;
left_margin = raw_width - (width = 5536);
if (raw_width != 5600)
left_margin = top_margin = 0;
filters = 0x61616161;
colors = 3;
}
else if (!strncmp(make, "Samsung", 7) && raw_width == 5632)
{
order = 0x4949;
height = 3694;
top_margin = 2;
width = 5574 - (left_margin = 32 + tiff_bps);
if (tiff_bps == 12)
load_flags = 80;
}
else if (!strncmp(make, "Samsung", 7) && raw_width == 5664)
{
height -= top_margin = 17;
left_margin = 96;
width = 5544;
filters = 0x49494949;
}
else if (!strncmp(make, "Samsung", 7) && raw_width == 6496)
{
filters = 0x61616161;
#ifdef LIBRAW_LIBRARY_BUILD
if (!black && !cblack[0] && !cblack[1] && !cblack[2] && !cblack[3])
#endif
black = 1 << (tiff_bps - 7);
}
else if (!strcmp(model, "EX1"))
{
order = 0x4949;
height -= 20;
top_margin = 2;
if ((width -= 6) > 3682)
{
height -= 10;
width -= 46;
top_margin = 8;
}
}
else if (!strcmp(model, "WB2000"))
{
order = 0x4949;
height -= 3;
top_margin = 2;
if ((width -= 10) > 3718)
{
height -= 28;
width -= 56;
top_margin = 8;
}
}
else if (strstr(model, "WB550"))
{
strcpy(model, "WB550");
}
else if (!strcmp(model, "EX2F"))
{
height = 3030;
width = 4040;
top_margin = 15;
left_margin = 24;
order = 0x4949;
filters = 0x49494949;
load_raw = &CLASS unpacked_load_raw;
}
else if (!strcmp(model, "STV680 VGA"))
{
black = 16;
}
else if (!strcmp(model, "N95"))
{
height = raw_height - (top_margin = 2);
}
else if (!strcmp(model, "640x480"))
{
gamma_curve(0.45, 4.5, 1, 255);
}
else if (!strncmp(make, "Hasselblad", 10))
{
if (load_raw == &CLASS lossless_jpeg_load_raw)
load_raw = &CLASS hasselblad_load_raw;
if (raw_width == 7262)
{
height = 5444;
width = 7248;
top_margin = 4;
left_margin = 7;
filters = 0x61616161;
if (!strncasecmp(model, "H3D", 3))
{
adobe_coeff("Hasselblad", "H3DII-39");
strcpy(model, "H3DII-39");
}
}
else if (raw_width == 12000) // H6D 100c, A6D 100c
{
left_margin = 64;
width = 11608;
top_margin = 108;
height = raw_height - top_margin;
adobe_coeff("Hasselblad", "H6D-100c");
}
else if (raw_width == 7410 || raw_width == 8282)
{
height -= 84;
width -= 82;
top_margin = 4;
left_margin = 41;
filters = 0x61616161;
adobe_coeff("Hasselblad", "H4D-40");
strcpy(model, "H4D-40");
}
else if (raw_width == 8384) // X1D
{
top_margin = 96;
height -= 96;
left_margin = 48;
width -= 106;
adobe_coeff("Hasselblad", "X1D");
maximum = 0xffff;
tiff_bps = 16;
}
else if (raw_width == 9044)
{
if (black > 500)
{
top_margin = 12;
left_margin = 44;
width = 8956;
height = 6708;
memset(cblack, 0, sizeof(cblack));
adobe_coeff("Hasselblad", "H4D-60");
strcpy(model, "H4D-60");
black = 512;
}
else
{
height = 6716;
width = 8964;
top_margin = 8;
left_margin = 40;
black += load_flags = 256;
maximum = 0x8101;
strcpy(model, "H3DII-60");
}
}
else if (raw_width == 4090)
{
strcpy(model, "V96C");
height -= (top_margin = 6);
width -= (left_margin = 3) + 7;
filters = 0x61616161;
}
else if (raw_width == 8282 && raw_height == 6240)
{
if (!strncasecmp(model, "H5D", 3))
{
/* H5D 50*/
left_margin = 54;
top_margin = 16;
width = 8176;
height = 6132;
black = 256;
strcpy(model, "H5D-50");
}
else if (!strncasecmp(model, "H3D", 3))
{
black = 0;
left_margin = 54;
top_margin = 16;
width = 8176;
height = 6132;
memset(cblack, 0, sizeof(cblack));
adobe_coeff("Hasselblad", "H3D-50");
strcpy(model, "H3D-50");
}
}
else if (raw_width == 8374 && raw_height == 6304)
{
/* H5D 50c*/
left_margin = 52;
top_margin = 100;
width = 8272;
height = 6200;
black = 256;
strcpy(model, "H5D-50c");
}
if (tiff_samples > 1)
{
is_raw = tiff_samples + 1;
if (!shot_select && !half_size)
filters = 0;
}
}
else if (!strncmp(make, "Sinar", 5))
{
if (!load_raw)
load_raw = &CLASS unpacked_load_raw;
if (is_raw > 1 && !shot_select && !half_size)
filters = 0;
maximum = 0x3fff;
}
else if (!strncmp(make, "Leaf", 4))
{
maximum = 0x3fff;
fseek(ifp, data_offset, SEEK_SET);
if (ljpeg_start(&jh, 1) && jh.bits == 15)
maximum = 0x1fff;
if (tiff_samples > 1)
filters = 0;
if (tiff_samples > 1 || tile_length < raw_height)
{
load_raw = &CLASS leaf_hdr_load_raw;
raw_width = tile_width;
}
if ((width | height) == 2048)
{
if (tiff_samples == 1)
{
filters = 1;
strcpy(cdesc, "RBTG");
strcpy(model, "CatchLight");
top_margin = 8;
left_margin = 18;
height = 2032;
width = 2016;
}
else
{
strcpy(model, "DCB2");
top_margin = 10;
left_margin = 16;
height = 2028;
width = 2022;
}
}
else if (width + height == 3144 + 2060)
{
if (!model[0])
strcpy(model, "Cantare");
if (width > height)
{
top_margin = 6;
left_margin = 32;
height = 2048;
width = 3072;
filters = 0x61616161;
}
else
{
left_margin = 6;
top_margin = 32;
width = 2048;
height = 3072;
filters = 0x16161616;
}
if (!cam_mul[0] || model[0] == 'V')
filters = 0;
else
is_raw = tiff_samples;
}
else if (width == 2116)
{
strcpy(model, "Valeo 6");
height -= 2 * (top_margin = 30);
width -= 2 * (left_margin = 55);
filters = 0x49494949;
}
else if (width == 3171)
{
strcpy(model, "Valeo 6");
height -= 2 * (top_margin = 24);
width -= 2 * (left_margin = 24);
filters = 0x16161616;
}
}
else if (!strncmp(make, "Leica", 5) || !strncmp(make, "Panasonic", 9) || !strncasecmp(make, "YUNEEC", 6))
{
  /* If the payload size matches raw_width * 8/7 bytes per row (7 bytes per
     4 pixels => 14-bit packed — TODO confirm), use the Panasonic loader;
     otherwise fall back to plain unpacked 16-bit data. */
  if (raw_width > 0 && ((flen - data_offset) / (raw_width * 8 / 7) == raw_height))
    load_raw = &CLASS panasonic_load_raw;
  if (!load_raw)
  {
    load_raw = &CLASS unpacked_load_raw;
    load_flags = 4;
  }
  zero_is_bad = 1;
  /* Panasonic files typically carry extra usable rows below the nominal
     height; extend by 12 rows but never past the physical raw height. */
  if ((height += 12) > raw_height)
    height = raw_height;
  /* Per-model margin/size corrections from the pana[] table, keyed on the
     exact raw dimensions. */
  for (i = 0; i < sizeof pana / sizeof *pana; i++)
    if (raw_width == pana[i][0] && raw_height == pana[i][1])
    {
      left_margin = pana[i][2];
      top_margin = pana[i][3];
      width += pana[i][4];
      height += pana[i][5];
    }
  /* Re-derive the CFA pattern: pick one of the four canonical 2x2 Bayer
     encodings (0x94/0x61/0x49/0x16), rotating the original pattern by the
     parity of the left/top margins so the visible area keeps the correct
     color phase. */
  filters = 0x01010101U * (uchar) "\x94\x61\x49\x16"[((filters - 1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3];
}
else if (!strcmp(model, "C770UZ"))
{
height = 1718;
width = 2304;
filters = 0x16161616;
load_raw = &CLASS packed_load_raw;
load_flags = 30;
}
else if (!strncmp(make, "Olympus", 7))
{
height += height & 1;
if (exif_cfa)
filters = exif_cfa;
if (width == 4100)
width -= 4;
if (width == 4080)
width -= 24;
if (width == 9280)
{
width -= 6;
height -= 6;
}
if (load_raw == &CLASS unpacked_load_raw)
load_flags = 4;
tiff_bps = 12;
if (!strcmp(model, "E-300") || !strcmp(model, "E-500"))
{
width -= 20;
if (load_raw == &CLASS unpacked_load_raw)
{
maximum = 0xfc3;
memset(cblack, 0, sizeof cblack);
}
}
else if (!strcmp(model, "STYLUS1"))
{
width -= 14;
maximum = 0xfff;
}
else if (!strcmp(model, "E-330"))
{
width -= 30;
if (load_raw == &CLASS unpacked_load_raw)
maximum = 0xf79;
}
else if (!strcmp(model, "SP550UZ"))
{
thumb_length = flen - (thumb_offset = 0xa39800);
thumb_height = 480;
thumb_width = 640;
}
else if (!strcmp(model, "TG-4"))
{
width -= 16;
}
else if (!strcmp(model, "TG-5"))
{
width -= 26;
}
}
else if (!strcmp(model, "N Digital"))
{
height = 2047;
width = 3072;
filters = 0x61616161;
data_offset = 0x1a00;
load_raw = &CLASS packed_load_raw;
}
else if (!strcmp(model, "DSC-F828"))
{
width = 3288;
left_margin = 5;
mask[1][3] = -17;
data_offset = 862144;
load_raw = &CLASS sony_load_raw;
filters = 0x9c9c9c9c;
colors = 4;
strcpy(cdesc, "RGBE");
}
else if (!strcmp(model, "DSC-V3"))
{
width = 3109;
left_margin = 59;
mask[0][1] = 9;
data_offset = 787392;
load_raw = &CLASS sony_load_raw;
}
else if (!strncmp(make, "Sony", 4) && raw_width == 3984)
{
width = 3925;
order = 0x4d4d;
}
else if (!strncmp(make, "Sony", 4) && raw_width == 4288)
{
width -= 32;
}
else if (!strcmp(make, "Sony") && raw_width == 4600)
{
if (!strcmp(model, "DSLR-A350"))
height -= 4;
black = 0;
}
else if (!strncmp(make, "Sony", 4) && raw_width == 4928)
{
if (height < 3280)
width -= 8;
}
else if (!strncmp(make, "Sony", 4) && raw_width == 5504)
{ // ILCE-3000//5000
width -= height > 3664 ? 8 : 32;
}
else if (!strncmp(make, "Sony", 4) && raw_width == 6048)
{
width -= 24;
if (strstr(model, "RX1") || strstr(model, "A99"))
width -= 6;
}
else if (!strncmp(make, "Sony", 4) && raw_width == 7392)
{
width -= 30;
}
else if (!strncmp(make, "Sony", 4) && raw_width == 8000)
{
width -= 32;
}
else if (!strcmp(model, "DSLR-A100"))
{
if (width == 3880)
{
height--;
width = ++raw_width;
}
else
{
height -= 4;
width -= 4;
order = 0x4d4d;
load_flags = 2;
}
filters = 0x61616161;
}
else if (!strcmp(model, "PIXL"))
{
height -= top_margin = 4;
width -= left_margin = 32;
gamma_curve(0, 7, 1, 255);
}
else if (!strcmp(model, "C603") || !strcmp(model, "C330") || !strcmp(model, "12MP"))
{
order = 0x4949;
if (filters && data_offset)
{
fseek(ifp, data_offset < 4096 ? 168 : 5252, SEEK_SET);
read_shorts(curve, 256);
}
else
gamma_curve(0, 3.875, 1, 255);
load_raw = filters ? &CLASS eight_bit_load_raw
: strcmp(model, "C330") ? &CLASS kodak_c603_load_raw : &CLASS kodak_c330_load_raw;
load_flags = tiff_bps > 16;
tiff_bps = 8;
}
else if (!strncasecmp(model, "EasyShare", 9))
{
data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000;
load_raw = &CLASS packed_load_raw;
}
else if (!strncasecmp(make, "Kodak", 5))
{
if (filters == UINT_MAX)
filters = 0x61616161;
if (!strncmp(model, "NC2000", 6) || !strncmp(model, "EOSDCS", 6) || !strncmp(model, "DCS4", 4))
{
width -= 4;
left_margin = 2;
if (model[6] == ' ')
model[6] = 0;
if (!strcmp(model, "DCS460A"))
goto bw;
}
else if (!strcmp(model, "DCS660M"))
{
black = 214;
goto bw;
}
else if (!strcmp(model, "DCS760M"))
{
bw:
colors = 1;
filters = 0;
}
if (!strcmp(model + 4, "20X"))
strcpy(cdesc, "MYCY");
if (strstr(model, "DC25"))
{
strcpy(model, "DC25");
data_offset = 15424;
}
if (!strncmp(model, "DC2", 3))
{
raw_height = 2 + (height = 242);
if (!strncmp(model, "DC290", 5))
iso_speed = 100;
if (!strncmp(model, "DC280", 5))
iso_speed = 70;
if (flen < 100000)
{
raw_width = 256;
width = 249;
pixel_aspect = (4.0 * height) / (3.0 * width);
}
else
{
raw_width = 512;
width = 501;
pixel_aspect = (493.0 * height) / (373.0 * width);
}
top_margin = left_margin = 1;
colors = 4;
filters = 0x8d8d8d8d;
simple_coeff(1);
pre_mul[1] = 1.179;
pre_mul[2] = 1.209;
pre_mul[3] = 1.036;
load_raw = &CLASS eight_bit_load_raw;
}
else if (!strcmp(model, "40"))
{
strcpy(model, "DC40");
height = 512;
width = 768;
data_offset = 1152;
load_raw = &CLASS kodak_radc_load_raw;
tiff_bps = 12;
}
else if (strstr(model, "DC50"))
{
strcpy(model, "DC50");
height = 512;
width = 768;
iso_speed = 84;
data_offset = 19712;
load_raw = &CLASS kodak_radc_load_raw;
}
else if (strstr(model, "DC120"))
{
strcpy(model, "DC120");
raw_height = height = 976;
raw_width = width = 848;
iso_speed = 160;
pixel_aspect = height / 0.75 / width;
load_raw = tiff_compress == 7 ? &CLASS kodak_jpeg_load_raw : &CLASS kodak_dc120_load_raw;
}
else if (!strcmp(model, "DCS200"))
{
thumb_height = 128;
thumb_width = 192;
thumb_offset = 6144;
thumb_misc = 360;
iso_speed = 140;
write_thumb = &CLASS layer_thumb;
black = 17;
}
}
else if (!strcmp(model, "Fotoman Pixtura"))
{
height = 512;
width = 768;
data_offset = 3632;
load_raw = &CLASS kodak_radc_load_raw;
filters = 0x61616161;
simple_coeff(2);
}
else if (!strncmp(model, "QuickTake", 9))
{
if (head[5])
strcpy(model + 10, "200");
fseek(ifp, 544, SEEK_SET);
height = get2();
width = get2();
data_offset = (get4(), get2()) == 30 ? 738 : 736;
if (height > width)
{
SWAP(height, width);
fseek(ifp, data_offset - 6, SEEK_SET);
flip = ~get2() & 3 ? 5 : 6;
}
filters = 0x61616161;
}
else if (!strncmp(make, "Rollei", 6) && !load_raw)
{
switch (raw_width)
{
case 1316:
height = 1030;
width = 1300;
top_margin = 1;
left_margin = 6;
break;
case 2568:
height = 1960;
width = 2560;
top_margin = 2;
left_margin = 8;
}
filters = 0x16161616;
load_raw = &CLASS rollei_load_raw;
}
else if (!strcmp(model, "GRAS-50S5C"))
{
height = 2048;
width = 2440;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x49494949;
order = 0x4949;
maximum = 0xfffC;
}
else if (!strcmp(model, "BB-500CL"))
{
height = 2058;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
}
else if (!strcmp(model, "BB-500GE"))
{
height = 2058;
width = 2456;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x3fff;
}
else if (!strcmp(model, "SVS625CL"))
{
height = 2050;
width = 2448;
load_raw = &CLASS unpacked_load_raw;
data_offset = 0;
filters = 0x94949494;
order = 0x4949;
maximum = 0x0fff;
}
/* Early reject for damaged images: if detection produced no loader or
   implausible geometry/sample parameters, mark the file as non-raw and
   bail out before any decode is attempted. */
if (!load_raw || height < 22 || width < 22 ||
#ifdef LIBRAW_LIBRARY_BUILD
    /* >16 bps is only valid for float DNG handled by the deflate loader. */
    (tiff_bps > 16 && load_raw != &LibRaw::deflate_dng_load_raw)
#else
    tiff_bps > 16
#endif
    || tiff_samples > 4 || colors > 4 || colors < 1
    /* alloc in unpack() may be fooled by size adjust */
    || ((int)width + (int)left_margin > 65535) || ((int)height + (int)top_margin > 65535))
{
  is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY, 1, 2);
#endif
  return;
}
if (!model[0])
sprintf(model, "%dx%d", width, height);
if (filters == UINT_MAX)
filters = 0x94949494;
if (thumb_offset && !thumb_height)
{
fseek(ifp, thumb_offset, SEEK_SET);
if (ljpeg_start(&jh, 1))
{
thumb_width = jh.wide;
thumb_height = jh.high;
}
}
dng_skip:
#ifdef LIBRAW_LIBRARY_BUILD
if (dng_version) /* Override black level by DNG tags */
{
/* copy DNG data from per-IFD field to color.dng */
int iifd = 0; // Active IFD we'll show to user.
for (; iifd < tiff_nifds; iifd++)
if (tiff_ifd[iifd].offset == data_offset) // found
break;
int pifd = -1;
for (int ii = 0; ii < tiff_nifds; ii++)
if (tiff_ifd[ii].offset == thumb_offset) // found
{
pifd = ii;
break;
}
#define CFAROUND(value, filters) filters ? (filters >= 1000 ? ((value + 1) / 2) * 2 : ((value + 5) / 6) * 6) : value
#define IFDCOLORINDEX(ifd, subset, bit) \
(tiff_ifd[ifd].dng_color[subset].parsedfields & bit) ? ifd \
: ((tiff_ifd[0].dng_color[subset].parsedfields & bit) ? 0 : -1)
#define IFDLEVELINDEX(ifd, bit) \
(tiff_ifd[ifd].dng_levels.parsedfields & bit) ? ifd : ((tiff_ifd[0].dng_levels.parsedfields & bit) ? 0 : -1)
#define COPYARR(to, from) memmove(&to, &from, sizeof(from))
if (iifd < tiff_nifds)
{
int sidx;
// Per field, not per structure
if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_CHECK_DNG_ILLUMINANT)
{
int illidx[2], cmidx[2],calidx[2], abidx;
for(int i = 0; i < 2; i++)
{
illidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_ILLUMINANT);
cmidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_COLORMATRIX);
calidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_CALIBRATION);
}
abidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE);
// Data found, all in same ifd, illuminants are inited
if (illidx[0] >= 0 && illidx[0] < tiff_nifds && illidx[0] == illidx[1] && illidx[0] == cmidx[0] && illidx[0] == cmidx[1]
&& tiff_ifd[illidx[0]].dng_color[0].illuminant>0 && tiff_ifd[illidx[0]].dng_color[1].illuminant>0)
{
sidx = illidx[0]; // => selected IFD
double cc[4][4], cm[4][3], cam_xyz[4][3];
// CM -> Color Matrix
// CC -> Camera calibration
for (int j = 0; j < 4; j++) for (int i = 0; i < 4; i++) cc[j][i] = i == j;
int colidx = -1;
// IS D65 here?
for(int i = 0; i < 2; i++)
{
int ill = tiff_ifd[sidx].dng_color[i].illuminant;
if (tiff_ifd[sidx].dng_color[i].illuminant == LIBRAW_WBI_D65)
{
colidx = i; break;
}
}
// Other daylight-type ill
if(colidx<0)
for(int i = 0; i < 2; i++)
{
int ill = tiff_ifd[sidx].dng_color[i].illuminant;
if (ill == LIBRAW_WBI_Daylight || ill == LIBRAW_WBI_D55 || ill == LIBRAW_WBI_D75 || ill == LIBRAW_WBI_D50 || ill == LIBRAW_WBI_Flash)
{
colidx = i; break;
}
}
if(colidx>=0) // Selected
{
// Init camera matrix from DNG
FORCC for (int j = 0; j < 3; j++)
cm[c][j] = tiff_ifd[sidx].dng_color[colidx].colormatrix[c][j];
if(calidx[colidx] == sidx)
{
for (int i = 0; i < colors; i++)
FORCC
cc[i][c] = tiff_ifd[sidx].dng_color[colidx].calibration[i][c];
}
if(abidx == sidx)
for (int i = 0; i < colors; i++)
FORCC cc[i][c] *= tiff_ifd[sidx].dng_levels.analogbalance[i];
int j;
FORCC for (int i = 0; i < 3; i++) for (cam_xyz[c][i] = j = 0; j < colors; j++) cam_xyz[c][i] +=
cc[c][j] * cm[j][i];// add AsShotXY later * xyz[i];
cam_xyz_coeff(cmatrix, cam_xyz);
}
}
}
if (imgdata.params.raw_processing_options & LIBRAW_PROCESSING_USE_DNG_DEFAULT_CROP)
{
sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPORIGIN);
int sidx2 = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPSIZE);
if (sidx >= 0 && sidx == sidx2 && tiff_ifd[sidx].dng_levels.default_crop[2] > 0 &&
tiff_ifd[sidx].dng_levels.default_crop[3] > 0)
{
int lm = tiff_ifd[sidx].dng_levels.default_crop[0];
int lmm = CFAROUND(lm, filters);
int tm = tiff_ifd[sidx].dng_levels.default_crop[1];
int tmm = CFAROUND(tm, filters);
int ww = tiff_ifd[sidx].dng_levels.default_crop[2];
int hh = tiff_ifd[sidx].dng_levels.default_crop[3];
if (lmm > lm)
ww -= (lmm - lm);
if (tmm > tm)
hh -= (tmm - tm);
if (left_margin + lm + ww <= raw_width && top_margin + tm + hh <= raw_height)
{
left_margin += lmm;
top_margin += tmm;
width = ww;
height = hh;
}
}
}
if (!(imgdata.color.dng_color[0].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) // Not set already (Leica makernotes)
{
sidx = IFDCOLORINDEX(iifd, 0, LIBRAW_DNGFM_FORWARDMATRIX);
if (sidx >= 0)
COPYARR(imgdata.color.dng_color[0].forwardmatrix, tiff_ifd[sidx].dng_color[0].forwardmatrix);
}
if (!(imgdata.color.dng_color[1].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) // Not set already (Leica makernotes)
{
sidx = IFDCOLORINDEX(iifd, 1, LIBRAW_DNGFM_FORWARDMATRIX);
if (sidx >= 0)
COPYARR(imgdata.color.dng_color[1].forwardmatrix, tiff_ifd[sidx].dng_color[1].forwardmatrix);
}
for (int ss = 0; ss < 2; ss++)
{
sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_COLORMATRIX);
if (sidx >= 0)
COPYARR(imgdata.color.dng_color[ss].colormatrix, tiff_ifd[sidx].dng_color[ss].colormatrix);
sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_CALIBRATION);
if (sidx >= 0)
COPYARR(imgdata.color.dng_color[ss].calibration, tiff_ifd[sidx].dng_color[ss].calibration);
sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_ILLUMINANT);
if (sidx >= 0)
imgdata.color.dng_color[ss].illuminant = tiff_ifd[sidx].dng_color[ss].illuminant;
}
// Levels
sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE);
if (sidx >= 0)
COPYARR(imgdata.color.dng_levels.analogbalance, tiff_ifd[sidx].dng_levels.analogbalance);
sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_WHITE);
if (sidx >= 0)
COPYARR(imgdata.color.dng_levels.dng_whitelevel, tiff_ifd[sidx].dng_levels.dng_whitelevel);
sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_BLACK);
if (sidx >= 0)
{
imgdata.color.dng_levels.dng_black = tiff_ifd[sidx].dng_levels.dng_black;
COPYARR(imgdata.color.dng_levels.dng_cblack, tiff_ifd[sidx].dng_levels.dng_cblack);
}
if (pifd >= 0)
{
sidx = IFDLEVELINDEX(pifd, LIBRAW_DNGFM_PREVIEWCS);
if (sidx >= 0)
imgdata.color.dng_levels.preview_colorspace = tiff_ifd[sidx].dng_levels.preview_colorspace;
}
sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_OPCODE2);
if (sidx >= 0)
meta_offset = tiff_ifd[sidx].opcode2_offset;
sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_LINTABLE);
INT64 linoff = -1;
int linlen = 0;
if (sidx >= 0)
{
linoff = tiff_ifd[sidx].lineartable_offset;
linlen = tiff_ifd[sidx].lineartable_len;
}
if (linoff >= 0 && linlen > 0)
{
INT64 pos = ftell(ifp);
fseek(ifp, linoff, SEEK_SET);
linear_table(linlen);
fseek(ifp, pos, SEEK_SET);
}
// Need to add curve too
}
/* Copy DNG black level to LibRaw's */
maximum = imgdata.color.dng_levels.dng_whitelevel[0];
black = imgdata.color.dng_levels.dng_black;
int ll = LIM(0, (sizeof(cblack) / sizeof(cblack[0])),
(sizeof(imgdata.color.dng_levels.dng_cblack) / sizeof(imgdata.color.dng_levels.dng_cblack[0])));
for (int i = 0; i < ll; i++)
cblack[i] = imgdata.color.dng_levels.dng_cblack[i];
}
#endif
/* Early reject for damaged images */
if (!load_raw || height < 22 || width < 22 ||
#ifdef LIBRAW_LIBRARY_BUILD
(tiff_bps > 16 && load_raw != &LibRaw::deflate_dng_load_raw)
#else
tiff_bps > 16
#endif
|| tiff_samples > 4 || colors > 4 || colors < 1)
{
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY, 1, 2);
#endif
return;
}
{
// Check cam_mul range
int cmul_ok =1;
FORCC if(cam_mul[c] <= 0.001f) cmul_ok = 0;;
if(cmul_ok)
{
double cmin = cam_mul[0],cmax;
double cnorm[4];
FORCC cmin = MIN(cmin,cam_mul[c]);
FORCC cnorm[c] = cam_mul[c]/cmin;
cmax = cmin = cnorm[0];
FORCC
{
cmin = MIN(cmin,cnorm[c]);
cmax = MIN(cmax,cnorm[c]);
}
if(cmin <= 0.01f || cmax > 100.f)
cmul_ok = false;
}
if(!cmul_ok)
cam_mul[0] = cam_mul[3] = 0;
}
if ((use_camera_matrix & ((use_camera_wb || dng_version) | 0x2)) && cmatrix[0][0] > 0.125)
{
memcpy(rgb_cam, cmatrix, sizeof cmatrix);
raw_color = 0;
}
if (raw_color)
adobe_coeff(make, model);
#ifdef LIBRAW_LIBRARY_BUILD
else if (imgdata.color.cam_xyz[0][0] < 0.01)
adobe_coeff(make, model, 1);
#endif
if (load_raw == &CLASS kodak_radc_load_raw)
if (raw_color)
adobe_coeff("Apple", "Quicktake");
#ifdef LIBRAW_LIBRARY_BUILD
// Clear erorneus fuji_width if not set through parse_fuji or for DNG
if (fuji_width && !dng_version && !(imgdata.process_warnings & LIBRAW_WARN_PARSEFUJI_PROCESSED))
fuji_width = 0;
#endif
if (fuji_width)
{
fuji_width = width >> !fuji_layout;
filters = fuji_width & 1 ? 0x94949494 : 0x49494949;
width = (height >> fuji_layout) + fuji_width;
height = width - 1;
pixel_aspect = 1;
}
else
{
if (raw_height < height)
raw_height = height;
if (raw_width < width)
raw_width = width;
}
if (!tiff_bps)
tiff_bps = 12;
if (!maximum)
{
maximum = (1 << tiff_bps) - 1;
if (maximum < 0x10000 && curve[maximum] > 0 && load_raw == &CLASS sony_arw2_load_raw)
maximum = curve[maximum];
}
if (!load_raw || height < 22 || width < 22 ||
#ifdef LIBRAW_LIBRARY_BUILD
(tiff_bps > 16 && load_raw != &LibRaw::deflate_dng_load_raw)
#else
tiff_bps > 16
#endif
|| tiff_samples > 6 || colors > 4)
is_raw = 0;
if (raw_width < 22 || raw_width > 64000 || raw_height < 22 || raw_height > 64000)
is_raw = 0;
#ifdef NO_JASPER
if (load_raw == &CLASS redcine_load_raw)
{
#ifdef DCRAW_VERBOSE
fprintf(stderr, _("%s: You must link dcraw with %s!!\n"), ifname, "libjasper");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER;
#endif
}
#endif
#ifdef NO_JPEG
if (load_raw == &CLASS kodak_jpeg_load_raw || load_raw == &CLASS lossy_dng_load_raw)
{
#ifdef DCRAW_VERBOSE
fprintf(stderr, _("%s: You must link dcraw with %s!!\n"), ifname, "libjpeg");
#endif
is_raw = 0;
#ifdef LIBRAW_LIBRARY_BUILD
imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB;
#endif
}
#endif
if (!cdesc[0])
strcpy(cdesc, colors == 3 ? "RGBG" : "GMCY");
if (!raw_height)
raw_height = height;
if (!raw_width)
raw_width = width;
if (filters > 999 && colors == 3)
filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1;
notraw:
if (flip == UINT_MAX)
flip = tiff_flip;
if (flip == UINT_MAX)
flip = 0;
// Convert from degrees to bit-field if needed
if (flip > 89 || flip < -89)
{
switch ((flip + 3600) % 360)
{
case 270:
flip = 5;
break;
case 180:
flip = 3;
break;
case 90:
flip = 6;
break;
}
}
#ifdef LIBRAW_LIBRARY_BUILD
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY, 1, 2);
#endif
}
//@end COMMON
//@out FILEIO
#ifndef NO_LCMS
void CLASS apply_profile(const char *input, const char *output)
{
  /*
     Apply an ICC color transform to the interpolated image via LittleCMS.
     input:  path of the camera (input) profile, or "embed" to use the
             profile embedded in the raw file (if any).
     output: path of the output profile; NULL selects a standard sRGB profile.
     On failure a warning flag is raised (library build) and the image is
     left untouched.  On success, raw_color is set so rgb_cam is not applied
     again, and the generated/loaded output profile stays in 'oprof' so it
     can be embedded in the output file.
  */
  char *prof;
  cmsHPROFILE hInProfile = 0, hOutProfile = 0;
  cmsHTRANSFORM hTransform;
  FILE *fp;
  unsigned size;

  if (strcmp(input, "embed"))
    hInProfile = cmsOpenProfileFromFile(input, "r");
  else if (profile_length)
  {
#ifndef LIBRAW_LIBRARY_BUILD
    /* Re-read the embedded profile from the raw file */
    prof = (char *)malloc(profile_length);
    merror(prof, "apply_profile()");
    fseek(ifp, profile_offset, SEEK_SET);
    fread(prof, 1, profile_length, ifp);
    hInProfile = cmsOpenProfileFromMem(prof, profile_length);
    free(prof);
#else
    /* LibRaw keeps the embedded profile in memory already */
    hInProfile = cmsOpenProfileFromMem(imgdata.color.profile, profile_length);
#endif
  }
  else
  {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_EMBEDDED_PROFILE;
#endif
#ifdef DCRAW_VERBOSE
    fprintf(stderr, _("%s has no embedded profile.\n"), ifname);
#endif
  }
  if (!hInProfile)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_NO_INPUT_PROFILE;
#endif
    return;
  }
  if (!output)
    hOutProfile = cmsCreate_sRGBProfile();
  else if ((fp = fopen(output, "rb")))
  {
    /* The first 4 bytes of an ICC profile hold its total size, big-endian.
       Fix: check that this read actually succeeded -- the previous code
       used 'size' uninitialized when the file was shorter than 4 bytes. */
    if (fread(&size, 4, 1, fp) == 1)
    {
      fseek(fp, 0, SEEK_SET);
      oprof = (unsigned *)malloc(size = ntohl(size));
      merror(oprof, "apply_profile()");
      fread(oprof, 1, size, fp);
      if (!(hOutProfile = cmsOpenProfileFromMem(oprof, size)))
      {
        free(oprof);
        oprof = 0;
      }
    }
    fclose(fp);
  }
#ifdef DCRAW_VERBOSE
  else
    fprintf(stderr, _("Cannot open file %s!\n"), output);
#endif
  if (!hOutProfile)
  {
#ifdef LIBRAW_LIBRARY_BUILD
    imgdata.process_warnings |= LIBRAW_WARN_BAD_OUTPUT_PROFILE;
#endif
    goto quit;
  }
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Applying color profile...\n"));
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE, 0, 2);
#endif
  /* Transform the whole image in place: 16 bits/sample, RGBA layout */
  hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_16, hOutProfile, TYPE_RGBA_16, INTENT_PERCEPTUAL, 0);
  cmsDoTransform(hTransform, image, image, width * height);
  raw_color = 1; /* Don't use rgb_cam with a profile */
  cmsDeleteTransform(hTransform);
  cmsCloseProfile(hOutProfile);
quit:
  cmsCloseProfile(hInProfile);
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_APPLY_PROFILE, 1, 2);
#endif
}
#endif
//@end FILEIO
//@out COMMON
void CLASS convert_to_rgb()
{
  /*
     Convert the camera-space image to the requested output colorspace
     (output_color: 1=sRGB, 2=Adobe RGB, 3=WideGamut, 4=ProPhoto, 5=XYZ,
     6=ACES).  Builds out_cam = out_rgb * rgb_cam, synthesizes a matching
     ICC profile into the global 'oprof' buffer, then converts pixels
     (library build delegates the per-pixel loop to convert_to_rgb_loop;
     standalone dcraw also fills the histograms here).
  */
#ifndef LIBRAW_LIBRARY_BUILD
  int row, col, c;
#endif
  int i, j, k;
#ifndef LIBRAW_LIBRARY_BUILD
  ushort *img;
  float out[3];
#endif
  float out_cam[3][4];
  double num, inverse[3][3];
  /* XYZ(D50) <- sRGB matrix used for the profile's colorant tags */
  static const double xyzd50_srgb[3][3] = {
      {0.436083, 0.385083, 0.143055}, {0.222507, 0.716888, 0.060608}, {0.013930, 0.097097, 0.714022}};
  /* sRGB <- target-space matrices, indexed by output_color - 1 */
  static const double rgb_rgb[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
  static const double adobe_rgb[3][3] = {
      {0.715146, 0.284856, 0.000000}, {0.000000, 1.000000, 0.000000}, {0.000000, 0.041166, 0.958839}};
  static const double wide_rgb[3][3] = {
      {0.593087, 0.404710, 0.002206}, {0.095413, 0.843149, 0.061439}, {0.011621, 0.069091, 0.919288}};
  static const double prophoto_rgb[3][3] = {
      {0.529317, 0.330092, 0.140588}, {0.098368, 0.873465, 0.028169}, {0.016879, 0.117663, 0.865457}};
  static const double aces_rgb[3][3] = {
      {0.432996, 0.375380, 0.189317}, {0.089427, 0.816523, 0.102989}, {0.019165, 0.118150, 0.941914}};
  static const double(*out_rgb[])[3] = {rgb_rgb, adobe_rgb, wide_rgb, prophoto_rgb, xyz_rgb, aces_rgb};
  static const char *name[] = {"sRGB", "Adobe RGB (1998)", "WideGamut D65", "ProPhoto D65", "XYZ", "ACES"};
  /* ICC profile header template: 1024 bytes, 'mntr' 'RGB ' 'XYZ ' 'acsp'
     'none' (the hex words are big-endian ASCII tags) + PCS illuminant */
  static const unsigned phead[] = {1024, 0, 0x2100000, 0x6d6e7472, 0x52474220, 0x58595a20, 0,
                                   0, 0, 0x61637370, 0, 0, 0x6e6f6e65, 0,
                                   0, 0, 0, 0xf6d6, 0x10000, 0xd32d};
  /* Tag table: count, then (signature, offset-filled-below, size) triples */
  unsigned pbody[] = {10, 0x63707274, 0, 36, /* cprt */
                      0x64657363, 0, 40, /* desc */
                      0x77747074, 0, 20, /* wtpt */
                      0x626b7074, 0, 20, /* bkpt */
                      0x72545243, 0, 14, /* rTRC */
                      0x67545243, 0, 14, /* gTRC */
                      0x62545243, 0, 14, /* bTRC */
                      0x7258595a, 0, 20, /* rXYZ */
                      0x6758595a, 0, 20, /* gXYZ */
                      0x6258595a, 0, 20}; /* bXYZ */
  /* White point as s15Fixed16 XYZ values */
  static const unsigned pwhite[] = {0xf351, 0x10000, 0x116cc};
  /* 'curv' tag with a single gamma entry (value patched in below) */
  unsigned pcurve[] = {0x63757276, 0, 1, 0x1000000};
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB, 0, 2);
#endif
  gamma_curve(gamm[0], gamm[1], 0, 0);
  memcpy(out_cam, rgb_cam, sizeof out_cam);
  /* raw_color set => skip matrix conversion (1-channel, doc mode, or
     output_color out of the 1..6 range) */
#ifndef LIBRAW_LIBRARY_BUILD
  raw_color |= colors == 1 || document_mode || output_color < 1 || output_color > 6;
#else
  raw_color |= colors == 1 || output_color < 1 || output_color > 6;
#endif
  if (!raw_color)
  {
    /* Build the ICC profile describing the chosen output space */
    oprof = (unsigned *)calloc(phead[0], 1);
    merror(oprof, "convert_to_rgb()");
    memcpy(oprof, phead, sizeof phead);
    if (output_color == 5)
      oprof[4] = oprof[5]; /* XYZ output: PCS tag replaces the data space tag */
    oprof[0] = 132 + 12 * pbody[0];
    /* Lay out tag data sequentially, 4-byte aligned; record each offset */
    for (i = 0; i < pbody[0]; i++)
    {
      oprof[oprof[0] / 4] = i ? (i > 1 ? 0x58595a20 : 0x64657363) : 0x74657874;
      pbody[i * 3 + 2] = oprof[0];
      oprof[0] += (pbody[i * 3 + 3] + 3) & -4;
    }
    memcpy(oprof + 32, pbody, sizeof pbody);
    oprof[pbody[5] / 4 + 2] = strlen(name[output_color - 1]) + 1;
    memcpy((char *)oprof + pbody[8] + 8, pwhite, sizeof pwhite);
    /* Gamma for the three TRC tags, as unsigned 8.8 fixed point */
    pcurve[3] = (short)(256 / gamm[5] + 0.5) << 16;
    for (i = 4; i < 7; i++)
      memcpy((char *)oprof + pbody[i * 3 + 2], pcurve, sizeof pcurve);
    /* Colorant tags: XYZ(D50) columns of the output space primaries */
    pseudoinverse((double(*)[3])out_rgb[output_color - 1], inverse, 3);
    for (i = 0; i < 3; i++)
      for (j = 0; j < 3; j++)
      {
        for (num = k = 0; k < 3; k++)
          num += xyzd50_srgb[i][k] * inverse[j][k];
        oprof[pbody[j * 3 + 23] / 4 + i + 2] = num * 0x10000 + 0.5;
      }
    /* ICC files are big-endian throughout */
    for (i = 0; i < phead[0] / 4; i++)
      oprof[i] = htonl(oprof[i]);
    strcpy((char *)oprof + pbody[2] + 8, "auto-generated by dcraw");
    strcpy((char *)oprof + pbody[5] + 12, name[output_color - 1]);
    /* Combined camera -> output-space matrix used for pixel conversion */
    for (i = 0; i < 3; i++)
      for (j = 0; j < colors; j++)
        for (out_cam[i][j] = k = 0; k < 3; k++)
          out_cam[i][j] += out_rgb[output_color - 1][i][k] * rgb_cam[k][j];
  }
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, raw_color ? _("Building histograms...\n") : _("Converting to %s colorspace...\n"),
            name[output_color - 1]);
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  convert_to_rgb_loop(out_cam);
#else
  /* Standalone path: apply out_cam per pixel and accumulate histograms */
  memset(histogram, 0, sizeof histogram);
  for (img = image[0], row = 0; row < height; row++)
    for (col = 0; col < width; col++, img += 4)
    {
      if (!raw_color)
      {
        out[0] = out[1] = out[2] = 0;
        FORCC
        {
          out[0] += out_cam[0][c] * img[c];
          out[1] += out_cam[1][c] * img[c];
          out[2] += out_cam[2][c] * img[c];
        }
        FORC3 img[c] = CLIP((int)out[c]);
      }
      else if (document_mode)
        img[0] = img[fcol(row, col)];
      FORCC histogram[c][img[c] >> 3]++;
    }
#endif
  if (colors == 4 && output_color)
    colors = 3; /* fourth channel folded into the 3x4 matrix above */
#ifndef LIBRAW_LIBRARY_BUILD
  if (document_mode && filters)
    colors = 1;
#endif
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_CONVERT_RGB, 1, 2);
#endif
}
void CLASS fuji_rotate()
{
  /*
     Rotate a Fuji Super CCD image 45 degrees so its diagonal sensor grid
     becomes a normal rectangular grid.  Each output pixel is bilinearly
     interpolated from the source image along the rotated axes.  Replaces
     the global 'image' buffer with the new wide x high buffer and clears
     fuji_width.  No-op when fuji_width is 0.
  */
  int i, row, col;
  double step;
  float r, c, fr, fc;
  unsigned ur, uc;
  ushort wide, high, (*img)[4], (*pix)[4];
  if (!fuji_width)
    return;
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Rotating image 45 degrees...\n"));
#endif
  /* Account for half-size decoding (shrink) */
  fuji_width = (fuji_width - 1 + shrink) >> shrink;
  step = sqrt(0.5); /* one output step = 1/sqrt(2) source pixels at 45 degrees */
  wide = fuji_width / step;
  high = (height - fuji_width) / step;
  img = (ushort(*)[4])calloc(high, wide * sizeof *img);
  merror(img, "fuji_rotate()");
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE, 0, 2);
#endif
  for (row = 0; row < high; row++)
    for (col = 0; col < wide; col++)
    {
      /* Source coordinates along the rotated axes: ur/uc hold the integer
         parts, fr/fc the fractional parts used for interpolation */
      ur = r = fuji_width + (row - col) * step;
      uc = c = (row + col) * step;
      if (ur > height - 2 || uc > width - 2)
        continue; /* outside the source image: leave output pixel zeroed */
      fr = r - ur;
      fc = c - uc;
      pix = image + ur * width + uc;
      /* Bilinear blend of the 2x2 source neighborhood, per channel */
      for (i = 0; i < colors; i++)
        img[row * wide + col][i] = (pix[0][i] * (1 - fc) + pix[1][i] * fc) * (1 - fr) +
                                   (pix[width][i] * (1 - fc) + pix[width + 1][i] * fc) * fr;
    }
  free(image);
  width = wide;
  height = high;
  image = img;
  fuji_width = 0;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_FUJI_ROTATE, 1, 2);
#endif
}
void CLASS stretch()
{
  /*
     Resample the image to square pixels when pixel_aspect != 1.
     pixel_aspect < 1 stretches vertically (more rows); pixel_aspect > 1
     stretches horizontally (more columns).  Each output row/column is a
     linear blend of the two nearest source rows/columns.  Replaces the
     global 'image' buffer and updates height or width accordingly.
  */
  ushort newdim, (*img)[4], *pix0, *pix1;
  int row, col, c;
  double rc, frac;
  if (pixel_aspect == 1)
    return;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH, 0, 2);
#endif
#ifdef DCRAW_VERBOSE
  if (verbose)
    fprintf(stderr, _("Stretching the image...\n"));
#endif
  if (pixel_aspect < 1)
  {
    newdim = height / pixel_aspect + 0.5;
    img = (ushort(*)[4])calloc(width, newdim * sizeof *img);
    merror(img, "stretch()");
    for (rc = row = 0; row < newdim; row++, rc += pixel_aspect)
    {
      /* c = integer source row, frac = fractional part of the blend */
      frac = rc - (c = rc);
      pix0 = pix1 = image[c * width];
      if (c + 1 < height)
        pix1 += width * 4; /* next source row, unless at the last row */
      /* NOTE: FORCC reuses 'c' as the channel index, clobbering the
         source-row index -- safe, it is recomputed next iteration */
      for (col = 0; col < width; col++, pix0 += 4, pix1 += 4)
        FORCC img[row * width + col][c] = pix0[c] * (1 - frac) + pix1[c] * frac + 0.5;
    }
    height = newdim;
  }
  else
  {
    newdim = width * pixel_aspect + 0.5;
    img = (ushort(*)[4])calloc(height, newdim * sizeof *img);
    merror(img, "stretch()");
    for (rc = col = 0; col < newdim; col++, rc += 1 / pixel_aspect)
    {
      /* c = integer source column this time */
      frac = rc - (c = rc);
      pix0 = pix1 = image[c];
      if (c + 1 < width)
        pix1 += 4; /* next source column, unless at the last column */
      for (row = 0; row < height; row++, pix0 += width * 4, pix1 += width * 4)
        FORCC img[row * newdim + col][c] = pix0[c] * (1 - frac) + pix1[c] * frac + 0.5;
    }
    width = newdim;
  }
  free(image);
  image = img;
#ifdef LIBRAW_LIBRARY_BUILD
  RUN_CALLBACK(LIBRAW_PROGRESS_STRETCH, 1, 2);
#endif
}
int CLASS flip_index(int row, int col)
{
  /* Map an output-orientation (row, col) to an index into the unflipped
     image buffer, per the global 'flip' bit-field:
     bit 2 = transpose, bit 1 = mirror vertically, bit 0 = mirror horizontally. */
  int r = row;
  int c = col;
  if (flip & 4)
  {
    int tmp = r;
    r = c;
    c = tmp;
  }
  if (flip & 2)
    r = iheight - 1 - r;
  if (flip & 1)
    c = iwidth - 1 - c;
  return r * iwidth + c;
}
//@end COMMON
/* One 12-byte TIFF IFD entry as written to the output file. */
struct libraw_tiff_tag
{
  ushort tag, type; /* TIFF tag ID and field type (see tiff_set: 1=BYTE, 2=ASCII, 3=SHORT; others stored as-is) */
  int count;        /* number of values of 'type' */
  union {           /* value inlined when it fits in 4 bytes, otherwise an offset */
    char c[4];
    short s[2];
    int i;
  } val;
};
/* Complete in-memory TIFF/EXIF header emitted by tiff_head(); field
   offsets are referenced via the TOFF() macro when building tags. */
struct tiff_hdr
{
  ushort t_order, magic; /* byte order ('II'/'MM') and TIFF magic (42) */
  int ifd;               /* byte offset of the first IFD (set to 10) */
  ushort pad, ntag;      /* alignment padding; number of tags in IFD0 */
  struct libraw_tiff_tag tag[23];
  int nextifd;           /* offset of the next IFD (0 = none) */
  ushort pad2, nexif;    /* padding; number of tags in the EXIF sub-IFD */
  struct libraw_tiff_tag exif[4];
  ushort pad3, ngps;     /* padding; number of tags in the GPS sub-IFD */
  struct libraw_tiff_tag gpst[10];
  short bps[4];          /* BitsPerSample values (tag 258) when colors > 2 */
  int rat[10];           /* rational pairs: X/Y resolution, shutter, aperture, focal length */
  unsigned gps[26];      /* raw GPS data block copied from gpsdata */
  char t_desc[512], t_make[64], t_model[64], soft[32], date[20], t_artist[64];
};
//@out COMMON
void CLASS tiff_set(struct tiff_hdr *th, ushort *ntag, ushort tag, ushort type, int count, int val)
{
  /*
     Append one IFD entry to the tag array that immediately follows the
     counter *ntag (IFD0, EXIF or GPS sub-IFD of 'th'), incrementing the
     counter.  'val' is either the inline value or a byte offset into *th;
     small values of types BYTE/ASCII/SHORT are packed inline per the TIFF
     convention.  For ASCII, 'count' is recomputed from the actual string
     length (capped at count-1) plus its terminator.
  */
  struct libraw_tiff_tag *entry = (struct libraw_tiff_tag *)(ntag + 1) + (*ntag)++;
  entry->val.i = val;
  switch (type)
  {
  case 1: /* BYTE: pack up to 4 bytes of 'val' inline, little-endian */
    if (count <= 4)
      for (int k = 0; k < 4; k++)
        entry->val.c[k] = val >> (k * 8);
    break;
  case 2: /* ASCII: 'val' is an offset into *th; measure the string there */
    count = strnlen((char *)th + val, count - 1) + 1;
    if (count <= 4)
      for (int k = 0; k < 4; k++)
        entry->val.c[k] = ((char *)th)[val + k];
    break;
  case 3: /* SHORT: pack up to 2 shorts of 'val' inline */
    if (count <= 2)
      for (int k = 0; k < 2; k++)
        entry->val.s[k] = val >> (k * 16);
    break;
  }
  entry->count = count;
  entry->type = type;
  entry->tag = tag;
}
#define TOFF(ptr) ((char *)(&(ptr)) - (char *)th)
void CLASS tiff_head(struct tiff_hdr *th, int full)
{
  /*
     Fill 'th' with a TIFF/EXIF header describing the output image.
     full != 0: complete image header (dimensions, strip layout, optional
     ICC profile reference); full == 0: reduced header for thumbnails
     (description/make/model, orientation, EXIF basics only).
     Fix: the localtime() argument had been corrupted to the invalid token
     "×tamp" (HTML-entity damage of "&timestamp"); restored.
  */
  int c, psize = 0;
  struct tm *t;
  memset(th, 0, sizeof *th);
  /* Evaluates to 'II' on little-endian hosts and 'MM' on big-endian ones */
  th->t_order = htonl(0x4d4d4949) >> 16;
  th->magic = 42;                /* TIFF magic number */
  th->ifd = 10;                  /* IFD0 starts right after this 10-byte prologue */
  th->rat[0] = th->rat[2] = 300; /* X/Y resolution: 300/1 dpi */
  th->rat[1] = th->rat[3] = 1;
  /* rat[4..9]: rationals with denominator 1e6 for shutter/aperture/focal */
  FORC(6) th->rat[4 + c] = 1000000;
  th->rat[4] *= shutter;
  th->rat[6] *= aperture;
  th->rat[8] *= focal_len;
  strncpy(th->t_desc, desc, 512);
  strncpy(th->t_make, make, 64);
  strncpy(th->t_model, model, 64);
  strcpy(th->soft, "dcraw v" DCRAW_VERSION);
  t = localtime(&timestamp);
  sprintf(th->date, "%04d:%02d:%02d %02d:%02d:%02d", t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour,
          t->tm_min, t->tm_sec);
  strncpy(th->t_artist, artist, 64);
  if (full)
  {
    tiff_set(th, &th->ntag, 254, 4, 1, 0);               /* NewSubfileType */
    tiff_set(th, &th->ntag, 256, 4, 1, width);           /* ImageWidth */
    tiff_set(th, &th->ntag, 257, 4, 1, height);          /* ImageLength */
    tiff_set(th, &th->ntag, 258, 3, colors, output_bps); /* BitsPerSample */
    if (colors > 2)
      th->tag[th->ntag - 1].val.i = TOFF(th->bps); /* >4 bytes: store as offset */
    FORC4 th->bps[c] = output_bps;
    tiff_set(th, &th->ntag, 259, 3, 1, 1);                /* Compression: none */
    tiff_set(th, &th->ntag, 262, 3, 1, 1 + (colors > 1)); /* PhotometricInterpretation */
  }
  tiff_set(th, &th->ntag, 270, 2, 512, TOFF(th->t_desc)); /* ImageDescription */
  tiff_set(th, &th->ntag, 271, 2, 64, TOFF(th->t_make));  /* Make */
  tiff_set(th, &th->ntag, 272, 2, 64, TOFF(th->t_model)); /* Model */
  if (full)
  {
    if (oprof)
      psize = ntohl(oprof[0]); /* ICC profile size from its big-endian header */
    tiff_set(th, &th->ntag, 273, 4, 1, sizeof *th + psize); /* StripOffsets */
    tiff_set(th, &th->ntag, 277, 3, 1, colors);             /* SamplesPerPixel */
    tiff_set(th, &th->ntag, 278, 4, 1, height);             /* RowsPerStrip */
    tiff_set(th, &th->ntag, 279, 4, 1, height * width * colors * output_bps / 8); /* StripByteCounts */
  }
  else
    /* Map dcraw's flip code (index) to the TIFF Orientation value */
    tiff_set(th, &th->ntag, 274, 3, 1, "12435867"[flip] - '0');
  tiff_set(th, &th->ntag, 282, 5, 1, TOFF(th->rat[0]));    /* XResolution */
  tiff_set(th, &th->ntag, 283, 5, 1, TOFF(th->rat[2]));    /* YResolution */
  tiff_set(th, &th->ntag, 284, 3, 1, 1);                   /* PlanarConfiguration: chunky */
  tiff_set(th, &th->ntag, 296, 3, 1, 2);                   /* ResolutionUnit: inch */
  tiff_set(th, &th->ntag, 305, 2, 32, TOFF(th->soft));     /* Software */
  tiff_set(th, &th->ntag, 306, 2, 20, TOFF(th->date));     /* DateTime */
  tiff_set(th, &th->ntag, 315, 2, 64, TOFF(th->t_artist)); /* Artist */
  tiff_set(th, &th->ntag, 34665, 4, 1, TOFF(th->nexif));   /* EXIF IFD pointer */
  if (psize)
    tiff_set(th, &th->ntag, 34675, 7, psize, sizeof *th); /* InterColorProfile */
  tiff_set(th, &th->nexif, 33434, 5, 1, TOFF(th->rat[4])); /* ExposureTime */
  tiff_set(th, &th->nexif, 33437, 5, 1, TOFF(th->rat[6])); /* FNumber */
  tiff_set(th, &th->nexif, 34855, 3, 1, iso_speed);        /* ISOSpeedRatings */
  tiff_set(th, &th->nexif, 37386, 5, 1, TOFF(th->rat[8])); /* FocalLength */
  if (gpsdata[1])
  {
    tiff_set(th, &th->ntag, 34853, 4, 1, TOFF(th->ngps)); /* GPS IFD pointer */
    tiff_set(th, &th->ngps, 0, 1, 4, 0x202);              /* GPSVersionID */
    tiff_set(th, &th->ngps, 1, 2, 2, gpsdata[29]);        /* GPSLatitudeRef */
    tiff_set(th, &th->ngps, 2, 5, 3, TOFF(th->gps[0]));   /* GPSLatitude */
    tiff_set(th, &th->ngps, 3, 2, 2, gpsdata[30]);        /* GPSLongitudeRef */
    tiff_set(th, &th->ngps, 4, 5, 3, TOFF(th->gps[6]));   /* GPSLongitude */
    tiff_set(th, &th->ngps, 5, 1, 1, gpsdata[31]);        /* GPSAltitudeRef */
    tiff_set(th, &th->ngps, 6, 5, 1, TOFF(th->gps[18]));  /* GPSAltitude */
    tiff_set(th, &th->ngps, 7, 5, 3, TOFF(th->gps[12]));  /* GPSTimeStamp */
    tiff_set(th, &th->ngps, 18, 2, 12, TOFF(th->gps[20])); /* GPSMapDatum */
    tiff_set(th, &th->ngps, 29, 2, 12, TOFF(th->gps[23])); /* GPSDateStamp */
    memcpy(th->gps, gpsdata, sizeof th->gps);
  }
}
#ifdef LIBRAW_LIBRARY_BUILD
/* Write the JPEG thumbnail 't_humb' (t_humb_length bytes, starting with its
   own SOI marker) to 'tfp', prepending a fresh SOI and -- when the source
   lacks an Exif APP1 segment -- a synthesized one built from tiff_head(). */
void CLASS jpeg_thumb_writer(FILE *tfp, char *t_humb, int t_humb_length)
{
  ushort exif[5];
  struct tiff_hdr th;
  /* SOI marker 0xFFD8 */
  fputc(0xff, tfp);
  fputc(0xd8, tfp);
  /* assumes t_humb_length > 10 so t_humb+6 is readable -- TODO confirm callers */
  if (strcmp(t_humb + 6, "Exif"))
  {
    /* APP1 skeleton: 0xFFE1, length placeholder, "Exif\0\0" */
    memcpy(exif, "\xff\xe1 Exif\0\0", 10);
    exif[1] = htons(8 + sizeof th);
    fwrite(exif, 1, sizeof exif, tfp);
    tiff_head(&th, 0);
    fwrite(&th, 1, sizeof th, tfp);
  }
  /* Skip the thumbnail's own SOI (its first 2 bytes) */
  fwrite(t_humb + 2, 1, t_humb_length - 2, tfp);
}
void CLASS jpeg_thumb()
{
  /* Stream the embedded JPEG thumbnail from the input file to the output,
     delegating the Exif-wrapping logic to jpeg_thumb_writer(). */
  char *buffer = (char *)malloc(thumb_length);
  merror(buffer, "jpeg_thumb()");
  fread(buffer, 1, thumb_length, ifp);
  jpeg_thumb_writer(ofp, buffer, thumb_length);
  free(buffer);
}
#else
void CLASS jpeg_thumb()
{
  /* Copy the embedded JPEG thumbnail to ofp, inserting a synthesized
     Exif APP1 segment when the source thumbnail does not carry one. */
  ushort app1[5];
  struct tiff_hdr hdr;
  char *tdata = (char *)malloc(thumb_length);
  merror(tdata, "jpeg_thumb()");
  fread(tdata, 1, thumb_length, ifp);
  /* Fresh SOI marker for the output stream */
  fputc(0xff, ofp);
  fputc(0xd8, ofp);
  if (strcmp(tdata + 6, "Exif") != 0)
  {
    /* APP1 skeleton: 0xFFE1, length placeholder, "Exif\0\0" */
    memcpy(app1, "\xff\xe1 Exif\0\0", 10);
    app1[1] = htons(8 + sizeof hdr);
    fwrite(app1, 1, sizeof app1, ofp);
    tiff_head(&hdr, 0);
    fwrite(&hdr, 1, sizeof hdr, ofp);
  }
  /* Skip the thumbnail's own SOI (first 2 bytes) */
  fwrite(tdata + 2, 1, thumb_length - 2, ofp);
  free(tdata);
}
#endif
void CLASS write_ppm_tiff()
{
  /*
     Write the final image to ofp as uncompressed TIFF (output_tiff) or
     as PPM/PGM/PAM, at 8 or 16 bits per sample.  Chooses the white point
     from the per-channel histograms (auto-brighten), rebuilds the output
     gamma curve accordingly, and emits rows in display orientation using
     flip_index().
  */
  struct tiff_hdr th;
  uchar *ppm;
  ushort *ppm2; /* 16-bit alias of the same row buffer */
  int c, row, col, soff, rstep, cstep;
  int perc, val, total, t_white = 0x2000;
#ifdef LIBRAW_LIBRARY_BUILD
  perc = width * height * auto_bright_thr;
#else
  perc = width * height * 0.01; /* 99th percentile white level */
#endif
  if (fuji_width)
    perc /= 2; /* presumably because an unrotated Fuji diagonal frame has
                  valid data in only about half its pixels -- verify */
  /* Auto white level: skipped when auto-bright is off or highlight
     reconstruction (modes other than 0/2) is requested */
  if (!((highlight & ~2) || no_auto_bright))
    for (t_white = c = 0; c < colors; c++)
    {
      /* Walk the histogram down until 'perc' pixels lie above 'val' */
      for (val = 0x2000, total = 0; --val > 32;)
        if ((total += histogram[c][val]) > perc)
          break;
      if (t_white < val)
        t_white = val;
    }
  gamma_curve(gamm[0], gamm[1], 2, (t_white << 3) / bright);
  iheight = height;
  iwidth = width;
  if (flip & 4)
    SWAP(height, width); /* transposed output orientation */
  ppm = (uchar *)calloc(width, colors * output_bps / 8);
  ppm2 = (ushort *)ppm;
  merror(ppm, "write_ppm_tiff()");
  if (output_tiff)
  {
    tiff_head(&th, 1);
    fwrite(&th, sizeof th, 1, ofp);
    if (oprof)
      fwrite(oprof, ntohl(oprof[0]), 1, ofp); /* embed the ICC profile */
  }
  else if (colors > 3)
    fprintf(ofp, "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n", width, height, colors,
            (1 << output_bps) - 1, cdesc);
  else
    fprintf(ofp, "P%d\n%d %d\n%d\n", colors / 2 + 5, width, height, (1 << output_bps) - 1);
  /* Start offset and row/column strides through the flipped buffer */
  soff = flip_index(0, 0);
  cstep = flip_index(0, 1) - soff;
  rstep = flip_index(1, 0) - flip_index(0, width);
  for (row = 0; row < height; row++, soff += rstep)
  {
    for (col = 0; col < width; col++, soff += cstep)
      if (output_bps == 8)
        FORCC ppm[col * colors + c] = curve[image[soff][c]] >> 8;
      else
        FORCC ppm2[col * colors + c] = curve[image[soff][c]];
    /* 16-bit PPM is big-endian: byte-swap rows on little-endian hosts */
    if (output_bps == 16 && !output_tiff && htons(0x55aa) != 0x55aa)
      swab((char *)ppm2, (char *)ppm2, width * colors * 2);
    fwrite(ppm, colors * output_bps / 8, width, ofp);
  }
  free(ppm);
}
//@end COMMON
int CLASS main(int argc, const char **argv)
{
int arg, status = 0, quality, i, c;
int timestamp_only = 0, thumbnail_only = 0, identify_only = 0;
int user_qual = -1, user_black = -1, user_sat = -1, user_flip = -1;
int use_fuji_rotate = 1, write_to_stdout = 0, read_from_stdin = 0;
const char *sp, *bpfile = 0, *dark_frame = 0, *write_ext;
char opm, opt, *ofname, *cp;
struct utimbuf ut;
#ifndef NO_LCMS
const char *cam_profile = 0, *out_profile = 0;
#endif
#ifndef LOCALTIME
putenv((char *)"TZ=UTC");
#endif
#ifdef LOCALEDIR
setlocale(LC_CTYPE, "");
setlocale(LC_MESSAGES, "");
bindtextdomain("dcraw", LOCALEDIR);
textdomain("dcraw");
#endif
if (argc == 1)
{
printf(_("\nRaw photo decoder \"dcraw\" v%s"), DCRAW_VERSION);
printf(_("\nby Dave Coffin, dcoffin a cybercom o net\n"));
printf(_("\nUsage: %s [OPTION]... [FILE]...\n\n"), argv[0]);
puts(_("-v Print verbose messages"));
puts(_("-c Write image data to standard output"));
puts(_("-e Extract embedded thumbnail image"));
puts(_("-i Identify files without decoding them"));
puts(_("-i -v Identify files and show metadata"));
puts(_("-z Change file dates to camera timestamp"));
puts(_("-w Use camera white balance, if possible"));
puts(_("-a Average the whole image for white balance"));
puts(_("-A <x y w h> Average a grey box for white balance"));
puts(_("-r <r g b g> Set custom white balance"));
puts(_("+M/-M Use/don't use an embedded color matrix"));
puts(_("-C <r b> Correct chromatic aberration"));
puts(_("-P <file> Fix the dead pixels listed in this file"));
puts(_("-K <file> Subtract dark frame (16-bit raw PGM)"));
puts(_("-k <num> Set the darkness level"));
puts(_("-S <num> Set the saturation level"));
puts(_("-n <num> Set threshold for wavelet denoising"));
puts(_("-H [0-9] Highlight mode (0=clip, 1=unclip, 2=blend, 3+=rebuild)"));
puts(_("-t [0-7] Flip image (0=none, 3=180, 5=90CCW, 6=90CW)"));
puts(_("-o [0-5] Output colorspace (raw,sRGB,Adobe,Wide,ProPhoto,XYZ)"));
#ifndef NO_LCMS
puts(_("-o <file> Apply output ICC profile from file"));
puts(_("-p <file> Apply camera ICC profile from file or \"embed\""));
#endif
puts(_("-d Document mode (no color, no interpolation)"));
puts(_("-D Document mode without scaling (totally raw)"));
puts(_("-j Don't stretch or rotate raw pixels"));
puts(_("-W Don't automatically brighten the image"));
puts(_("-b <num> Adjust brightness (default = 1.0)"));
puts(_("-g <p ts> Set custom gamma curve (default = 2.222 4.5)"));
puts(_("-q [0-3] Set the interpolation quality"));
puts(_("-h Half-size color image (twice as fast as \"-q 0\")"));
puts(_("-f Interpolate RGGB as four colors"));
puts(_("-m <num> Apply a 3x3 median filter to R-G and B-G"));
puts(_("-s [0..N-1] Select one raw image or \"all\" from each file"));
puts(_("-6 Write 16-bit instead of 8-bit"));
puts(_("-4 Linear 16-bit, same as \"-6 -W -g 1 1\""));
puts(_("-T Write TIFF instead of PPM"));
puts("");
return 1;
}
argv[argc] = "";
for (arg = 1; (((opm = argv[arg][0]) - 2) | 2) == '+';)
{
opt = argv[arg++][1];
if ((cp = (char *)strchr(sp = "nbrkStqmHACg", opt)))
for (i = 0; i < "114111111422"[cp - sp] - '0'; i++)
if (!isdigit(argv[arg + i][0]))
{
fprintf(stderr, _("Non-numeric argument to \"-%c\"\n"), opt);
return 1;
}
switch (opt)
{
case 'n':
threshold = atof(argv[arg++]);
break;
case 'b':
bright = atof(argv[arg++]);
break;
case 'r':
FORC4 user_mul[c] = atof(argv[arg++]);
break;
case 'C':
aber[0] = 1 / atof(argv[arg++]);
aber[2] = 1 / atof(argv[arg++]);
break;
case 'g':
gamm[0] = atof(argv[arg++]);
gamm[1] = atof(argv[arg++]);
if (gamm[0])
gamm[0] = 1 / gamm[0];
break;
case 'k':
user_black = atoi(argv[arg++]);
break;
case 'S':
user_sat = atoi(argv[arg++]);
break;
case 't':
user_flip = atoi(argv[arg++]);
break;
case 'q':
user_qual = atoi(argv[arg++]);
break;
case 'm':
med_passes = atoi(argv[arg++]);
break;
case 'H':
highlight = atoi(argv[arg++]);
break;
case 's':
shot_select = abs(atoi(argv[arg]));
multi_out = !strcmp(argv[arg++], "all");
break;
case 'o':
if (isdigit(argv[arg][0]) && !argv[arg][1])
output_color = atoi(argv[arg++]);
#ifndef NO_LCMS
else
out_profile = argv[arg++];
break;
case 'p':
cam_profile = argv[arg++];
#endif
break;
case 'P':
bpfile = argv[arg++];
break;
case 'K':
dark_frame = argv[arg++];
break;
case 'z':
timestamp_only = 1;
break;
case 'e':
thumbnail_only = 1;
break;
case 'i':
identify_only = 1;
break;
case 'c':
write_to_stdout = 1;
break;
case 'v':
verbose = 1;
break;
case 'h':
half_size = 1;
break;
case 'f':
four_color_rgb = 1;
break;
case 'A':
FORC4 greybox[c] = atoi(argv[arg++]);
case 'a':
use_auto_wb = 1;
break;
case 'w':
use_camera_wb = 1;
break;
case 'M':
use_camera_matrix = 3 * (opm == '+');
break;
case 'I':
read_from_stdin = 1;
break;
case 'E':
document_mode++;
case 'D':
document_mode++;
case 'd':
document_mode++;
case 'j':
use_fuji_rotate = 0;
break;
case 'W':
no_auto_bright = 1;
break;
case 'T':
output_tiff = 1;
break;
case '4':
gamm[0] = gamm[1] = no_auto_bright = 1;
case '6':
output_bps = 16;
break;
default:
fprintf(stderr, _("Unknown option \"-%c\".\n"), opt);
return 1;
}
}
if (arg == argc)
{
fprintf(stderr, _("No files to process.\n"));
return 1;
}
if (write_to_stdout)
{
if (isatty(1))
{
fprintf(stderr, _("Will not write an image to the terminal!\n"));
return 1;
}
#if defined(WIN32) || defined(DJGPP) || defined(__CYGWIN__)
if (setmode(1, O_BINARY) < 0)
{
perror("setmode()");
return 1;
}
#endif
}
for (; arg < argc; arg++)
{
status = 1;
raw_image = 0;
image = 0;
oprof = 0;
meta_data = ofname = 0;
ofp = stdout;
if (setjmp(failure))
{
if (fileno(ifp) > 2)
fclose(ifp);
if (fileno(ofp) > 2)
fclose(ofp);
status = 1;
goto cleanup;
}
ifname = argv[arg];
if (!(ifp = fopen(ifname, "rb")))
{
perror(ifname);
continue;
}
status = (identify(), !is_raw);
if (user_flip >= 0)
flip = user_flip;
switch ((flip + 3600) % 360)
{
case 270:
flip = 5;
break;
case 180:
flip = 3;
break;
case 90:
flip = 6;
}
if (timestamp_only)
{
if ((status = !timestamp))
fprintf(stderr, _("%s has no timestamp.\n"), ifname);
else if (identify_only)
printf("%10ld%10d %s\n", (long)timestamp, shot_order, ifname);
else
{
if (verbose)
fprintf(stderr, _("%s time set to %d.\n"), ifname, (int)timestamp);
ut.actime = ut.modtime = timestamp;
utime(ifname, &ut);
}
goto next;
}
write_fun = &CLASS write_ppm_tiff;
if (thumbnail_only)
{
if ((status = !thumb_offset))
{
fprintf(stderr, _("%s has no thumbnail.\n"), ifname);
goto next;
}
else if (thumb_load_raw)
{
load_raw = thumb_load_raw;
data_offset = thumb_offset;
height = thumb_height;
width = thumb_width;
filters = 0;
colors = 3;
}
else
{
fseek(ifp, thumb_offset, SEEK_SET);
write_fun = write_thumb;
goto thumbnail;
}
}
if (load_raw == &CLASS kodak_ycbcr_load_raw)
{
height += height & 1;
width += width & 1;
}
if (identify_only && verbose && make[0])
{
printf(_("\nFilename: %s\n"), ifname);
printf(_("Timestamp: %s"), ctime(×tamp));
printf(_("Camera: %s %s\n"), make, model);
if (artist[0])
printf(_("Owner: %s\n"), artist);
if (dng_version)
{
printf(_("DNG Version: "));
for (i = 24; i >= 0; i -= 8)
printf("%d%c", dng_version >> i & 255, i ? '.' : '\n');
}
printf(_("ISO speed: %d\n"), (int)iso_speed);
printf(_("Shutter: "));
if (shutter > 0 && shutter < 1)
shutter = (printf("1/"), 1 / shutter);
printf(_("%0.1f sec\n"), shutter);
printf(_("Aperture: f/%0.1f\n"), aperture);
printf(_("Focal length: %0.1f mm\n"), focal_len);
printf(_("Embedded ICC profile: %s\n"), profile_length ? _("yes") : _("no"));
printf(_("Number of raw images: %d\n"), is_raw);
if (pixel_aspect != 1)
printf(_("Pixel Aspect Ratio: %0.6f\n"), pixel_aspect);
if (thumb_offset)
printf(_("Thumb size: %4d x %d\n"), thumb_width, thumb_height);
printf(_("Full size: %4d x %d\n"), raw_width, raw_height);
}
else if (!is_raw)
fprintf(stderr, _("Cannot decode file %s\n"), ifname);
if (!is_raw)
goto next;
shrink = filters && (half_size || (!identify_only && (threshold || aber[0] != 1 || aber[2] != 1)));
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (identify_only)
{
if (verbose)
{
if (document_mode == 3)
{
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (use_fuji_rotate)
{
if (fuji_width)
{
fuji_width = (fuji_width - 1 + shrink) >> shrink;
iwidth = fuji_width / sqrt(0.5);
iheight = (iheight - fuji_width) / sqrt(0.5);
}
else
{
if (pixel_aspect < 1)
iheight = iheight / pixel_aspect + 0.5;
if (pixel_aspect > 1)
iwidth = iwidth * pixel_aspect + 0.5;
}
}
if (flip & 4)
SWAP(iheight, iwidth);
printf(_("Image size: %4d x %d\n"), width, height);
printf(_("Output size: %4d x %d\n"), iwidth, iheight);
printf(_("Raw colors: %d"), colors);
if (filters)
{
int fhigh = 2, fwide = 2;
if ((filters ^ (filters >> 8)) & 0xff)
fhigh = 4;
if ((filters ^ (filters >> 16)) & 0xffff)
fhigh = 8;
if (filters == 1)
fhigh = fwide = 16;
if (filters == 9)
fhigh = fwide = 6;
printf(_("\nFilter pattern: "));
for (i = 0; i < fhigh; i++)
for (c = i && putchar('/') && 0; c < fwide; c++)
putchar(cdesc[fcol(i, c)]);
}
printf(_("\nDaylight multipliers:"));
FORCC printf(" %f", pre_mul[c]);
if (cam_mul[0] > 0)
{
printf(_("\nCamera multipliers:"));
FORC4 printf(" %f", cam_mul[c]);
}
putchar('\n');
}
else
printf(_("%s is a %s %s image.\n"), ifname, make, model);
next:
fclose(ifp);
continue;
}
if (meta_length)
{
meta_data = (char *)malloc(meta_length);
merror(meta_data, "main()");
}
if (filters || colors == 1)
{
raw_image = (ushort *)calloc((raw_height + 7), raw_width * 2);
merror(raw_image, "main()");
}
else
{
image = (ushort(*)[4])calloc(iheight, iwidth * sizeof *image);
merror(image, "main()");
}
if (verbose)
fprintf(stderr, _("Loading %s %s image from %s ...\n"), make, model, ifname);
if (shot_select >= is_raw)
fprintf(stderr, _("%s: \"-s %d\" requests a nonexistent image!\n"), ifname, shot_select);
fseeko(ifp, data_offset, SEEK_SET);
if (raw_image && read_from_stdin)
fread(raw_image, 2, raw_height * raw_width, stdin);
else
(*load_raw)();
if (document_mode == 3)
{
top_margin = left_margin = fuji_width = 0;
height = raw_height;
width = raw_width;
}
iheight = (height + shrink) >> shrink;
iwidth = (width + shrink) >> shrink;
if (raw_image)
{
image = (ushort(*)[4])calloc(iheight, iwidth * sizeof *image);
merror(image, "main()");
crop_masked_pixels();
free(raw_image);
}
if (zero_is_bad)
remove_zeroes();
bad_pixels(bpfile);
if (dark_frame)
subtract(dark_frame);
quality = 2 + !fuji_width;
if (user_qual >= 0)
quality = user_qual;
i = cblack[3];
FORC3 if (i > cblack[c]) i = cblack[c];
FORC4 cblack[c] -= i;
black += i;
i = cblack[6];
FORC(cblack[4] * cblack[5])
if (i > cblack[6 + c])
i = cblack[6 + c];
FORC(cblack[4] * cblack[5])
cblack[6 + c] -= i;
black += i;
if (user_black >= 0)
black = user_black;
FORC4 cblack[c] += black;
if (user_sat > 0)
maximum = user_sat;
#ifdef COLORCHECK
colorcheck();
#endif
if (is_foveon)
{
if (document_mode || load_raw == &CLASS foveon_dp_load_raw)
{
for (i = 0; i < height * width * 4; i++)
if ((short)image[0][i] < 0)
image[0][i] = 0;
}
else
foveon_interpolate();
}
else if (document_mode < 2)
scale_colors();
pre_interpolate();
if (filters && !document_mode)
{
if (quality == 0)
lin_interpolate();
else if (quality == 1 || colors > 3)
vng_interpolate();
else if (quality == 2 && filters > 1000)
ppg_interpolate();
else if (filters == 9)
xtrans_interpolate(quality * 2 - 3);
else
ahd_interpolate();
}
if (mix_green)
for (colors = 3, i = 0; i < height * width; i++)
image[i][1] = (image[i][1] + image[i][3]) >> 1;
if (!is_foveon && colors == 3)
median_filter();
if (!is_foveon && highlight == 2)
blend_highlights();
if (!is_foveon && highlight > 2)
recover_highlights();
if (use_fuji_rotate)
fuji_rotate();
#ifndef NO_LCMS
if (cam_profile)
apply_profile(cam_profile, out_profile);
#endif
convert_to_rgb();
if (use_fuji_rotate)
stretch();
thumbnail:
if (write_fun == &CLASS jpeg_thumb)
write_ext = ".jpg";
else if (output_tiff && write_fun == &CLASS write_ppm_tiff)
write_ext = ".tiff";
else
write_ext = ".pgm\0.ppm\0.ppm\0.pam" + colors * 5 - 5;
ofname = (char *)malloc(strlen(ifname) + 64);
merror(ofname, "main()");
if (write_to_stdout)
strcpy(ofname, _("standard output"));
else
{
strcpy(ofname, ifname);
if ((cp = strrchr(ofname, '.')))
*cp = 0;
if (multi_out)
sprintf(ofname + strlen(ofname), "_%0*d", snprintf(0, 0, "%d", is_raw - 1), shot_select);
if (thumbnail_only)
strcat(ofname, ".thumb");
strcat(ofname, write_ext);
ofp = fopen(ofname, "wb");
if (!ofp)
{
status = 1;
perror(ofname);
goto cleanup;
}
}
if (verbose)
fprintf(stderr, _("Writing data to %s ...\n"), ofname);
(*write_fun)();
fclose(ifp);
if (ofp != stdout)
fclose(ofp);
cleanup:
if (meta_data)
free(meta_data);
if (ofname)
free(ofname);
if (oprof)
free(oprof);
if (image)
free(image);
if (multi_out)
{
if (++shot_select < is_raw)
arg--;
else
shot_select = 0;
}
}
return status;
}
#endif
|
GB_unop__lnot_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_fp32_fp32
// op(A') function: GB_unop_tran__lnot_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply z = !(aij != 0) to every entry of A, writing the result into Cx.
// Auto-generated kernel (Generator/GB_unop.c): keep logic in sync with the
// generator rather than hand-editing.
GrB_Info GB_unop_apply__lnot_fp32_fp32
(
    float *Cx, // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
    int64_t anz, // number of entries (or bitmap size)
    int nthreads // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ; // cast step (identity here: float -> float)
            Cx [p] = !(z != 0) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ; // skip entries absent from the bitmap
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = !(z != 0) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared template GB_unop_transpose.c performs the
// transpose; this auto-generated wrapper only binds the fp32 LNOT macros.
GrB_Info GB_unop_tran__lnot_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces, // workspace array used by the template
    const int64_t *GB_RESTRICT A_slice, // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB001-antidep1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A loop with loop-carried anti-dependence.
Data race pair: a[i+1]@64:10 vs. a[i]@64:5
*/
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
  int i;
  int len = 1000;
  int a[1000];
  /* Initialization: each iteration writes a distinct a[i], so this
     parallel loop is race-free. */
#pragma omp parallel for simd
  for (i=0; i<len; i++)
    a[i]= i;
  /* NOTE(benchmark): this loop carries a loop-carried anti-dependence:
     a[i] is written while a[i+1] is read by the next lower iteration.
     Under "omp simd" the vectorized reads/writes race — this is the
     INTENTIONAL data race of this DataRaceBench "yes" kernel
     (race pair: a[i+1] read vs a[i] write). Do not "fix" it. */
#pragma omp simd
  for (i=0;i< len -1 ;i++)
    a[i]=a[i+1]+1;
  printf ("a[500]=%d\n", a[500] );
  return 0;
}
|
racf_fmt_plug.c | /* RACF cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com> .
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Thanks to Nigel Pentland <nigel at nigelpentland.net>, author of CRACF for
* providing algorithm details.
*
* Thanks to Main Framed <mainframed767 at gmail.com> for providing test vectors,
* algorithm details and requesting the RACF cracker in the first place.
*
* racfdump format => userid:$racf$*userid*deshash
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_racf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_racf);
#else
#include <openssl/des.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "crc32.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 64
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "RACF"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 8 /* RACF/DES keys are at most 8 characters */
#define CIPHERTEXT_LENGTH 16 /* 8 hash bytes as 16 hex digits */
#define BINARY_SIZE 8 /* one DES block */
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1 /* scaled by OMP_SCALE*threads in init() */
/* ASCII -> EBCDIC translation table, indexed by the ASCII byte value. */
static const unsigned char a2e[256] = {
0, 1, 2, 3, 55, 45, 46, 47, 22, 5, 37, 11, 12, 13, 14, 15,
16, 17, 18, 19, 60, 61, 50, 38, 24, 25, 63, 39, 28, 29, 30, 31,
64, 79,127,123, 91,108, 80,125, 77, 93, 92, 78,107, 96, 75, 97,
240,241,242,243,244,245,246,247,248,249,122, 94, 76,126,110,111,
124,193,194,195,196,197,198,199,200,201,209,210,211,212,213,214,
215,216,217,226,227,228,229,230,231,232,233, 74,224, 90, 95,109,
121,129,130,131,132,133,134,135,136,137,145,146,147,148,149,150,
151,152,153,162,163,164,165,166,167,168,169,192,106,208,161, 7,
32, 33, 34, 35, 36, 21, 6, 23, 40, 41, 42, 43, 44, 9, 10, 27,
48, 49, 26, 51, 52, 53, 54, 8, 56, 57, 58, 59, 4, 20, 62,225,
65, 66, 67, 68, 69, 70, 71, 72, 73, 81, 82, 83, 84, 85, 86, 87,
88, 89, 98, 99,100,101,102,103,104,105,112,113,114,115,116,117,
118,119,120,128,138,139,140,141,142,143,144,154,155,156,157,158,
159,160,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,202,203,204,205,206,207,218,219,
220,221,222,223,234,235,236,237,238,239,250,251,252,253,254,255
};
/* This is a2e[] with each entry XOR 0x55, left-shifted one bit
   and finally with odd parity so that DES_set_key_unchecked
   can be used directly. This provides about 15% speed up.
   (The XOR 0x55 matches the key-munging RACF applies before DES.) */
static const unsigned char a2e_precomputed[256] = {
171, 168, 174, 173, 196, 241, 247, 244, 134, 161, 224, 188, 179, 176, 182, 181,
138, 137, 143, 140, 211, 208, 206, 230, 155, 152, 213, 229, 146, 145, 151, 148,
42, 52, 84, 93, 28, 115, 11, 81, 49, 16, 19, 55, 124, 107, 61, 104,
74, 73, 79, 76, 67, 64, 70, 69, 91, 88, 94, 22, 50, 87, 118, 117,
82, 41, 47, 44, 35, 32, 38, 37, 59, 56, 8, 14, 13, 2, 1, 7,
4, 26, 25, 110, 109, 98, 97, 103, 100, 122, 121, 62, 107, 31, 21, 112,
88, 168, 174, 173, 162, 161, 167, 164, 186, 185, 137, 143, 140, 131, 128, 134,
133, 155, 152, 239, 236, 227, 224, 230, 229, 251, 248, 42, 127, 11, 233, 164,
234, 233, 239, 236, 227, 128, 167, 133, 251, 248, 254, 253, 242, 185, 191, 157,
203, 200, 158, 205, 194, 193, 199, 186, 218, 217, 223, 220, 162, 131, 214, 104,
41, 47, 44, 35, 32, 38, 37, 59, 56, 8, 14, 13, 2, 1, 7, 4,
26, 25, 110, 109, 98, 97, 103, 100, 122, 121, 74, 73, 79, 76, 67, 64,
70, 69, 91, 171, 191, 188, 179, 176, 182, 181, 138, 158, 157, 146, 145, 151,
148, 234, 254, 253, 242, 241, 247, 244, 203, 200, 206, 205, 194, 193, 199, 196,
218, 217, 223, 220, 211, 208, 214, 213, 62, 61, 50, 49, 55, 52, 31, 28,
19, 16, 22, 21, 127, 124, 115, 112, 118, 117, 94, 93, 82, 81, 87, 84
};
/* Translate an ASCII string to EBCDIC in place, using the a2e table.
   Stops at the terminating NUL; the length does not change. */
static void ascii2ebcdic(unsigned char *str)
{
	unsigned char *p;

	for (p = str; *p; p++)
		*p = a2e[*p];
}
/* Pad a userid out to exactly 8 bytes with EBCDIC spaces (0x40) and
   NUL-terminate at index 8. The caller must supply a buffer of at
   least 9 bytes; userids longer than 8 are truncated by the final NUL. */
static void process_userid(unsigned char *str)
{
	size_t pos;

	for (pos = strlen((const char*)str); pos < 8; pos++)
		str[pos] = 0x40;
	str[8] = 0; /* terminate string */
}
#ifdef RACF_DEBUG
/* Debug helper: dump len bytes as lowercase hex followed by a newline. */
static void print_hex(unsigned char *str, int len)
{
	int pos;

	for (pos = 0; pos < len; pos++)
		printf("%02x", str[pos]);
	printf("\n");
}
#endif
/* Self-test vectors: { "$racf$*USERID*DESHASH", "PLAINTEXT" }.
   Note the userid is part of the salt, so the same password hashes
   differently under different userids. */
static struct fmt_tests racf_tests[] = {
	{"$racf$*AAAAAAA*CA2E330B2FD1820E", "AAAAAAAA"},
	{"$racf$*AAAAAAAA*062314297C496E0E", "AAAAAAAA"},
	{"$racf$*JJJJJJJJ*8B5F0B1D0826D927", "TESTTEST"},
	{"$racf$*TTTTTTTT*424B258AF8B9061B", "TESTTEST"},
	{"$racf$*A*0F7DE80335E8ED68", "A"},
	{"$racf$*OPEN3*EC76FC0DEF5B0A83", "SYS1"},
	{"$racf$*TESTTEST*0FF48804F759193F", "TESTTEST"},
	{"$racf$*SYSOPR*83845F8EEC7C20D8", "SYSOPR"},
	{"$racf$*TCPIP*657889CD0F5D40DF", "SYS1"},
	{"$racf$*TESTER*E05AB770EA048421", "TEST"},
	{NULL}
};
/* Per-hash salt: the RACF userid, already converted to EBCDIC and
   space-padded to 8 bytes by get_salt(). */
static struct custom_salt {
	unsigned char userid[8 + 1]; /* 8 EBCDIC bytes + NUL */
} *cur_salt;
/* Per-candidate buffers, allocated in init() */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* One-time format setup: scale keys-per-crypt for OpenMP and allocate
   the key/output buffers sized to the (possibly scaled) maximum. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE; /* batch several keys per thread to amortize overhead */
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Accept ciphertexts of the form "$racf$*USERID*HHHHHHHHHHHHHHHH" where
   the hash part is exactly CIPHERTEXT_LENGTH hex digits. Userids longer
   than 8 characters are accepted here and truncated later in get_salt().
   Returns 1 if the ciphertext is valid for this format, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *copy, *keep, *field, *ptr;
	int ok = 0;

	if (strncmp(ciphertext, "$racf$*", 7) != 0)
		return 0;
	copy = strdup(ciphertext);
	keep = copy; /* strtok advances copy; keep the original for freeing */
	copy += 7;
	field = strtok(copy, "*"); /* username */
	if (field != NULL && (field = strtok(NULL, "*")) != NULL) { /* hash */
		ptr = field;
		while (atoi16[ARCH_INDEX(*ptr)] != 0x7F) /* scan hex digits */
			ptr++;
		ok = (*ptr == 0) && (ptr - field == CIPHERTEXT_LENGTH);
	}
	MEM_FREE(keep);
	return ok;
}
/* Build the salt from the ciphertext: extract the userid, convert it to
   EBCDIC and pad it to 8 bytes. Returns a pointer to a static buffer
   (the caller copies SALT_SIZE bytes; not reentrant, which matches how
   JtR calls salt extraction single-threaded).
   NOTE(review): strtok() is assumed non-NULL here because valid() has
   already accepted the ciphertext. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy, *username;
	static struct custom_salt cs;
	ctcopy += 7;	/* skip over "$racf$*" */
	username = strtok(ctcopy, "*");
	/* process username: truncate to 8 bytes, then EBCDIC-convert and pad */
	strncpy((char*)cs.userid, username, 8);
	cs.userid[8] = 0; // terminate username at 8 bytes
	ascii2ebcdic(cs.userid);
	process_userid(cs.userid);
#ifdef RACF_DEBUG
	printf("userid in EBCDIC : ");
	print_hex(cs.userid, 8);
#endif
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Decode the 16 hex digits after the last '*' into the 8 binary hash
   bytes. The union forces word alignment of the static result buffer,
   which is returned directly (caller copies BINARY_SIZE bytes). */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy; /* alignment only */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
		(atoi16[ARCH_INDEX(*p)] << 4) |
		atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}
/* Hash-table functions for the cracker core: each returns the low
   4/8/12/16/20/24/27 bits of the first 32 bits of the computed hash. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* Install the salt for the next crypt_all() batch. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Compute hashes for all queued keys: DES-encrypt the EBCDIC userid with
   the (munged) password as key, RACF-style. Note the #ifdef trick: in a
   non-OpenMP build the braced body runs exactly once with index 0, which
   is correct because keys-per-crypt is only scaled above 1 under _OPENMP
   (see init() and MAX_KEYS_PER_CRYPT). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		DES_cblock des_key;
		DES_key_schedule schedule;
		DES_cblock ivec;
		int i;
		/* process key: a2e_precomputed already applies the EBCDIC
		   conversion, XOR 0x55, <<1 and odd parity in one lookup */
		for(i = 0; saved_key[index][i]; i++)
			des_key[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][i])];
		/* replace missing characters in userid by (EBCDIC space (0x40) XOR 0x55) << 1 */
		while(i < 8)
			des_key[i++] = 0x2a;
		DES_set_key_unchecked(&des_key, &schedule);
		/* do encryption: single CBC block with zero IV == plain ECB */
		memset(ivec, 0, 8);
		DES_cbc_encrypt(cur_salt->userid, (unsigned char*)crypt_out[index], 8, &schedule, &ivec, DES_ENCRYPT);
	}
	return count;
}
/* Return 1 if any computed hash in the batch matches the target binary.
   Same #ifdef trick as crypt_all: without _OPENMP only index 0 exists. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
		return 1;
	return 0;
}
/* Exact comparison of one candidate's full 8-byte hash. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing further to verify: cmp_one already compared all hash bytes. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password, truncated to 8 bytes (the DES key limit)
   and NUL-terminated, into the per-index key buffer. */
static void racf_set_key(char *key, int index)
{
	size_t len = strlen(key);

	if (len > 8)
		len = 8;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}
/* Return the stored (possibly truncated) candidate password. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* JtR format descriptor: wires the RACF routines into the cracker core.
   The initializer is positional — field order must match struct fmt_main. */
struct fmt_main fmt_racf = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		racf_tests
	}, { /* methods — defaults used where this format needs nothing special */
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		racf_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
rose_Stress-1.c | //#include <float.h>
//#include <math.h>
#define MIN(a, b) ( (a < b) ? a : b)
#define MAX(a, b) ( (a > b) ? a : b)
#include "omp.h"
typedef double real8;

/* Zero the six updated stress components of any entry whose zone's
   accumulated strain exceeds the failure threshold, and clamp that
   zone's strain to 1% above the threshold. The stress arrays are
   indexed by i; eps is indexed by the zone id zoneset[i]. */
void StressCheckEpsFail(real8 *newSxx,real8 *newSyy,real8 *newSzz,real8 *newTxy,real8 *newTxz,real8 *newTyz,real8 *eps,real8 eps_failure_model,const int *zoneset,int length)
{
  int i;
#pragma omp parallel for
  for (i = 0; i < length; i++) {
    const int zone = zoneset[i];   /* hoist the repeated zoneset[i] load */
    if (eps[zone] > eps_failure_model) {
      newSxx[i] = 0.0;
      newSyy[i] = 0.0;
      newSzz[i] = 0.0;
      newTxy[i] = 0.0;
      newTxz[i] = 0.0;
      newTyz[i] = 0.0;
      eps[zone] = eps_failure_model * 1.01;
    }
  }
}
/* Accumulate stress-strain work terms over one time step:
   deltz[zone] gets the full-tensor work (old + new stresses averaged),
   delts[i] gets the old-stress-only contribution. szz is reconstructed
   from the deviatoric trace condition szz = -(sxx+syy).
   NOTE(review): `index` is assigned but never used (ROSE autopar
   artifact); the body re-reads zoneset[i] instead.
   NOTE(review): deltz[zoneset[i]] += ... races if zoneset contains
   duplicate zone ids within one call — presumably ids are unique;
   verify against callers. */
void StressStrainWork(real8 *deltz,real8 *delts,const real8 *newSxx,const real8 *newSyy,const real8 *newSzz,const real8 *newTxy,const real8 *newTxz,const real8 *newTyz,const real8 *sxx,const real8 *syy,const real8 *txy,const real8 *txz,const real8 *tyz,const real8 *dxx,const real8 *dyy,const real8 *dzz,const real8 *dxy,const real8 *dxz,const real8 *dyz,real8 deltaTime,const int *zoneset,const real8 *vc,const real8 *vnewc,int length)
{
  int i;
  int index;
  real8 quarterDelta = 0.25 * deltaTime;
  real8 szz;
#pragma omp parallel for private (index,szz,i) firstprivate (length,quarterDelta)
  for (i = 0; i <= length - 1; i += 1) {
    index = zoneset[i];
    szz = -sxx[zoneset[i]] - syy[zoneset[i]];
    deltz[zoneset[i]] += quarterDelta * (vnewc[i] + vc[i]) * (dxx[zoneset[i]] * (sxx[zoneset[i]] + newSxx[i]) + dyy[zoneset[i]] * (syy[zoneset[i]] + newSyy[i]) + dzz[zoneset[i]] * (szz + newSzz[i]) + 2. * dxy[zoneset[i]] * (txy[zoneset[i]] + newTxy[i]) + 2. * dxz[zoneset[i]] * (txz[zoneset[i]] + newTxz[i]) + 2. * dyz[zoneset[i]] * (tyz[zoneset[i]] + newTyz[i]));
    delts[i] += quarterDelta * (vnewc[i] + vc[i]) * (dxx[zoneset[i]] * sxx[zoneset[i]] + dyy[zoneset[i]] * syy[zoneset[i]] + dzz[zoneset[i]] * szz + 2. * dxy[zoneset[i]] * txy[zoneset[i]] + 2. * dxz[zoneset[i]] * txz[zoneset[i]] + 2. * dyz[zoneset[i]] * tyz[zoneset[i]]);
  }
}
void StressStrainHeat(const real8 *deltz,real8 *deltzh,real8 *deltrh,const real8 *shearMod,const real8 *shearRatio,const real8 *shearDer,const real8 *newSxx,const real8 *newSyy,const real8 *newSzz,const real8 *newTxy,const real8 *newTxz,const real8 *newTyz,const real8 *sxx,const real8 *syy,const real8 *txy,const real8 *txz,const real8 *tyz,real8 deltaTime,const int *zoneset,const real8 *vc,const real8 *vnewc,int length)
{
real8 shearr;
real8 sheari;
real8 avgMod;
int nz;
int i;
/* Quiet the compiler - unused argument */
deltaTime = deltaTime;
#pragma omp parallel for private (shearr,sheari,avgMod,nz,i) firstprivate (length)
for (i = 0; i <= length - 1; i += 1) {
nz = zoneset[i];
shearr = 0.5 * shearRatio[i];
if (shearMod[zoneset[i]] > 0.) {
sheari = 0.5 / shearMod[zoneset[i]];
deltrh[zoneset[i]] = 0.25 * (vnewc[i] + vc[i]) * ((newSxx[i] * sheari - sxx[zoneset[i]] * shearr) * (sxx[zoneset[i]] + newSxx[i]) + (newSyy[i] * sheari - syy[zoneset[i]] * shearr) * (syy[zoneset[i]] + newSyy[i]) + (newSzz[i] * sheari + (syy[zoneset[i]] + sxx[zoneset[i]]) * shearr) * (newSzz[i] - sxx[zoneset[i]] - syy[zoneset[i]]) + 2. * (newTxy[i] * sheari - txy[zoneset[i]] * shearr) * (txy[zoneset[i]] + newTxy[i]) + 2. * (newTxz[i] * sheari - txz[zoneset[i]] * shearr) * (txz[zoneset[i]] + newTxz[i]) + 2. * (newTyz[i] * sheari - tyz[zoneset[i]] * shearr) * (tyz[zoneset[i]] + newTyz[i]));
}
else {
deltrh[zoneset[i]] = - 0.25 * (vnewc[i] + vc[i]) * (sxx[zoneset[i]] * (sxx[zoneset[i]] + newSxx[i]) + syy[zoneset[i]] * (syy[zoneset[i]] + newSyy[i]) - (syy[zoneset[i]] + sxx[zoneset[i]]) * (newSzz[i] - sxx[zoneset[i]] - syy[zoneset[i]]) + 2. * txy[zoneset[i]] * (txy[zoneset[i]] + newTxy[i]) + 2. * txz[zoneset[i]] * (txz[zoneset[i]] + newTxz[i]) + 2. * tyz[zoneset[i]] * (tyz[zoneset[i]] + newTyz[i])) * shearr;
}
deltzh[zoneset[i]] = deltz[zoneset[i]] - deltrh[zoneset[i]];
avgMod = 0.5 * shearMod[zoneset[i]];
if (shearRatio[i] > 0.) {
avgMod = avgMod + 0.5 / shearRatio[i];
}
if (avgMod > 0.) {
deltrh[zoneset[i]] = shearDer[i] * deltrh[zoneset[i]] / avgMod;
}
else {
deltrh[zoneset[i]] = 0.0;
}
}
}
|
GB_binop__bxor_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__bxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__bxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bxor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int16)
// C=scalar+B GB (_bind1st__bxor_int16)
// C=scalar+B' GB (_bind1st_tran__bxor_int16)
// C=A+scalar GB (_bind2nd__bxor_int16)
// C=A'+scalar GB (_bind2nd_tran__bxor_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij) ^ (bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) ^ (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXOR || GxB_NO_INT16 || GxB_NO_BXOR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the BXOR int16 operator, all three matrices dense.
// Auto-generated wrapper: the shared template does the work.
GrB_Info GB (_Cdense_ewise3_noaccum__bxor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C
// with cij = cij ^ bij. Auto-generated wrapper over the template.
GrB_Info GB (_Cdense_accumB__bxor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C
// with cij = cij ^ b. Auto-generated wrapper over the template.
GrB_Info GB (_Cdense_accumb__bxor_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork, // the scalar, passed as untyped bytes
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the inner return above always fires (generator artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__bxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__bxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxor_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxor_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) ^ (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxor_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) ^ (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) ^ (aij) ; \
}
GrB_Info GB (_bind1st_tran__bxor_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) ^ (y) ; \
}
GrB_Info GB (_bind2nd_tran__bxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gimplify.c | /* Tree lowering pass. This pass converts the GENERIC functions-as-trees
tree representation into the GIMPLE form.
Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
2012 Free Software Foundation, Inc.
Major work done by Sebastian Pop <s.pop@laposte.net>,
Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "tree-pretty-print.h"
#include "langhooks.h"
#include "tree-flow.h"
#include "cgraph.h"
#include "timevar.h"
#include "hashtab.h"
#include "flags.h"
#include "function.h"
#include "output.h"
#include "ggc.h"
#include "diagnostic-core.h"
#include "target.h"
#include "pointer-set.h"
#include "splay-tree.h"
#include "vec.h"
#include "gimple.h"
#include "tree-pass.h"
#include "langhooks-def.h" /* FIXME: for lhd_set_decl_assembler_name. */
#include "expr.h" /* FIXME: for can_move_by_pieces
and STACK_CHECK_MAX_VAR_SIZE. */
enum gimplify_omp_var_data
{
GOVD_SEEN = 1,
GOVD_EXPLICIT = 2,
GOVD_SHARED = 4,
GOVD_PRIVATE = 8,
GOVD_FIRSTPRIVATE = 16,
GOVD_LASTPRIVATE = 32,
GOVD_REDUCTION = 64,
GOVD_LOCAL = 128,
GOVD_DEBUG_PRIVATE = 256,
GOVD_PRIVATE_OUTER_REF = 512,
GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
| GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LOCAL)
};
enum omp_region_type
{
ORT_WORKSHARE = 0,
ORT_PARALLEL = 2,
ORT_COMBINED_PARALLEL = 3,
ORT_TASK = 4,
ORT_UNTIED_TASK = 5
};
struct gimplify_omp_ctx
{
struct gimplify_omp_ctx *outer_context;
splay_tree variables;
struct pointer_set_t *privatized_types;
location_t location;
enum omp_clause_default_kind default_kind;
enum omp_region_type region_type;
};
static struct gimplify_ctx *gimplify_ctxp;
static struct gimplify_omp_ctx *gimplify_omp_ctxp;
/* Formal (expression) temporary table handling: multiple occurrences of
the same scalar expression are evaluated into the same temporary. */
typedef struct gimple_temp_hash_elt
{
tree val; /* Key */
tree temp; /* Value */
} elt_t;
/* Forward declaration. */
static enum gimplify_status gimplify_compound_expr (tree *, gimple_seq *, bool);
/* Mark X addressable. Unlike the langhook we expect X to be in gimple
form and we don't do any syntax checking. */
void
mark_addressable (tree x)
{
while (handled_component_p (x))
x = TREE_OPERAND (x, 0);
if (TREE_CODE (x) == MEM_REF
&& TREE_CODE (TREE_OPERAND (x, 0)) == ADDR_EXPR)
x = TREE_OPERAND (TREE_OPERAND (x, 0), 0);
if (TREE_CODE (x) != VAR_DECL
&& TREE_CODE (x) != PARM_DECL
&& TREE_CODE (x) != RESULT_DECL)
return;
TREE_ADDRESSABLE (x) = 1;
}
/* Return a hash value for a formal temporary table entry. */
static hashval_t
gimple_tree_hash (const void *p)
{
tree t = ((const elt_t *) p)->val;
return iterative_hash_expr (t, 0);
}
/* Compare two formal temporary table entries. */
static int
gimple_tree_eq (const void *p1, const void *p2)
{
tree t1 = ((const elt_t *) p1)->val;
tree t2 = ((const elt_t *) p2)->val;
enum tree_code code = TREE_CODE (t1);
if (TREE_CODE (t2) != code
|| TREE_TYPE (t1) != TREE_TYPE (t2))
return 0;
if (!operand_equal_p (t1, t2, 0))
return 0;
#ifdef ENABLE_CHECKING
/* Only allow them to compare equal if they also hash equal; otherwise
results are nondeterminate, and we fail bootstrap comparison. */
gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2));
#endif
return 1;
}
/* Link gimple statement GS to the end of the sequence *SEQ_P. If
*SEQ_P is NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_stmt, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
void
gimple_seq_add_stmt_without_update (gimple_seq *seq_p, gimple gs)
{
gimple_stmt_iterator si;
if (gs == NULL)
return;
if (*seq_p == NULL)
*seq_p = gimple_seq_alloc ();
si = gsi_last (*seq_p);
gsi_insert_after_without_update (&si, gs, GSI_NEW_STMT);
}
/* Shorter alias name for the above function for use in gimplify.c
only. */
static inline void
gimplify_seq_add_stmt (gimple_seq *seq_p, gimple gs)
{
gimple_seq_add_stmt_without_update (seq_p, gs);
}
/* Append sequence SRC to the end of sequence *DST_P. If *DST_P is
NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_seq, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
static void
gimplify_seq_add_seq (gimple_seq *dst_p, gimple_seq src)
{
gimple_stmt_iterator si;
if (src == NULL)
return;
if (*dst_p == NULL)
*dst_p = gimple_seq_alloc ();
si = gsi_last (*dst_p);
gsi_insert_seq_after_without_update (&si, src, GSI_NEW_STMT);
}
/* Set up a context for the gimplifier. */
void
push_gimplify_context (struct gimplify_ctx *c)
{
memset (c, '\0', sizeof (*c));
c->prev_context = gimplify_ctxp;
gimplify_ctxp = c;
}
/* Tear down a context for the gimplifier. If BODY is non-null, then
put the temporaries into the outer BIND_EXPR. Otherwise, put them
in the local_decls.
BODY is not a sequence, but the first tuple in a sequence. */
void
pop_gimplify_context (gimple body)
{
struct gimplify_ctx *c = gimplify_ctxp;
gcc_assert (c && (c->bind_expr_stack == NULL
|| VEC_empty (gimple, c->bind_expr_stack)));
VEC_free (gimple, heap, c->bind_expr_stack);
gimplify_ctxp = c->prev_context;
if (body)
declare_vars (c->temps, body, false);
else
record_vars (c->temps);
if (c->temp_htab)
htab_delete (c->temp_htab);
}
/* Push a GIMPLE_BIND tuple onto the stack of bindings. */
static void
gimple_push_bind_expr (gimple gimple_bind)
{
if (gimplify_ctxp->bind_expr_stack == NULL)
gimplify_ctxp->bind_expr_stack = VEC_alloc (gimple, heap, 8);
VEC_safe_push (gimple, heap, gimplify_ctxp->bind_expr_stack, gimple_bind);
}
/* Pop the first element off the stack of bindings. */
static void
gimple_pop_bind_expr (void)
{
VEC_pop (gimple, gimplify_ctxp->bind_expr_stack);
}
/* Return the first element of the stack of bindings. */
gimple
gimple_current_bind_expr (void)
{
return VEC_last (gimple, gimplify_ctxp->bind_expr_stack);
}
/* Return the stack of bindings created during gimplification. */
VEC(gimple, heap) *
gimple_bind_expr_stack (void)
{
return gimplify_ctxp->bind_expr_stack;
}
/* Return true iff there is a COND_EXPR between us and the innermost
CLEANUP_POINT_EXPR. This info is used by gimple_push_cleanup. */
static bool
gimple_conditional_context (void)
{
return gimplify_ctxp->conditions > 0;
}
/* Note that we've entered a COND_EXPR. */
static void
gimple_push_condition (void)
{
#ifdef ENABLE_GIMPLE_CHECKING
if (gimplify_ctxp->conditions == 0)
gcc_assert (gimple_seq_empty_p (gimplify_ctxp->conditional_cleanups));
#endif
++(gimplify_ctxp->conditions);
}
/* Note that we've left a COND_EXPR. If we're back at unconditional scope
now, add any conditional cleanups we've seen to the prequeue. */
static void
gimple_pop_condition (gimple_seq *pre_p)
{
int conds = --(gimplify_ctxp->conditions);
gcc_assert (conds >= 0);
if (conds == 0)
{
gimplify_seq_add_seq (pre_p, gimplify_ctxp->conditional_cleanups);
gimplify_ctxp->conditional_cleanups = NULL;
}
}
/* A stable comparison routine for use with splay trees and DECLs. */
static int
splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb)
{
tree a = (tree) xa;
tree b = (tree) xb;
return DECL_UID (a) - DECL_UID (b);
}
/* Create a new omp construct that deals with variable remapping. */
static struct gimplify_omp_ctx *
new_omp_context (enum omp_region_type region_type)
{
struct gimplify_omp_ctx *c;
c = XCNEW (struct gimplify_omp_ctx);
c->outer_context = gimplify_omp_ctxp;
c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
c->privatized_types = pointer_set_create ();
c->location = input_location;
c->region_type = region_type;
if ((region_type & ORT_TASK) == 0)
c->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
else
c->default_kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
return c;
}
/* Destroy an omp construct that deals with variable remapping. */
static void
delete_omp_context (struct gimplify_omp_ctx *c)
{
splay_tree_delete (c->variables);
pointer_set_destroy (c->privatized_types);
XDELETE (c);
}
static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int);
static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool);
/* Both gimplify the statement T and append it to *SEQ_P. This function
behaves exactly as gimplify_stmt, but you don't have to pass T as a
reference. */
void
gimplify_and_add (tree t, gimple_seq *seq_p)
{
gimplify_stmt (&t, seq_p);
}
/* Gimplify statement T into sequence *SEQ_P, and return the first
tuple in the sequence of generated tuples for this statement.
Return NULL if gimplifying T produced no tuples. */
static gimple
gimplify_and_return_first (tree t, gimple_seq *seq_p)
{
gimple_stmt_iterator last = gsi_last (*seq_p);
gimplify_and_add (t, seq_p);
if (!gsi_end_p (last))
{
gsi_next (&last);
return gsi_stmt (last);
}
else
return gimple_seq_first_stmt (*seq_p);
}
/* Strip off a legitimate source ending from the input string NAME of
length LEN. Rather than having to know the names used by all of
our front ends, we strip off an ending of a period followed by
up to five characters. (Java uses ".class".) */
static inline void
remove_suffix (char *name, int len)
{
int i;
for (i = 2; i < 8 && len > i; i++)
{
if (name[len - i] == '.')
{
name[len - i] = '\0';
break;
}
}
}
/* Create a new temporary name with PREFIX. Return an identifier. */
static GTY(()) unsigned int tmp_var_id_num;
tree
create_tmp_var_name (const char *prefix)
{
char *tmp_name;
if (prefix)
{
char *preftmp = ASTRDUP (prefix);
remove_suffix (preftmp, strlen (preftmp));
clean_symbol_name (preftmp);
prefix = preftmp;
}
ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? prefix : "T", tmp_var_id_num++);
return get_identifier (tmp_name);
}
/* Create a new temporary variable declaration of type TYPE.
Do NOT push it into the current binding. */
tree
create_tmp_var_raw (tree type, const char *prefix)
{
tree tmp_var;
tmp_var = build_decl (input_location,
VAR_DECL, prefix ? create_tmp_var_name (prefix) : NULL,
type);
/* The variable was declared by the compiler. */
DECL_ARTIFICIAL (tmp_var) = 1;
/* And we don't want debug info for it. */
DECL_IGNORED_P (tmp_var) = 1;
/* Make the variable writable. */
TREE_READONLY (tmp_var) = 0;
DECL_EXTERNAL (tmp_var) = 0;
TREE_STATIC (tmp_var) = 0;
TREE_USED (tmp_var) = 1;
return tmp_var;
}
/* Create a new temporary variable declaration of type TYPE. DO push the
variable into the current binding. Further, assume that this is called
only from gimplification or optimization, at which point the creation of
certain types are bugs. */
tree
create_tmp_var (tree type, const char *prefix)
{
tree tmp_var;
/* We don't allow types that are addressable (meaning we can't make copies),
or incomplete. We also used to reject every variable size objects here,
but now support those for which a constant upper bound can be obtained.
The processing for variable sizes is performed in gimple_add_tmp_var,
point at which it really matters and possibly reached via paths not going
through this function, e.g. after direct calls to create_tmp_var_raw. */
gcc_assert (!TREE_ADDRESSABLE (type) && COMPLETE_TYPE_P (type));
tmp_var = create_tmp_var_raw (type, prefix);
gimple_add_tmp_var (tmp_var);
return tmp_var;
}
/* Create a new temporary variable declaration of type TYPE by calling
create_tmp_var and if TYPE is a vector or a complex number, mark the new
temporary as gimple register. */
tree
create_tmp_reg (tree type, const char *prefix)
{
tree tmp;
tmp = create_tmp_var (type, prefix);
if (TREE_CODE (type) == COMPLEX_TYPE
|| TREE_CODE (type) == VECTOR_TYPE)
DECL_GIMPLE_REG_P (tmp) = 1;
return tmp;
}
/* Create a temporary with a name derived from VAL. Subroutine of
lookup_tmp_var; nobody else should call this function. */
static inline tree
create_tmp_from_val (tree val)
{
/* Drop all qualifiers and address-space information from the value type. */
return create_tmp_var (TYPE_MAIN_VARIANT (TREE_TYPE (val)), get_name (val));
}
/* Create a temporary to hold the value of VAL. If IS_FORMAL, try to reuse
an existing expression temporary. */
static tree
lookup_tmp_var (tree val, bool is_formal)
{
tree ret;
/* If not optimizing, never really reuse a temporary. local-alloc
won't allocate any variable that is used in more than one basic
block, which means it will go into memory, causing much extra
work in reload and final and poorer code generation, outweighing
the extra memory allocation here. */
if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val))
ret = create_tmp_from_val (val);
else
{
elt_t elt, *elt_p;
void **slot;
elt.val = val;
if (gimplify_ctxp->temp_htab == NULL)
gimplify_ctxp->temp_htab
= htab_create (1000, gimple_tree_hash, gimple_tree_eq, free);
slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT);
if (*slot == NULL)
{
elt_p = XNEW (elt_t);
elt_p->val = val;
elt_p->temp = ret = create_tmp_from_val (val);
*slot = (void *) elt_p;
}
else
{
elt_p = (elt_t *) *slot;
ret = elt_p->temp;
}
}
return ret;
}
/* Return true if T is a CALL_EXPR or an expression that can be
assigned to a temporary. Note that this predicate should only be
used during gimplification. See the rationale for this in
gimplify_modify_expr. */
static bool
is_gimple_reg_rhs_or_call (tree t)
{
return (get_gimple_rhs_class (TREE_CODE (t)) != GIMPLE_INVALID_RHS
|| TREE_CODE (t) == CALL_EXPR);
}
/* Return true if T is a valid memory RHS or a CALL_EXPR. Note that
this predicate should only be used during gimplification. See the
rationale for this in gimplify_modify_expr. */
static bool
is_gimple_mem_rhs_or_call (tree t)
{
/* If we're dealing with a renamable type, either source or dest must be
a renamed variable. */
if (is_gimple_reg_type (TREE_TYPE (t)))
return is_gimple_val (t);
else
return (is_gimple_val (t) || is_gimple_lvalue (t)
|| TREE_CODE (t) == CALL_EXPR);
}
/* Helper for get_formal_tmp_var and get_initialized_tmp_var. */
static tree
internal_get_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p,
bool is_formal)
{
tree t, mod;
/* Notice that we explicitly allow VAL to be a CALL_EXPR so that we
can create an INIT_EXPR and convert it into a GIMPLE_CALL below. */
gimplify_expr (&val, pre_p, post_p, is_gimple_reg_rhs_or_call,
fb_rvalue);
t = lookup_tmp_var (val, is_formal);
if (is_formal
&& (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE))
DECL_GIMPLE_REG_P (t) = 1;
mod = build2 (INIT_EXPR, TREE_TYPE (t), t, unshare_expr (val));
SET_EXPR_LOCATION (mod, EXPR_LOC_OR_HERE (val));
/* gimplify_modify_expr might want to reduce this further. */
gimplify_and_add (mod, pre_p);
ggc_free (mod);
/* If we're gimplifying into ssa, gimplify_modify_expr will have
given our temporary an SSA name. Find and return it. */
if (gimplify_ctxp->into_ssa)
{
gimple last = gimple_seq_last_stmt (*pre_p);
t = gimple_get_lhs (last);
}
return t;
}
/* Return a formal temporary variable initialized with VAL. PRE_P is as
in gimplify_expr. Only use this function if:
1) The value of the unfactored expression represented by VAL will not
change between the initialization and use of the temporary, and
2) The temporary will not be otherwise modified.
For instance, #1 means that this is inappropriate for SAVE_EXPR temps,
and #2 means it is inappropriate for && temps.
For other cases, use get_initialized_tmp_var instead. */
tree
get_formal_tmp_var (tree val, gimple_seq *pre_p)
{
return internal_get_tmp_var (val, pre_p, NULL, true);
}
/* Return a temporary variable initialized with VAL. PRE_P and POST_P
are as in gimplify_expr. */
tree
get_initialized_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p)
{
return internal_get_tmp_var (val, pre_p, post_p, false);
}
/* Declare all the variables in VARS in SCOPE. If DEBUG_INFO is true,
generate debug info for them; otherwise don't. */
void
declare_vars (tree vars, gimple scope, bool debug_info)
{
tree last = vars;
if (last)
{
tree temps, block;
gcc_assert (gimple_code (scope) == GIMPLE_BIND);
temps = nreverse (last);
block = gimple_bind_block (scope);
gcc_assert (!block || TREE_CODE (block) == BLOCK);
if (!block || !debug_info)
{
DECL_CHAIN (last) = gimple_bind_vars (scope);
gimple_bind_set_vars (scope, temps);
}
else
{
/* We need to attach the nodes both to the BIND_EXPR and to its
associated BLOCK for debugging purposes. The key point here
is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
is a subchain of the BIND_EXPR_VARS of the BIND_EXPR. */
if (BLOCK_VARS (block))
BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
else
{
gimple_bind_set_vars (scope,
chainon (gimple_bind_vars (scope), temps));
BLOCK_VARS (block) = temps;
}
}
}
}
/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly. Abort if
no such upper bound can be obtained. */
static void
force_constant_size (tree var)
{
/* The only attempt we make is by querying the maximum size of objects
of the variable's type. */
HOST_WIDE_INT max_size;
gcc_assert (TREE_CODE (var) == VAR_DECL);
max_size = max_int_size_in_bytes (TREE_TYPE (var));
gcc_assert (max_size >= 0);
DECL_SIZE_UNIT (var)
= build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
DECL_SIZE (var)
= build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}
/* Push the temporary variable TMP into the current binding. */
void
gimple_add_tmp_var (tree tmp)
{
gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));
/* Later processing assumes that the object size is constant, which might
not be true at this point. Force the use of a constant upper bound in
this case. */
if (!host_integerp (DECL_SIZE_UNIT (tmp), 1))
force_constant_size (tmp);
DECL_CONTEXT (tmp) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;
if (gimplify_ctxp)
{
DECL_CHAIN (tmp) = gimplify_ctxp->temps;
gimplify_ctxp->temps = tmp;
/* Mark temporaries local within the nearest enclosing parallel. */
if (gimplify_omp_ctxp)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
while (ctx && ctx->region_type == ORT_WORKSHARE)
ctx = ctx->outer_context;
if (ctx)
omp_add_variable (ctx, tmp, GOVD_LOCAL | GOVD_SEEN);
}
}
else if (cfun)
record_vars (tmp);
else
{
gimple_seq body_seq;
/* This case is for nested functions. We need to expose the locals
they create. */
body_seq = gimple_body (current_function_decl);
declare_vars (tmp, gimple_seq_first_stmt (body_seq), false);
}
}
/* Determine whether to assign a location to the statement GS. */
static bool
should_carry_location_p (gimple gs)
{
/* Don't emit a line note for a label. We particularly don't want to
emit one for the break label, since it doesn't actually correspond
to the beginning of the loop/switch. */
if (gimple_code (gs) == GIMPLE_LABEL)
return false;
return true;
}
/* Return true if a location should not be emitted for this statement
by annotate_one_with_location. */
static inline bool
gimple_do_not_emit_location_p (gimple g)
{
return gimple_plf (g, GF_PLF_1);
}
/* Mark statement G so a location will not be emitted by
annotate_one_with_location. */
static inline void
gimple_set_do_not_emit_location (gimple g)
{
/* The PLF flags are initialized to 0 when a new tuple is created,
so no need to initialize it anywhere. */
gimple_set_plf (g, GF_PLF_1, true);
}
/* Set the location for gimple statement GS to LOCATION. */
static void
annotate_one_with_location (gimple gs, location_t location)
{
if (!gimple_has_location (gs)
&& !gimple_do_not_emit_location_p (gs)
&& should_carry_location_p (gs))
gimple_set_location (gs, location);
}
/* Set LOCATION for all the statements after iterator GSI in sequence
SEQ. If GSI is pointing to the end of the sequence, start with the
first statement in SEQ. */
static void
annotate_all_with_location_after (gimple_seq seq, gimple_stmt_iterator gsi,
location_t location)
{
if (gsi_end_p (gsi))
gsi = gsi_start (seq);
else
gsi_next (&gsi);
for (; !gsi_end_p (gsi); gsi_next (&gsi))
annotate_one_with_location (gsi_stmt (gsi), location);
}
/* Set the location for all the statements in a sequence STMT_P to LOCATION. */
void
annotate_all_with_location (gimple_seq stmt_p, location_t location)
{
gimple_stmt_iterator i;
if (gimple_seq_empty_p (stmt_p))
return;
for (i = gsi_start (stmt_p); !gsi_end_p (i); gsi_next (&i))
{
gimple gs = gsi_stmt (i);
annotate_one_with_location (gs, location);
}
}
/* This page contains routines to unshare tree nodes, i.e. to duplicate tree
nodes that are referenced more than once in GENERIC functions. This is
necessary because gimplification (translation into GIMPLE) is performed
by modifying tree nodes in-place, so gimplication of a shared node in a
first context could generate an invalid GIMPLE form in a second context.
This is achieved with a simple mark/copy/unmark algorithm that walks the
GENERIC representation top-down, marks nodes with TREE_VISITED the first
time it encounters them, duplicates them if they already have TREE_VISITED
set, and finally removes the TREE_VISITED marks it has set.
The algorithm works only at the function level, i.e. it generates a GENERIC
representation of a function with no nodes shared within the function when
passed a GENERIC function (except for nodes that are allowed to be shared).
At the global level, it is also necessary to unshare tree nodes that are
referenced in more than one function, for the same aforementioned reason.
This requires some cooperation from the front-end. There are 2 strategies:
1. Manual unsharing. The front-end needs to call unshare_expr on every
expression that might end up being shared across functions.
2. Deep unsharing. This is an extension of regular unsharing. Instead
of calling unshare_expr on expressions that might be shared across
functions, the front-end pre-marks them with TREE_VISITED. This will
ensure that they are unshared on the first reference within functions
when the regular unsharing algorithm runs. The counterpart is that
this algorithm must look deeper than for manual unsharing, which is
specified by LANG_HOOKS_DEEP_UNSHARING.
If there are only few specific cases of node sharing across functions, it is
probably easier for a front-end to unshare the expressions manually. On the
contrary, if the expressions generated at the global level are as widespread
as expressions generated within functions, deep unsharing is very likely the
way to go. */
/* Similar to copy_tree_r but do not copy SAVE_EXPR or TARGET_EXPR nodes.
   These nodes model computations that must be done once.  If we were to
   unshare something like SAVE_EXPR(i++), the gimplification process would
   create wrong code.  However, if DATA is non-null, it must hold a pointer
   set that is used to unshare the subtrees of these nodes.  */

static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* SAVE_EXPR, TARGET_EXPR and BIND_EXPR are never copied themselves.
     When DATA carries a pointer set (deep unsharing), their subtrees are
     walked only on the first visit; otherwise the walk stops here.  */
  if (code == SAVE_EXPR || code == TARGET_EXPR || code == BIND_EXPR)
    {
      if (!data
	  || pointer_set_insert ((struct pointer_set_t *) data, *tp))
	*walk_subtrees = 0;
    }
  /* Types, decls and constants are deliberately shared; stop at them,
     just as copy_tree_r does.  A BLOCK may show up in expression position
     through non-expression uses, so avoid descending into it too.  */
  else if (TREE_CODE_CLASS (code) == tcc_type
	   || TREE_CODE_CLASS (code) == tcc_declaration
	   || TREE_CODE_CLASS (code) == tcc_constant
	   || code == BLOCK)
    *walk_subtrees = 0;
  /* A STATEMENT_LIST node is left in place (statement expression
     extension); everything else is copied by copy_tree_r.  */
  else if (code != STATEMENT_LIST)
    copy_tree_r (tp, walk_subtrees, NULL);

  return NULL_TREE;
}
/* Callback for walk_tree to unshare most of the shared trees rooted at *TP.
   If *TP has been visited already, then *TP is deeply copied by calling
   mostly_copy_tree_r.  DATA is passed to mostly_copy_tree_r unmodified.  */

static tree
copy_if_shared_r (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  /* Skip types, decls, and constants.  But we do want to look at their
     types and the bounds of types.  Mark them as visited so we properly
     unmark their subtrees on the unmark pass.  If we've already seen them,
     don't look down further.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      if (TREE_VISITED (t))
	*walk_subtrees = 0;
      else
	TREE_VISITED (t) = 1;
    }

  /* If this node has been visited already, unshare it and don't look
     any deeper.  mostly_copy_tree_r walks and duplicates the whole
     subtree, so there is nothing left for this walk to do below it.  */
  else if (TREE_VISITED (t))
    {
      walk_tree (tp, mostly_copy_tree_r, data, NULL);
      *walk_subtrees = 0;
    }

  /* Otherwise, mark the node as visited and keep looking.  */
  else
    TREE_VISITED (t) = 1;

  /* Returning NULL_TREE keeps the outer walk going.  */
  return NULL_TREE;
}
/* Unshare most of the shared trees rooted at *TP.  DATA is passed to the
   copy_if_shared_r callback unmodified.  Thin wrapper so callers don't
   need to spell out the walk_tree invocation.  */

static inline void
copy_if_shared (tree *tp, void *data)
{
  walk_tree (tp, copy_if_shared_r, data, NULL);
}
/* Unshare all the trees in the body of FNDECL, as well as in the bodies of
   any nested functions.  */

static void
unshare_body (tree fndecl)
{
  struct cgraph_node *cgn = cgraph_get_node (fndecl);

  /* If the language requires deep unsharing, we need a pointer set to make
     sure we don't repeatedly unshare subtrees of unshareable nodes.  */
  struct pointer_set_t *visited
    = lang_hooks.deep_unsharing ? pointer_set_create () : NULL;

  /* Unshare the saved body and the size expressions of the result decl,
     passing the same visited set so unshareable nodes are only copied
     once across the three walks.  */
  copy_if_shared (&DECL_SAVED_TREE (fndecl), visited);
  copy_if_shared (&DECL_SIZE (DECL_RESULT (fndecl)), visited);
  copy_if_shared (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)), visited);

  if (visited)
    pointer_set_destroy (visited);

  /* Recurse into any nested functions recorded in the callgraph.  */
  if (cgn)
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unshare_body (cgn->decl);
}
/* Callback for walk_tree to unmark the visited trees rooted at *TP.
   Subtrees are walked until the first unvisited node is encountered.  */

static tree
unmark_visited_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  /* An unvisited node implies its whole subtree was never marked, so
     the walk can be pruned right here.  */
  if (!TREE_VISITED (*tp))
    *walk_subtrees = 0;
  else
    TREE_VISITED (*tp) = 0;

  return NULL_TREE;
}
/* Unmark the visited trees rooted at *TP.  Companion of copy_if_shared:
   clears the TREE_VISITED marks that the unsharing pass set.  */

static inline void
unmark_visited (tree *tp)
{
  walk_tree (tp, unmark_visited_r, NULL, NULL);
}
/* Likewise, but mark all trees as not visited.  Walks the same three
   trees as unshare_body, then recurses into nested functions.  */

static void
unvisit_body (tree fndecl)
{
  struct cgraph_node *node;

  unmark_visited (&DECL_SAVED_TREE (fndecl));
  unmark_visited (&DECL_SIZE (DECL_RESULT (fndecl)));
  unmark_visited (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)));

  node = cgraph_get_node (fndecl);
  if (node)
    for (node = node->nested; node; node = node->next_nested)
      unvisit_body (node->decl);
}
/* Unconditionally make an unshared copy of EXPR.  This is used when using
   stored expressions which span multiple functions, such as BINFO_VTABLE,
   as the normal unsharing process can't tell that they're shared.  */

tree
unshare_expr (tree expr)
{
  /* No pointer set is passed, so SAVE_EXPR-like nodes keep their
     subtrees shared (see mostly_copy_tree_r).  */
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  return expr;
}
/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Return the temporary, or NULL_TREE if
   WRAPPER was already void.

   TEMP, if non-null, is an INIT_EXPR/MODIFY_EXPR whose RHS the wrapper's
   value should become; otherwise a fresh "retval" temporary is created.  */

tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;

      /* Set p to point to the body of the wrapper.  Loop until we find
	 something that isn't a wrapper.  Each wrapper traversed is
	 voidified on the way down.  */
      for (p = &wrapper; p && *p; )
	{
	  switch (TREE_CODE (*p))
	    {
	    case BIND_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      /* For a BIND_EXPR, the body is operand 1.  */
	      p = &BIND_EXPR_BODY (*p);
	      break;

	    case CLEANUP_POINT_EXPR:
	    case TRY_FINALLY_EXPR:
	    case TRY_CATCH_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TREE_OPERAND (*p, 0);
	      break;

	    case STATEMENT_LIST:
	      {
		/* The value of a STATEMENT_LIST is its last statement;
		   an empty list yields NULL and ends the loop.  */
		tree_stmt_iterator i = tsi_last (*p);
		TREE_SIDE_EFFECTS (*p) = 1;
		TREE_TYPE (*p) = void_type_node;
		p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
	      }
	      break;

	    case COMPOUND_EXPR:
	      /* Advance to the last statement.  Set all container types to
		 void.  */
	      for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1))
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		}
	      break;

	    case TRANSACTION_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TRANSACTION_EXPR_BODY (*p);
	      break;

	    default:
	      /* Assume that any tree upon which voidify_wrapper_expr is
		 directly called is a wrapper, and that its body is op0.  */
	      if (p == &wrapper)
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		  p = &TREE_OPERAND (*p, 0);
		  break;
		}
	      goto out;
	    }
	}

    out:
      if (p == NULL || IS_EMPTY_STMT (*p))
	temp = NULL_TREE;
      else if (temp)
	{
	  /* The wrapper is on the RHS of an assignment that we're pushing
	     down.  */
	  gcc_assert (TREE_CODE (temp) == INIT_EXPR
		      || TREE_CODE (temp) == MODIFY_EXPR);
	  TREE_OPERAND (temp, 1) = *p;
	  *p = temp;
	}
      else
	{
	  /* No assignment supplied: capture the value in a fresh
	     temporary instead.  */
	  temp = create_tmp_var (type, "retval");
	  *p = build2 (INIT_EXPR, type, temp, *p);
	}

      return temp;
    }

  return NULL_TREE;
}
/* Prepare calls to builtins to SAVE and RESTORE the stack as well as
   a temporary through which they communicate.  */

static void
build_stack_save_restore (gimple *save, gimple *restore)
{
  /* The saved stack pointer flows from the save call to the restore
     call through this temporary.  */
  tree saved_sp = create_tmp_var (ptr_type_node, "saved_stack");

  *save = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_SAVE), 0);
  gimple_call_set_lhs (*save, saved_sp);

  *restore
    = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_RESTORE),
			 1, saved_sp);
}
/* Gimplify a BIND_EXPR.  Just voidify and recurse.  The BIND_EXPR in
   *EXPR_P becomes a GIMPLE_BIND appended to PRE_P; *EXPR_P is replaced by
   the value temporary from voidify_wrapper_expr, or NULL_TREE if void.  */

static enum gimplify_status
gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree bind_expr = *expr_p;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;
  gimple gimple_bind;
  gimple_seq body, cleanup;
  gimple stack_save;

  tree temp = voidify_wrapper_expr (bind_expr, NULL);

  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
    {
      if (TREE_CODE (t) == VAR_DECL)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;

	  /* Mark variable as local.  */
	  if (ctx && !DECL_EXTERNAL (t)
	      && (! DECL_SEEN_IN_BIND_EXPR_P (t)
		  || splay_tree_lookup (ctx->variables,
					(splay_tree_key) t) == NULL))
	    omp_add_variable (gimplify_omp_ctxp, t, GOVD_LOCAL | GOVD_SEEN);

	  DECL_SEEN_IN_BIND_EXPR_P (t) = 1;

	  if (DECL_HARD_REGISTER (t) && !is_global_var (t) && cfun)
	    cfun->has_local_explicit_reg_vars = true;
	}

      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if ((TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
	   || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
	  && !TREE_THIS_VOLATILE (t)
	  && (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t))
	  && !needs_to_live_in_memory (t))
	DECL_GIMPLE_REG_P (t) = 1;
    }

  gimple_bind = gimple_build_bind (BIND_EXPR_VARS (bind_expr), NULL,
                                   BIND_EXPR_BLOCK (bind_expr));
  gimple_push_bind_expr (gimple_bind);

  /* Reset save_stack so a VLA in this body (gimplify_vla_decl) is
     detected; the caller's flag is restored at the end.  */
  gimplify_ctxp->save_stack = false;

  /* Gimplify the body into the GIMPLE_BIND tuple's body.  */
  body = NULL;
  gimplify_stmt (&BIND_EXPR_BODY (bind_expr), &body);
  gimple_bind_set_body (gimple_bind, body);

  cleanup = NULL;
  stack_save = NULL;
  if (gimplify_ctxp->save_stack)
    {
      gimple stack_restore;

      /* Save stack on entry and restore it on exit.  Add a try_finally
	 block to achieve this.  Note that mudflap depends on the
	 format of the emitted code: see mx_register_decls().  */
      build_stack_save_restore (&stack_save, &stack_restore);

      gimplify_seq_add_stmt (&cleanup, stack_restore);
    }

  /* Add clobbers for all variables that go out of scope.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
    {
      if (TREE_CODE (t) == VAR_DECL
	  && !is_global_var (t)
	  && DECL_CONTEXT (t) == current_function_decl
	  && !DECL_HARD_REGISTER (t)
	  && !TREE_THIS_VOLATILE (t)
	  && !DECL_HAS_VALUE_EXPR_P (t)
	  /* Only care for variables that have to be in memory.  Others
	     will be rewritten into SSA names, hence moved to the top-level.  */
	  && !is_gimple_reg (t))
	{
	  /* An empty CONSTRUCTOR with TREE_THIS_VOLATILE set is the
	     clobber representation.  */
	  tree clobber = build_constructor (TREE_TYPE (t), NULL);
	  TREE_THIS_VOLATILE (clobber) = 1;
	  gimplify_seq_add_stmt (&cleanup, gimple_build_assign (t, clobber));
	}
    }

  if (cleanup)
    {
      gimple gs;
      gimple_seq new_body;

      /* Wrap the body in a TRY_FINALLY so the cleanup (stack restore
	 and/or clobbers) runs on every exit path.  */
      new_body = NULL;
      gs = gimple_build_try (gimple_bind_body (gimple_bind), cleanup,
			     GIMPLE_TRY_FINALLY);

      if (stack_save)
	gimplify_seq_add_stmt (&new_body, stack_save);
      gimplify_seq_add_stmt (&new_body, gs);
      gimple_bind_set_body (gimple_bind, new_body);
    }

  gimplify_ctxp->save_stack = old_save_stack;
  gimple_pop_bind_expr ();

  gimplify_seq_add_stmt (pre_p, gimple_bind);

  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}
/* Gimplify a RETURN_EXPR.  If the expression to be returned is not a
   GIMPLE value, it is assigned to a new temporary and the statement is
   re-written to return the temporary.

   PRE_P points to the sequence where side effects that must happen before
   STMT should be stored.  */

static enum gimplify_status
gimplify_return_expr (tree stmt, gimple_seq *pre_p)
{
  gimple ret;
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;

  if (ret_expr == error_mark_node)
    return GS_ERROR;

  /* A null operand or a bare RESULT_DECL needs no rewriting: emit the
     return statement directly.  (ret_expr cannot be error_mark_node
     here; that case already returned GS_ERROR above, so the redundant
     re-test was removed.)  */
  if (!ret_expr
      || TREE_CODE (ret_expr) == RESULT_DECL)
    {
      ret = gimple_build_return (ret_expr);
      gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
      gimplify_seq_add_stmt (pre_p, ret);
      return GS_ALL_DONE;
    }

  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else
    {
      /* The operand is expected to be "RESULT_DECL = value".  */
      result_decl = TREE_OPERAND (ret_expr, 0);

      /* See through a return by reference.  */
      if (TREE_CODE (result_decl) == INDIRECT_REF)
	result_decl = TREE_OPERAND (result_decl, 0);

      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
		   || TREE_CODE (ret_expr) == INIT_EXPR)
		  && TREE_CODE (result_decl) == RESULT_DECL);
    }

  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in expand_return
     to handle this case that does not exist in expand_expr.  */
  if (!result_decl)
    result = NULL_TREE;
  else if (aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    {
      if (TREE_CODE (DECL_SIZE (result_decl)) != INTEGER_CST)
	{
	  if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (result_decl)))
	    gimplify_type_sizes (TREE_TYPE (result_decl), pre_p);
	  /* Note that we don't use gimplify_vla_decl because the RESULT_DECL
	     should be effectively allocated by the caller, i.e. all calls to
	     this function must be subject to the Return Slot Optimization.  */
	  gimplify_one_sizepos (&DECL_SIZE (result_decl), pre_p);
	  gimplify_one_sizepos (&DECL_SIZE_UNIT (result_decl), pre_p);
	}
      result = result_decl;
    }
  else if (gimplify_ctxp->return_temp)
    /* Reuse the single per-function return temporary.  */
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_reg (TREE_TYPE (result_decl), NULL);

      /* ??? With complex control flow (usually involving abnormal edges),
	 we can wind up warning about an uninitialized value for this.  Due
	 to how this variable is constructed and initialized, this is never
	 true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;

      gimplify_ctxp->return_temp = result;
    }

  /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    TREE_OPERAND (ret_expr, 0) = result;

  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);

  ret = gimple_build_return (result);
  gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
  gimplify_seq_add_stmt (pre_p, ret);

  return GS_ALL_DONE;
}
/* Gimplify a variable-length array DECL.  */

static void
gimplify_vla_decl (tree decl, gimple_seq *seq_p)
{
  /* This is a variable-sized decl.  Simplify its size and mark it
     for deferred expansion.  Note that mudflap depends on the format
     of the emitted code: see mx_register_decls().  */
  tree t, addr, ptr_type;

  gimplify_one_sizepos (&DECL_SIZE (decl), seq_p);
  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), seq_p);

  /* All occurrences of this decl in final gimplified code will be
     replaced by indirection.  Setting DECL_VALUE_EXPR does two
     things: First, it lets the rest of the gimplifier know what
     replacement to use.  Second, it lets the debug info know
     where to find the value.  */
  ptr_type = build_pointer_type (TREE_TYPE (decl));
  addr = create_tmp_var (ptr_type, get_name (decl));
  /* Keep the pointer temporary visible in debug info.  */
  DECL_IGNORED_P (addr) = 0;
  t = build_fold_indirect_ref (addr);
  /* The dereference of the alloca'd pointer cannot trap.  */
  TREE_THIS_NOTRAP (t) = 1;
  SET_DECL_VALUE_EXPR (decl, t);
  DECL_HAS_VALUE_EXPR_P (decl) = 1;

  /* Emit: addr = __builtin_alloca_with_align (size, align).  */
  t = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
  t = build_call_expr (t, 2, DECL_SIZE_UNIT (decl),
		       size_int (DECL_ALIGN (decl)));
  /* The call has been built for a variable-sized object.  */
  CALL_ALLOCA_FOR_VAR_P (t) = 1;
  t = fold_convert (ptr_type, t);
  t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);

  gimplify_and_add (t, seq_p);

  /* Indicate that we need to restore the stack level when the
     enclosing BIND_EXPR is exited.  */
  gimplify_ctxp->save_stack = true;
}
/* Gimplify a DECL_EXPR node *STMT_P by making any necessary allocation
   and initialization explicit.  *STMT_P is cleared; the generated code
   (size gimplification, VLA allocation, initializer) goes to SEQ_P.  */

static enum gimplify_status
gimplify_decl_expr (tree *stmt_p, gimple_seq *seq_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);

  *stmt_p = NULL_TREE;

  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;

  if ((TREE_CODE (decl) == TYPE_DECL
       || TREE_CODE (decl) == VAR_DECL)
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    gimplify_type_sizes (TREE_TYPE (decl), seq_p);

  /* ??? DECL_ORIGINAL_TYPE is streamed for LTO so it needs to be gimplified
     in case its size expressions contain problematic nodes like CALL_EXPR.  */
  if (TREE_CODE (decl) == TYPE_DECL
      && DECL_ORIGINAL_TYPE (decl)
      && !TYPE_SIZES_GIMPLIFIED (DECL_ORIGINAL_TYPE (decl)))
    gimplify_type_sizes (DECL_ORIGINAL_TYPE (decl), seq_p);

  if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);

      /* A non-constant size, or a large automatic under generic stack
	 checking, means the decl must be allocated dynamically.  */
      if (TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
	  || (!TREE_STATIC (decl)
	      && flag_stack_check == GENERIC_STACK_CHECK
	      && compare_tree_int (DECL_SIZE_UNIT (decl),
				   STACK_CHECK_MAX_VAR_SIZE) > 0))
	gimplify_vla_decl (decl, seq_p);

      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);

      if (init && init != error_mark_node)
	{
	  if (!TREE_STATIC (decl))
	    {
	      /* Turn the initializer into an explicit INIT_EXPR; the
		 tree is freed after gimplification consumed it.  */
	      DECL_INITIAL (decl) = NULL_TREE;
	      init = build2 (INIT_EXPR, void_type_node, decl, init);
	      gimplify_and_add (init, seq_p);
	      ggc_free (init);
	    }
	  else
	    /* We must still examine initializers for static variables
	       as they may contain a label address.  */
	    walk_tree (&init, force_labels_r, NULL, NULL);
	}
    }

  return GS_ALL_DONE;
}
/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  */

static enum gimplify_status
gimplify_loop_expr (tree *expr_p, gimple_seq *pre_p)
{
  /* Save the outer loop's exit label so nested loops don't clobber it.  */
  tree saved_label = gimplify_ctxp->exit_label;
  tree start_label = create_artificial_label (UNKNOWN_LOCATION);

  gimplify_seq_add_stmt (pre_p, gimple_build_label (start_label));

  /* Clear exit_label; gimplify_exit_expr sets it lazily if the body
     contains an EXIT_EXPR.  */
  gimplify_ctxp->exit_label = NULL_TREE;

  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);

  gimplify_seq_add_stmt (pre_p, gimple_build_goto (start_label));

  /* Only emit the exit label if something actually jumps to it.  */
  if (gimplify_ctxp->exit_label)
    gimplify_seq_add_stmt (pre_p,
			   gimple_build_label (gimplify_ctxp->exit_label));

  gimplify_ctxp->exit_label = saved_label;

  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* Gimplify a statement list onto a sequence.  These may be created either
   by an enlightened front-end, or by shortcut_cond_expr.  The statements
   are gimplified one by one onto PRE_P and unlinked from the list; if the
   list had a value, *EXPR_P becomes the temporary holding it.  */

static enum gimplify_status
gimplify_statement_list (tree *expr_p, gimple_seq *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  tree_stmt_iterator i = tsi_start (*expr_p);

  while (!tsi_end_p (i))
    {
      gimplify_stmt (tsi_stmt_ptr (i), pre_p);
      /* Unlinking advances the iterator to the next statement.  */
      tsi_delink (&i);
    }

  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  return GS_ALL_DONE;
}
/* Compare two case labels. Because the front end should already have
made sure that case ranges do not overlap, it is enough to only compare
the CASE_LOW values of each case label. */
static int
compare_case_labels (const void *p1, const void *p2)
{
const_tree const case1 = *(const_tree const*)p1;
const_tree const case2 = *(const_tree const*)p2;
/* The 'default' case label always goes first. */
if (!CASE_LOW (case1))
return -1;
else if (!CASE_LOW (case2))
return 1;
else
return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
}
/* Sort the case labels in LABEL_VEC in place in ascending order of
   CASE_LOW, with the default label (if present) first; see
   compare_case_labels.  */

void
sort_case_labels (VEC(tree,heap)* label_vec)
{
  VEC_qsort (tree, label_vec, compare_case_labels);
}
/* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can
   branch to.  */

static enum gimplify_status
gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree switch_expr = *expr_p;
  gimple_seq switch_body_seq = NULL;
  enum gimplify_status ret;

  /* The controlling expression must be a GIMPLE value.  */
  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL, is_gimple_val,
                       fb_rvalue);
  if (ret == GS_ERROR || ret == GS_UNHANDLED)
    return ret;

  if (SWITCH_BODY (switch_expr))
    {
      VEC (tree,heap) *labels;
      VEC (tree,heap) *saved_labels;
      tree default_case = NULL_TREE;
      size_t i, len;
      gimple gimple_switch;

      /* If someone can be bothered to fill in the labels, they can
	 be bothered to null out the body too.  */
      gcc_assert (!SWITCH_LABELS (switch_expr));

      /* save old labels, get new ones from body, then restore the old
	 labels.  Save all the things from the switch body to append after.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);

      gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq);
      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;

      /* Filter the collected labels: drop empty ranges and pull out
	 the default case, compacting the vector in place.  */
      i = 0;
      while (i < VEC_length (tree, labels))
	{
	  tree elt = VEC_index (tree, labels, i);
	  tree low = CASE_LOW (elt);
	  bool remove_element = FALSE;

	  if (low)
	    {
	      /* Discard empty ranges.  */
	      tree high = CASE_HIGH (elt);
	      if (high && tree_int_cst_lt (high, low))
		remove_element = TRUE;
	    }
	  else
	    {
	      /* The default case must be the last label in the list.  */
	      gcc_assert (!default_case);
	      default_case = elt;
	      remove_element = TRUE;
	    }

	  if (remove_element)
	    VEC_ordered_remove (tree, labels, i);
	  else
	    i++;
	}
      len = i;

      if (!VEC_empty (tree, labels))
	sort_case_labels (labels);

      if (!default_case)
	{
	  tree type = TREE_TYPE (switch_expr);

	  /* If the switch has no default label, add one, so that we jump
	     around the switch body.  If the labels already cover the whole
	     range of type, add the default label pointing to one of the
	     existing labels.  */
	  if (type == void_type_node)
	    type = TREE_TYPE (SWITCH_COND (switch_expr));
	  if (len
	      && INTEGRAL_TYPE_P (type)
	      && TYPE_MIN_VALUE (type)
	      && TYPE_MAX_VALUE (type)
	      && tree_int_cst_equal (CASE_LOW (VEC_index (tree, labels, 0)),
				     TYPE_MIN_VALUE (type)))
	    {
	      tree low, high = CASE_HIGH (VEC_index (tree, labels, len - 1));
	      if (!high)
		high = CASE_LOW (VEC_index (tree, labels, len - 1));
	      if (tree_int_cst_equal (high, TYPE_MAX_VALUE (type)))
		{
		  /* Check that the labels are contiguous: each label's
		     CASE_LOW must be exactly one more than the previous
		     label's upper bound.  The comparison is done on the
		     double-word INTEGER_CST representation, propagating
		     the carry from the low word into the high word.  */
		  for (i = 1; i < len; i++)
		    {
		      high = CASE_LOW (VEC_index (tree, labels, i));
		      low = CASE_HIGH (VEC_index (tree, labels, i - 1));
		      if (!low)
			low = CASE_LOW (VEC_index (tree, labels, i - 1));
		      if ((TREE_INT_CST_LOW (low) + 1
			   != TREE_INT_CST_LOW (high))
			  || (TREE_INT_CST_HIGH (low)
			      + (TREE_INT_CST_LOW (high) == 0)
			      != TREE_INT_CST_HIGH (high)))
			break;
		    }
		  if (i == len)
		    {
		      /* Full coverage: alias the default to the first
			 existing label.  */
		      tree label = CASE_LABEL (VEC_index (tree, labels, 0));
		      default_case = build_case_label (NULL_TREE, NULL_TREE,
						       label);
		    }
		}
	    }

	  if (!default_case)
	    {
	      gimple new_default;

	      /* Otherwise synthesize a default label placed after the
		 switch body, so default falls through the switch.  */
	      default_case
		= build_case_label (NULL_TREE, NULL_TREE,
				    create_artificial_label (UNKNOWN_LOCATION));
	      new_default = gimple_build_label (CASE_LABEL (default_case));
	      gimplify_seq_add_stmt (&switch_body_seq, new_default);
	    }
	}

      gimple_switch = gimple_build_switch_vec (SWITCH_COND (switch_expr),
                                               default_case, labels);
      gimplify_seq_add_stmt (pre_p, gimple_switch);
      gimplify_seq_add_seq (pre_p, switch_body_seq);
      VEC_free(tree, heap, labels);
    }
  else
    gcc_assert (SWITCH_LABELS (switch_expr));

  return GS_ALL_DONE;
}
/* Gimplify the CASE_LABEL_EXPR pointed to by EXPR_P.  Emits the label
   statement onto PRE_P and records the label in the innermost gimplify
   context that is collecting case labels.  */

static enum gimplify_status
gimplify_case_label_expr (tree *expr_p, gimple_seq *pre_p)
{
  struct gimplify_ctx *ctxp;
  gimple gimple_label;

  /* Invalid OpenMP programs can play Duff's Device type games with
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification.  */
  for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context)
    if (ctxp->case_labels)
      break;

  gimple_label = gimple_build_label (CASE_LABEL (*expr_p));
  VEC_safe_push (tree, heap, ctxp->case_labels, *expr_p);
  gimplify_seq_add_stmt (pre_p, gimple_label);

  return GS_ALL_DONE;
}
/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
   if necessary.  */

tree
build_and_jump (tree *label_p)
{
  /* With nowhere to jump, just fall through: emit nothing.  */
  if (!label_p)
    return NULL_TREE;

  /* Create the label lazily on first use.  */
  if (*label_p == NULL_TREE)
    *label_p = create_artificial_label (UNKNOWN_LOCATION);

  return build1 (GOTO_EXPR, void_type_node, *label_p);
}
/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
   This also involves building a label to jump to and communicating it to
   gimplify_loop_expr through gimplify_ctxp->exit_label.  */

static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  tree cond = TREE_OPERAND (*expr_p, 0);
  /* This lazily creates the loop's exit label in the context.  */
  tree jump = build_and_jump (&gimplify_ctxp->exit_label);

  *expr_p = build3 (COND_EXPR, void_type_node, cond, jump, NULL_TREE);
  return GS_OK;
}
/* A helper function to be called via walk_tree.  Mark all labels under *TP
   as being forced.  To be called for DECL_INITIAL of static variables.  */

tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  tree node = *tp;

  /* Nothing to mark below a type; prune the walk there.  */
  if (TYPE_P (node))
    *walk_subtrees = 0;

  if (TREE_CODE (node) == LABEL_DECL)
    FORCED_LABEL (node) = 1;

  return NULL_TREE;
}
/* *EXPR_P is a COMPONENT_REF being used as an rvalue.  If its type is
   different from its canonical type, wrap the whole thing inside a
   NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
   type.

   The canonical type of a COMPONENT_REF is the type of the field being
   referenced--unless the field is a bit-field which can be read directly
   in a smaller mode, in which case the canonical type is the
   sign-appropriate type corresponding to that mode.  */

static void
canonicalize_component_ref (tree *expr_p)
{
  tree expr = *expr_p;
  tree type;

  gcc_assert (TREE_CODE (expr) == COMPONENT_REF);

  /* For integral fields, get_unwidened finds the narrowest mode the
     bit-field can be read in; otherwise the field decl's type is
     canonical.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
  else
    type = TREE_TYPE (TREE_OPERAND (expr, 1));

  /* One could argue that all the stuff below is not necessary for
     the non-bitfield case and declare it a FE error if type
     adjustment would be needed.  */
  if (TREE_TYPE (expr) != type)
    {
#ifdef ENABLE_TYPES_CHECKING
      tree old_type = TREE_TYPE (expr);
#endif
      int type_quals;

      /* We need to preserve qualifiers and propagate them from
	 operand 0.  */
      type_quals = TYPE_QUALS (type)
	| TYPE_QUALS (TREE_TYPE (TREE_OPERAND (expr, 0)));
      if (TYPE_QUALS (type) != type_quals)
	type = build_qualified_type (TYPE_MAIN_VARIANT (type), type_quals);

      /* Set the type of the COMPONENT_REF to the underlying type.  */
      TREE_TYPE (expr) = type;

#ifdef ENABLE_TYPES_CHECKING
      /* It is now a FE error, if the conversion from the canonical
	 type to the original expression type is not useless.  */
      gcc_assert (useless_type_conversion_p (old_type, type));
#endif
    }
}
/* If a NOP conversion is changing a pointer to array of foo to a pointer
   to foo, embed that change in the ADDR_EXPR by converting
      T array[U];
      (T *)&array
   ==>
      &array[L]
   where L is the lower bound.  For simplicity, only do this for constant
   lower bound.
   The constraint is that the type of &array[L] is trivially convertible
   to T *.  */

static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree datype, ddatype, pddatype;

  /* We simplify only conversions from an ADDR_EXPR to a pointer type.  */
  if (!POINTER_TYPE_P (TREE_TYPE (expr))
      || TREE_CODE (addr_expr) != ADDR_EXPR)
    return;

  /* The addr_expr type should be a pointer to an array.  */
  datype = TREE_TYPE (TREE_TYPE (addr_expr));
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;

  /* The pointer to element type shall be trivially convertible to
     the expression pointer type.  */
  ddatype = TREE_TYPE (datype);
  pddatype = build_pointer_type (ddatype);
  if (!useless_type_conversion_p (TYPE_MAIN_VARIANT (TREE_TYPE (expr)),
				  pddatype))
    return;

  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (ddatype)
      || TREE_CODE (TYPE_SIZE_UNIT (ddatype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;

  /* All checks succeeded.  Build a new node to merge the cast:
     &array becomes &array[L] with the element pointer type.  */
  *expr_p = build4 (ARRAY_REF, ddatype, TREE_OPERAND (addr_expr, 0),
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    NULL_TREE, NULL_TREE);
  *expr_p = build1 (ADDR_EXPR, pddatype, *expr_p);

  /* We can have stripped a required restrict qualifier above.  */
  if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
    *expr_p = fold_convert (TREE_TYPE (expr), *expr_p);
}
/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR.  Remove it and/or other conversions
   underneath as appropriate.  */

static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  gcc_assert (CONVERT_EXPR_P (*expr_p));

  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));

  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);

  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (CONVERT_EXPR_P (*expr_p))
    {
      tree sub = TREE_OPERAND (*expr_p, 0);

      /* If a NOP conversion is changing the type of a COMPONENT_REF
	 expression, then canonicalize its type now in order to expose more
	 redundant conversions.  */
      if (TREE_CODE (sub) == COMPONENT_REF)
	canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));

      /* If a NOP conversion is changing a pointer to array of foo
	 to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
	canonicalize_addr_expr (expr_p);
    }

  /* If we have a conversion to a non-register type force the
     use of a VIEW_CONVERT_EXPR instead.  */
  if (CONVERT_EXPR_P (*expr_p) && !is_gimple_reg_type (TREE_TYPE (*expr_p)))
    *expr_p = fold_build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (*expr_p),
			       TREE_OPERAND (*expr_p, 0));

  return GS_OK;
}
/* Nonlocal VLAs seen in the current function.  Used by
   gimplify_var_or_parm_decl to add a debug copy of a referenced nonlocal
   VLA decl at most once per function.  */
static struct pointer_set_t *nonlocal_vlas;
/* Gimplify a VAR_DECL or PARM_DECL.  Return GS_OK if we expanded a
   DECL_VALUE_EXPR, and it's worth re-examining things.  */

static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;

  /* ??? If this is a local variable, and it has not been seen in any
     outer BIND_EXPR, then it's probably the result of a duplicate
     declaration, for which we've already issued an error.  It would
     be really nice if the front end wouldn't leak these at all.
     Currently the only known culprit is C++ destructors, as seen
     in g++.old-deja/g++.jason/binding.C.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_SEEN_IN_BIND_EXPR_P (decl)
      && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)
      && decl_function_context (decl) == current_function_decl)
    {
      gcc_assert (seen_error ());
      return GS_ERROR;
    }

  /* When within an OpenMP context, notice uses of variables.  */
  if (gimplify_omp_ctxp && omp_notice_variable (gimplify_omp_ctxp, decl, true))
    return GS_ALL_DONE;

  /* If the decl is an alias for another expression, substitute it now.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree value_expr = DECL_VALUE_EXPR (decl);

      /* For referenced nonlocal VLAs add a decl for debugging purposes
	 to the current function.  */
      if (TREE_CODE (decl) == VAR_DECL
	  && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
	  && nonlocal_vlas != NULL
	  && TREE_CODE (value_expr) == INDIRECT_REF
	  && TREE_CODE (TREE_OPERAND (value_expr, 0)) == VAR_DECL
	  && decl_function_context (decl) != current_function_decl)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  while (ctx && ctx->region_type == ORT_WORKSHARE)
	    ctx = ctx->outer_context;
	  /* Only add the copy once per function: pointer_set_insert
	     returns nonzero if DECL was already recorded.  */
	  if (!ctx && !pointer_set_insert (nonlocal_vlas, decl))
	    {
	      tree copy = copy_node (decl), block;
	      lang_hooks.dup_lang_specific_decl (copy);
	      SET_DECL_RTL (copy, 0);
	      TREE_USED (copy) = 1;
	      /* Chain the copy onto the outermost block's variables so
		 debug info can find it.  */
	      block = DECL_INITIAL (current_function_decl);
	      DECL_CHAIN (copy) = BLOCK_VARS (block);
	      BLOCK_VARS (block) = copy;
	      SET_DECL_VALUE_EXPR (copy, unshare_expr (value_expr));
	      DECL_HAS_VALUE_EXPR_P (copy) = 1;
	    }
	}

      /* Unshare so later gimplification doesn't mutate the stored
	 value expression.  */
      *expr_p = unshare_expr (value_expr);
      return GS_OK;
    }

  return GS_ALL_DONE;
}
/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
   node *EXPR_P.
      compound_lval
	      : min_lval '[' val ']'
	      | min_lval '.' ID
	      | compound_lval '[' val ']'
	      | compound_lval '.' ID
   This is not part of the original SIMPLE definition, which separates
   array and member references, but it seems reasonable to handle them
   together.  Also, this way we don't run into problems with union
   aliasing; gcc requires that for accesses through a union to alias, the
   union reference must be explicit, which was not always the case when we
   were splitting up array and member refs.
   PRE_P points to the sequence where side effects that must happen before
   *EXPR_P should be stored.
   POST_P points to the sequence where side effects that must happen after
   *EXPR_P should be stored.
   Returns the aggregate gimplify_status of all sub-gimplifications (the
   MIN over every operand processed).  */
static enum gimplify_status
gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			fallback_t fallback)
{
  tree *p;
  VEC(tree,heap) *stack;
  enum gimplify_status ret = GS_ALL_DONE, tret;
  int i;
  location_t loc = EXPR_LOCATION (*expr_p);
  tree expr = *expr_p;
  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  stack = VEC_alloc (tree, heap, 10);
  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
	*p = fold_indirect_ref_loc (loc, *p);
      if (handled_component_p (*p))
	;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
	 additional COMPONENT_REFs; restart the walk at the same depth so
	 the newly exposed handled components are pushed too.  */
      else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL)
	       && gimplify_var_or_parm_decl (p) == GS_OK)
	goto restart;
      else
	break;
      VEC_safe_push (tree, heap, stack, *p);
    }
  /* The caller only hands us compound lvalues, so the walk must have
     pushed at least one handled component.  */
  gcc_assert (VEC_length (tree, stack));
  /* Now STACK is a stack of pointers to all the refs we've walked through
     and P points to the innermost expression.
     Java requires that we elaborate nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.
     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */
  for (i = VEC_length (tree, stack) - 1; i >= 0; i--)
    {
      tree t = VEC_index (tree, stack, i);
      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the low bound and element type size and put them into
	     the ARRAY_REF.  If these values are set, they have already been
	     gimplified.  Operand 2 holds the low bound, operand 3 the
	     element size divided by the element type's alignment unit.  */
	  if (TREE_OPERAND (t, 2) == NULL_TREE)
	    {
	      tree low = unshare_expr (array_ref_low_bound (t));
	      /* Only store the bound if it is not invariant; invariant
		 bounds are recomputed cheaply on demand.  */
	      if (!is_gimple_min_invariant (low))
		{
		  TREE_OPERAND (t, 2) = low;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  else
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				    is_gimple_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	  if (TREE_OPERAND (t, 3) == NULL_TREE)
	    {
	      tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
	      tree elmt_size = unshare_expr (array_ref_element_size (t));
	      tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));
	      /* Divide the element size by the alignment of the element
		 type (above).  */
	      elmt_size
		= size_binop_loc (loc, EXACT_DIV_EXPR, elmt_size, factor);
	      if (!is_gimple_min_invariant (elmt_size))
		{
		  TREE_OPERAND (t, 3) = elmt_size;
		  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  else
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
				    is_gimple_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T and gimplify it.  Operand 2 holds
	     the field offset divided by the field's offset alignment.  */
	  if (TREE_OPERAND (t, 2) == NULL_TREE)
	    {
	      tree offset = unshare_expr (component_ref_field_offset (t));
	      tree field = TREE_OPERAND (t, 1);
	      tree factor
		= size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);
	      /* Divide the offset by its alignment.  */
	      offset = size_binop_loc (loc, EXACT_DIV_EXPR, offset, factor);
	      if (!is_gimple_min_invariant (offset))
		{
		  TREE_OPERAND (t, 2) = offset;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
					post_p, is_gimple_reg,
					fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  else
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				    is_gimple_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
    }
  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
			fallback | fb_lvalue);
  ret = MIN (ret, tret);
  /* And finally, the indices and operands to BIT_FIELD_REF.  During this
     loop we also remove any useless conversions.  */
  for (; VEC_length (tree, stack) > 0; )
    {
      tree t = VEC_pop (tree, stack);
      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the dimension.  */
	  if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				    is_gimple_val, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == BIT_FIELD_REF)
	{
	  /* Both the bit size (operand 1) and bit position (operand 2)
	     must be gimple values.  */
	  tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	}
      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));
      /* The innermost expression P may have originally had
	 TREE_SIDE_EFFECTS set which would have caused all the outer
	 expressions in *EXPR_P leading to P to also have had
	 TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }
  /* If the outermost expression is a COMPONENT_REF, canonicalize its type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
    }
  VEC_free (tree, heap, stack);
  /* If nothing changed, we must report that something did (ret != GS_ALL_DONE
     implies a change); otherwise *expr_p must still be the original tree.  */
  gcc_assert (*expr_p == expr || ret != GS_ALL_DONE);
  return ret;
}
/* Gimplify the self modifying expression pointed to by EXPR_P
   (++, --, +=, -=).
   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.
   POST_P points to the list where side effects that must happen after
   *EXPR_P should be stored.
   WANT_VALUE is nonzero iff we want to use the value of this expression
   in another expression.
   Returns GS_ALL_DONE for a fully-expanded postfix form, GS_OK when the
   expression was rewritten to a MODIFY_EXPR for further gimplification,
   or GS_ERROR on failure.  */
static enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			bool want_value)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1;
  gimple_seq post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;
  location_t loc = EXPR_LOCATION (*expr_p);
  code = TREE_CODE (*expr_p);
  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
	      || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);
  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;
  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  We redirect
     POST_P to a local queue that is appended to ORIG_POST_P at the end.  */
  if (postfix)
    post_p = &post;
  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;
  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;
  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);
  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  We also
     make sure to make lvalue a minimal lval, see
     gcc.c-torture/execute/20040313-1.c for an example where this matters.  */
  if (postfix)
    {
      if (!is_gimple_min_lval (lvalue))
	{
	  /* Stabilize the lvalue by taking and re-dereferencing its
	     address so it can be evaluated twice (read and store).  */
	  mark_addressable (lvalue);
	  lvalue = build_fold_addr_expr_loc (input_location, lvalue);
	  gimplify_expr (&lvalue, pre_p, post_p, is_gimple_val, fb_rvalue);
	  lvalue = build_fold_indirect_ref_loc (input_location, lvalue);
	}
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;
    }
  /* For POINTERs increment, use POINTER_PLUS_EXPR.  */
  if (POINTER_TYPE_P (TREE_TYPE (lhs)))
    {
      rhs = convert_to_ptrofftype_loc (loc, rhs);
      /* POINTER_PLUS_EXPR has no subtraction form; negate the offset
	 instead.  */
      if (arith_code == MINUS_EXPR)
	rhs = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (rhs), rhs);
      arith_code = POINTER_PLUS_EXPR;
    }
  t1 = build2 (arith_code, TREE_TYPE (*expr_p), lhs, rhs);
  if (postfix)
    {
      /* The store happens in the postqueue; the pre-increment value LHS
	 is the result of the expression.  */
      gimplify_assign (lvalue, t1, orig_post_p);
      gimplify_seq_add_seq (orig_post_p, post);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      *expr_p = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);
      return GS_OK;
    }
}
/* If the expression *EXPR_P has a type whose size is not a compile-time
   constant, wrap it in a WITH_SIZE_EXPR so the size expression travels
   with it.  No-op for already-wrapped expressions, erroneous types, and
   types with unknown or constant size.  */
static void
maybe_with_size_expr (tree *expr_p)
{
  tree inner, inner_type, size_tree;

  inner = *expr_p;
  inner_type = TREE_TYPE (inner);

  /* Bail out if the expression is already wrapped, or if the type is
     error_mark_node; there is nothing useful to do in either case.  */
  if (TREE_CODE (inner) == WITH_SIZE_EXPR || inner_type == error_mark_node)
    return;

  /* An unknown or constant size needs no annotation.  */
  size_tree = TYPE_SIZE_UNIT (inner_type);
  if (size_tree == NULL_TREE || TREE_CODE (size_tree) == INTEGER_CST)
    return;

  /* Otherwise, attach the (possibly self-referential) size expression,
     resolving any PLACEHOLDER_EXPRs against the wrapped expression.  */
  size_tree = unshare_expr (size_tree);
  size_tree = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size_tree, inner);
  *expr_p = build2 (WITH_SIZE_EXPR, inner_type, inner, size_tree);
}
/* Helper for gimplify_call_expr.  Gimplify a single argument *ARG_P.
   Any side effects are stored in PRE_P.  CALL_LOCATION is the location
   of the enclosing CALL_EXPR; it is copied onto the argument so that
   diagnostics point at the call.  Returns the gimplify_status of the
   final gimplification of the argument.  */
static enum gimplify_status
gimplify_arg (tree *arg_p, gimple_seq *pre_p, location_t call_location)
{
  bool (*pred) (tree);
  fallback_t fallback;

  if (is_gimple_reg_type (TREE_TYPE (*arg_p)))
    {
      /* Register-sized values are pulled out into temporaries to keep
	 the optimizers happy.  */
      pred = is_gimple_val;
      fallback = fb_rvalue;
    }
  else
    {
      /* For aggregates we allow lvalues, avoiding the overhead of
	 copying large objects into temporaries only to copy the
	 temporaries into the argument list.  */
      pred = is_gimple_lvalue;
      fallback = fb_either;

      /* Strip a TARGET_EXPR that would otherwise force an extra copy.  */
      if (TREE_CODE (*arg_p) == TARGET_EXPR)
	{
	  tree target_init = TARGET_EXPR_INITIAL (*arg_p);
	  if (target_init != NULL_TREE
	      && !VOID_TYPE_P (TREE_TYPE (target_init)))
	    *arg_p = target_init;
	}
    }

  /* Variable-sized types must remember their size.  */
  maybe_with_size_expr (arg_p);

  /* FIXME diagnostics: This will mess up gcc.dg/Warray-bounds.c.  */
  /* Give the argument the same location as the call itself.  */
  protected_set_expr_location (*arg_p, call_location);

  /* There is a sequence point before a function call, so side effects in
     the argument list must occur before the actual call: pass a NULL post
     queue so gimplify_expr uses an internal one appended to PRE_P.  */
  return gimplify_expr (arg_p, pre_p, NULL, pred, fallback);
}
/* Gimplify the CALL_EXPR node *EXPR_P into the GIMPLE sequence PRE_P.
   WANT_VALUE is true if the result of the call is desired.
   On success *EXPR_P is either NULL_TREE (the call was emitted into
   PRE_P as a GIMPLE_CALL) or the gimplified CALL_EXPR wrapped so that
   the original function pointer type is preserved.  */
static enum gimplify_status
gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree fndecl, parms, p, fnptrtype;
  enum gimplify_status ret;
  int i, nargs;
  gimple call;
  bool builtin_va_start_p = FALSE;
  location_t loc = EXPR_LOCATION (*expr_p);
  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);
  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);
  /* This may be a call to a builtin function.
     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.
     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.  */
  fndecl = get_callee_fndecl (*expr_p);
  if (fndecl && DECL_BUILT_IN (fndecl))
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);
      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}
      /* va_start needs special handling: its second argument must stay a
	 plain PARM_DECL, and malformed calls are diagnosed here.  */
      if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_VA_START)
	{
	  builtin_va_start_p = TRUE;
	  if (call_expr_nargs (*expr_p) < 2)
	    {
	      error ("too few arguments to function %<va_start%>");
	      *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
	      return GS_OK;
	    }
	  if (fold_builtin_next_arg (*expr_p, true))
	    {
	      *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
	      return GS_OK;
	    }
	}
    }
  /* Remember the original function pointer type.  */
  fnptrtype = TREE_TYPE (CALL_EXPR_FN (*expr_p));
  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&CALL_EXPR_FN (*expr_p), pre_p, NULL,
		       is_gimple_call_addr, fb_rvalue);
  nargs = call_expr_nargs (*expr_p);
  /* Get argument types for verification.  */
  fndecl = get_callee_fndecl (*expr_p);
  parms = NULL_TREE;
  if (fndecl)
    parms = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
  else if (POINTER_TYPE_P (TREE_TYPE (CALL_EXPR_FN (*expr_p))))
    parms = TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (*expr_p))));
  if (fndecl && DECL_ARGUMENTS (fndecl))
    p = DECL_ARGUMENTS (fndecl);
  else if (parms)
    p = parms;
  else
    p = NULL_TREE;
  /* Advance P past one parameter per actual argument; if P runs out
     first, the trailing arguments are unnamed (variadic).  */
  for (i = 0; i < nargs && p; i++, p = TREE_CHAIN (p))
    ;
  /* If the last argument is __builtin_va_arg_pack () and it is not
     passed as a named argument, decrease the number of CALL_EXPR
     arguments and set instead the CALL_EXPR_VA_ARG_PACK flag.  */
  if (!p
      && i < nargs
      && TREE_CODE (CALL_EXPR_ARG (*expr_p, nargs - 1)) == CALL_EXPR)
    {
      tree last_arg = CALL_EXPR_ARG (*expr_p, nargs - 1);
      tree last_arg_fndecl = get_callee_fndecl (last_arg);
      if (last_arg_fndecl
	  && TREE_CODE (last_arg_fndecl) == FUNCTION_DECL
	  && DECL_BUILT_IN_CLASS (last_arg_fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (last_arg_fndecl) == BUILT_IN_VA_ARG_PACK)
	{
	  tree call = *expr_p;
	  --nargs;
	  /* Rebuild the call without the trailing va_arg_pack argument.  */
	  *expr_p = build_call_array_loc (loc, TREE_TYPE (call),
					  CALL_EXPR_FN (call),
					  nargs, CALL_EXPR_ARGP (call));
	  /* Copy all CALL_EXPR flags, location and block, except
	     CALL_EXPR_VA_ARG_PACK flag.  */
	  CALL_EXPR_STATIC_CHAIN (*expr_p) = CALL_EXPR_STATIC_CHAIN (call);
	  CALL_EXPR_TAILCALL (*expr_p) = CALL_EXPR_TAILCALL (call);
	  CALL_EXPR_RETURN_SLOT_OPT (*expr_p)
	    = CALL_EXPR_RETURN_SLOT_OPT (call);
	  CALL_FROM_THUNK_P (*expr_p) = CALL_FROM_THUNK_P (call);
	  SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (call));
	  TREE_BLOCK (*expr_p) = TREE_BLOCK (call);
	  /* Set CALL_EXPR_VA_ARG_PACK.  */
	  CALL_EXPR_VA_ARG_PACK (*expr_p) = 1;
	}
    }
  /* Finally, gimplify the function arguments.  Walk in the target's
     argument-push order so side effects are emitted consistently.  */
  if (nargs > 0)
    {
      for (i = (PUSH_ARGS_REVERSED ? nargs - 1 : 0);
	   PUSH_ARGS_REVERSED ? i >= 0 : i < nargs;
	   PUSH_ARGS_REVERSED ? i-- : i++)
	{
	  enum gimplify_status t;
	  /* Avoid gimplifying the second argument to va_start, which needs to
	     be the plain PARM_DECL.  */
	  if ((i != 1) || !builtin_va_start_p)
	    {
	      t = gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
				EXPR_LOCATION (*expr_p));
	      if (t == GS_ERROR)
		ret = GS_ERROR;
	    }
	}
    }
  /* Verify the function result.  */
  if (want_value && fndecl
      && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fnptrtype))))
    {
      error_at (loc, "using result of function returning %<void%>");
      ret = GS_ERROR;
    }
  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);
      if (new_tree && new_tree != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new_tree;
	  return GS_OK;
	}
    }
  else
    {
      *expr_p = error_mark_node;
      return GS_ERROR;
    }
  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
     decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR)
    {
      int flags = call_expr_flags (*expr_p);
      if (flags & (ECF_CONST | ECF_PURE)
	  /* An infinite loop is considered a side effect.  */
	  && !(flags & (ECF_LOOPING_CONST_OR_PURE)))
	TREE_SIDE_EFFECTS (*expr_p) = 0;
    }
  /* If the value is not needed by the caller, emit a new GIMPLE_CALL
     and clear *EXPR_P.  Otherwise, leave *EXPR_P in its gimplified
     form and delegate the creation of a GIMPLE_CALL to
     gimplify_modify_expr.  This is always possible because when
     WANT_VALUE is true, the caller wants the result of this call into
     a temporary, which means that we will emit an INIT_EXPR in
     internal_get_tmp_var which will then be handled by
     gimplify_modify_expr.  */
  if (!want_value)
    {
      /* The CALL_EXPR in *EXPR_P is already in GIMPLE form, so all we
	 have to do is replicate it as a GIMPLE_CALL tuple.  */
      gimple_stmt_iterator gsi;
      call = gimple_build_call_from_tree (*expr_p);
      gimple_call_set_fntype (call, TREE_TYPE (fnptrtype));
      gimplify_seq_add_stmt (pre_p, call);
      gsi = gsi_last (*pre_p);
      fold_stmt (&gsi);
      *expr_p = NULL_TREE;
    }
  else
    /* Remember the original function type.  */
    CALL_EXPR_FN (*expr_p) = build1 (NOP_EXPR, fnptrtype,
				     CALL_EXPR_FN (*expr_p));
  return ret;
}
/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
   rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.
   TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
   condition is true or false, respectively.  If null, we should generate
   our own to skip over the evaluation of this specific expression.
   LOCUS is the source location of the COND_EXPR.
   This function is the tree equivalent of do_jump.
   shortcut_cond_r should only be called by shortcut_cond_expr.
   Returns the statement list implementing the jumps (with a trailing
   LABEL_EXPR when a local label was needed).  */
static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p,
		 location_t locus)
{
  tree local_label = NULL_TREE;
  tree t, expr = NULL;
  /* OK, it's not a simple case; we need to pull apart the COND_EXPR to
     retain the shortcut semantics.  Just insert the gotos here;
     shortcut_cond_expr will append the real blocks later.  */
  if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
    {
      location_t new_locus;
      /* Turn if (a && b) into
	 if (a); else goto no;
	 if (b) goto yes; else goto no;
	 (no:) */
      if (false_label_p == NULL)
	false_label_p = &local_label;
      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p, locus);
      append_to_statement_list (t, &expr);
      /* Set the source location of the && on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
			   new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      location_t new_locus;
      /* Turn if (a || b) into
	 if (a) goto yes;
	 if (b) goto yes; else goto no;
	 (yes:) */
      if (true_label_p == NULL)
	true_label_p = &local_label;
      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL, locus);
      append_to_statement_list (t, &expr);
      /* Set the source location of the || on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
			   new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR
	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 1)))
	   && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 2))))
    {
      location_t new_locus;
      /* As long as we're messing with gotos, turn if (a ? b : c) into
	 if (a)
	   if (b) goto yes; else goto no;
	 else
	   if (c) goto yes; else goto no;
	 Don't do this if one of the arms has void type, which can happen
	 in C++ when the arm is throw.  */
      /* Keep the original source location on the first 'if'.  Set the source
	 location of the ? on the second 'if'.  */
      new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
		     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
				      false_label_p, locus),
		     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
				      false_label_p, new_locus));
    }
  else
    {
      /* Base case: a simple predicate becomes a two-way conditional
	 jump.  build_and_jump creates the label lazily through the
	 pointed-to slot.  */
      expr = build3 (COND_EXPR, void_type_node, pred,
		     build_and_jump (true_label_p),
		     build_and_jump (false_label_p));
      SET_EXPR_LOCATION (expr, locus);
    }
  /* Emit the fall-through label this invocation created, if any.  */
  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }
  return expr;
}
/* Given a conditional expression EXPR with short-circuit boolean
   predicates using TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR, break the
   predicate apart into the equivalent sequence of conditionals.
   Returns either EXPR itself (possibly simplified) or a statement list
   of simple conditionals, gotos and labels with the same semantics.  */
static tree
shortcut_cond_expr (tree expr)
{
  tree pred = TREE_OPERAND (expr, 0);
  tree then_ = TREE_OPERAND (expr, 1);
  tree else_ = TREE_OPERAND (expr, 2);
  tree true_label, false_label, end_label, t;
  tree *true_label_p;
  tree *false_label_p;
  bool emit_end, emit_false, jump_over_else;
  bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
  bool else_se = else_ && TREE_SIDE_EFFECTS (else_);
  /* First do simple transformations.  */
  if (!else_se)
    {
      /* If there is no 'else', turn
	   if (a && b) then c
	 into
	   if (a) if (b) then c.  */
      while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
	{
	  /* Keep the original source location on the first 'if'.  */
	  location_t locus = EXPR_LOC_OR_HERE (expr);
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  /* Set the source location of the && on the second 'if'.  */
	  if (EXPR_HAS_LOCATION (pred))
	    SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred));
	  then_ = shortcut_cond_expr (expr);
	  then_se = then_ && TREE_SIDE_EFFECTS (then_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
	  SET_EXPR_LOCATION (expr, locus);
	}
    }
  if (!then_se)
    {
      /* If there is no 'then', turn
	   if (a || b); else d
	 into
	   if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
	{
	  /* Keep the original source location on the first 'if'.  */
	  location_t locus = EXPR_LOC_OR_HERE (expr);
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  /* Set the source location of the || on the second 'if'.  */
	  if (EXPR_HAS_LOCATION (pred))
	    SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred));
	  else_ = shortcut_cond_expr (expr);
	  else_se = else_ && TREE_SIDE_EFFECTS (else_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
	  SET_EXPR_LOCATION (expr, locus);
	}
    }
  /* If we're done, great.  */
  if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
      && TREE_CODE (pred) != TRUTH_ORIF_EXPR)
    return expr;
  /* Otherwise we need to mess with gotos.  Change
       if (a) c; else d;
     to
       if (a); else goto no;
       c; goto end;
       no: d; end:
     and recursively gimplify the condition.  */
  true_label = false_label = end_label = NULL_TREE;
  /* If our arms just jump somewhere, hijack those labels so we don't
     generate jumps to jumps.  */
  if (then_
      && TREE_CODE (then_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL)
    {
      true_label = GOTO_DESTINATION (then_);
      then_ = NULL;
      then_se = false;
    }
  if (else_
      && TREE_CODE (else_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL)
    {
      false_label = GOTO_DESTINATION (else_);
      else_ = NULL;
      else_se = false;
    }
  /* If we aren't hijacking a label for the 'then' branch, it falls through.  */
  if (true_label)
    true_label_p = &true_label;
  else
    true_label_p = NULL;
  /* The 'else' branch also needs a label if it contains interesting code.  */
  if (false_label || else_se)
    false_label_p = &false_label;
  else
    false_label_p = NULL;
  /* If there was nothing else in our arms, just forward the label(s).  */
  if (!then_se && !else_se)
    return shortcut_cond_r (pred, true_label_p, false_label_p,
			    EXPR_LOC_OR_HERE (expr));
  /* If our last subexpression already has a terminal label, reuse it.  */
  if (else_se)
    t = expr_last (else_);
  else if (then_se)
    t = expr_last (then_);
  else
    t = NULL;
  if (t && TREE_CODE (t) == LABEL_EXPR)
    end_label = LABEL_EXPR_LABEL (t);
  /* If we don't care about jumping to the 'else' branch, jump to the end
     if the condition is false.  */
  if (!false_label_p)
    false_label_p = &end_label;
  /* We only want to emit these labels if we aren't hijacking them.  */
  emit_end = (end_label == NULL_TREE);
  emit_false = (false_label == NULL_TREE);
  /* We only emit the jump over the else clause if we have to--if the
     then clause may fall through.  Otherwise we can wind up with a
     useless jump and a useless label at the end of gimplified code,
     which will cause us to think that this conditional as a whole
     falls through even if it doesn't.  If we then inline a function
     which ends with such a condition, that can cause us to issue an
     inappropriate warning about control reaching the end of a
     non-void function.  */
  jump_over_else = block_may_fallthru (then_);
  pred = shortcut_cond_r (pred, true_label_p, false_label_p,
			  EXPR_LOC_OR_HERE (expr));
  /* Assemble the final statement list: predicate jumps, then-arm,
     optional jump over the else, the else-arm, and terminal labels.  */
  expr = NULL;
  append_to_statement_list (pred, &expr);
  append_to_statement_list (then_, &expr);
  if (else_se)
    {
      if (jump_over_else)
	{
	  tree last = expr_last (expr);
	  t = build_and_jump (&end_label);
	  if (EXPR_HAS_LOCATION (last))
	    SET_EXPR_LOCATION (t, EXPR_LOCATION (last));
	  append_to_statement_list (t, &expr);
	}
      if (emit_false)
	{
	  t = build1 (LABEL_EXPR, void_type_node, false_label);
	  append_to_statement_list (t, &expr);
	}
      append_to_statement_list (else_, &expr);
    }
  if (emit_end && end_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, end_label);
      append_to_statement_list (t, &expr);
    }
  return expr;
}
/* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE.
   Truth expressions and comparisons get their type rewritten in place
   (and their operands boolified recursively); anything else is wrapped
   in a conversion to boolean_type_node when needed.  */
tree
gimple_boolify (tree expr)
{
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);
  if (TREE_CODE (expr) == NE_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == CALL_EXPR
      && integer_zerop (TREE_OPERAND (expr, 1)))
    {
      tree call = TREE_OPERAND (expr, 0);
      tree fn = get_callee_fndecl (call);
      /* For __builtin_expect ((long) (x), y) recurse into x as well
	 if x is truth_value_p.  */
      if (fn
	  && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (fn) == BUILT_IN_EXPECT
	  && call_expr_nargs (call) == 2)
	{
	  tree arg = CALL_EXPR_ARG (call, 0);
	  if (arg)
	    {
	      /* Look through the cast that widens x to the call's
		 return type before testing it.  */
	      if (TREE_CODE (arg) == NOP_EXPR
		  && TREE_TYPE (arg) == TREE_TYPE (call))
		arg = TREE_OPERAND (arg, 0);
	      if (truth_value_p (TREE_CODE (arg)))
		{
		  arg = gimple_boolify (arg);
		  CALL_EXPR_ARG (call, 0)
		    = fold_convert_loc (loc, TREE_TYPE (call), arg);
		}
	    }
	}
    }
  switch (TREE_CODE (expr))
    {
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Also boolify the arguments of truth exprs.  */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      /* FALLTHRU */
    case TRUTH_NOT_EXPR:
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
      /* These expressions always produce boolean results.  */
      if (TREE_CODE (type) != BOOLEAN_TYPE)
	TREE_TYPE (expr) = boolean_type_node;
      return expr;
    default:
      if (COMPARISON_CLASS_P (expr))
	{
	  /* These expressions always produce boolean results.  */
	  if (TREE_CODE (type) != BOOLEAN_TYPE)
	    TREE_TYPE (expr) = boolean_type_node;
	  return expr;
	}
      /* Other expressions that get here must have boolean values, but
	 might need to be converted to the appropriate mode.  */
      if (TREE_CODE (type) == BOOLEAN_TYPE)
	return expr;
      return fold_convert_loc (loc, boolean_type_node, expr);
    }
}
/* Given a conditional expression *EXPR_P without side effects, gimplify
   its three operands in place.  New statements are inserted into PRE_P.
   Returns the minimum gimplify_status over the three operands.  */
static enum gimplify_status
gimplify_pure_cond_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree whole = *expr_p;
  tree pred;
  enum gimplify_status status, substatus;

  pred = gimple_boolify (COND_EXPR_COND (whole));

  /* && and || must be demoted to their non-short-circuit counterparts:
     gimplifying them would create another pure COND_EXPR and recurse
     into this function forever.  */
  switch (TREE_CODE (pred))
    {
    case TRUTH_ANDIF_EXPR:
      TREE_SET_CODE (pred, TRUTH_AND_EXPR);
      break;
    case TRUTH_ORIF_EXPR:
      TREE_SET_CODE (pred, TRUTH_OR_EXPR);
      break;
    default:
      break;
    }

  status = gimplify_expr (&pred, pre_p, NULL, is_gimple_condexpr, fb_rvalue);
  COND_EXPR_COND (*expr_p) = pred;

  /* Both arms become plain GIMPLE values; track the weakest status.  */
  substatus = gimplify_expr (&COND_EXPR_THEN (whole), pre_p, NULL,
			     is_gimple_val, fb_rvalue);
  status = MIN (status, substatus);
  substatus = gimplify_expr (&COND_EXPR_ELSE (whole), pre_p, NULL,
			     is_gimple_val, fb_rvalue);
  return MIN (status, substatus);
}
/* Return true if evaluating EXPR could trap.
   EXPR is GENERIC, while tree_could_trap_p can be called
   only on GIMPLE, so we recurse through the operands ourselves.  */
static bool
generic_expr_could_trap_p (tree expr)
{
  unsigned idx, num_ops;

  /* A null tree or a simple GIMPLE value can never trap.  */
  if (expr == NULL_TREE || is_gimple_val (expr))
    return false;

  /* Conservatively treat non-expression nodes as trapping, and defer
     to tree_could_trap_p for this node itself.  */
  if (!EXPR_P (expr) || tree_could_trap_p (expr))
    return true;

  /* Otherwise the expression traps iff any of its operands does.  */
  num_ops = TREE_OPERAND_LENGTH (expr);
  for (idx = 0; idx < num_ops; idx++)
    if (generic_expr_could_trap_p (TREE_OPERAND (expr, idx)))
      return true;

  return false;
}
/* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
   into

   if (p)			if (p)
     t1 = a;			  a;
   else			or	else
     t1 = b;			  b;
   t1;

   The second form is used when *EXPR_P is of type void.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  FALLBACK indicates what kind of result
   (rvalue/lvalue/none) the caller can accept.  */

static enum gimplify_status
gimplify_cond_expr (tree *expr_p, gimple_seq *pre_p, fallback_t fallback)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);
  tree tmp, arm1, arm2;
  enum gimplify_status ret;
  tree label_true, label_false, label_cont;
  bool have_then_clause_p, have_else_clause_p;
  gimple gimple_cond;
  enum tree_code pred_code;
  gimple_seq seq = NULL;

  /* If this COND_EXPR has a value, copy the values into a temporary within
     the arms.  */
  if (!VOID_TYPE_P (type))
    {
      tree then_ = TREE_OPERAND (expr, 1), else_ = TREE_OPERAND (expr, 2);
      tree result;

      /* If either an rvalue is ok or we do not require an lvalue, create the
	 temporary.  But we cannot do that if the type is addressable.  */
      if (((fallback & fb_rvalue) || !(fallback & fb_lvalue))
	  && !TREE_ADDRESSABLE (type))
	{
	  if (gimplify_ctxp->allow_rhs_cond_expr
	      /* If either branch has side effects or could trap, it can't be
		 evaluated unconditionally.  */
	      && !TREE_SIDE_EFFECTS (then_)
	      && !generic_expr_could_trap_p (then_)
	      && !TREE_SIDE_EFFECTS (else_)
	      && !generic_expr_could_trap_p (else_))
	    return gimplify_pure_cond_expr (expr_p, pre_p);

	  tmp = create_tmp_var (type, "iftmp");
	  result = tmp;
	}
      /* Otherwise, only create and copy references to the values.  */
      else
	{
	  /* An lvalue is required: take the address of each arm and
	     dereference the temporary pointer as the result.  */
	  type = build_pointer_type (type);

	  if (!VOID_TYPE_P (TREE_TYPE (then_)))
	    then_ = build_fold_addr_expr_loc (loc, then_);

	  if (!VOID_TYPE_P (TREE_TYPE (else_)))
	    else_ = build_fold_addr_expr_loc (loc, else_);

	  expr
	    = build3 (COND_EXPR, type, TREE_OPERAND (expr, 0), then_, else_);

	  tmp = create_tmp_var (type, "iftmp");
	  result = build_simple_mem_ref_loc (loc, tmp);
	}

      /* Build the new then clause, `tmp = then_;'.  But don't build the
	 assignment if the value is void; in C++ it can be if it's a throw.  */
      if (!VOID_TYPE_P (TREE_TYPE (then_)))
	TREE_OPERAND (expr, 1) = build2 (MODIFY_EXPR, type, tmp, then_);

      /* Similarly, build the new else clause, `tmp = else_;'.  */
      if (!VOID_TYPE_P (TREE_TYPE (else_)))
	TREE_OPERAND (expr, 2) = build2 (MODIFY_EXPR, type, tmp, else_);

      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);

      /* Move the COND_EXPR to the prequeue.  */
      gimplify_stmt (&expr, pre_p);

      *expr_p = result;
      return GS_ALL_DONE;
    }

  /* Remove any COMPOUND_EXPR so the following cases will be caught.  */
  STRIP_TYPE_NOPS (TREE_OPERAND (expr, 0));
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == COMPOUND_EXPR)
    gimplify_compound_expr (&TREE_OPERAND (expr, 0), pre_p, true);

  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);

      if (expr != *expr_p)
	{
	  *expr_p = expr;

	  /* We can't rely on gimplify_expr to re-gimplify the expanded
	     form properly, as cleanups might cause the target labels to be
	     wrapped in a TRY_FINALLY_EXPR.  To prevent that, we need to
	     set up a conditional context.  */
	  gimple_push_condition ();
	  gimplify_stmt (expr_p, &seq);
	  gimple_pop_condition (pre_p);
	  gimple_seq_add_seq (pre_p, seq);

	  return GS_ALL_DONE;
	}
    }

  /* Now do the normal gimplification.  */

  /* Gimplify condition.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr,
		       fb_rvalue);
  if (ret == GS_ERROR)
    return GS_ERROR;
  gcc_assert (TREE_OPERAND (expr, 0) != NULL_TREE);

  gimple_push_condition ();

  have_then_clause_p = have_else_clause_p = false;
  /* If the THEN arm is a bare goto to a label in this function, reuse its
     target as the true label instead of creating an artificial one.  */
  if (TREE_OPERAND (expr, 1) != NULL
      && TREE_CODE (TREE_OPERAND (expr, 1)) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 1))) == LABEL_DECL
      && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 1)))
	  == current_function_decl)
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 1))
	  || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 1))))
    {
      label_true = GOTO_DESTINATION (TREE_OPERAND (expr, 1));
      have_then_clause_p = true;
    }
  else
    label_true = create_artificial_label (UNKNOWN_LOCATION);
  /* Likewise for the ELSE arm and the false label.  */
  if (TREE_OPERAND (expr, 2) != NULL
      && TREE_CODE (TREE_OPERAND (expr, 2)) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 2))) == LABEL_DECL
      && (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 2)))
	  == current_function_decl)
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
	 have different locations, otherwise we end up with incorrect
	 location information on the branches.  */
      && (optimize
	  || !EXPR_HAS_LOCATION (expr)
	  || !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 2))
	  || EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 2))))
    {
      label_false = GOTO_DESTINATION (TREE_OPERAND (expr, 2));
      have_else_clause_p = true;
    }
  else
    label_false = create_artificial_label (UNKNOWN_LOCATION);

  gimple_cond_get_ops_from_tree (COND_EXPR_COND (expr), &pred_code, &arm1,
				 &arm2);

  gimple_cond = gimple_build_cond (pred_code, arm1, arm2, label_true,
				   label_false);

  gimplify_seq_add_stmt (&seq, gimple_cond);
  label_cont = NULL_TREE;
  if (!have_then_clause_p)
    {
      /* For if (...) {} else { code; } put label_true after
	 the else block.  */
      if (TREE_OPERAND (expr, 1) == NULL_TREE
	  && !have_else_clause_p
	  && TREE_OPERAND (expr, 2) != NULL_TREE)
	label_cont = label_true;
      else
	{
	  gimplify_seq_add_stmt (&seq, gimple_build_label (label_true));
	  have_then_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 1), &seq);
	  /* For if (...) { code; } else {} or
	     if (...) { code; } else goto label; or
	     if (...) { code; return; } else { ... }
	     label_cont isn't needed.  */
	  if (!have_else_clause_p
	      && TREE_OPERAND (expr, 2) != NULL_TREE
	      && gimple_seq_may_fallthru (seq))
	    {
	      gimple g;
	      label_cont = create_artificial_label (UNKNOWN_LOCATION);

	      g = gimple_build_goto (label_cont);

	      /* GIMPLE_COND's are very low level; they have embedded
		 gotos.  This particular embedded goto should not be marked
		 with the location of the original COND_EXPR, as it would
		 correspond to the COND_EXPR's condition, not the ELSE or the
		 THEN arms.  To avoid marking it with the wrong location, flag
		 it as "no location".  */
	      gimple_set_do_not_emit_location (g);

	      gimplify_seq_add_stmt (&seq, g);
	    }
	}
    }
  if (!have_else_clause_p)
    {
      gimplify_seq_add_stmt (&seq, gimple_build_label (label_false));
      have_else_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 2), &seq);
    }
  if (label_cont)
    gimplify_seq_add_stmt (&seq, gimple_build_label (label_cont));

  gimple_pop_condition (pre_p);
  gimple_seq_add_seq (pre_p, seq);

  if (ret == GS_ERROR)
    ; /* Do nothing.  */
  else if (have_then_clause_p || have_else_clause_p)
    ret = GS_ALL_DONE;
  else
    {
      /* Both arms are empty; replace the COND_EXPR with its predicate.  */
      expr = TREE_OPERAND (expr, 0);
      gimplify_stmt (&expr, pre_p);
    }

  *expr_p = NULL;
  return ret;
}
/* Prepare the node pointed to by EXPR_P, an is_gimple_addressable expression,
   to be marked addressable.

   We cannot rely on such an expression being directly markable if a temporary
   has been created by the gimplification.  In this case, we create another
   temporary and initialize it with a copy, which will become a store after we
   mark it addressable.  This can happen if the front-end passed us something
   that it could not mark addressable yet, like a Fortran pass-by-reference
   parameter (int) floatvar.  */

static void
prepare_gimple_addressable (tree *expr_p, gimple_seq *seq_p)
{
  /* Strip component references down to the base object.  */
  for (; handled_component_p (*expr_p);
       expr_p = &TREE_OPERAND (*expr_p, 0))
    ;

  /* A base that lives in a GIMPLE register cannot have its address
     taken; replace it with an initialized addressable temporary.  */
  if (is_gimple_reg (*expr_p))
    *expr_p = get_initialized_tmp_var (*expr_p, seq_p, NULL);
}
/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memcpy.

   SIZE is the number of bytes to copy.  If WANT_VALUE is true, the
   result of the expression (a dereference of the destination pointer)
   is left in *EXPR_P; otherwise *EXPR_P is cleared.  Emitted statements
   are appended to SEQ_P.  */

static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value,
				gimple_seq *seq_p)
{
  tree t, to, to_ptr, from, from_ptr;
  gimple gs;
  location_t loc = EXPR_LOCATION (*expr_p);

  to = TREE_OPERAND (*expr_p, 0);
  from = TREE_OPERAND (*expr_p, 1);

  /* Mark the RHS addressable.  Beware that it may not be possible to do so
     directly if a temporary has been created by the gimplification.  */
  prepare_gimple_addressable (&from, seq_p);

  mark_addressable (from);
  from_ptr = build_fold_addr_expr_loc (loc, from);
  /* gimplify_arg appends the statements that compute FROM_PTR to SEQ_P;
     it must run before the address of TO is gimplified below so that
     side effects stay in source order.  */
  gimplify_arg (&from_ptr, seq_p, loc);

  mark_addressable (to);
  to_ptr = build_fold_addr_expr_loc (loc, to);
  gimplify_arg (&to_ptr, seq_p, loc);

  t = builtin_decl_implicit (BUILT_IN_MEMCPY);

  gs = gimple_build_call (t, 3, to_ptr, from_ptr, size);

  if (want_value)
    {
      /* tmp = memcpy() */
      t = create_tmp_var (TREE_TYPE (to_ptr), NULL);
      gimple_call_set_lhs (gs, t);
      gimplify_seq_add_stmt (seq_p, gs);

      /* The value of the whole expression is the destination object,
	 reached through the pointer memcpy returned.  */
      *expr_p = build_simple_mem_ref (t);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, gs);
  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memset.  In this case we know that the RHS is
   a CONSTRUCTOR with an empty element list, i.e. zero-initialization.

   SIZE is the number of bytes to clear.  If WANT_VALUE is true, the
   result of the expression (a dereference of the destination pointer)
   is left in *EXPR_P; otherwise *EXPR_P is cleared.  Emitted statements
   are appended to SEQ_P.  */

static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value,
				gimple_seq *seq_p)
{
  tree t, from, to, to_ptr;
  gimple gs;
  location_t loc = EXPR_LOCATION (*expr_p);

  /* Assert our assumptions, to abort instead of producing wrong code
     silently if they are not met.  Beware that the RHS CONSTRUCTOR might
     not be immediately exposed.  */
  from = TREE_OPERAND (*expr_p, 1);
  if (TREE_CODE (from) == WITH_SIZE_EXPR)
    from = TREE_OPERAND (from, 0);

  gcc_assert (TREE_CODE (from) == CONSTRUCTOR
	      && VEC_empty (constructor_elt, CONSTRUCTOR_ELTS (from)));

  /* Now proceed.  */
  to = TREE_OPERAND (*expr_p, 0);

  to_ptr = build_fold_addr_expr_loc (loc, to);
  gimplify_arg (&to_ptr, seq_p, loc);
  t = builtin_decl_implicit (BUILT_IN_MEMSET);

  gs = gimple_build_call (t, 3, to_ptr, integer_zero_node, size);

  if (want_value)
    {
      /* tmp = memset() */
      t = create_tmp_var (TREE_TYPE (to_ptr), NULL);
      gimple_call_set_lhs (gs, t);
      gimplify_seq_add_stmt (seq_p, gs);

      /* The value of the whole expression is the cleared destination
	 object, reached through the pointer memset returned.  */
      *expr_p = build1 (INDIRECT_REF, TREE_TYPE (to), t);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, gs);
  *expr_p = NULL;
  return GS_ALL_DONE;
}
/* A subroutine of gimplify_init_ctor_preeval.  Called via walk_tree,
   determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
   assignment.  Return non-null if we detect a potential overlap.  */

/* Context passed through walk_tree to gimplify_init_ctor_preeval_1.  */

struct gimplify_init_ctor_preeval_data
{
  /* The base decl of the lhs object.  May be NULL, in which case we
     have to assume the lhs is indirect.  */
  tree lhs_base_decl;

  /* The alias set of the lhs object, used to rule out overlap with
     indirect references in the constructor.  */
  alias_set_type lhs_alias_set;
};
/* walk_tree callback for gimplify_init_ctor_preeval: return the offending
   subtree if *TP might overlap the lhs described by XDATA (a pointer to
   struct gimplify_init_ctor_preeval_data), or NULL to keep walking.  */

static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
  struct gimplify_init_ctor_preeval_data *data
    = (struct gimplify_init_ctor_preeval_data *) xdata;
  tree t = *tp;

  /* If we find the base object, obviously we have overlap.  */
  if (data->lhs_base_decl == t)
    return t;

  /* If the constructor component is indirect, determine if we have a
     potential overlap with the lhs.  The only bits of information we
     have to go on at this point are addressability and alias sets.  */
  if ((INDIRECT_REF_P (t)
       || TREE_CODE (t) == MEM_REF)
      && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
      && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t)))
    return t;

  /* If the constructor component is a call, determine if it can hide a
     potential overlap with the lhs through an INDIRECT_REF like above.
     ??? Ugh - this is completely broken.  In fact this whole analysis
     doesn't look conservative.  */
  if (TREE_CODE (t) == CALL_EXPR)
    {
      tree type, fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (t)));

      /* Check each pointer parameter type of the callee for a possible
	 conflict with the lhs alias set.  */
      for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type))
	if (POINTER_TYPE_P (TREE_VALUE (type))
	    && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
	    && alias_sets_conflict_p (data->lhs_alias_set,
				      get_alias_set
				        (TREE_TYPE (TREE_VALUE (type)))))
	  return t;
    }

  /* Types and decls have no subtrees that could reference the lhs.  */
  if (IS_TYPE_OR_DECL_P (t))
    *walk_subtrees = 0;
  return NULL;
}
/* A subroutine of gimplify_init_constructor.  Pre-evaluate EXPR,
   force values that overlap with the lhs (as described by *DATA)
   into temporaries.

   Statements emitted before/after the value are appended to PRE_P
   and POST_P respectively.  On gimplification failure, *EXPR_P is
   set to NULL.  */

static void
gimplify_init_ctor_preeval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			    struct gimplify_init_ctor_preeval_data *data)
{
  enum gimplify_status one;

  /* If the value is constant, then there's nothing to pre-evaluate.  */
  if (TREE_CONSTANT (*expr_p))
    {
      /* Ensure it does not have side effects, it might contain a reference to
	 the object we're initializing.  */
      gcc_assert (!TREE_SIDE_EFFECTS (*expr_p));
      return;
    }

  /* If the type has non-trivial constructors, we can't pre-evaluate.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
    return;

  /* Recurse for nested constructors.  */
  if (TREE_CODE (*expr_p) == CONSTRUCTOR)
    {
      unsigned HOST_WIDE_INT ix;
      constructor_elt *ce;
      VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p);

      FOR_EACH_VEC_ELT (constructor_elt, v, ix, ce)
	gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);

      return;
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* Gimplify the constructor element to something appropriate for the rhs
     of a MODIFY_EXPR.  Given that we know the LHS is an aggregate, we know
     the gimplifier will consider this a store to memory.  Doing this
     gimplification now means that we won't have to deal with complicated
     language-specific trees, nor trees like SAVE_EXPR that can induce
     exponential search behavior.  */
  one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
  if (one == GS_ERROR)
    {
      *expr_p = NULL;
      return;
    }

  /* If we gimplified to a bare decl, we can be sure that it doesn't overlap
     with the lhs, since "a = { .x=a }" doesn't make sense.  This will
     always be true for all scalars, since is_gimple_mem_rhs insists on a
     temporary variable for them.  */
  if (DECL_P (*expr_p))
    return;

  /* If this is of variable size, we have no choice but to assume it doesn't
     overlap since we can't make a temporary for it.  */
  if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
    return;

  /* Otherwise, we must search for overlap ...  */
  if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
    return;

  /* ... and if found, force the value into a temporary.  */
  *expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
/* A subroutine of gimplify_init_ctor_eval.  Create a loop for
   a RANGE_EXPR in a CONSTRUCTOR for an array.

      var = lower;
    loop_entry:
      object[var] = value;
      if (var == upper)
	goto loop_exit;
      var = var + 1;
      goto loop_entry;
    loop_exit:

   We increment var _after_ the loop exit check because we might otherwise
   fail if upper == TYPE_MAX_VALUE (type for upper).

   Note that we never have to deal with SAVE_EXPRs here, because this has
   already been taken care of for us, in gimplify_init_ctor_preeval().  */

static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *,
				     gimple_seq *, bool);

static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
			       tree value, tree array_elt_type,
			       gimple_seq *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label, fall_thru_label;
  tree var, var_type, cref, tmp;

  loop_entry_label = create_artificial_label (UNKNOWN_LOCATION);
  loop_exit_label = create_artificial_label (UNKNOWN_LOCATION);
  fall_thru_label = create_artificial_label (UNKNOWN_LOCATION);

  /* Create and initialize the index variable.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type, NULL);
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, lower));

  /* Add the loop entry label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_entry_label));

  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
		 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */
  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
			     pre_p, cleared);
  else
    gimplify_seq_add_stmt (pre_p, gimple_build_assign (cref, value));

  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_seq_add_stmt (pre_p,
			 gimple_build_cond (EQ_EXPR, var, upper,
					    loop_exit_label, fall_thru_label));

  gimplify_seq_add_stmt (pre_p, gimple_build_label (fall_thru_label));

  /* Otherwise, increment the index var...  */
  tmp = build2 (PLUS_EXPR, var_type, var,
		fold_convert (var_type, integer_one_node));
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, tmp));

  /* ...and jump back to the loop entry.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_goto (loop_entry_label));

  /* Add the loop exit label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_exit_label));
}
/* Return true if FDECL is accessing a field that is zero sized.  */

static bool
zero_sized_field_decl (const_tree fdecl)
{
  /* Only FIELD_DECLs with a known, literally-zero size qualify.  */
  return (TREE_CODE (fdecl) == FIELD_DECL
	  && DECL_SIZE (fdecl)
	  && integer_zerop (DECL_SIZE (fdecl)));
}
/* Return true if TYPE is zero sized.  */

static bool
zero_sized_type (const_tree type)
{
  /* Only aggregates with a known, literally-zero size qualify.  */
  return (AGGREGATE_TYPE_P (type)
	  && TYPE_SIZE (type)
	  && integer_zerop (TYPE_SIZE (type)));
}
/* A subroutine of gimplify_init_constructor.  Generate individual
   MODIFY_EXPRs for a CONSTRUCTOR.  OBJECT is the LHS against which the
   assignments should happen.  ELTS is the CONSTRUCTOR_ELTS of the
   CONSTRUCTOR.  CLEARED is true if the entire LHS object has been
   zeroed first, in which case zero-valued elements can be skipped.  */

static void
gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts,
			 gimple_seq *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
	continue;

      /* With the object already cleared, storing zeros is redundant.  */
      if (cleared && initializer_zerop (value))
	continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
	 so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This can
	 happen with calls to functions returning a zero-sized type, which
	 we shouldn't discard.  As a number of downstream passes don't
	 expect sets of zero-sized fields, we rely on the gimplification of
	 the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
	continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
	 whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lower = TREE_OPERAND (purpose, 0);
	  tree upper = TREE_OPERAND (purpose, 1);

	  /* If the lower bound is equal to upper, just treat it as if
	     upper was the index.  */
	  if (simple_cst_equal (lower, upper))
	    purpose = upper;
	  else
	    {
	      gimplify_init_ctor_eval_range (object, lower, upper, value,
					     array_elt_type, pre_p, cleared);
	      continue;
	    }
	}

      if (array_elt_type)
	{
	  /* Do not use bitsizetype for ARRAY_REF indices.  */
	  if (TYPE_DOMAIN (TREE_TYPE (object)))
	    purpose
	      = fold_convert (TREE_TYPE (TYPE_DOMAIN (TREE_TYPE (object))),
			      purpose);
	  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
			 purpose, NULL_TREE, NULL_TREE);
	}
      else
	{
	  gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
	  cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
			 unshare_expr (object), purpose, NULL_TREE);
	}

      /* Nested non-vector constructors recurse; everything else becomes
	 a single INIT_EXPR that is gimplified in place.  */
      if (TREE_CODE (value) == CONSTRUCTOR
	  && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
	gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
				 pre_p, cleared);
      else
	{
	  tree init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
	  gimplify_and_add (init, pre_p);
	  /* The INIT_EXPR node is no longer needed once gimplified.  */
	  ggc_free (init);
	}
    }
}
/* Return the appropriate RHS predicate for this LHS.  */

gimple_predicate
rhs_predicate_for (tree lhs)
{
  /* Register LHSs demand register-valued RHSs; memory LHSs accept
     memory-valued RHSs.  Calls are allowed in either case.  */
  return (is_gimple_reg (lhs)
	  ? is_gimple_reg_rhs_or_call
	  : is_gimple_mem_rhs_or_call);
}
/* Gimplify a C99 compound literal expression.  This just means adding
   the DECL_EXPR before the current statement and using its anonymous
   decl instead.  Returns GS_OK; *EXPR_P is replaced by the decl.  */

static enum gimplify_status
gimplify_compound_literal_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (*expr_p);
  tree decl = DECL_EXPR_DECL (decl_s);

  /* Mark the decl as addressable if the compound literal
     expression is addressable now, otherwise it is marked too late
     after we gimplify the initialization expression.  */
  if (TREE_ADDRESSABLE (*expr_p))
    TREE_ADDRESSABLE (decl) = 1;

  /* Preliminarily mark non-addressed complex variables as eligible
     for promotion to gimple registers.  We'll transform their uses
     as we find them.  */
  if ((TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE
       || TREE_CODE (TREE_TYPE (decl)) == VECTOR_TYPE)
      && !TREE_THIS_VOLATILE (decl)
      && !needs_to_live_in_memory (decl))
    DECL_GIMPLE_REG_P (decl) = 1;

  /* This decl isn't mentioned in the enclosing block, so add it to the
     list of temps.  FIXME it seems a bit of a kludge to say that
     anonymous artificial vars aren't pushed, but everything else is.  */
  if (DECL_NAME (decl) == NULL_TREE && !DECL_SEEN_IN_BIND_EXPR_P (decl))
    gimple_add_tmp_var (decl);

  /* Emit the DECL_EXPR (and thus the initializer) ahead of the current
     statement, then stand in the decl for the literal.  */
  gimplify_and_add (decl_s, pre_p);
  *expr_p = decl;
  return GS_OK;
}
/* Optimize embedded COMPOUND_LITERAL_EXPRs within a CONSTRUCTOR,
   return a new CONSTRUCTOR if something changed.  The original
   ORIG_CTOR is left untouched (copy-on-write).  */

static tree
optimize_compound_literals_in_ctor (tree orig_ctor)
{
  tree ctor = orig_ctor;
  VEC(constructor_elt,gc) *elts = CONSTRUCTOR_ELTS (ctor);
  unsigned int idx, num = VEC_length (constructor_elt, elts);

  for (idx = 0; idx < num; idx++)
    {
      tree value = VEC_index (constructor_elt, elts, idx)->value;
      tree newval = value;

      /* Recurse into nested constructors; replace a compound literal
	 by its constant initializer when it is safe to do so.  */
      if (TREE_CODE (value) == CONSTRUCTOR)
	newval = optimize_compound_literals_in_ctor (value);
      else if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR)
	{
	  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (value);
	  tree decl = DECL_EXPR_DECL (decl_s);
	  tree init = DECL_INITIAL (decl);

	  /* The literal's decl must not be addressable, or its identity
	     (address) could be observed and the substitution would be
	     wrong.  */
	  if (!TREE_ADDRESSABLE (value)
	      && !TREE_ADDRESSABLE (decl)
	      && init
	      && TREE_CODE (init) == CONSTRUCTOR)
	    newval = optimize_compound_literals_in_ctor (init);
	}
      if (newval == value)
	continue;

      /* First change: unshare the constructor before mutating it, so
	 ORIG_CTOR stays intact for callers holding a reference.  */
      if (ctor == orig_ctor)
	{
	  ctor = copy_node (orig_ctor);
	  CONSTRUCTOR_ELTS (ctor) = VEC_copy (constructor_elt, gc, elts);
	  elts = CONSTRUCTOR_ELTS (ctor);
	}
      VEC_index (constructor_elt, elts, idx)->value = newval;
    }

  return ctor;
}
/* A subroutine of gimplify_modify_expr.  Break out elements of a
   CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.

   Note that we still need to clear any elements that don't have explicit
   initializers, so if not all elements are initialized we keep the
   original MODIFY_EXPR, we just remove all of the constructor elements.

   If NOTIFY_TEMP_CREATION is true, do not gimplify, just return
   GS_ERROR if we would have to create a temporary when gimplifying
   this constructor.  Otherwise, return GS_OK.

   If NOTIFY_TEMP_CREATION is false, just do the gimplification.  */

static enum gimplify_status
gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			   bool want_value, bool notify_temp_creation)
{
  tree object, ctor, type;
  enum gimplify_status ret;
  VEC(constructor_elt,gc) *elts;

  gcc_assert (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == CONSTRUCTOR);

  if (!notify_temp_creation)
    {
      ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			   is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  object = TREE_OPERAND (*expr_p, 0);
  ctor = TREE_OPERAND (*expr_p, 1) =
    optimize_compound_literals_in_ctor (TREE_OPERAND (*expr_p, 1));
  type = TREE_TYPE (ctor);
  elts = CONSTRUCTOR_ELTS (ctor);
  ret = GS_ALL_DONE;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
	struct gimplify_init_ctor_preeval_data preeval_data;
	HOST_WIDE_INT num_ctor_elements, num_nonzero_elements;
	bool cleared, complete_p, valid_const_initializer;

	/* Aggregate types must lower constructors to initialization of
	   individual elements.  The exception is that a CONSTRUCTOR node
	   with no elements indicates zero-initialization of the whole.  */
	if (VEC_empty (constructor_elt, elts))
	  {
	    if (notify_temp_creation)
	      return GS_OK;
	    break;
	  }

	/* Fetch information about the constructor to direct later processing.
	   We might want to make static versions of it in various cases, and
	   can only do so if it known to be a valid constant initializer.  */
	valid_const_initializer
	  = categorize_ctor_elements (ctor, &num_nonzero_elements,
				      &num_ctor_elements, &complete_p);

	/* If a const aggregate variable is being initialized, then it
	   should never be a lose to promote the variable to be static.  */
	if (valid_const_initializer
	    && num_nonzero_elements > 1
	    && TREE_READONLY (object)
	    && TREE_CODE (object) == VAR_DECL
	    && (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object)))
	  {
	    if (notify_temp_creation)
	      return GS_ERROR;
	    DECL_INITIAL (object) = ctor;
	    TREE_STATIC (object) = 1;
	    if (!DECL_NAME (object))
	      DECL_NAME (object) = create_tmp_var_name ("C");
	    walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

	    /* ??? C++ doesn't automatically append a .<number> to the
	       assembler name, and even when it does, it looks a FE private
	       data structures to figure out what that number should be,
	       which are not set for this variable.  I suppose this is
	       important for local statics for inline functions, which aren't
	       "local" in the object file sense.  So in order to get a unique
	       TU-local symbol, we must invoke the lhd version now.  */
	    lhd_set_decl_assembler_name (object);

	    *expr_p = NULL_TREE;
	    break;
	  }

	/* If there are "lots" of initialized elements, even discounting
	   those that are not address constants (and thus *must* be
	   computed at runtime), then partition the constructor into
	   constant and non-constant parts.  Block copy the constant
	   parts in, then generate code for the non-constant parts.  */
	/* TODO.  There's code in cp/typeck.c to do this.  */

	if (int_size_in_bytes (TREE_TYPE (ctor)) < 0)
	  /* store_constructor will ignore the clearing of variable-sized
	     objects.  Initializers for such objects must explicitly set
	     every field that needs to be set.  */
	  cleared = false;
	else if (!complete_p)
	  /* If the constructor isn't complete, clear the whole object
	     beforehand.

	     ??? This ought not to be needed.  For any element not present
	     in the initializer, we should simply set them to zero.  Except
	     we'd need to *find* the elements that are not present, and that
	     requires trickery to avoid quadratic compile-time behavior in
	     large cases or excessive memory use in small cases.  */
	  cleared = true;
	else if (num_ctor_elements - num_nonzero_elements
		 > CLEAR_RATIO (optimize_function_for_speed_p (cfun))
		 && num_nonzero_elements < num_ctor_elements / 4)
	  /* If there are "lots" of zeros, it's more efficient to clear
	     the memory and then set the nonzero elements.  */
	  cleared = true;
	else
	  cleared = false;

	/* If there are "lots" of initialized elements, and all of them
	   are valid address constants, then the entire initializer can
	   be dropped to memory, and then memcpy'd out.  Don't do this
	   for sparse arrays, though, as it's more efficient to follow
	   the standard CONSTRUCTOR behavior of memset followed by
	   individual element initialization.  Also don't do this for small
	   all-zero initializers (which aren't big enough to merit
	   clearing), and don't try to make bitwise copies of
	   TREE_ADDRESSABLE types.  */
	if (valid_const_initializer
	    && !(cleared || num_nonzero_elements == 0)
	    && !TREE_ADDRESSABLE (type))
	  {
	    HOST_WIDE_INT size = int_size_in_bytes (type);
	    unsigned int align;

	    /* ??? We can still get unbounded array types, at least
	       from the C++ front end.  This seems wrong, but attempt
	       to work around it for now.  */
	    if (size < 0)
	      {
		size = int_size_in_bytes (TREE_TYPE (object));
		if (size >= 0)
		  TREE_TYPE (ctor) = type = TREE_TYPE (object);
	      }

	    /* Find the maximum alignment we can assume for the object.  */
	    /* ??? Make use of DECL_OFFSET_ALIGN.  */
	    if (DECL_P (object))
	      align = DECL_ALIGN (object);
	    else
	      align = TYPE_ALIGN (type);

	    if (size > 0
		&& num_nonzero_elements > 1
		&& !can_move_by_pieces (size, align))
	      {
		if (notify_temp_creation)
		  return GS_ERROR;

		walk_tree (&ctor, force_labels_r, NULL, NULL);
		/* Emit the initializer as a static constant and assign
		   from that instead of element by element.  */
		ctor = tree_output_constant_def (ctor);
		if (!useless_type_conversion_p (type, TREE_TYPE (ctor)))
		  ctor = build1 (VIEW_CONVERT_EXPR, type, ctor);
		TREE_OPERAND (*expr_p, 1) = ctor;

		/* This is no longer an assignment of a CONSTRUCTOR, but
		   we still may have processing to do on the LHS.  So
		   pretend we didn't do anything here to let that happen.  */
		return GS_UNHANDLED;
	      }
	  }

	/* If the target is volatile, we have non-zero elements and more than
	   one field to assign, initialize the target from a temporary.  */
	if (TREE_THIS_VOLATILE (object)
	    && !TREE_ADDRESSABLE (type)
	    && num_nonzero_elements > 0
	    && VEC_length (constructor_elt, elts) > 1)
	  {
	    /* Build the constructor into the temporary, then copy it into
	       the volatile object in a single assignment.  */
	    tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type), NULL);
	    TREE_OPERAND (*expr_p, 0) = temp;
	    *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
			      *expr_p,
			      build2 (MODIFY_EXPR, void_type_node,
				      object, temp));
	    return GS_OK;
	  }

	if (notify_temp_creation)
	  return GS_OK;

	/* If there are nonzero elements and if needed, pre-evaluate to capture
	   elements overlapping with the lhs into temporaries.  We must do this
	   before clearing to fetch the values before they are zeroed-out.  */
	if (num_nonzero_elements > 0 && TREE_CODE (*expr_p) != INIT_EXPR)
	  {
	    preeval_data.lhs_base_decl = get_base_address (object);
	    if (!DECL_P (preeval_data.lhs_base_decl))
	      preeval_data.lhs_base_decl = NULL;
	    preeval_data.lhs_alias_set = get_alias_set (object);

	    gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
					pre_p, post_p, &preeval_data);
	  }

	if (cleared)
	  {
	    /* Zap the CONSTRUCTOR element list, which simplifies this case.
	       Note that we still have to gimplify, in order to handle the
	       case of variable sized types.  Avoid shared tree structures.  */
	    CONSTRUCTOR_ELTS (ctor) = NULL;
	    TREE_SIDE_EFFECTS (ctor) = 0;
	    object = unshare_expr (object);
	    gimplify_stmt (expr_p, pre_p);
	  }

	/* If we have not block cleared the object, or if there are nonzero
	   elements in the constructor, add assignments to the individual
	   scalar fields of the object.  */
	if (!cleared || num_nonzero_elements > 0)
	  gimplify_init_ctor_eval (object, elts, pre_p, cleared);

	*expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
	tree r, i;

	if (notify_temp_creation)
	  return GS_OK;

	/* Extract the real and imaginary parts out of the ctor.  */
	gcc_assert (VEC_length (constructor_elt, elts) == 2);
	r = VEC_index (constructor_elt, elts, 0)->value;
	i = VEC_index (constructor_elt, elts, 1)->value;
	if (r == NULL || i == NULL)
	  {
	    /* A missing part means zero-initialization of that part.  */
	    tree zero = build_zero_cst (TREE_TYPE (type));
	    if (r == NULL)
	      r = zero;
	    if (i == NULL)
	      i = zero;
	  }

	/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
	   represent creation of a complex value.  */
	if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
	  {
	    ctor = build_complex (type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	  }
	else
	  {
	    ctor = build2 (COMPLEX_EXPR, type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	    ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
				 pre_p,
				 post_p,
				 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
				 fb_rvalue);
	  }
      }
      break;

    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT ix;
	constructor_elt *ce;

	if (notify_temp_creation)
	  return GS_OK;

	/* Go ahead and simplify constant constructors to VECTOR_CST.  */
	if (TREE_CONSTANT (ctor))
	  {
	    bool constant_p = true;
	    tree value;

	    /* Even when ctor is constant, it might contain non-*_CST
	       elements, such as addresses or trapping values like
	       1.0/0.0 - 1.0/0.0.  Such expressions don't belong
	       in VECTOR_CST nodes.  */
	    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
	      if (!CONSTANT_CLASS_P (value))
		{
		  constant_p = false;
		  break;
		}

	    if (constant_p)
	      {
		TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts);
		break;
	      }

	    /* Don't reduce an initializer constant even if we can't
	       make a VECTOR_CST.  It won't do anything for us, and it'll
	       prevent us from representing it as a single constant.  */
	    if (initializer_constant_valid_p (ctor, type))
	      break;

	    TREE_CONSTANT (ctor) = 0;
	  }

	/* Vector types use CONSTRUCTOR all the way through gimple
	   compilation as a general initializer.  */
	FOR_EACH_VEC_ELT (constructor_elt, elts, ix, ce)
	  {
	    enum gimplify_status tret;
	    tret = gimplify_expr (&ce->value, pre_p, post_p, is_gimple_val,
				  fb_rvalue);
	    if (tret == GS_ERROR)
	      ret = GS_ERROR;
	  }

	/* A memory LHS needs the whole constructor in a single temporary.  */
	if (!is_gimple_reg (TREE_OPERAND (*expr_p, 0)))
	  TREE_OPERAND (*expr_p, 1) = get_formal_tmp_var (ctor, pre_p);
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?  */
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  else if (want_value)
    {
      *expr_p = object;
      return GS_OK;
    }
  else
    {
      /* If we have gimplified both sides of the initializer but have
	 not emitted an assignment, do so now.  */
      if (*expr_p)
	{
	  tree lhs = TREE_OPERAND (*expr_p, 0);
	  tree rhs = TREE_OPERAND (*expr_p, 1);
	  gimple init = gimple_build_assign (lhs, rhs);
	  gimplify_seq_add_stmt (pre_p, init);
	  *expr_p = NULL;
	}

      return GS_ALL_DONE;
    }
}
/* Given a pointer value OP0, return a simplified version of an
indirection through OP0, or NULL_TREE if no simplification is
possible. Note that the resulting type may be different from
the type pointed to in the sense that it is still compatible
from the langhooks point of view. */
tree
gimple_fold_indirect_ref (tree t)
{
tree ptype = TREE_TYPE (t), type = TREE_TYPE (ptype);
tree sub = t;
tree subtype;
/* Look through no-op conversions to find the underlying pointer.  */
STRIP_NOPS (sub);
subtype = TREE_TYPE (sub);
if (!POINTER_TYPE_P (subtype))
return NULL_TREE;
if (TREE_CODE (sub) == ADDR_EXPR)
{
tree op = TREE_OPERAND (sub, 0);
tree optype = TREE_TYPE (op);
/* *&p => p */
if (useless_type_conversion_p (type, optype))
return op;
/* *(foo *)&fooarray => fooarray[0] */
if (TREE_CODE (optype) == ARRAY_TYPE
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (optype))) == INTEGER_CST
&& useless_type_conversion_p (type, TREE_TYPE (optype)))
{
/* Index the array at its lower bound, which need not be zero
(TYPE_MIN_VALUE of the domain, when present).  */
tree type_domain = TYPE_DOMAIN (optype);
tree min_val = size_zero_node;
if (type_domain && TYPE_MIN_VALUE (type_domain))
min_val = TYPE_MIN_VALUE (type_domain);
if (TREE_CODE (min_val) == INTEGER_CST)
return build4 (ARRAY_REF, type, op, min_val, NULL_TREE, NULL_TREE);
}
/* *(foo *)&complexfoo => __real__ complexfoo */
else if (TREE_CODE (optype) == COMPLEX_TYPE
&& useless_type_conversion_p (type, TREE_TYPE (optype)))
return fold_build1 (REALPART_EXPR, type, op);
/* *(foo *)&vectorfoo => BIT_FIELD_REF<vectorfoo,...> */
else if (TREE_CODE (optype) == VECTOR_TYPE
&& useless_type_conversion_p (type, TREE_TYPE (optype)))
{
tree part_width = TYPE_SIZE (type);
tree index = bitsize_int (0);
return fold_build3 (BIT_FIELD_REF, type, op, part_width, index);
}
}
/* *(p + CST) -> ... */
if (TREE_CODE (sub) == POINTER_PLUS_EXPR
&& TREE_CODE (TREE_OPERAND (sub, 1)) == INTEGER_CST)
{
tree addr = TREE_OPERAND (sub, 0);
tree off = TREE_OPERAND (sub, 1);
tree addrtype;
STRIP_NOPS (addr);
addrtype = TREE_TYPE (addr);
/* ((foo*)&vectorfoo)[1] -> BIT_FIELD_REF<vectorfoo,...> */
if (TREE_CODE (addr) == ADDR_EXPR
&& TREE_CODE (TREE_TYPE (addrtype)) == VECTOR_TYPE
&& useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (addrtype)))
&& host_integerp (off, 1))
{
unsigned HOST_WIDE_INT offset = tree_low_cst (off, 1);
tree part_width = TYPE_SIZE (type);
unsigned HOST_WIDE_INT part_widthi
= tree_low_cst (part_width, 0) / BITS_PER_UNIT;
unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
tree index = bitsize_int (indexi);
/* Only fold when the referenced element lies within the vector.  */
if (offset / part_widthi
<= TYPE_VECTOR_SUBPARTS (TREE_TYPE (addrtype)))
return fold_build3 (BIT_FIELD_REF, type, TREE_OPERAND (addr, 0),
part_width, index);
}
/* ((foo*)&complexfoo)[1] -> __imag__ complexfoo */
if (TREE_CODE (addr) == ADDR_EXPR
&& TREE_CODE (TREE_TYPE (addrtype)) == COMPLEX_TYPE
&& useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (addrtype))))
{
/* The imaginary part sits exactly one element past the real part,
so the offset must equal the element size.  */
tree size = TYPE_SIZE_UNIT (type);
if (tree_int_cst_equal (size, off))
return fold_build1 (IMAGPART_EXPR, type, TREE_OPERAND (addr, 0));
}
/* *(p + CST) -> MEM_REF <p, CST>. */
if (TREE_CODE (addr) != ADDR_EXPR
|| DECL_P (TREE_OPERAND (addr, 0)))
return fold_build2 (MEM_REF, type,
addr,
build_int_cst_wide (ptype,
TREE_INT_CST_LOW (off),
TREE_INT_CST_HIGH (off)));
}
/* *(foo *)fooarrptr => (*fooarrptr)[0] */
if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (subtype)))) == INTEGER_CST
&& useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (subtype))))
{
tree type_domain;
tree min_val = size_zero_node;
tree osub = sub;
/* Recursively simplify the pointer dereference itself; if that
fails, fall back to an explicit INDIRECT_REF.  */
sub = gimple_fold_indirect_ref (sub);
if (! sub)
sub = build1 (INDIRECT_REF, TREE_TYPE (subtype), osub);
type_domain = TYPE_DOMAIN (TREE_TYPE (sub));
if (type_domain && TYPE_MIN_VALUE (type_domain))
min_val = TYPE_MIN_VALUE (type_domain);
if (TREE_CODE (min_val) == INTEGER_CST)
return build4 (ARRAY_REF, type, sub, min_val, NULL_TREE, NULL_TREE);
}
/* No simplification applied.  */
return NULL_TREE;
}
/* Simplify an indirection through the pointer value T for use on the
   right-hand side of an expression, returning the folded tree or
   NULL_TREE when no simplification is possible.  The result's type may
   differ from the pointed-to type while remaining compatible from the
   langhooks point of view.  */
static tree
gimple_fold_indirect_ref_rhs (tree t)
{
  tree folded = gimple_fold_indirect_ref (t);
  return folded;
}
/* Subroutine of gimplify_modify_expr to do simplifications of
MODIFY_EXPRs based on the code of the RHS. We loop for as long as
something changes.
EXPR_P points to the whole MODIFY_EXPR/INIT_EXPR; FROM_P and TO_P
point at its RHS and LHS operands respectively.  PRE_P/POST_P collect
side effects; WANT_VALUE is true when the assignment's value is used.
Returns GS_UNHANDLED when no simplification applied.  */
static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
gimple_seq *pre_p, gimple_seq *post_p,
bool want_value)
{
enum gimplify_status ret = GS_UNHANDLED;
bool changed;
/* Iterate to a fixed point: each simplification may expose another.  */
do
{
changed = false;
switch (TREE_CODE (*from_p))
{
case VAR_DECL:
/* If we're assigning from a read-only variable initialized with
a constructor, do the direct assignment from the constructor,
but only if neither source nor target are volatile since this
latter assignment might end up being done on a per-field basis. */
if (DECL_INITIAL (*from_p)
&& TREE_READONLY (*from_p)
&& !TREE_THIS_VOLATILE (*from_p)
&& !TREE_THIS_VOLATILE (*to_p)
&& TREE_CODE (DECL_INITIAL (*from_p)) == CONSTRUCTOR)
{
tree old_from = *from_p;
enum gimplify_status subret;
/* Move the constructor into the RHS. */
*from_p = unshare_expr (DECL_INITIAL (*from_p));
/* Let's see if gimplify_init_constructor will need to put
it in memory. */
subret = gimplify_init_constructor (expr_p, NULL, NULL,
false, true);
if (subret == GS_ERROR)
{
/* If so, revert the change. */
*from_p = old_from;
}
else
{
ret = GS_OK;
changed = true;
}
}
break;
case INDIRECT_REF:
{
/* If we have code like
*(const A*)(A*)&x
where the type of "x" is a (possibly cv-qualified variant
of "A"), treat the entire expression as identical to "x".
This kind of code arises in C++ when an object is bound
to a const reference, and if "x" is a TARGET_EXPR we want
to take advantage of the optimization below. */
bool volatile_p = TREE_THIS_VOLATILE (*from_p);
tree t = gimple_fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
if (t)
{
/* Preserve volatility of the original dereference on the
folded replacement.  */
if (TREE_THIS_VOLATILE (t) != volatile_p)
{
if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_declaration)
t = build_simple_mem_ref_loc (EXPR_LOCATION (*from_p),
build_fold_addr_expr (t));
if (REFERENCE_CLASS_P (t))
TREE_THIS_VOLATILE (t) = volatile_p;
}
*from_p = t;
ret = GS_OK;
changed = true;
}
break;
}
case TARGET_EXPR:
{
/* If we are initializing something from a TARGET_EXPR, strip the
TARGET_EXPR and initialize it directly, if possible. This can't
be done if the initializer is void, since that implies that the
temporary is set in some non-trivial way.
??? What about code that pulls out the temp and uses it
elsewhere? I think that such code never uses the TARGET_EXPR as
an initializer. If I'm wrong, we'll die because the temp won't
have any RTL. In that case, I guess we'll need to replace
references somehow. */
tree init = TARGET_EXPR_INITIAL (*from_p);
if (init
&& !VOID_TYPE_P (TREE_TYPE (init)))
{
*from_p = init;
ret = GS_OK;
changed = true;
}
}
break;
case COMPOUND_EXPR:
/* Remove any COMPOUND_EXPR in the RHS so the following cases will be
caught. */
gimplify_compound_expr (from_p, pre_p, true);
ret = GS_OK;
changed = true;
break;
case CONSTRUCTOR:
/* If we already made some changes, let the front end have a
crack at this before we break it down. */
if (ret != GS_UNHANDLED)
break;
/* If we're initializing from a CONSTRUCTOR, break this into
individual MODIFY_EXPRs. */
return gimplify_init_constructor (expr_p, pre_p, post_p, want_value,
false);
case COND_EXPR:
/* If we're assigning to a non-register type, push the assignment
down into the branches. This is mandatory for ADDRESSABLE types,
since we cannot generate temporaries for such, but it saves a
copy in other cases as well. */
if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
{
/* This code should mirror the code in gimplify_cond_expr. */
enum tree_code code = TREE_CODE (*expr_p);
tree cond = *from_p;
tree result = *to_p;
ret = gimplify_expr (&result, pre_p, post_p,
is_gimple_lvalue, fb_lvalue);
if (ret != GS_ERROR)
ret = GS_OK;
/* Wrap each non-void arm in "result = arm".  */
if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
TREE_OPERAND (cond, 1)
= build2 (code, void_type_node, result,
TREE_OPERAND (cond, 1));
if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
TREE_OPERAND (cond, 2)
= build2 (code, void_type_node, unshare_expr (result),
TREE_OPERAND (cond, 2));
TREE_TYPE (cond) = void_type_node;
recalculate_side_effects (cond);
if (want_value)
{
gimplify_and_add (cond, pre_p);
*expr_p = unshare_expr (result);
}
else
*expr_p = cond;
return ret;
}
break;
case CALL_EXPR:
/* For calls that return in memory, give *to_p as the CALL_EXPR's
return slot so that we don't generate a temporary. */
if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
&& aggregate_value_p (*from_p, *from_p))
{
bool use_target;
if (!(rhs_predicate_for (*to_p))(*from_p))
/* If we need a temporary, *to_p isn't accurate. */
use_target = false;
/* It's OK to use the return slot directly unless it's an NRV. */
else if (TREE_CODE (*to_p) == RESULT_DECL
&& DECL_NAME (*to_p) == NULL_TREE
&& needs_to_live_in_memory (*to_p))
use_target = true;
else if (is_gimple_reg_type (TREE_TYPE (*to_p))
|| (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
/* Don't force regs into memory. */
use_target = false;
else if (TREE_CODE (*expr_p) == INIT_EXPR)
/* It's OK to use the target directly if it's being
initialized. */
use_target = true;
else if (variably_modified_type_p (TREE_TYPE (*to_p), NULL_TREE))
/* Always use the target and thus RSO for variable-sized types.
GIMPLE cannot deal with a variable-sized assignment
embedded in a call statement. */
use_target = true;
else if (TREE_CODE (*to_p) != SSA_NAME
&& (!is_gimple_variable (*to_p)
|| needs_to_live_in_memory (*to_p)))
/* Don't use the original target if it's already addressable;
if its address escapes, and the called function uses the
NRV optimization, a conforming program could see *to_p
change before the called function returns; see c++/19317.
When optimizing, the return_slot pass marks more functions
as safe after we have escape info. */
use_target = false;
else
use_target = true;
if (use_target)
{
CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
mark_addressable (*to_p);
}
}
break;
case WITH_SIZE_EXPR:
/* Likewise for calls that return an aggregate of non-constant size,
since we would not be able to generate a temporary at all. */
if (TREE_CODE (TREE_OPERAND (*from_p, 0)) == CALL_EXPR)
{
*from_p = TREE_OPERAND (*from_p, 0);
/* We don't change ret in this case because the
WITH_SIZE_EXPR might have been added in
gimplify_modify_expr, so returning GS_OK would lead to an
infinite loop. */
changed = true;
}
break;
/* If we're initializing from a container, push the initialization
inside it. */
case CLEANUP_POINT_EXPR:
case BIND_EXPR:
case STATEMENT_LIST:
{
tree wrap = *from_p;
tree t;
ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval,
fb_lvalue);
if (ret != GS_ERROR)
ret = GS_OK;
/* Move the whole assignment inside the wrapper.  */
t = voidify_wrapper_expr (wrap, *expr_p);
gcc_assert (t == *expr_p);
if (want_value)
{
gimplify_and_add (wrap, pre_p);
*expr_p = unshare_expr (*to_p);
}
else
*expr_p = wrap;
return GS_OK;
}
case COMPOUND_LITERAL_EXPR:
{
tree complit = TREE_OPERAND (*expr_p, 1);
tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (complit);
tree decl = DECL_EXPR_DECL (decl_s);
tree init = DECL_INITIAL (decl);
/* struct T x = (struct T) { 0, 1, 2 } can be optimized
into struct T x = { 0, 1, 2 } if the address of the
compound literal has never been taken. */
if (!TREE_ADDRESSABLE (complit)
&& !TREE_ADDRESSABLE (decl)
&& init)
{
*expr_p = copy_node (*expr_p);
TREE_OPERAND (*expr_p, 1) = init;
return GS_OK;
}
}
/* Otherwise fall through to the default case.  */
default:
break;
}
}
while (changed);
return ret;
}
/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P
   is a MODIFY_EXPR whose lhs is a REAL/IMAGPART_EXPR of a variable with
   DECL_GIMPLE_REG_P set.

   IMPORTANT NOTE: The promotion works by loading the other, unmodified
   part of the complex object just before the total store.  If the object
   is still uninitialized, an undefined value is therefore loaded into a
   register, which may raise a spurious exception when the register is
   floating-point and the value happens to be a signaling NaN.  The
   fully-fledged complex lowering pass followed by DCE is then required
   to clean things up.  */
static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, gimple_seq *pre_p,
				   bool want_value)
{
  tree part_ref = TREE_OPERAND (*expr_p, 0);
  tree stored_value = TREE_OPERAND (*expr_p, 1);
  enum tree_code part_code = TREE_CODE (part_ref);
  tree var = TREE_OPERAND (part_ref, 0);
  enum tree_code other_code;
  tree other_part, re, im, full_rhs;

  /* Which part is NOT being stored?  */
  if (part_code == REALPART_EXPR)
    other_code = IMAGPART_EXPR;
  else
    other_code = REALPART_EXPR;

  /* Read the untouched part into a formal temporary; suppress
     uninitialized-use warnings on that artificial load.  */
  other_part = build1 (other_code, TREE_TYPE (stored_value), var);
  TREE_NO_WARNING (other_part) = 1;
  other_part = get_formal_tmp_var (other_part, pre_p);

  if (part_code == REALPART_EXPR)
    {
      re = stored_value;
      im = other_part;
    }
  else
    {
      re = other_part;
      im = stored_value;
    }

  /* Recompose the full complex value and store it in one go.  */
  if (TREE_CONSTANT (re) && TREE_CONSTANT (im))
    full_rhs = build_complex (TREE_TYPE (var), re, im);
  else
    full_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (var), re, im);

  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, full_rhs));

  *expr_p = want_value ? stored_value : NULL_TREE;
  return GS_ALL_DONE;
}
/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.
modify_expr
: varname '=' rhs
| '*' ID '=' rhs
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the list where side effects that must happen after
*EXPR_P should be stored.
WANT_VALUE is nonzero iff we want to use the value of this expression
in another expression. */
static enum gimplify_status
gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool want_value)
{
tree *from_p = &TREE_OPERAND (*expr_p, 1);
tree *to_p = &TREE_OPERAND (*expr_p, 0);
enum gimplify_status ret = GS_UNHANDLED;
gimple assign;
location_t loc = EXPR_LOCATION (*expr_p);
gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
|| TREE_CODE (*expr_p) == INIT_EXPR);
/* Trying to simplify a clobber using normal logic doesn't work,
so handle it here. */
if (TREE_CLOBBER_P (*from_p))
{
gcc_assert (!want_value && TREE_CODE (*to_p) == VAR_DECL);
gimplify_seq_add_stmt (pre_p, gimple_build_assign (*to_p, *from_p));
*expr_p = NULL;
return GS_ALL_DONE;
}
/* Insert pointer conversions required by the middle-end that are not
required by the frontend. This fixes middle-end type checking for
for example gcc.dg/redecl-6.c. */
if (POINTER_TYPE_P (TREE_TYPE (*to_p)))
{
STRIP_USELESS_TYPE_CONVERSION (*from_p);
if (!useless_type_conversion_p (TREE_TYPE (*to_p), TREE_TYPE (*from_p)))
*from_p = fold_convert_loc (loc, TREE_TYPE (*to_p), *from_p);
}
/* See if any simplifications can be done based on what the RHS is. */
ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
want_value);
if (ret != GS_UNHANDLED)
return ret;
/* For zero sized types only gimplify the left hand side and right hand
side as statements and throw away the assignment. Do this after
gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
types properly. */
if (zero_sized_type (TREE_TYPE (*from_p)) && !want_value)
{
gimplify_stmt (from_p, pre_p);
gimplify_stmt (to_p, pre_p);
*expr_p = NULL_TREE;
return GS_ALL_DONE;
}
/* If the value being copied is of variable width, compute the length
of the copy into a WITH_SIZE_EXPR. Note that we need to do this
before gimplifying any of the operands so that we can resolve any
PLACEHOLDER_EXPRs in the size. Also note that the RTL expander uses
the size of the expression to be copied, not of the destination, so
that is what we must do here. */
maybe_with_size_expr (from_p);
/* Gimplify the LHS first.  */
ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
if (ret == GS_ERROR)
return ret;
/* As a special case, we have to temporarily allow for assignments
with a CALL_EXPR on the RHS. Since in GIMPLE a function call is
a toplevel statement, when gimplifying the GENERIC expression
MODIFY_EXPR <a, CALL_EXPR <foo>>, we cannot create the tuple
GIMPLE_ASSIGN <a, GIMPLE_CALL <foo>>.
Instead, we need to create the tuple GIMPLE_CALL <a, foo>. To
prevent gimplify_expr from trying to create a new temporary for
foo's LHS, we tell it that it should only gimplify until it
reaches the CALL_EXPR. On return from gimplify_expr, the newly
created GIMPLE_CALL <foo> will be the last statement in *PRE_P
and all we need to do here is set 'a' to be its LHS. */
ret = gimplify_expr (from_p, pre_p, post_p, rhs_predicate_for (*to_p),
fb_rvalue);
if (ret == GS_ERROR)
return ret;
/* Now see if the above changed *from_p to something we handle specially. */
ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
want_value);
if (ret != GS_UNHANDLED)
return ret;
/* If we've got a variable sized assignment between two lvalues (i.e. does
not involve a call), then we can make things a bit more straightforward
by converting the assignment to memcpy or memset. */
if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
{
tree from = TREE_OPERAND (*from_p, 0);
tree size = TREE_OPERAND (*from_p, 1);
if (TREE_CODE (from) == CONSTRUCTOR)
return gimplify_modify_expr_to_memset (expr_p, size, want_value, pre_p);
if (is_gimple_addressable (from))
{
*from_p = from;
return gimplify_modify_expr_to_memcpy (expr_p, size, want_value,
pre_p);
}
}
/* Transform partial stores to non-addressable complex variables into
total stores. This allows us to use real instead of virtual operands
for these variables, which improves optimization. */
if ((TREE_CODE (*to_p) == REALPART_EXPR
|| TREE_CODE (*to_p) == IMAGPART_EXPR)
&& is_gimple_reg (TREE_OPERAND (*to_p, 0)))
return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);
/* Try to alleviate the effects of the gimplification creating artificial
temporaries (see for example is_gimple_reg_rhs) on the debug info. */
if (!gimplify_ctxp->into_ssa
&& TREE_CODE (*from_p) == VAR_DECL
&& DECL_IGNORED_P (*from_p)
&& DECL_P (*to_p)
&& !DECL_IGNORED_P (*to_p))
{
/* Give the artificial temporary a name derived from the real
destination, and point its debug expression at it.  */
if (!DECL_NAME (*from_p) && DECL_NAME (*to_p))
DECL_NAME (*from_p)
= create_tmp_var_name (IDENTIFIER_POINTER (DECL_NAME (*to_p)));
DECL_DEBUG_EXPR_IS_FROM (*from_p) = 1;
SET_DECL_DEBUG_EXPR (*from_p, *to_p);
}
/* A volatile LHS must not be re-read to produce the value; snapshot
the RHS into a temporary instead.  */
if (want_value && TREE_THIS_VOLATILE (*to_p))
*from_p = get_initialized_tmp_var (*from_p, pre_p, post_p);
if (TREE_CODE (*from_p) == CALL_EXPR)
{
/* Since the RHS is a CALL_EXPR, we need to create a GIMPLE_CALL
instead of a GIMPLE_ASSIGN. */
tree fnptrtype = TREE_TYPE (CALL_EXPR_FN (*from_p));
CALL_EXPR_FN (*from_p) = TREE_OPERAND (CALL_EXPR_FN (*from_p), 0);
STRIP_USELESS_TYPE_CONVERSION (CALL_EXPR_FN (*from_p));
assign = gimple_build_call_from_tree (*from_p);
gimple_call_set_fntype (assign, TREE_TYPE (fnptrtype));
if (!gimple_call_noreturn_p (assign))
gimple_call_set_lhs (assign, *to_p);
}
else
{
assign = gimple_build_assign (*to_p, *from_p);
gimple_set_location (assign, EXPR_LOCATION (*expr_p));
}
gimplify_seq_add_stmt (pre_p, assign);
if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
{
/* If we've somehow already got an SSA_NAME on the LHS, then
we've probably modified it twice. Not good. */
gcc_assert (TREE_CODE (*to_p) != SSA_NAME);
*to_p = make_ssa_name (*to_p, assign);
gimple_set_lhs (assign, *to_p);
}
if (want_value)
{
*expr_p = TREE_THIS_VOLATILE (*to_p) ? *from_p : unshare_expr (*to_p);
return GS_OK;
}
else
*expr_p = NULL;
return GS_ALL_DONE;
}
/* Gimplify a comparison between two variable-sized objects by lowering
   it to a call to BUILT_IN_MEMCMP whose result is compared against
   zero.  */
static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree lhs_obj = TREE_OPERAND (*expr_p, 0);
  tree rhs_obj = TREE_OPERAND (*expr_p, 1);
  tree nbytes, lhs_addr, rhs_addr, memcmp_fn, call, cmp;

  /* The byte count is the size of the first operand; resolve any
     PLACEHOLDER_EXPRs in it against that operand.  */
  nbytes = TYPE_SIZE_UNIT (TREE_TYPE (lhs_obj));
  nbytes = unshare_expr (nbytes);
  nbytes = SUBSTITUTE_PLACEHOLDER_IN_EXPR (nbytes, lhs_obj);

  lhs_addr = build_fold_addr_expr_loc (loc, lhs_obj);
  rhs_addr = build_fold_addr_expr_loc (loc, rhs_obj);

  memcmp_fn = builtin_decl_implicit (BUILT_IN_MEMCMP);
  call = build_call_expr_loc (loc, memcmp_fn, 3, lhs_addr, rhs_addr, nbytes);

  /* Keep the original comparison code (EQ/NE) and type, applied to
     memcmp's result vs. zero.  */
  cmp = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), call,
		integer_zero_node);
  SET_EXPR_LOCATION (cmp, loc);

  *expr_p = cmp;
  return GS_OK;
}
/* Gimplify a comparison between two aggregate objects whose mode is an
   integral scalar mode, by comparing the bitwise-equivalent scalar
   values instead.  */
static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree lhs = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);
  /* An unsigned integer type matching the aggregate's machine mode.  */
  tree bits_type
    = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (lhs)), 1);
  tree lhs_bits = fold_build1_loc (loc, VIEW_CONVERT_EXPR, bits_type, lhs);
  tree rhs_bits = fold_build1_loc (loc, VIEW_CONVERT_EXPR, bits_type, rhs);

  *expr_p = fold_build2_loc (loc, TREE_CODE (*expr_p), TREE_TYPE (*expr_p),
			     lhs_bits, rhs_bits);
  return GS_OK;
}
/* Gimplify an expression sequence.  Each expression is gimplified in
   turn and the original expression is rewritten with the last
   expression of the sequence in GIMPLE form.

   PRE_P points to the list where the side effects for all the
   expressions in the sequence will be emitted.

   WANT_VALUE is true when the result of the last COMPOUND_EXPR is
   used.  */
static enum gimplify_status
gimplify_compound_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree cur = *expr_p;

  /* Walk the spine of COMPOUND_EXPRs, gimplifying each left operand
     for its side effects only; the value of the sequence is the
     rightmost operand.  */
  do
    {
      tree *first_p = &TREE_OPERAND (cur, 0);

      if (TREE_CODE (*first_p) != COMPOUND_EXPR)
	gimplify_stmt (first_p, pre_p);
      else
	gimplify_compound_expr (first_p, pre_p, false);

      cur = TREE_OPERAND (cur, 1);
    }
  while (TREE_CODE (cur) == COMPOUND_EXPR);

  *expr_p = cur;

  if (!want_value)
    {
      /* The value is unused: gimplify the final expression as a
	 statement too and report the whole sequence as done.  */
      gimplify_stmt (expr_p, pre_p);
      return GS_ALL_DONE;
    }

  return GS_OK;
}
/* Gimplify a SAVE_EXPR node.  EXPR_P points to the expression to
   gimplify.  After gimplification, EXPR_P will point to a new
   temporary that holds the original value of the SAVE_EXPR node.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  */
static enum gimplify_status
gimplify_save_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree saved;
  enum gimplify_status status = GS_ALL_DONE;

  gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
  saved = TREE_OPERAND (*expr_p, 0);

  /* Evaluate the operand exactly once, the first time the SAVE_EXPR
     is encountered.  */
  if (!SAVE_EXPR_RESOLVED_P (*expr_p))
    {
      if (TREE_TYPE (saved) != void_type_node)
	saved = get_initialized_tmp_var (saved, pre_p, post_p);
      else
	{
	  /* A void-valued operand (such as SAVE_EXPRs generated by the
	     Java frontend for class initialization) is executed purely
	     for its side effects and yields no value.  */
	  status = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
				  is_gimple_stmt, fb_none);
	  saved = NULL;
	}

      TREE_OPERAND (*expr_p, 0) = saved;
      SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
    }

  *expr_p = saved;
  return status;
}
/* Rewrite the ADDR_EXPR node pointed to by EXPR_P
unary_expr
: ...
| '&' varname
...
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the list where side effects that must happen after
*EXPR_P should be stored. */
static enum gimplify_status
gimplify_addr_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree expr = *expr_p;
tree op0 = TREE_OPERAND (expr, 0);
enum gimplify_status ret;
location_t loc = EXPR_LOCATION (*expr_p);
switch (TREE_CODE (op0))
{
case INDIRECT_REF:
do_indirect_ref:
/* Check if we are dealing with an expression of the form '&*ptr'.
While the front end folds away '&*ptr' into 'ptr', these
expressions may be generated internally by the compiler (e.g.,
builtins like __builtin_va_end). */
/* Caution: the silent array decomposition semantics we allow for
ADDR_EXPR means we can't always discard the pair. */
/* Gimplification of the ADDR_EXPR operand may drop
cv-qualification conversions, so make sure we add them if
needed. */
{
tree op00 = TREE_OPERAND (op0, 0);
tree t_expr = TREE_TYPE (expr);
tree t_op00 = TREE_TYPE (op00);
if (!useless_type_conversion_p (t_expr, t_op00))
op00 = fold_convert_loc (loc, TREE_TYPE (expr), op00);
/* '&*ptr' collapses to the (possibly converted) pointer itself.  */
*expr_p = op00;
ret = GS_OK;
}
break;
case VIEW_CONVERT_EXPR:
/* Take the address of our operand and then convert it to the type of
this ADDR_EXPR.
??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
all clear. The impact of this transformation is even less clear. */
/* If the operand is a useless conversion, look through it. Doing so
guarantees that the ADDR_EXPR and its operand will remain of the
same type. */
if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
op0 = TREE_OPERAND (op0, 0);
*expr_p = fold_convert_loc (loc, TREE_TYPE (expr),
build_fold_addr_expr_loc (loc,
TREE_OPERAND (op0, 0)));
ret = GS_OK;
break;
default:
/* We use fb_either here because the C frontend sometimes takes
the address of a call that returns a struct; see
gcc.dg/c99-array-lval-1.c. The gimplifier will correctly make
the implied temporary explicit. */
/* Make the operand addressable. */
ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
is_gimple_addressable, fb_either);
if (ret == GS_ERROR)
break;
/* Then mark it. Beware that it may not be possible to do so directly
if a temporary has been created by the gimplification. */
prepare_gimple_addressable (&TREE_OPERAND (expr, 0), pre_p);
op0 = TREE_OPERAND (expr, 0);
/* For various reasons, the gimplification of the expression
may have made a new INDIRECT_REF. */
if (TREE_CODE (op0) == INDIRECT_REF)
goto do_indirect_ref;
mark_addressable (TREE_OPERAND (expr, 0));
/* The FEs may end up building ADDR_EXPRs early on a decl with
an incomplete type. Re-build ADDR_EXPRs in canonical form
here. */
if (!types_compatible_p (TREE_TYPE (op0), TREE_TYPE (TREE_TYPE (expr))))
*expr_p = build_fold_addr_expr (op0);
/* Make sure TREE_CONSTANT and TREE_SIDE_EFFECTS are set properly. */
recompute_tree_invariant_for_addr_expr (*expr_p);
/* If we re-built the ADDR_EXPR add a conversion to the original type
if required. */
if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
*expr_p = fold_convert (TREE_TYPE (expr), *expr_p);
break;
}
return ret;
}
/* Gimplify the operands of an ASM_EXPR. Input operands should be a gimple
value; output operands should be a gimple lvalue.
PRE_P/POST_P collect side effects.  Returns GS_ALL_DONE on success or
GS_ERROR when a constraint or operand is invalid (in which case no
GIMPLE_ASM is emitted).  Note that the operand counter I deliberately
keeps incrementing across outputs, inputs, clobbers and labels so that
diagnostics report the overall operand number. */
static enum gimplify_status
gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree expr;
int noutputs;
const char **oconstraints;
int i;
tree link;
const char *constraint;
bool allows_mem, allows_reg, is_inout;
enum gimplify_status ret, tret;
gimple stmt;
VEC(tree, gc) *inputs;
VEC(tree, gc) *outputs;
VEC(tree, gc) *clobbers;
VEC(tree, gc) *labels;
tree link_next;
expr = *expr_p;
noutputs = list_length (ASM_OUTPUTS (expr));
/* Remember each output constraint so matching input constraints can be
validated against them below.  */
oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *));
inputs = outputs = clobbers = labels = NULL;
ret = GS_ALL_DONE;
link_next = NULL_TREE;
for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = link_next)
{
bool ok;
size_t constraint_len;
link_next = TREE_CHAIN (link);
oconstraints[i]
= constraint
= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
constraint_len = strlen (constraint);
if (constraint_len == 0)
continue;
ok = parse_output_constraint (&constraint, i, 0, 0,
&allows_mem, &allows_reg, &is_inout);
if (!ok)
{
ret = GS_ERROR;
is_inout = false;
}
/* A memory-only output must have its address taken.  */
if (!allows_reg && allows_mem)
mark_addressable (TREE_VALUE (link));
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_inout ? is_gimple_min_lval : is_gimple_lvalue,
fb_lvalue | fb_mayfail);
if (tret == GS_ERROR)
{
error ("invalid lvalue in asm output %d", i);
ret = tret;
}
VEC_safe_push (tree, gc, outputs, link);
TREE_CHAIN (link) = NULL_TREE;
if (is_inout)
{
/* An input/output operand. To give the optimizers more
flexibility, split it into separate input and output
operands. */
tree input;
char buf[10];
/* Turn the in/out constraint into an output constraint. */
char *p = xstrdup (constraint);
p[0] = '=';
TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);
/* And add a matching input constraint. */
if (allows_reg)
{
sprintf (buf, "%d", i);
/* If there are multiple alternatives in the constraint,
handle each of them individually. Those that allow register
will be replaced with operand number, the others will stay
unchanged. */
if (strchr (p, ',') != NULL)
{
/* First pass: compute an upper bound on the rewritten
constraint's length.  */
size_t len = 0, buflen = strlen (buf);
char *beg, *end, *str, *dst;
for (beg = p + 1;;)
{
end = strchr (beg, ',');
if (end == NULL)
end = strchr (beg, '\0');
if ((size_t) (end - beg) < buflen)
len += buflen + 1;
else
len += end - beg + 1;
if (*end)
beg = end + 1;
else
break;
}
str = (char *) alloca (len);
/* Second pass: rewrite each alternative, replacing
register-allowing ones with the matching operand number.  */
for (beg = p + 1, dst = str;;)
{
const char *tem;
bool mem_p, reg_p, inout_p;
end = strchr (beg, ',');
if (end)
*end = '\0';
beg[-1] = '=';
tem = beg - 1;
parse_output_constraint (&tem, i, 0, 0,
&mem_p, &reg_p, &inout_p);
if (dst != str)
*dst++ = ',';
if (reg_p)
{
memcpy (dst, buf, buflen);
dst += buflen;
}
else
{
if (end)
len = end - beg;
else
len = strlen (beg);
memcpy (dst, beg, len);
dst += len;
}
if (end)
beg = end + 1;
else
break;
}
*dst = '\0';
input = build_string (dst - str, str);
}
else
input = build_string (strlen (buf), buf);
}
else
/* Memory-only in/out: the input constraint is the original one
with the leading '+' stripped.  */
input = build_string (constraint_len - 1, constraint + 1);
free (p);
input = build_tree_list (build_tree_list (NULL_TREE, input),
unshare_expr (TREE_VALUE (link)));
ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
}
}
link_next = NULL_TREE;
for (link = ASM_INPUTS (expr); link; ++i, link = link_next)
{
link_next = TREE_CHAIN (link);
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
parse_input_constraint (&constraint, 0, 0, noutputs, 0,
oconstraints, &allows_mem, &allows_reg);
/* If we can't make copies, we can only accept memory. */
if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (link))))
{
if (allows_mem)
allows_reg = 0;
else
{
error ("impossible constraint in %<asm%>");
error ("non-memory input %d must stay in memory", i);
return GS_ERROR;
}
}
/* If the operand is a memory input, it should be an lvalue. */
if (!allows_reg && allows_mem)
{
tree inputv = TREE_VALUE (link);
STRIP_NOPS (inputv);
/* Pre/post increment/decrement of a memory input can never be a
valid lvalue; reject it up front.  */
if (TREE_CODE (inputv) == PREDECREMENT_EXPR
|| TREE_CODE (inputv) == PREINCREMENT_EXPR
|| TREE_CODE (inputv) == POSTDECREMENT_EXPR
|| TREE_CODE (inputv) == POSTINCREMENT_EXPR)
TREE_VALUE (link) = error_mark_node;
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_gimple_lvalue, fb_lvalue | fb_mayfail);
mark_addressable (TREE_VALUE (link));
if (tret == GS_ERROR)
{
if (EXPR_HAS_LOCATION (TREE_VALUE (link)))
input_location = EXPR_LOCATION (TREE_VALUE (link));
error ("memory input %d is not directly addressable", i);
ret = tret;
}
}
else
{
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_gimple_asm_val, fb_rvalue);
if (tret == GS_ERROR)
ret = tret;
}
TREE_CHAIN (link) = NULL_TREE;
VEC_safe_push (tree, gc, inputs, link);
}
/* Clobbers and labels need no gimplification; just collect them.  */
for (link = ASM_CLOBBERS (expr); link; ++i, link = TREE_CHAIN (link))
VEC_safe_push (tree, gc, clobbers, link);
for (link = ASM_LABELS (expr); link; ++i, link = TREE_CHAIN (link))
VEC_safe_push (tree, gc, labels, link);
/* Do not add ASMs with errors to the gimple IL stream. */
if (ret != GS_ERROR)
{
stmt = gimple_build_asm_vec (TREE_STRING_POINTER (ASM_STRING (expr)),
inputs, outputs, clobbers, labels);
gimple_asm_set_volatile (stmt, ASM_VOLATILE_P (expr));
gimple_asm_set_input (stmt, ASM_INPUT_P (expr));
gimplify_seq_add_stmt (pre_p, stmt);
}
return ret;
}
/* Gimplify a CLEANUP_POINT_EXPR. Currently this works by adding
GIMPLE_WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
return to this function.
FIXME should we complexify the prequeue handling instead? Or use flags
for all the cleanups and let the optimizer tighten them up? The current
code seems pretty fragile; it will break on a cleanup within any
non-conditional nesting. But any such nesting would be broken, anyway;
we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
and continues out of it. We can do that at the RTL level, though, so
having an optimizer to tighten up try/finally regions would be a Good
Thing. */
static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
{
gimple_stmt_iterator iter;
gimple_seq body_sequence = NULL;
tree temp = voidify_wrapper_expr (*expr_p, NULL);
/* We only care about the number of conditions between the innermost
CLEANUP_POINT_EXPR and the cleanup. So save and reset the count and
any cleanups collected outside the CLEANUP_POINT_EXPR. */
int old_conds = gimplify_ctxp->conditions;
gimple_seq old_cleanups = gimplify_ctxp->conditional_cleanups;
bool old_in_cleanup_point_expr = gimplify_ctxp->in_cleanup_point_expr;
gimplify_ctxp->conditions = 0;
gimplify_ctxp->conditional_cleanups = NULL;
gimplify_ctxp->in_cleanup_point_expr = true;
/* Gimplify the body into a private sequence so cleanups can be spliced
into try/finally form below before merging into PRE_P.  */
gimplify_stmt (&TREE_OPERAND (*expr_p, 0), &body_sequence);
gimplify_ctxp->conditions = old_conds;
gimplify_ctxp->conditional_cleanups = old_cleanups;
gimplify_ctxp->in_cleanup_point_expr = old_in_cleanup_point_expr;
for (iter = gsi_start (body_sequence); !gsi_end_p (iter); )
{
gimple wce = gsi_stmt (iter);
if (gimple_code (wce) == GIMPLE_WITH_CLEANUP_EXPR)
{
if (gsi_one_before_end_p (iter))
{
/* The cleanup marker is the last statement: nothing follows
it to protect, so just emit the cleanup inline (unless it
is EH-only, in which case it is dropped entirely).  */
/* Note that gsi_insert_seq_before and gsi_remove do not
scan operands, unlike some other sequence mutators. */
if (!gimple_wce_cleanup_eh_only (wce))
gsi_insert_seq_before_without_update (&iter,
gimple_wce_cleanup (wce),
GSI_SAME_STMT);
gsi_remove (&iter, true);
break;
}
else
{
/* Wrap everything after the marker in a GIMPLE_TRY whose
cleanup is the marker's cleanup, then continue scanning
inside the protected sequence.  */
gimple gtry;
gimple_seq seq;
enum gimple_try_flags kind;
if (gimple_wce_cleanup_eh_only (wce))
kind = GIMPLE_TRY_CATCH;
else
kind = GIMPLE_TRY_FINALLY;
seq = gsi_split_seq_after (iter);
gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
/* Do not use gsi_replace here, as it may scan operands.
We want to do a simple structural modification only. */
*gsi_stmt_ptr (&iter) = gtry;
iter = gsi_start (seq);
}
}
else
gsi_next (&iter);
}
gimplify_seq_add_seq (pre_p, body_sequence);
if (temp)
{
*expr_p = temp;
return GS_OK;
}
else
{
*expr_p = NULL;
return GS_ALL_DONE;
}
}
/* Insert a cleanup marker for gimplify_cleanup_point_expr. CLEANUP
   is the cleanup action required. EH_ONLY is true if the cleanup should
   only be executed if an exception is thrown, not on normal exit. */
static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, gimple_seq *pre_p)
{
  gimple wce;
  gimple_seq cleanup_stmts = NULL;
  /* Errors can result in improperly nested cleanups. Which results in
     confusion when trying to resolve the GIMPLE_WITH_CLEANUP_EXPR. */
  if (seen_error ())
    return;
  if (gimple_conditional_context ())
    {
      /* If we're in a conditional context, this is more complex. We only
	 want to run the cleanup if we actually ran the initialization that
	 necessitates it, but we want to run it after the end of the
	 conditional context. So we wrap the try/finally around the
	 condition and use a flag to determine whether or not to actually
	 run the destructor. Thus
	   test ? f(A()) : 0
	 becomes (approximately)
	   flag = 0;
	   try {
	     if (test) { A::A(temp); flag = 1; val = f(temp); }
	     else { val = 0; }
	   } finally {
	     if (flag) A::~A(temp);
	   }
	   val
      */
      tree flag = create_tmp_var (boolean_type_node, "cleanup");
      gimple ffalse = gimple_build_assign (flag, boolean_false_node);
      gimple ftrue = gimple_build_assign (flag, boolean_true_node);
      /* Guard the cleanup with the flag so it only runs when the
	 initialization it matches actually executed.  */
      cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
      gimplify_stmt (&cleanup, &cleanup_stmts);
      wce = gimple_build_wce (cleanup_stmts);
      /* The flag clear and the cleanup marker go on the conditional-cleanup
	 queue (they belong outside the conditional context); only the flag
	 set is emitted inline at the point of initialization.  */
      gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, ffalse);
      gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce);
      gimplify_seq_add_stmt (pre_p, ftrue);
      /* Because of this manipulation, and the EH edges that jump
	 threading cannot redirect, the temporary (VAR) will appear
	 to be used uninitialized. Don't warn. */
      TREE_NO_WARNING (var) = 1;
    }
  else
    {
      /* Unconditional case: gimplify the cleanup and emit the
	 with-cleanup marker directly.  */
      gimplify_stmt (&cleanup, &cleanup_stmts);
      wce = gimple_build_wce (cleanup_stmts);
      gimple_wce_set_cleanup_eh_only (wce, eh_only);
      gimplify_seq_add_stmt (pre_p, wce);
    }
}
/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR. */
static enum gimplify_status
gimplify_target_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree targ = *expr_p;
  tree temp = TARGET_EXPR_SLOT (targ);
  tree init = TARGET_EXPR_INITIAL (targ);
  enum gimplify_status ret;
  if (init)
    {
      tree cleanup = NULL_TREE;
      /* TARGET_EXPR temps aren't part of the enclosing block, so add it
	 to the temps list. Handle also variable length TARGET_EXPRs. */
      if (TREE_CODE (DECL_SIZE (temp)) != INTEGER_CST)
	{
	  if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (temp)))
	    gimplify_type_sizes (TREE_TYPE (temp), pre_p);
	  gimplify_vla_decl (temp, pre_p);
	}
      else
	gimple_add_tmp_var (temp);
      /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
	 expression is supposed to initialize the slot. */
      if (VOID_TYPE_P (TREE_TYPE (init)))
	ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
      else
	{
	  /* Otherwise wrap the initializer in an INIT_EXPR that stores it
	     into the slot, gimplify that, then release the transient
	     wrapper node back to the GC.  */
	  tree init_expr = build2 (INIT_EXPR, void_type_node, temp, init);
	  init = init_expr;
	  ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
	  init = NULL;
	  ggc_free (init_expr);
	}
      if (ret == GS_ERROR)
	{
	  /* PR c++/28266 Make sure this is expanded only once. */
	  TARGET_EXPR_INITIAL (targ) = NULL_TREE;
	  return GS_ERROR;
	}
      /* If anything of the initializer survived gimplification (only
	 possible on the void path above), append it now.  */
      if (init)
	gimplify_and_add (init, pre_p);
      /* If needed, push the cleanup for the temp. */
      if (TARGET_EXPR_CLEANUP (targ))
	{
	  if (CLEANUP_EH_ONLY (targ))
	    gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
				 CLEANUP_EH_ONLY (targ), pre_p);
	  else
	    cleanup = TARGET_EXPR_CLEANUP (targ);
	}
      /* Add a clobber for the temporary going out of scope, like
	 gimplify_bind_expr. */
      if (gimplify_ctxp->in_cleanup_point_expr
	  && needs_to_live_in_memory (temp))
	{
	  tree clobber = build_constructor (TREE_TYPE (temp), NULL);
	  TREE_THIS_VOLATILE (clobber) = true;
	  clobber = build2 (MODIFY_EXPR, TREE_TYPE (temp), temp, clobber);
	  /* The clobber must run after any user cleanup of the slot.  */
	  if (cleanup)
	    cleanup = build2 (COMPOUND_EXPR, void_type_node, cleanup,
			      clobber);
	  else
	    cleanup = clobber;
	}
      if (cleanup)
	gimple_push_cleanup (temp, cleanup, false, pre_p);
      /* Only expand this once. */
      TREE_OPERAND (targ, 3) = init;
      TARGET_EXPR_INITIAL (targ) = NULL_TREE;
    }
  else
    /* We should have expanded this before. */
    gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));
  /* The TARGET_EXPR evaluates to its slot.  */
  *expr_p = temp;
  return GS_OK;
}
/* Gimplification of expression trees. */
/* Gimplify an expression which appears at statement context. The
   corresponding GIMPLE statements are added to *SEQ_P. If *SEQ_P is
   NULL, a new sequence is allocated.
   Return true if we actually added a statement to the queue. */
bool
gimplify_stmt (tree *stmt_p, gimple_seq *seq_p)
{
  gimple_seq_node tail_before;

  /* Allocate the output sequence lazily so callers may hand us an
     empty (NULL) one.  */
  if (*seq_p == NULL)
    *seq_p = gimple_seq_alloc ();

  /* Remember the sequence tail so we can tell afterwards whether
     gimplification appended anything.  */
  tail_before = gimple_seq_last (*seq_p);
  gimplify_expr (stmt_p, seq_p, NULL, is_gimple_stmt, fb_none);

  /* A moved tail means at least one statement was queued.  */
  return gimple_seq_last (*seq_p) != tail_before;
}
/* Add FIRSTPRIVATE entries for DECL in the OpenMP the surrounding parallels
   to CTX. If entries already exist, force them to be some flavor of private.
   If there is no enclosing parallel, do nothing. */
void
omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;
  if (decl == NULL || !DECL_P (decl))
    return;
  /* Walk CTX and all its outer contexts.  */
  do
    {
      n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
      if (n != NULL)
	{
	  /* Demote an existing SHARED entry to FIRSTPRIVATE, keeping only
	     the SEEN bit.  Any other existing class is already private
	     in some form, so we can stop walking outward.  */
	  if (n->value & GOVD_SHARED)
	    n->value = GOVD_FIRSTPRIVATE | (n->value & GOVD_SEEN);
	  else
	    return;
	}
      /* No entry yet: add one, except in pure worksharing regions which
	 have no data environment of their own.  */
      else if (ctx->region_type != ORT_WORKSHARE)
	omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);
      ctx = ctx->outer_context;
    }
  while (ctx);
}
/* Similarly for each of the type sizes of TYPE. */
static void
omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
{
  if (type == NULL || type == error_mark_node)
    return;
  type = TYPE_MAIN_VARIANT (type);
  /* Use the privatized_types set to process each type only once and to
     cut off recursion on cyclic type graphs.  */
  if (pointer_set_insert (ctx->privatized_types, type))
    return;
  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
      /* Scalar types: their bounds may be variable (e.g. VLA index
	 types), so firstprivatize those.  */
      omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type));
      omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type));
      break;
    case ARRAY_TYPE:
      /* Recurse into both the element type and the index domain.  */
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type));
      break;
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree field;
	/* Aggregates: handle each field's offset and type.  */
	for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	  if (TREE_CODE (field) == FIELD_DECL)
	    {
	      omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field));
	      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field));
	    }
      }
      break;
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      break;
    default:
      break;
    }
  /* Every type, regardless of code, may have variable size/alignment.  */
  omp_firstprivatize_variable (ctx, TYPE_SIZE (type));
  omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type));
  /* Give the front end a chance to handle language-specific sizes.  */
  lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type);
}
/* Add an entry for DECL in the OpenMP context CTX with FLAGS. */
static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
  splay_tree_node n;
  unsigned int nflags;
  tree t;
  if (error_operand_p (decl))
    return;
  /* Never elide decls whose type has TREE_ADDRESSABLE set. This means
     there are constructors involved somewhere. */
  if (TREE_ADDRESSABLE (TREE_TYPE (decl))
      || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
    flags |= GOVD_SEEN;
  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      /* We shouldn't be re-adding the decl with the same data
	 sharing class. */
      gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
      /* The only combination of data sharing classes we should see is
	 FIRSTPRIVATE and LASTPRIVATE. */
      nflags = n->value | flags;
      gcc_assert ((nflags & GOVD_DATA_SHARE_CLASS)
		  == (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE));
      n->value = nflags;
      return;
    }
  /* When adding a variable-sized variable, we have to handle all sorts
     of additional bits of data: the pointer replacement variable, and
     the parameters of the type. */
  if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      /* Add the pointer replacement variable as PRIVATE if the variable
	 replacement is private, else FIRSTPRIVATE since we'll need the
	 address of the original variable either for SHARED, or for the
	 copy into or out of the context. */
      if (!(flags & GOVD_LOCAL))
	{
	  nflags = flags & GOVD_PRIVATE ? GOVD_PRIVATE : GOVD_FIRSTPRIVATE;
	  nflags |= flags & GOVD_SEEN;
	  /* VLA decls were rewritten to *ptr by gimplify_vla_decl; dig
	     out that pointer variable and register it as well.  */
	  t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  omp_add_variable (ctx, t, nflags);
	}
      /* Add all of the variable and type parameters (which should have
	 been gimplified to a formal temporary) as FIRSTPRIVATE. */
      omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
      omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));
      /* The variable-sized variable itself is never SHARED, only some form
	 of PRIVATE. The sharing would take place via the pointer variable
	 which we remapped above. */
      if (flags & GOVD_SHARED)
	flags = GOVD_PRIVATE | GOVD_DEBUG_PRIVATE
		| (flags & (GOVD_SEEN | GOVD_EXPLICIT));
      /* We're going to make use of the TYPE_SIZE_UNIT at least in the
	 alloca statement we generate for the variable, so make sure it
	 is available. This isn't automatically needed for the SHARED
	 case, since we won't be allocating local storage then.
	 For local variables TYPE_SIZE_UNIT might not be gimplified yet,
	 in this case omp_notice_variable will be called later
	 on when it is gimplified. */
      else if (! (flags & GOVD_LOCAL)
	       && DECL_P (TYPE_SIZE_UNIT (TREE_TYPE (decl))))
	omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
    }
  else if (lang_hooks.decls.omp_privatize_by_reference (decl))
    {
      /* Reference-typed decls (e.g. C++ references, Fortran dummies) are
	 privatized through the referenced object.  */
      gcc_assert ((flags & GOVD_LOCAL) == 0);
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));
      /* Similar to the direct variable sized case above, we'll need the
	 size of references being privatized. */
      if ((flags & GOVD_SHARED) == 0)
	{
	  t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  if (TREE_CODE (t) != INTEGER_CST)
	    omp_notice_variable (ctx, t, true);
	}
    }
  splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);
}
/* Notice a threadprivate variable DECL used in OpenMP context CTX.
   This just prints out diagnostics about threadprivate variable uses
   in untied tasks. If DECL2 is non-NULL, prevent this warning
   on that variable. */
static bool
omp_notice_threadprivate_variable (struct gimplify_omp_ctx *ctx, tree decl,
				   tree decl2)
{
  splay_tree_node entry;

  /* Only untied tasks are problematic for threadprivate variables.  */
  if (ctx->region_type != ORT_UNTIED_TASK)
    return false;

  /* Diagnose the first use of DECL in this context; the splay-tree
     insertion marks it so the error is emitted only once.  */
  entry = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
  if (entry == NULL)
    {
      error ("threadprivate variable %qE used in untied task",
	     DECL_NAME (decl));
      error_at (ctx->location, "enclosing task");
      splay_tree_insert (ctx->variables, (splay_tree_key) decl, 0);
    }

  /* Pre-mark DECL2 as well so a later use of it stays silent.  */
  if (decl2 != NULL_TREE)
    splay_tree_insert (ctx->variables, (splay_tree_key) decl2, 0);

  return false;
}
/* Record the fact that DECL was used within the OpenMP context CTX.
   IN_CODE is true when real code uses DECL, and false when we should
   merely emit default(none) errors. Return true if DECL is going to
   be remapped and thus DECL shouldn't be gimplified into its
   DECL_VALUE_EXPR (if any). */
static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
  splay_tree_node n;
  unsigned flags = in_code ? GOVD_SEEN : 0;
  bool ret = false, shared;
  if (error_operand_p (decl))
    return false;
  /* Threadprivate variables are predetermined. */
  if (is_global_var (decl))
    {
      if (DECL_THREAD_LOCAL_P (decl))
	return omp_notice_threadprivate_variable (ctx, decl, NULL_TREE);
      /* A global whose value expression is based on a thread-local decl
	 is effectively threadprivate too.  */
      if (DECL_HAS_VALUE_EXPR_P (decl))
	{
	  tree value = get_base_address (DECL_VALUE_EXPR (decl));
	  if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
	    return omp_notice_threadprivate_variable (ctx, decl, value);
	}
    }
  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n == NULL)
    {
      /* First use in this context: compute an implicit data-sharing
	 class for DECL from the default clause / predetermined rules.  */
      enum omp_clause_default_kind default_kind, kind;
      struct gimplify_omp_ctx *octx;
      if (ctx->region_type == ORT_WORKSHARE)
	goto do_outer;
      /* ??? Some compiler-generated variables (like SAVE_EXPRs) could be
	 remapped firstprivate instead of shared. To some extent this is
	 addressed in omp_firstprivatize_type_sizes, but not effectively. */
      default_kind = ctx->default_kind;
      kind = lang_hooks.decls.omp_predetermined_sharing (decl);
      if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
	default_kind = kind;
      switch (default_kind)
	{
	case OMP_CLAUSE_DEFAULT_NONE:
	  error ("%qE not specified in enclosing parallel",
		 DECL_NAME (lang_hooks.decls.omp_report_decl (decl)));
	  if ((ctx->region_type & ORT_TASK) != 0)
	    error_at (ctx->location, "enclosing task");
	  else
	    error_at (ctx->location, "enclosing parallel");
	  /* FALLTHRU */
	case OMP_CLAUSE_DEFAULT_SHARED:
	  flags |= GOVD_SHARED;
	  break;
	case OMP_CLAUSE_DEFAULT_PRIVATE:
	  flags |= GOVD_PRIVATE;
	  break;
	case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
	  flags |= GOVD_FIRSTPRIVATE;
	  break;
	case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
	  /* decl will be either GOVD_FIRSTPRIVATE or GOVD_SHARED. */
	  gcc_assert ((ctx->region_type & ORT_TASK) != 0);
	  if (ctx->outer_context)
	    omp_notice_variable (ctx->outer_context, decl, in_code);
	  /* Scan outward up to the innermost enclosing parallel: a
	     non-shared binding there makes DECL firstprivate here.  */
	  for (octx = ctx->outer_context; octx; octx = octx->outer_context)
	    {
	      splay_tree_node n2;
	      n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl);
	      if (n2 && (n2->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED)
		{
		  flags |= GOVD_FIRSTPRIVATE;
		  break;
		}
	      if ((octx->region_type & ORT_PARALLEL) != 0)
		break;
	    }
	  if (flags & GOVD_FIRSTPRIVATE)
	    break;
	  /* Orphaned task region: parameters and function-local decls
	     default to firstprivate.  */
	  if (octx == NULL
	      && (TREE_CODE (decl) == PARM_DECL
		  || (!is_global_var (decl)
		      && DECL_CONTEXT (decl) == current_function_decl)))
	    {
	      flags |= GOVD_FIRSTPRIVATE;
	      break;
	    }
	  flags |= GOVD_SHARED;
	  break;
	default:
	  gcc_unreachable ();
	}
      if ((flags & GOVD_PRIVATE)
	  && lang_hooks.decls.omp_private_outer_ref (decl))
	flags |= GOVD_PRIVATE_OUTER_REF;
      omp_add_variable (ctx, decl, flags);
      shared = (flags & GOVD_SHARED) != 0;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      goto do_outer;
    }
  /* Existing entry.  The first real (SEEN) use of a variable-sized decl
     must also mark its pointer replacement variable as seen.  */
  if ((n->value & (GOVD_SEEN | GOVD_LOCAL)) == 0
      && (flags & (GOVD_SEEN | GOVD_LOCAL)) == GOVD_SEEN
      && DECL_SIZE (decl)
      && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      splay_tree_node n2;
      tree t = DECL_VALUE_EXPR (decl);
      gcc_assert (TREE_CODE (t) == INDIRECT_REF);
      t = TREE_OPERAND (t, 0);
      gcc_assert (DECL_P (t));
      n2 = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
      n2->value |= GOVD_SEEN;
    }
  shared = ((flags | n->value) & GOVD_SHARED) != 0;
  ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
  /* If nothing changed, there's nothing left to do. */
  if ((n->value & flags) == flags)
    return ret;
  flags |= n->value;
  n->value = flags;
 do_outer:
  /* If the variable is private in the current context, then we don't
     need to propagate anything to an outer context. */
  if ((flags & GOVD_PRIVATE) && !(flags & GOVD_PRIVATE_OUTER_REF))
    return ret;
  if (ctx->outer_context
      && omp_notice_variable (ctx->outer_context, decl, in_code))
    return true;
  return ret;
}
/* Verify that DECL is private within CTX. If there's specific information
   to the contrary in the innermost scope, generate an error. */
static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      if (n->value & GOVD_SHARED)
	{
	  /* A shared iteration variable is an error, but only diagnose
	     (and repair to private) in the innermost context.  */
	  if (ctx == gimplify_omp_ctxp)
	    {
	      error ("iteration variable %qE should be private",
		     DECL_NAME (decl));
	      n->value = GOVD_PRIVATE;
	      return true;
	    }
	  else
	    return false;
	}
      /* Explicit firstprivate/reduction clauses on the loop's own
	 construct (or on a combined parallel-for) conflict with the
	 iteration variable's required privacy.  */
      else if ((n->value & GOVD_EXPLICIT) != 0
	       && (ctx == gimplify_omp_ctxp
		   || (ctx->region_type == ORT_COMBINED_PARALLEL
		       && gimplify_omp_ctxp->outer_context == ctx)))
	{
	  if ((n->value & GOVD_FIRSTPRIVATE) != 0)
	    error ("iteration variable %qE should not be firstprivate",
		   DECL_NAME (decl));
	  else if ((n->value & GOVD_REDUCTION) != 0)
	    error ("iteration variable %qE should not be reduction",
		   DECL_NAME (decl));
	}
      /* DECL counts as private only if the binding belongs to the
	 innermost context (directly or via a combined parallel).  */
      return (ctx == gimplify_omp_ctxp
	      || (ctx->region_type == ORT_COMBINED_PARALLEL
		  && gimplify_omp_ctxp->outer_context == ctx));
    }
  /* No binding here: only worksharing regions defer to their enclosing
     context for the answer.  */
  if (ctx->region_type != ORT_WORKSHARE)
    return false;
  else if (ctx->outer_context)
    return omp_is_private (ctx->outer_context, decl);
  return false;
}
/* Return true if DECL is private within a parallel region
   that binds to the current construct's context or in parallel
   region's REDUCTION clause. */
static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node binding;
  struct gimplify_omp_ctx *scan;

  /* Walk outward from CTX, stopping after the first non-worksharing
     region (the binding parallel region).  */
  for (scan = ctx->outer_context; scan != NULL; scan = scan->outer_context)
    {
      binding = splay_tree_lookup (scan->variables, (splay_tree_key) decl);
      if (binding != NULL)
	/* An explicit or implicit entry decides: private iff not shared.  */
	return (binding->value & GOVD_SHARED) == 0;
      if (scan->region_type != ORT_WORKSHARE)
	return false;
    }

  /* Ran out of contexts entirely: function-local non-reference decls
     are private by default.  */
  return !(is_global_var (decl)
	   /* References might be private, but might be shared too. */
	   || lang_hooks.decls.omp_privatize_by_reference (decl));
}
/* Scan the OpenMP clauses in *LIST_P, installing mappings into a new
   and previous omp contexts. */
static void
gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
			   enum omp_region_type region_type)
{
  struct gimplify_omp_ctx *ctx, *outer_ctx;
  struct gimplify_ctx gctx;
  tree c;
  /* NOTE: the new context is not yet installed in gimplify_omp_ctxp;
     that happens at the end of this function (or temporarily around the
     reduction/lastprivate sub-gimplifications below).  */
  ctx = new_omp_context (region_type);
  outer_ctx = ctx->outer_context;
  while ((c = *list_p) != NULL)
    {
      bool remove = false;
      bool notice_outer = true;
      const char *check_non_private = NULL;
      unsigned int flags;
      tree decl;
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  flags = GOVD_PRIVATE | GOVD_EXPLICIT;
	  if (lang_hooks.decls.omp_private_outer_ref (OMP_CLAUSE_DECL (c)))
	    {
	      flags |= GOVD_PRIVATE_OUTER_REF;
	      OMP_CLAUSE_PRIVATE_OUTER_REF (c) = 1;
	    }
	  else
	    notice_outer = false;
	  goto do_add;
	case OMP_CLAUSE_SHARED:
	  flags = GOVD_SHARED | GOVD_EXPLICIT;
	  goto do_add;
	case OMP_CLAUSE_FIRSTPRIVATE:
	  flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
	  check_non_private = "firstprivate";
	  goto do_add;
	case OMP_CLAUSE_LASTPRIVATE:
	  flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "lastprivate";
	  goto do_add;
	case OMP_CLAUSE_REDUCTION:
	  flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "reduction";
	  goto do_add;
	do_add:
	  decl = OMP_CLAUSE_DECL (c);
	  if (error_operand_p (decl))
	    {
	      remove = true;
	      break;
	    }
	  omp_add_variable (ctx, decl, flags);
	  /* User-defined reductions carry INIT/MERGE trees that must be
	     gimplified in the new context, each inside its own
	     gimplify_ctx so temporaries land in the right place.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
				GOVD_LOCAL | GOVD_SEEN);
	      gimplify_omp_ctxp = ctx;
	      push_gimplify_context (&gctx);
	      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = gimple_seq_alloc ();
	      OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = gimple_seq_alloc ();
	      gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c),
				&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
	      pop_gimplify_context
		(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c)));
	      push_gimplify_context (&gctx);
	      gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c),
				&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	      pop_gimplify_context
		(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c)));
	      OMP_CLAUSE_REDUCTION_INIT (c) = NULL_TREE;
	      OMP_CLAUSE_REDUCTION_MERGE (c) = NULL_TREE;
	      gimplify_omp_ctxp = outer_ctx;
	    }
	  /* Similarly for a lastprivate's copy-back statement; wrap it in
	     a BIND_EXPR first if needed so gimplification has a scope.  */
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		   && OMP_CLAUSE_LASTPRIVATE_STMT (c))
	    {
	      gimplify_omp_ctxp = ctx;
	      push_gimplify_context (&gctx);
	      if (TREE_CODE (OMP_CLAUSE_LASTPRIVATE_STMT (c)) != BIND_EXPR)
		{
		  tree bind = build3 (BIND_EXPR, void_type_node, NULL,
				      NULL, NULL);
		  TREE_SIDE_EFFECTS (bind) = 1;
		  BIND_EXPR_BODY (bind) = OMP_CLAUSE_LASTPRIVATE_STMT (c);
		  OMP_CLAUSE_LASTPRIVATE_STMT (c) = bind;
		}
	      gimplify_and_add (OMP_CLAUSE_LASTPRIVATE_STMT (c),
				&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	      pop_gimplify_context
		(gimple_seq_first_stmt (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)));
	      OMP_CLAUSE_LASTPRIVATE_STMT (c) = NULL_TREE;
	      gimplify_omp_ctxp = outer_ctx;
	    }
	  if (notice_outer)
	    goto do_notice;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (error_operand_p (decl))
	    {
	      remove = true;
	      break;
	    }
	do_notice:
	  /* Record the use in the enclosing context, and reject clauses
	     that copy from a variable that is private out there.  */
	  if (outer_ctx)
	    omp_notice_variable (outer_ctx, decl, true);
	  if (check_non_private
	      && region_type == ORT_WORKSHARE
	      && omp_check_private (ctx, decl))
	    {
	      error ("%s variable %qE is private in outer context",
		     check_non_private, DECL_NAME (decl));
	      remove = true;
	    }
	  break;
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	  OMP_CLAUSE_OPERAND (c, 0)
	    = gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
	  /* Fall through. */
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NUM_THREADS:
	  /* These carry an expression operand that must be a gimple
	     value; evaluate it into PRE_P.  */
	  if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
			     is_gimple_val, fb_rvalue) == GS_ERROR)
	    remove = true;
	  break;
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_MERGEABLE:
	  break;
	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;
	default:
	  gcc_unreachable ();
	}
      /* Unlink erroneous clauses; otherwise advance past this one.  */
      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }
  /* Install the new context for the gimplification of the body.  */
  gimplify_omp_ctxp = ctx;
}
/* For all variables that were not actually used within the context,
   remove PRIVATE, SHARED, and FIRSTPRIVATE clauses. */
static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
  /* Splay-tree foreach callback: N maps a decl to its GOVD_* flags,
     DATA is the head of the clause list to prepend to.  */
  tree *list_p = (tree *) data;
  tree decl = (tree) n->key;
  unsigned flags = n->value;
  enum omp_clause_code code;
  tree clause;
  bool private_debug;
  /* Explicit clauses were kept/updated by gimplify_adjust_omp_clauses;
     locals need no clause.  Unused variables get nothing either.  */
  if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
    return 0;
  if ((flags & GOVD_SEEN) == 0)
    return 0;
  if (flags & GOVD_DEBUG_PRIVATE)
    {
      gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_PRIVATE);
      private_debug = true;
    }
  else
    private_debug
      = lang_hooks.decls.omp_private_debug_clause (decl,
						   !!(flags & GOVD_SHARED));
  if (private_debug)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_SHARED)
    {
      if (is_global_var (decl))
	{
	  /* A global is only worth an explicit SHARED clause if some
	     enclosing context privatizes it; otherwise sharing is
	     implied and no clause is needed.  */
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
	  while (ctx != NULL)
	    {
	      splay_tree_node on
		= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	      if (on && (on->value & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
				      | GOVD_PRIVATE | GOVD_REDUCTION)) != 0)
		break;
	      ctx = ctx->outer_context;
	    }
	  if (ctx == NULL)
	    return 0;
	}
      code = OMP_CLAUSE_SHARED;
    }
  else if (flags & GOVD_PRIVATE)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_FIRSTPRIVATE)
    code = OMP_CLAUSE_FIRSTPRIVATE;
  else
    gcc_unreachable ();
  /* Build the implicit clause and prepend it to the list.  */
  clause = build_omp_clause (input_location, code);
  OMP_CLAUSE_DECL (clause) = decl;
  OMP_CLAUSE_CHAIN (clause) = *list_p;
  if (private_debug)
    OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
  else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
    OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
  *list_p = clause;
  /* Let the front end fill in language-specific clause details.  */
  lang_hooks.decls.omp_finish_clause (clause);
  return 0;
}
/* Post-process the clause list *LIST_P after the construct's body has
   been gimplified: drop explicit clauses for variables that were never
   used, convert some to private-for-debug form, fix up lastprivate
   bookkeeping, then add implicit clauses for everything else noticed in
   the current OpenMP context.  Pops and frees that context.  */
static void
gimplify_adjust_omp_clauses (tree *list_p)
{
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  tree c, decl;
  while ((c = *list_p) != NULL)
    {
      splay_tree_node n;
      bool remove = false;
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  /* Drop the clause entirely if the body never used the decl.  */
	  remove = !(n->value & GOVD_SEEN);
	  if (! remove)
	    {
	      bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
	      /* Possibly rewrite the clause to PRIVATE with the DEBUG
		 flag, per the front end's debugging preference.  */
	      if ((n->value & GOVD_DEBUG_PRIVATE)
		  || lang_hooks.decls.omp_private_debug_clause (decl, shared))
		{
		  gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
			      || ((n->value & GOVD_DATA_SHARE_CLASS)
				  == GOVD_PRIVATE));
		  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
		  OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
		}
	    }
	  break;
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
	     accurately reflect the presence of a FIRSTPRIVATE clause. */
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	    = (n->value & GOVD_FIRSTPRIVATE) != 0;
	  break;
	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	  break;
	default:
	  gcc_unreachable ();
	}
      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }
  /* Add in any implicit data sharing. */
  splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, list_p);
  /* Pop and destroy the context installed by gimplify_scan_omp_clauses.  */
  gimplify_omp_ctxp = ctx->outer_context;
  delete_omp_context (ctx);
}
/* Gimplify the contents of an OMP_PARALLEL statement. This involves
   gimplification of the body, as well as scanning the body for used
   variables. We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification. */
static void
gimplify_omp_parallel (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gimple g;
  gimple_seq body = NULL;
  struct gimplify_ctx gctx;
  /* Install a new OpenMP context keyed on whether this is a combined
     parallel-workshare construct.  */
  gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p,
			     OMP_PARALLEL_COMBINED (expr)
			     ? ORT_COMBINED_PARALLEL
			     : ORT_PARALLEL);
  push_gimplify_context (&gctx);
  g = gimplify_and_return_first (OMP_PARALLEL_BODY (expr), &body);
  /* Hand the body's bind (if any) to pop_gimplify_context so body
     temporaries are declared there rather than in the outer function.  */
  if (gimple_code (g) == GIMPLE_BIND)
    pop_gimplify_context (g);
  else
    pop_gimplify_context (NULL);
  /* Now that the body has been scanned, finalize the clause list.  */
  gimplify_adjust_omp_clauses (&OMP_PARALLEL_CLAUSES (expr));
  g = gimple_build_omp_parallel (body,
				 OMP_PARALLEL_CLAUSES (expr),
				 NULL_TREE, NULL_TREE);
  if (OMP_PARALLEL_COMBINED (expr))
    gimple_omp_set_subcode (g, GF_OMP_PARALLEL_COMBINED);
  gimplify_seq_add_stmt (pre_p, g);
  *expr_p = NULL_TREE;
}
/* Gimplify the contents of an OMP_TASK statement. This involves
   gimplification of the body, as well as scanning the body for used
   variables. We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification. */
static void
gimplify_omp_task (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gimple g;
  gimple_seq body = NULL;
  struct gimplify_ctx gctx;
  /* Untied tasks get a distinct region type so threadprivate uses can
     be diagnosed (see omp_notice_threadprivate_variable).  */
  gimplify_scan_omp_clauses (&OMP_TASK_CLAUSES (expr), pre_p,
			     find_omp_clause (OMP_TASK_CLAUSES (expr),
					      OMP_CLAUSE_UNTIED)
			     ? ORT_UNTIED_TASK : ORT_TASK);
  push_gimplify_context (&gctx);
  g = gimplify_and_return_first (OMP_TASK_BODY (expr), &body);
  /* As for parallel: declare body temporaries in the body's bind.  */
  if (gimple_code (g) == GIMPLE_BIND)
    pop_gimplify_context (g);
  else
    pop_gimplify_context (NULL);
  gimplify_adjust_omp_clauses (&OMP_TASK_CLAUSES (expr));
  g = gimple_build_omp_task (body,
			     OMP_TASK_CLAUSES (expr),
			     NULL_TREE, NULL_TREE,
			     NULL_TREE, NULL_TREE, NULL_TREE);
  gimplify_seq_add_stmt (pre_p, g);
  *expr_p = NULL_TREE;
}
/* Gimplify the gross structure of an OMP_FOR statement. */
static enum gimplify_status
gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
{
  tree for_stmt, decl, var, t;
  enum gimplify_status ret = GS_ALL_DONE;
  enum gimplify_status tret;
  gimple gfor;
  gimple_seq for_body, for_pre_body;
  int i;
  for_stmt = *expr_p;
  gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p,
			     ORT_WORKSHARE);
  /* Handle OMP_FOR_INIT. */
  for_pre_body = NULL;
  gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body);
  OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE;
  for_body = gimple_seq_alloc ();
  /* With collapse, INIT/COND/INCR are parallel vectors, one slot per
     loop level.  */
  gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
	      == TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt)));
  gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
	      == TREE_VEC_LENGTH (OMP_FOR_INCR (for_stmt)));
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
    {
      t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
      gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
      decl = TREE_OPERAND (t, 0);
      gcc_assert (DECL_P (decl));
      gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl))
		  || POINTER_TYPE_P (TREE_TYPE (decl)));
      /* Make sure the iteration variable is private. */
      if (omp_is_private (gimplify_omp_ctxp, decl))
	omp_notice_variable (gimplify_omp_ctxp, decl, true);
      else
	omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);
      /* If DECL is not a gimple register, create a temporary variable to act
	 as an iteration counter. This is valid, since DECL cannot be
	 modified in the body of the loop. */
      if (!is_gimple_reg (decl))
	{
	  var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
	  TREE_OPERAND (t, 0) = var;
	  /* Copy the counter back into DECL at the top of each
	     iteration so the body sees the right value.  */
	  gimplify_seq_add_stmt (&for_body, gimple_build_assign (decl, var));
	  omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
	}
      else
	var = decl;
      /* Gimplify the initial value.  */
      tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
			    is_gimple_val, fb_rvalue);
      ret = MIN (ret, tret);
      if (ret == GS_ERROR)
	return ret;
      /* Handle OMP_FOR_COND. */
      t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
      gcc_assert (COMPARISON_CLASS_P (t));
      gcc_assert (TREE_OPERAND (t, 0) == decl);
      tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
			    is_gimple_val, fb_rvalue);
      ret = MIN (ret, tret);
      /* Handle OMP_FOR_INCR. */
      t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
      switch (TREE_CODE (t))
	{
	case PREINCREMENT_EXPR:
	case POSTINCREMENT_EXPR:
	  /* Canonicalize ++/-- into VAR = VAR + CST form.  */
	  t = build_int_cst (TREE_TYPE (decl), 1);
	  t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
	  TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
	  break;
	case PREDECREMENT_EXPR:
	case POSTDECREMENT_EXPR:
	  t = build_int_cst (TREE_TYPE (decl), -1);
	  t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
	  TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
	  break;
	case MODIFY_EXPR:
	  gcc_assert (TREE_OPERAND (t, 0) == decl);
	  TREE_OPERAND (t, 0) = var;
	  t = TREE_OPERAND (t, 1);
	  switch (TREE_CODE (t))
	    {
	    case PLUS_EXPR:
	      /* Normalize VAR = CST + VAR into VAR = VAR + CST.  */
	      if (TREE_OPERAND (t, 1) == decl)
		{
		  TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
		  TREE_OPERAND (t, 0) = var;
		  break;
		}
	      /* Fallthru. */
	    case MINUS_EXPR:
	    case POINTER_PLUS_EXPR:
	      gcc_assert (TREE_OPERAND (t, 0) == decl);
	      TREE_OPERAND (t, 0) = var;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  /* Gimplify the step expression.  */
	  tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	  break;
	default:
	  gcc_unreachable ();
	}
      /* When we substituted a temporary counter (or collapse > 1), a
	 lastprivate clause on DECL must copy the final value back into
	 DECL itself; synthesize that assignment once per clause.  */
      if (var != decl || TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) > 1)
	{
	  tree c;
	  for (c = OMP_FOR_CLAUSES (for_stmt); c ; c = OMP_CLAUSE_CHAIN (c))
	    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		&& OMP_CLAUSE_DECL (c) == decl
		&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) == NULL)
	      {
		t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
		gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
		gcc_assert (TREE_OPERAND (t, 0) == var);
		t = TREE_OPERAND (t, 1);
		gcc_assert (TREE_CODE (t) == PLUS_EXPR
			    || TREE_CODE (t) == MINUS_EXPR
			    || TREE_CODE (t) == POINTER_PLUS_EXPR);
		gcc_assert (TREE_OPERAND (t, 0) == var);
		t = build2 (TREE_CODE (t), TREE_TYPE (decl), decl,
			    TREE_OPERAND (t, 1));
		gimplify_assign (decl, t,
				 &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	      }
	}
    }
  gimplify_and_add (OMP_FOR_BODY (for_stmt), &for_body);
  gimplify_adjust_omp_clauses (&OMP_FOR_CLAUSES (for_stmt));
  /* Build the GIMPLE_OMP_FOR and copy index/initial/final/incr from the
     (now gimplified) tree vectors into its per-level slots.  */
  gfor = gimple_build_omp_for (for_body, OMP_FOR_CLAUSES (for_stmt),
			       TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)),
			       for_pre_body);
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
    {
      t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
      gimple_omp_for_set_index (gfor, i, TREE_OPERAND (t, 0));
      gimple_omp_for_set_initial (gfor, i, TREE_OPERAND (t, 1));
      t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
      gimple_omp_for_set_cond (gfor, i, TREE_CODE (t));
      gimple_omp_for_set_final (gfor, i, TREE_OPERAND (t, 1));
      t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
      gimple_omp_for_set_incr (gfor, i, TREE_OPERAND (t, 1));
    }
  gimplify_seq_add_stmt (pre_p, gfor);
  return ret == GS_ALL_DONE ? GS_ALL_DONE : GS_ERROR;
}
/* Gimplify the gross structure of other OpenMP worksharing constructs.
   In particular, OMP_SECTIONS and OMP_SINGLE. */
static void
gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
{
  tree ws_expr = *expr_p;
  gimple ws_stmt;
  gimple_seq body_seq = NULL;

  /* Install the clause mappings, gimplify the body in that context, then
     prune/augment the clause list based on what the body actually used.  */
  gimplify_scan_omp_clauses (&OMP_CLAUSES (ws_expr), pre_p, ORT_WORKSHARE);
  gimplify_and_add (OMP_BODY (ws_expr), &body_seq);
  gimplify_adjust_omp_clauses (&OMP_CLAUSES (ws_expr));

  /* Build the matching GIMPLE statement for the construct kind.  */
  switch (TREE_CODE (ws_expr))
    {
    case OMP_SECTIONS:
      ws_stmt = gimple_build_omp_sections (body_seq, OMP_CLAUSES (ws_expr));
      break;
    case OMP_SINGLE:
      ws_stmt = gimple_build_omp_single (body_seq, OMP_CLAUSES (ws_expr));
      break;
    default:
      gcc_unreachable ();
    }

  gimplify_seq_add_stmt (pre_p, ws_stmt);
}
/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form, i.e. if EXPR denotes the same object
   that ADDR points to.  Comparison is by pointer identity on the
   underlying decls/address trees, not structural equality.  */
static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  STRIP_USELESS_TYPE_CONVERSION (expr);
  if (TREE_CODE (expr) == INDIRECT_REF)
    {
      /* EXPR is *P; compare P against ADDR, peeling matching layers of
	 conversions / NON_LVALUE_EXPRs from both in lockstep as long as
	 the trees stay structurally parallel and type-compatible.  */
      expr = TREE_OPERAND (expr, 0);
      while (expr != addr
	     && (CONVERT_EXPR_P (expr)
		 || TREE_CODE (expr) == NON_LVALUE_EXPR)
	     && TREE_CODE (expr) == TREE_CODE (addr)
	     && types_compatible_p (TREE_TYPE (expr), TREE_TYPE (addr)))
	{
	  expr = TREE_OPERAND (expr, 0);
	  addr = TREE_OPERAND (addr, 0);
	}
      if (expr == addr)
	return true;
      /* Otherwise both may still be ADDR_EXPRs of the very same object.  */
      return (TREE_CODE (addr) == ADDR_EXPR
	      && TREE_CODE (expr) == ADDR_EXPR
	      && TREE_OPERAND (addr, 0) == TREE_OPERAND (expr, 0));
    }
  /* EXPR is not a dereference: it matches if ADDR is &EXPR.  */
  if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
    return true;
  return false;
}
/* Walk *EXPR_P and replace appearances of *LHS_ADDR with LHS_VAR.  If an
   expression does not involve the lhs, evaluate it into a temporary.
   Return 1 if the lhs appeared as a subexpression, 0 if it did not,
   or -1 if an error was encountered.  */
static int
goa_stabilize_expr (tree *expr_p, gimple_seq *pre_p, tree lhs_addr,
		    tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;
  /* If this whole expression is the stabilized lhs, substitute the
     temporary holding its loaded value.  */
  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  /* Values that are already gimple need no further work.  */
  if (is_gimple_val (expr))
    return 0;
  saw_lhs = 0;
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
    case tcc_comparison:
      /* Stabilize operand 1, then fall through to also handle
	 operand 0 like a unary expression.  */
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p, lhs_addr,
				     lhs_var);
      /* FALLTHRU */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p, lhs_addr,
				     lhs_var);
      break;
    case tcc_expression:
      switch (TREE_CODE (expr))
	{
	case TRUTH_ANDIF_EXPR:
	case TRUTH_ORIF_EXPR:
	case TRUTH_AND_EXPR:
	case TRUTH_OR_EXPR:
	case TRUTH_XOR_EXPR:
	  /* Binary truth ops: stabilize operand 1 here, then fall
	     through to share the operand-0 handling with NOT.  */
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
					 lhs_addr, lhs_var);
	  /* FALLTHRU */
	case TRUTH_NOT_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
					 lhs_addr, lhs_var);
	  break;
	case COMPOUND_EXPR:
	  /* Break out any preevaluations from cp_build_modify_expr.  */
	  for (; TREE_CODE (expr) == COMPOUND_EXPR;
	       expr = TREE_OPERAND (expr, 1))
	    gimplify_stmt (&TREE_OPERAND (expr, 0), pre_p);
	  *expr_p = expr;
	  return goa_stabilize_expr (expr_p, pre_p, lhs_addr, lhs_var);
	default:
	  break;
	}
      break;
    default:
      break;
    }
  /* The lhs did not appear anywhere below; force the whole expression
     into a gimple value so the atomic region stays minimal.  */
  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }
  return saw_lhs;
}
/* Gimplify an OMP_ATOMIC statement (including the READ / CAPTURE_OLD /
   CAPTURE_NEW variants) into a GIMPLE_OMP_ATOMIC_LOAD followed by a
   GIMPLE_OMP_ATOMIC_STORE, emitted into PRE_P.  For the capture forms,
   *EXPR_P is replaced by the captured value.  Returns GS_ALL_DONE on
   success or GS_ERROR on gimplification failure.  */
static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, gimple_seq *pre_p)
{
  tree lhs_addr = TREE_OPERAND (*expr_p, 0);
  tree newval = TREE_CODE (*expr_p) == OMP_ATOMIC_READ
		? NULL : TREE_OPERAND (*expr_p, 1);
  tree elem_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs_addr)));
  tree load_tmp;
  gimple load_stmt, store_stmt;

  /* LOAD_TMP will hold the value read from *LHS_ADDR.  */
  load_tmp = create_tmp_reg (elem_type, NULL);

  /* Rewrite occurrences of the lhs inside the new value to use
     LOAD_TMP, pre-evaluating everything else.  */
  if (newval != NULL
      && goa_stabilize_expr (&newval, pre_p, lhs_addr, load_tmp) < 0)
    return GS_ERROR;

  if (gimplify_expr (&lhs_addr, pre_p, NULL, is_gimple_val, fb_rvalue)
      != GS_ALL_DONE)
    return GS_ERROR;

  load_stmt = gimple_build_omp_atomic_load (load_tmp, lhs_addr);
  gimplify_seq_add_stmt (pre_p, load_stmt);

  if (newval != NULL
      && gimplify_expr (&newval, pre_p, NULL, is_gimple_val, fb_rvalue)
	 != GS_ALL_DONE)
    return GS_ERROR;

  /* A plain atomic read stores back the loaded value unchanged.  */
  if (TREE_CODE (*expr_p) == OMP_ATOMIC_READ)
    newval = load_tmp;

  store_stmt = gimple_build_omp_atomic_store (newval);
  gimplify_seq_add_stmt (pre_p, store_stmt);

  switch (TREE_CODE (*expr_p))
    {
    case OMP_ATOMIC_READ:
    case OMP_ATOMIC_CAPTURE_OLD:
      /* The result is the value before the store.  */
      *expr_p = load_tmp;
      gimple_omp_atomic_set_need_value (load_stmt);
      break;
    case OMP_ATOMIC_CAPTURE_NEW:
      /* The result is the value after the store.  */
      *expr_p = newval;
      gimple_omp_atomic_set_need_value (store_stmt);
      break;
    default:
      *expr_p = NULL;
      break;
    }

  return GS_ALL_DONE;
}
/* Gimplify a TRANSACTION_EXPR.  This involves gimplification of the
   body, and adding some EH bits.  The resulting GIMPLE_TRANSACTION is
   emitted into PRE_P; *EXPR_P becomes the wrapper temporary when the
   transaction yields a value, or NULL_TREE otherwise.  */
static enum gimplify_status
gimplify_transaction (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p, temp, tbody = TRANSACTION_EXPR_BODY (expr);
  gimple g;
  gimple_seq body = NULL;
  struct gimplify_ctx gctx;
  int subcode = 0;
  /* Wrap the transaction body in a BIND_EXPR so we have a context
     where to put decls for OpenMP.  */
  if (TREE_CODE (tbody) != BIND_EXPR)
    {
      tree bind = build3 (BIND_EXPR, void_type_node, NULL, tbody, NULL);
      TREE_SIDE_EFFECTS (bind) = 1;
      SET_EXPR_LOCATION (bind, EXPR_LOCATION (tbody));
      TRANSACTION_EXPR_BODY (expr) = bind;
    }
  /* Gimplify the body in its own context; TEMP is non-NULL when the
     expression's value is needed (voidify_wrapper_expr creates the
     temporary that receives it).  */
  push_gimplify_context (&gctx);
  temp = voidify_wrapper_expr (*expr_p, NULL);
  g = gimplify_and_return_first (TRANSACTION_EXPR_BODY (expr), &body);
  pop_gimplify_context (g);
  g = gimple_build_transaction (body, NULL);
  /* Translate the OUTER/RELAXED flags to the GIMPLE subcode; note the
     two flags are mutually exclusive here, OUTER taking precedence.  */
  if (TRANSACTION_EXPR_OUTER (expr))
    subcode = GTMA_IS_OUTER;
  else if (TRANSACTION_EXPR_RELAXED (expr))
    subcode = GTMA_IS_RELAXED;
  gimple_transaction_set_subcode (g, subcode);
  gimplify_seq_add_stmt (pre_p, g);
  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }
  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}
/* Convert the GENERIC expression tree *EXPR_P to GIMPLE. If the
expression produces a value to be used as an operand inside a GIMPLE
statement, the value will be stored back in *EXPR_P. This value will
be a tree of class tcc_declaration, tcc_constant, tcc_reference or
an SSA_NAME. The corresponding sequence of GIMPLE statements is
emitted in PRE_P and POST_P.
Additionally, this process may overwrite parts of the input
expression during gimplification. Ideally, it should be
possible to do non-destructive gimplification.
EXPR_P points to the GENERIC expression to convert to GIMPLE. If
the expression needs to evaluate to a value to be used as
an operand in a GIMPLE statement, this value will be stored in
*EXPR_P on exit. This happens when the caller specifies one
of fb_lvalue or fb_rvalue fallback flags.
PRE_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of EXPR and all the side-effects that must
be executed before the main expression. On exit, the last
statement of PRE_P is the core statement being gimplified. For
instance, when gimplifying 'if (++a)' the last statement in
PRE_P will be 'if (t.1)' where t.1 is the result of
pre-incrementing 'a'.
POST_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of all the side-effects that must be executed
after the main expression. If this is NULL, the post
side-effects are stored at the end of PRE_P.
The reason why the output is split in two is to handle post
side-effects explicitly. In some cases, an expression may have
inner and outer post side-effects which need to be emitted in
an order different from the one given by the recursive
traversal. For instance, for the expression (*p--)++ the post
side-effects of '--' must actually occur *after* the post
side-effects of '++'. However, gimplification will first visit
the inner expression, so if a separate POST sequence was not
used, the resulting sequence would be:
1 t.1 = *p
2 p = p - 1
3 t.2 = t.1 + 1
4 *p = t.2
However, the post-decrement operation in line #2 must not be
evaluated until after the store to *p at line #4, so the
correct sequence should be:
1 t.1 = *p
2 t.2 = t.1 + 1
3 *p = t.2
4 p = p - 1
So, by specifying a separate post queue, it is possible
to emit the post side-effects in the correct order.
If POST_P is NULL, an internal queue will be used. Before
returning to the caller, the sequence POST_P is appended to
the main output sequence PRE_P.
GIMPLE_TEST_F points to a function that takes a tree T and
returns nonzero if T is in the GIMPLE form requested by the
caller. The GIMPLE predicates are in gimple.c.
FALLBACK tells the function what sort of a temporary we want if
gimplification cannot produce an expression that complies with
GIMPLE_TEST_F.
fb_none means that no temporary should be generated
fb_rvalue means that an rvalue is OK to generate
fb_lvalue means that an lvalue is OK to generate
fb_either means that either is OK, but an lvalue is preferable.
fb_mayfail means that gimplification may fail (in which case
GS_ERROR will be returned)
The return value is either GS_ERROR or GS_ALL_DONE, since this
function iterates until EXPR is completely gimplified or an error
occurs. */
enum gimplify_status
gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool (*gimple_test_f) (tree), fallback_t fallback)
{
tree tmp;
gimple_seq internal_pre = NULL;
gimple_seq internal_post = NULL;
tree save_expr;
bool is_statement;
location_t saved_location;
enum gimplify_status ret;
gimple_stmt_iterator pre_last_gsi, post_last_gsi;
save_expr = *expr_p;
if (save_expr == NULL_TREE)
return GS_ALL_DONE;
/* If we are gimplifying a top-level statement, PRE_P must be valid. */
is_statement = gimple_test_f == is_gimple_stmt;
if (is_statement)
gcc_assert (pre_p);
/* Consistency checks. */
if (gimple_test_f == is_gimple_reg)
gcc_assert (fallback & (fb_rvalue | fb_lvalue));
else if (gimple_test_f == is_gimple_val
|| gimple_test_f == is_gimple_call_addr
|| gimple_test_f == is_gimple_condexpr
|| gimple_test_f == is_gimple_mem_rhs
|| gimple_test_f == is_gimple_mem_rhs_or_call
|| gimple_test_f == is_gimple_reg_rhs
|| gimple_test_f == is_gimple_reg_rhs_or_call
|| gimple_test_f == is_gimple_asm_val
|| gimple_test_f == is_gimple_mem_ref_addr)
gcc_assert (fallback & fb_rvalue);
else if (gimple_test_f == is_gimple_min_lval
|| gimple_test_f == is_gimple_lvalue)
gcc_assert (fallback & fb_lvalue);
else if (gimple_test_f == is_gimple_addressable)
gcc_assert (fallback & fb_either);
else if (gimple_test_f == is_gimple_stmt)
gcc_assert (fallback == fb_none);
else
{
/* We should have recognized the GIMPLE_TEST_F predicate to
know what kind of fallback to use in case a temporary is
needed to hold the value or address of *EXPR_P. */
gcc_unreachable ();
}
/* We used to check the predicate here and return immediately if it
succeeds. This is wrong; the design is for gimplification to be
idempotent, and for the predicates to only test for valid forms, not
whether they are fully simplified. */
if (pre_p == NULL)
pre_p = &internal_pre;
if (post_p == NULL)
post_p = &internal_post;
/* Remember the last statements added to PRE_P and POST_P. Every
new statement added by the gimplification helpers needs to be
annotated with location information. To centralize the
responsibility, we remember the last statement that had been
added to both queues before gimplifying *EXPR_P. If
gimplification produces new statements in PRE_P and POST_P, those
statements will be annotated with the same location information
as *EXPR_P. */
pre_last_gsi = gsi_last (*pre_p);
post_last_gsi = gsi_last (*post_p);
saved_location = input_location;
if (save_expr != error_mark_node
&& EXPR_HAS_LOCATION (*expr_p))
input_location = EXPR_LOCATION (*expr_p);
/* Loop over the specific gimplifiers until the toplevel node
remains the same. */
do
{
/* Strip away as many useless type conversions as possible
at the toplevel. */
STRIP_USELESS_TYPE_CONVERSION (*expr_p);
/* Remember the expr. */
save_expr = *expr_p;
/* Die, die, die, my darling. */
if (save_expr == error_mark_node
|| (TREE_TYPE (save_expr)
&& TREE_TYPE (save_expr) == error_mark_node))
{
ret = GS_ERROR;
break;
}
/* Do any language-specific gimplification. */
ret = ((enum gimplify_status)
lang_hooks.gimplify_expr (expr_p, pre_p, post_p));
if (ret == GS_OK)
{
if (*expr_p == NULL_TREE)
break;
if (*expr_p != save_expr)
continue;
}
else if (ret != GS_UNHANDLED)
break;
/* Make sure that all the cases set 'ret' appropriately. */
ret = GS_UNHANDLED;
switch (TREE_CODE (*expr_p))
{
/* First deal with the special cases. */
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
ret = gimplify_self_mod_expr (expr_p, pre_p, post_p,
fallback != fb_none);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case COMPONENT_REF:
case VIEW_CONVERT_EXPR:
ret = gimplify_compound_lval (expr_p, pre_p, post_p,
fallback ? fallback : fb_rvalue);
break;
case COND_EXPR:
ret = gimplify_cond_expr (expr_p, pre_p, fallback);
/* C99 code may assign to an array in a structure value of a
conditional expression, and this has undefined behavior
only on execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
ret = GS_OK;
}
break;
case CALL_EXPR:
ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none);
/* C99 code may assign to an array in a structure returned
from a function, and this has undefined behavior only on
execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
ret = GS_OK;
}
break;
case TREE_LIST:
gcc_unreachable ();
case COMPOUND_EXPR:
ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none);
break;
case COMPOUND_LITERAL_EXPR:
ret = gimplify_compound_literal_expr (expr_p, pre_p);
break;
case MODIFY_EXPR:
case INIT_EXPR:
ret = gimplify_modify_expr (expr_p, pre_p, post_p,
fallback != fb_none);
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
{
/* Preserve the original type of the expression and the
source location of the outer expression. */
tree org_type = TREE_TYPE (*expr_p);
*expr_p = gimple_boolify (*expr_p);
*expr_p = build3_loc (input_location, COND_EXPR,
org_type, *expr_p,
fold_convert_loc
(input_location,
org_type, boolean_true_node),
fold_convert_loc
(input_location,
org_type, boolean_false_node));
ret = GS_OK;
break;
}
case TRUTH_NOT_EXPR:
{
tree type = TREE_TYPE (*expr_p);
/* The parsers are careful to generate TRUTH_NOT_EXPR
only with operands that are always zero or one.
We do not fold here but handle the only interesting case
manually, as fold may re-introduce the TRUTH_NOT_EXPR. */
*expr_p = gimple_boolify (*expr_p);
if (TYPE_PRECISION (TREE_TYPE (*expr_p)) == 1)
*expr_p = build1_loc (input_location, BIT_NOT_EXPR,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0));
else
*expr_p = build2_loc (input_location, BIT_XOR_EXPR,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
build_int_cst (TREE_TYPE (*expr_p), 1));
if (!useless_type_conversion_p (type, TREE_TYPE (*expr_p)))
*expr_p = fold_convert_loc (input_location, type, *expr_p);
ret = GS_OK;
break;
}
case ADDR_EXPR:
ret = gimplify_addr_expr (expr_p, pre_p, post_p);
break;
case VA_ARG_EXPR:
ret = gimplify_va_arg_expr (expr_p, pre_p, post_p);
break;
CASE_CONVERT:
if (IS_EMPTY_STMT (*expr_p))
{
ret = GS_ALL_DONE;
break;
}
if (VOID_TYPE_P (TREE_TYPE (*expr_p))
|| fallback == fb_none)
{
/* Just strip a conversion to void (or in void context) and
try again. */
*expr_p = TREE_OPERAND (*expr_p, 0);
ret = GS_OK;
break;
}
ret = gimplify_conversion (expr_p);
if (ret == GS_ERROR)
break;
if (*expr_p != save_expr)
break;
/* FALLTHRU */
case FIX_TRUNC_EXPR:
/* unary_expr: ... | '(' cast ')' val | ... */
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
case INDIRECT_REF:
{
bool volatilep = TREE_THIS_VOLATILE (*expr_p);
bool notrap = TREE_THIS_NOTRAP (*expr_p);
tree saved_ptr_type = TREE_TYPE (TREE_OPERAND (*expr_p, 0));
*expr_p = fold_indirect_ref_loc (input_location, *expr_p);
if (*expr_p != save_expr)
{
ret = GS_OK;
break;
}
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_reg, fb_rvalue);
if (ret == GS_ERROR)
break;
recalculate_side_effects (*expr_p);
*expr_p = fold_build2_loc (input_location, MEM_REF,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
build_int_cst (saved_ptr_type, 0));
TREE_THIS_VOLATILE (*expr_p) = volatilep;
TREE_THIS_NOTRAP (*expr_p) = notrap;
ret = GS_OK;
break;
}
/* We arrive here through the various re-gimplifcation paths. */
case MEM_REF:
/* First try re-folding the whole thing. */
tmp = fold_binary (MEM_REF, TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
TREE_OPERAND (*expr_p, 1));
if (tmp)
{
*expr_p = tmp;
recalculate_side_effects (*expr_p);
ret = GS_OK;
break;
}
/* Avoid re-gimplifying the address operand if it is already
in suitable form. Re-gimplifying would mark the address
operand addressable. Always gimplify when not in SSA form
as we still may have to gimplify decls with value-exprs. */
if (!gimplify_ctxp || !gimplify_ctxp->into_ssa
|| !is_gimple_mem_ref_addr (TREE_OPERAND (*expr_p, 0)))
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_mem_ref_addr, fb_rvalue);
if (ret == GS_ERROR)
break;
}
recalculate_side_effects (*expr_p);
ret = GS_ALL_DONE;
break;
/* Constants need not be gimplified. */
case INTEGER_CST:
case REAL_CST:
case FIXED_CST:
case STRING_CST:
case COMPLEX_CST:
case VECTOR_CST:
ret = GS_ALL_DONE;
break;
case CONST_DECL:
/* If we require an lvalue, such as for ADDR_EXPR, retain the
CONST_DECL node. Otherwise the decl is replaceable by its
value. */
/* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */
if (fallback & fb_lvalue)
ret = GS_ALL_DONE;
else
{
*expr_p = DECL_INITIAL (*expr_p);
ret = GS_OK;
}
break;
case DECL_EXPR:
ret = gimplify_decl_expr (expr_p, pre_p);
break;
case BIND_EXPR:
ret = gimplify_bind_expr (expr_p, pre_p);
break;
case LOOP_EXPR:
ret = gimplify_loop_expr (expr_p, pre_p);
break;
case SWITCH_EXPR:
ret = gimplify_switch_expr (expr_p, pre_p);
break;
case EXIT_EXPR:
ret = gimplify_exit_expr (expr_p);
break;
case GOTO_EXPR:
/* If the target is not LABEL, then it is a computed jump
and the target needs to be gimplified. */
if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL)
{
ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p,
NULL, is_gimple_val, fb_rvalue);
if (ret == GS_ERROR)
break;
}
gimplify_seq_add_stmt (pre_p,
gimple_build_goto (GOTO_DESTINATION (*expr_p)));
ret = GS_ALL_DONE;
break;
case PREDICT_EXPR:
gimplify_seq_add_stmt (pre_p,
gimple_build_predict (PREDICT_EXPR_PREDICTOR (*expr_p),
PREDICT_EXPR_OUTCOME (*expr_p)));
ret = GS_ALL_DONE;
break;
case LABEL_EXPR:
ret = GS_ALL_DONE;
gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p))
== current_function_decl);
gimplify_seq_add_stmt (pre_p,
gimple_build_label (LABEL_EXPR_LABEL (*expr_p)));
break;
case CASE_LABEL_EXPR:
ret = gimplify_case_label_expr (expr_p, pre_p);
break;
case RETURN_EXPR:
ret = gimplify_return_expr (*expr_p, pre_p);
break;
case CONSTRUCTOR:
/* Don't reduce this in place; let gimplify_init_constructor work its
magic. Buf if we're just elaborating this for side effects, just
gimplify any element that has side-effects. */
if (fallback == fb_none)
{
unsigned HOST_WIDE_INT ix;
tree val;
tree temp = NULL_TREE;
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (*expr_p), ix, val)
if (TREE_SIDE_EFFECTS (val))
append_to_statement_list (val, &temp);
*expr_p = temp;
ret = temp ? GS_OK : GS_ALL_DONE;
}
/* C99 code may assign to an array in a constructed
structure or union, and this has undefined behavior only
on execution, so create a temporary if an lvalue is
required. */
else if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
ret = GS_OK;
}
else
ret = GS_ALL_DONE;
break;
/* The following are special cases that are not handled by the
original GIMPLE grammar. */
/* SAVE_EXPR nodes are converted into a GIMPLE identifier and
eliminated. */
case SAVE_EXPR:
ret = gimplify_save_expr (expr_p, pre_p, post_p);
break;
case BIT_FIELD_REF:
{
enum gimplify_status r0, r1, r2;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_lvalue, fb_either);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
post_p, is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
ret = MIN (r0, MIN (r1, r2));
}
break;
case TARGET_MEM_REF:
{
enum gimplify_status r0 = GS_ALL_DONE, r1 = GS_ALL_DONE;
if (TMR_BASE (*expr_p))
r0 = gimplify_expr (&TMR_BASE (*expr_p), pre_p,
post_p, is_gimple_mem_ref_addr, fb_either);
if (TMR_INDEX (*expr_p))
r1 = gimplify_expr (&TMR_INDEX (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
if (TMR_INDEX2 (*expr_p))
r1 = gimplify_expr (&TMR_INDEX2 (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
/* TMR_STEP and TMR_OFFSET are always integer constants. */
ret = MIN (r0, r1);
}
break;
case NON_LVALUE_EXPR:
/* This should have been stripped above. */
gcc_unreachable ();
case ASM_EXPR:
ret = gimplify_asm_expr (expr_p, pre_p, post_p);
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
{
gimple_seq eval, cleanup;
gimple try_;
eval = cleanup = NULL;
gimplify_and_add (TREE_OPERAND (*expr_p, 0), &eval);
gimplify_and_add (TREE_OPERAND (*expr_p, 1), &cleanup);
/* Don't create bogus GIMPLE_TRY with empty cleanup. */
if (gimple_seq_empty_p (cleanup))
{
gimple_seq_add_seq (pre_p, eval);
ret = GS_ALL_DONE;
break;
}
try_ = gimple_build_try (eval, cleanup,
TREE_CODE (*expr_p) == TRY_FINALLY_EXPR
? GIMPLE_TRY_FINALLY
: GIMPLE_TRY_CATCH);
if (TREE_CODE (*expr_p) == TRY_CATCH_EXPR)
gimple_try_set_catch_is_cleanup (try_,
TRY_CATCH_IS_CLEANUP (*expr_p));
gimplify_seq_add_stmt (pre_p, try_);
ret = GS_ALL_DONE;
break;
}
case CLEANUP_POINT_EXPR:
ret = gimplify_cleanup_point_expr (expr_p, pre_p);
break;
case TARGET_EXPR:
ret = gimplify_target_expr (expr_p, pre_p, post_p);
break;
case CATCH_EXPR:
{
gimple c;
gimple_seq handler = NULL;
gimplify_and_add (CATCH_BODY (*expr_p), &handler);
c = gimple_build_catch (CATCH_TYPES (*expr_p), handler);
gimplify_seq_add_stmt (pre_p, c);
ret = GS_ALL_DONE;
break;
}
case EH_FILTER_EXPR:
{
gimple ehf;
gimple_seq failure = NULL;
gimplify_and_add (EH_FILTER_FAILURE (*expr_p), &failure);
ehf = gimple_build_eh_filter (EH_FILTER_TYPES (*expr_p), failure);
gimple_set_no_warning (ehf, TREE_NO_WARNING (*expr_p));
gimplify_seq_add_stmt (pre_p, ehf);
ret = GS_ALL_DONE;
break;
}
case OBJ_TYPE_REF:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
TREE_SIDE_EFFECTS (*expr_p) = 0;
ret = MIN (r0, r1);
}
break;
case LABEL_DECL:
/* We get here when taking the address of a label. We mark
the label as "forced"; meaning it can never be removed and
it is a potential target for any computed goto. */
FORCED_LABEL (*expr_p) = 1;
ret = GS_ALL_DONE;
break;
case STATEMENT_LIST:
ret = gimplify_statement_list (expr_p, pre_p);
break;
case WITH_SIZE_EXPR:
{
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p == &internal_post ? NULL : post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
is_gimple_val, fb_rvalue);
ret = GS_ALL_DONE;
}
break;
case VAR_DECL:
case PARM_DECL:
ret = gimplify_var_or_parm_decl (expr_p);
break;
case RESULT_DECL:
/* When within an OpenMP context, notice uses of variables. */
if (gimplify_omp_ctxp)
omp_notice_variable (gimplify_omp_ctxp, *expr_p, true);
ret = GS_ALL_DONE;
break;
case SSA_NAME:
/* Allow callbacks into the gimplifier during optimization. */
ret = GS_ALL_DONE;
break;
case OMP_PARALLEL:
gimplify_omp_parallel (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_TASK:
gimplify_omp_task (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_FOR:
ret = gimplify_omp_for (expr_p, pre_p);
break;
case OMP_SECTIONS:
case OMP_SINGLE:
gimplify_omp_workshare (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_SECTION:
case OMP_MASTER:
case OMP_ORDERED:
case OMP_CRITICAL:
{
gimple_seq body = NULL;
gimple g;
gimplify_and_add (OMP_BODY (*expr_p), &body);
switch (TREE_CODE (*expr_p))
{
case OMP_SECTION:
g = gimple_build_omp_section (body);
break;
case OMP_MASTER:
g = gimple_build_omp_master (body);
break;
case OMP_ORDERED:
g = gimple_build_omp_ordered (body);
break;
case OMP_CRITICAL:
g = gimple_build_omp_critical (body,
OMP_CRITICAL_NAME (*expr_p));
break;
default:
gcc_unreachable ();
}
gimplify_seq_add_stmt (pre_p, g);
ret = GS_ALL_DONE;
break;
}
case OMP_ATOMIC:
case OMP_ATOMIC_READ:
case OMP_ATOMIC_CAPTURE_OLD:
case OMP_ATOMIC_CAPTURE_NEW:
ret = gimplify_omp_atomic (expr_p, pre_p);
break;
case TRANSACTION_EXPR:
ret = gimplify_transaction (expr_p, pre_p);
break;
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
{
tree orig_type = TREE_TYPE (*expr_p);
tree new_type, xop0, xop1;
*expr_p = gimple_boolify (*expr_p);
new_type = TREE_TYPE (*expr_p);
if (!useless_type_conversion_p (orig_type, new_type))
{
*expr_p = fold_convert_loc (input_location, orig_type, *expr_p);
ret = GS_OK;
break;
}
/* Boolified binary truth expressions are semantically equivalent
to bitwise binary expressions. Canonicalize them to the
bitwise variant. */
switch (TREE_CODE (*expr_p))
{
case TRUTH_AND_EXPR:
TREE_SET_CODE (*expr_p, BIT_AND_EXPR);
break;
case TRUTH_OR_EXPR:
TREE_SET_CODE (*expr_p, BIT_IOR_EXPR);
break;
case TRUTH_XOR_EXPR:
TREE_SET_CODE (*expr_p, BIT_XOR_EXPR);
break;
default:
break;
}
/* Now make sure that operands have compatible type to
expression's new_type. */
xop0 = TREE_OPERAND (*expr_p, 0);
xop1 = TREE_OPERAND (*expr_p, 1);
if (!useless_type_conversion_p (new_type, TREE_TYPE (xop0)))
TREE_OPERAND (*expr_p, 0) = fold_convert_loc (input_location,
new_type,
xop0);
if (!useless_type_conversion_p (new_type, TREE_TYPE (xop1)))
TREE_OPERAND (*expr_p, 1) = fold_convert_loc (input_location,
new_type,
xop1);
/* Continue classified as tcc_binary. */
goto expr_2;
}
case FMA_EXPR:
case VEC_PERM_EXPR:
/* Classified as tcc_expression. */
goto expr_3;
case POINTER_PLUS_EXPR:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
ret = MIN (r0, r1);
/* Convert &X + CST to invariant &MEM[&X, CST]. Do this
after gimplifying operands - this is similar to how
it would be folding all gimplified stmts on creation
to have them canonicalized, which is what we eventually
should do anyway. */
if (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == INTEGER_CST
&& is_gimple_min_invariant (TREE_OPERAND (*expr_p, 0)))
{
*expr_p = build_fold_addr_expr_with_type_loc
(input_location,
fold_build2 (MEM_REF, TREE_TYPE (TREE_TYPE (*expr_p)),
TREE_OPERAND (*expr_p, 0),
fold_convert (ptr_type_node,
TREE_OPERAND (*expr_p, 1))),
TREE_TYPE (*expr_p));
ret = MIN (ret, GS_OK);
}
break;
}
default:
switch (TREE_CODE_CLASS (TREE_CODE (*expr_p)))
{
case tcc_comparison:
/* Handle comparison of objects of non scalar mode aggregates
with a call to memcmp. It would be nice to only have to do
this for variable-sized objects, but then we'd have to allow
the same nest of reference nodes we allow for MODIFY_EXPR and
that's too complex.
Compare scalar mode aggregates as scalar mode values. Using
memcmp for them would be very inefficient at best, and is
plain wrong if bitfields are involved. */
{
tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1));
/* Vector comparisons need no boolification. */
if (TREE_CODE (type) == VECTOR_TYPE)
goto expr_2;
else if (!AGGREGATE_TYPE_P (type))
{
tree org_type = TREE_TYPE (*expr_p);
*expr_p = gimple_boolify (*expr_p);
if (!useless_type_conversion_p (org_type,
TREE_TYPE (*expr_p)))
{
*expr_p = fold_convert_loc (input_location,
org_type, *expr_p);
ret = GS_OK;
}
else
goto expr_2;
}
else if (TYPE_MODE (type) != BLKmode)
ret = gimplify_scalar_mode_aggregate_compare (expr_p);
else
ret = gimplify_variable_sized_compare (expr_p);
break;
}
/* If *EXPR_P does not need to be special-cased, handle it
according to its class. */
case tcc_unary:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
break;
case tcc_binary:
expr_2:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (r0, r1);
break;
}
expr_3:
{
enum gimplify_status r0, r1, r2;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (MIN (r0, r1), r2);
break;
}
case tcc_declaration:
case tcc_constant:
ret = GS_ALL_DONE;
goto dont_recalculate;
default:
gcc_unreachable ();
}
recalculate_side_effects (*expr_p);
dont_recalculate:
break;
}
gcc_assert (*expr_p || ret != GS_OK);
}
while (ret == GS_OK);
/* If we encountered an error_mark somewhere nested inside, either
stub out the statement or propagate the error back out. */
if (ret == GS_ERROR)
{
if (is_statement)
*expr_p = NULL;
goto out;
}
/* This was only valid as a return value from the langhook, which
we handled. Make sure it doesn't escape from any other context. */
gcc_assert (ret != GS_UNHANDLED);
if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p))
{
/* We aren't looking for a value, and we don't have a valid
statement. If it doesn't have side-effects, throw it away. */
if (!TREE_SIDE_EFFECTS (*expr_p))
*expr_p = NULL;
else if (!TREE_THIS_VOLATILE (*expr_p))
{
/* This is probably a _REF that contains something nested that
has side effects. Recurse through the operands to find it. */
enum tree_code code = TREE_CODE (*expr_p);
switch (code)
{
case COMPONENT_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case VIEW_CONVERT_EXPR:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
gimple_test_f, fallback);
break;
default:
/* Anything else with side-effects must be converted to
a valid statement before we get here. */
gcc_unreachable ();
}
*expr_p = NULL;
}
else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p))
&& TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode)
{
/* Historically, the compiler has treated a bare reference
to a non-BLKmode volatile lvalue as forcing a load. */
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p));
/* Normally, we do not want to create a temporary for a
TREE_ADDRESSABLE type because such a type should not be
copied by bitwise-assignment. However, we make an
exception here, as all we are doing here is ensuring that
we read the bytes that make up the type. We use
create_tmp_var_raw because create_tmp_var will abort when
given a TREE_ADDRESSABLE type. */
tree tmp = create_tmp_var_raw (type, "vol");
gimple_add_tmp_var (tmp);
gimplify_assign (tmp, *expr_p, pre_p);
*expr_p = NULL;
}
else
/* We can't do anything useful with a volatile reference to
an incomplete type, so just throw it away. Likewise for
a BLKmode type, since any implicit inner load should
already have been turned into an explicit one by the
gimplification process. */
*expr_p = NULL;
}
/* If we are gimplifying at the statement level, we're done. Tack
everything together and return. */
if (fallback == fb_none || is_statement)
{
/* Since *EXPR_P has been converted into a GIMPLE tuple, clear
it out for GC to reclaim it. */
*expr_p = NULL_TREE;
if (!gimple_seq_empty_p (internal_pre)
|| !gimple_seq_empty_p (internal_post))
{
gimplify_seq_add_seq (&internal_pre, internal_post);
gimplify_seq_add_seq (pre_p, internal_pre);
}
/* The result of gimplifying *EXPR_P is going to be the last few
statements in *PRE_P and *POST_P. Add location information
to all the statements that were added by the gimplification
helpers. */
if (!gimple_seq_empty_p (*pre_p))
annotate_all_with_location_after (*pre_p, pre_last_gsi, input_location);
if (!gimple_seq_empty_p (*post_p))
annotate_all_with_location_after (*post_p, post_last_gsi,
input_location);
goto out;
}
#ifdef ENABLE_GIMPLE_CHECKING
if (*expr_p)
{
enum tree_code code = TREE_CODE (*expr_p);
/* These expressions should already be in gimple IR form. */
gcc_assert (code != MODIFY_EXPR
&& code != ASM_EXPR
&& code != BIND_EXPR
&& code != CATCH_EXPR
&& (code != COND_EXPR || gimplify_ctxp->allow_rhs_cond_expr)
&& code != EH_FILTER_EXPR
&& code != GOTO_EXPR
&& code != LABEL_EXPR
&& code != LOOP_EXPR
&& code != SWITCH_EXPR
&& code != TRY_FINALLY_EXPR
&& code != OMP_CRITICAL
&& code != OMP_FOR
&& code != OMP_MASTER
&& code != OMP_ORDERED
&& code != OMP_PARALLEL
&& code != OMP_SECTIONS
&& code != OMP_SECTION
&& code != OMP_SINGLE);
}
#endif
/* Otherwise we're gimplifying a subexpression, so the resulting
value is interesting. If it's a valid operand that matches
GIMPLE_TEST_F, we're done. Unless we are handling some
post-effects internally; if that's the case, we need to copy into
a temporary before adding the post-effects to POST_P. */
if (gimple_seq_empty_p (internal_post) && (*gimple_test_f) (*expr_p))
goto out;
/* Otherwise, we need to create a new temporary for the gimplified
expression. */
/* We can't return an lvalue if we have an internal postqueue. The
object the lvalue refers to would (probably) be modified by the
postqueue; we need to copy the value out first, which means an
rvalue. */
if ((fallback & fb_lvalue)
&& gimple_seq_empty_p (internal_post)
&& is_gimple_addressable (*expr_p))
{
/* An lvalue will do. Take the address of the expression, store it
in a temporary, and replace the expression with an INDIRECT_REF of
that temporary. */
tmp = build_fold_addr_expr_loc (input_location, *expr_p);
gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue);
*expr_p = build_simple_mem_ref (tmp);
}
else if ((fallback & fb_rvalue) && is_gimple_reg_rhs_or_call (*expr_p))
{
/* An rvalue will do. Assign the gimplified expression into a
new temporary TMP and replace the original expression with
TMP. First, make sure that the expression has a type so that
it can be assigned into a temporary. */
gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p)));
if (!gimple_seq_empty_p (internal_post) || (fallback & fb_lvalue))
/* The postqueue might change the value of the expression between
the initialization and use of the temporary, so we can't use a
formal temp. FIXME do we care? */
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
if (TREE_CODE (TREE_TYPE (*expr_p)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (*expr_p)) == VECTOR_TYPE)
DECL_GIMPLE_REG_P (*expr_p) = 1;
}
else
*expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
else
{
#ifdef ENABLE_GIMPLE_CHECKING
if (!(fallback & fb_mayfail))
{
fprintf (stderr, "gimplification failed:\n");
print_generic_expr (stderr, *expr_p, 0);
debug_tree (*expr_p);
internal_error ("gimplification failed");
}
#endif
gcc_assert (fallback & fb_mayfail);
/* If this is an asm statement, and the user asked for the
impossible, don't die. Fail and let gimplify_asm_expr
issue an error. */
ret = GS_ERROR;
goto out;
}
/* Make sure the temporary matches our predicate. */
gcc_assert ((*gimple_test_f) (*expr_p));
if (!gimple_seq_empty_p (internal_post))
{
annotate_all_with_location (internal_post, input_location);
gimplify_seq_add_seq (pre_p, internal_post);
}
out:
input_location = saved_location;
return ret;
}
/* Look through TYPE for variable-sized objects and gimplify each such
size that we find. Add to LIST_P any statements generated.
A type's sizes and bounds (TYPE_SIZE, TYPE_MIN_VALUE/TYPE_MAX_VALUE,
array domains, field offsets) may be non-GIMPLE trees for variably
modified types; this walk forces each of them into GIMPLE form exactly
once per main variant, using TYPE_SIZES_GIMPLIFIED as the visited mark. */
void
gimplify_type_sizes (tree type, gimple_seq *list_p)
{
tree field, t;
if (type == NULL || type == error_mark_node)
return;
/* We first do the main variant, then copy into any other variants. */
type = TYPE_MAIN_VARIANT (type);
/* Avoid infinite recursion. */
if (TYPE_SIZES_GIMPLIFIED (type))
return;
TYPE_SIZES_GIMPLIFIED (type) = 1;
switch (TREE_CODE (type))
{
case INTEGER_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
/* Scalar types: gimplify the (possibly variable) bounds, then mirror
the gimplified bounds into every variant so they stay shared. */
gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p);
gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p);
for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
{
TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type);
TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type);
}
break;
case ARRAY_TYPE:
/* These types may not have declarations, so handle them here. */
gimplify_type_sizes (TREE_TYPE (type), list_p);
gimplify_type_sizes (TYPE_DOMAIN (type), list_p);
/* Ensure VLA bounds aren't removed, for -O0 they should be variables
with assigned stack slots, for -O1+ -g they should be tracked
by VTA. */
if (!(TYPE_NAME (type)
&& TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
&& DECL_IGNORED_P (TYPE_NAME (type)))
&& TYPE_DOMAIN (type)
&& INTEGRAL_TYPE_P (TYPE_DOMAIN (type)))
{
t = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t))
DECL_IGNORED_P (t) = 0;
t = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t))
DECL_IGNORED_P (t) = 0;
}
break;
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
/* Aggregates: gimplify each field's offset and size, then recurse
into the field types themselves. */
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
if (TREE_CODE (field) == FIELD_DECL)
{
gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p);
gimplify_one_sizepos (&DECL_SIZE (field), list_p);
gimplify_one_sizepos (&DECL_SIZE_UNIT (field), list_p);
gimplify_type_sizes (TREE_TYPE (field), list_p);
}
break;
case POINTER_TYPE:
case REFERENCE_TYPE:
/* We used to recurse on the pointed-to type here, which turned out to
be incorrect because its definition might refer to variables not
yet initialized at this point if a forward declaration is involved.
It was actually useful for anonymous pointed-to types to ensure
that the sizes evaluation dominates every possible later use of the
values. Restricting to such types here would be safe since there
is no possible forward declaration around, but would introduce an
undesirable middle-end semantic to anonymity. We then defer to
front-ends the responsibility of ensuring that the sizes are
evaluated both early and late enough, e.g. by attaching artificial
type declarations to the tree. */
break;
default:
break;
}
/* Finally the overall size of the type, again propagated into every
variant so all variants share the single gimplified size. */
gimplify_one_sizepos (&TYPE_SIZE (type), list_p);
gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p);
for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
{
TYPE_SIZE (t) = TYPE_SIZE (type);
TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type);
TYPE_SIZES_GIMPLIFIED (t) = 1;
}
}
/* A subroutine of gimplify_type_sizes to make sure that *EXPR_P,
a size or position, has had all of its SAVE_EXPRs evaluated.
We add any required statements to *STMT_P. */
void
gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p)
{
tree type, expr = *expr_p;
/* We don't do anything if the value isn't there, is constant, or contains
A PLACEHOLDER_EXPR. We also don't want to do anything if it's already
a VAR_DECL. If it's a VAR_DECL from another function, the gimplifier
will want to replace it with a new variable, but that will cause problems
if this type is from outside the function. It's OK to have that here. */
if (expr == NULL_TREE || TREE_CONSTANT (expr)
|| TREE_CODE (expr) == VAR_DECL
|| CONTAINS_PLACEHOLDER_P (expr))
return;
type = TREE_TYPE (expr);
/* Unshare before gimplifying: the size tree may be shared between
several types, and gimplification modifies it in place. */
*expr_p = unshare_expr (expr);
gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue);
expr = *expr_p;
/* Verify that we've an exact type match with the original expression.
In particular, we do not wish to drop a "sizetype" in favour of a
type of similar dimensions. We don't want to pollute the generic
type-stripping code with this knowledge because it doesn't matter
for the bulk of GENERIC/GIMPLE. It only matters that TYPE_SIZE_UNIT
and friends retain their "sizetype-ness". */
if (TREE_TYPE (expr) != type
&& TREE_CODE (type) == INTEGER_TYPE
&& TYPE_IS_SIZETYPE (type))
{
tree tmp;
gimple stmt;
/* Cast the gimplified value back to the original sizetype through a
fresh temporary; gimplify_assign returns the new assignment so we
can give it a location. */
*expr_p = create_tmp_var (type, NULL);
tmp = build1 (NOP_EXPR, type, expr);
stmt = gimplify_assign (*expr_p, tmp, stmt_p);
gimple_set_location (stmt, EXPR_LOC_OR_HERE (expr));
}
}
/* Gimplify the body of statements of FNDECL and return a GIMPLE_BIND node
containing the sequence of corresponding GIMPLE statements. If DO_PARMS
is true, also gimplify the parameters. */
gimple
gimplify_body (tree fndecl, bool do_parms)
{
location_t saved_location = input_location;
gimple_seq parm_stmts, seq;
gimple outer_bind;
struct gimplify_ctx gctx;
struct cgraph_node *cgn;
timevar_push (TV_TREE_GIMPLIFY);
/* Initialize for optimize_insn_for_s{ize,peed}_p possibly called during
gimplification. */
default_rtl_profile ();
/* A fresh gimplification context must be pushed here and popped before
returning; nesting would indicate a missing pop somewhere. */
gcc_assert (gimplify_ctxp == NULL);
push_gimplify_context (&gctx);
/* Unshare most shared trees in the body and in that of any nested functions.
It would seem we don't have to do this for nested functions because
they are supposed to be output and then the outer function gimplified
first, but the g++ front end doesn't always do it that way. */
unshare_body (fndecl);
unvisit_body (fndecl);
/* Only functions that have nested functions need the nonlocal_vlas set,
used to rewrite references to VLAs of enclosing functions. */
cgn = cgraph_get_node (fndecl);
if (cgn && cgn->origin)
nonlocal_vlas = pointer_set_create ();
/* Make sure input_location isn't set to something weird. */
input_location = DECL_SOURCE_LOCATION (fndecl);
/* Resolve callee-copies. This has to be done before processing
the body so that DECL_VALUE_EXPR gets processed correctly. */
parm_stmts = do_parms ? gimplify_parameters () : NULL;
/* Gimplify the function's body. */
seq = NULL;
gimplify_stmt (&DECL_SAVED_TREE (fndecl), &seq);
outer_bind = gimple_seq_first_stmt (seq);
if (!outer_bind)
{
/* An entirely empty body still needs a statement to hang on to. */
outer_bind = gimple_build_nop ();
gimplify_seq_add_stmt (&seq, outer_bind);
}
/* The body must contain exactly one statement, a GIMPLE_BIND. If this is
not the case, wrap everything in a GIMPLE_BIND to make it so. */
if (gimple_code (outer_bind) == GIMPLE_BIND
&& gimple_seq_first (seq) == gimple_seq_last (seq))
;
else
outer_bind = gimple_build_bind (NULL_TREE, seq, NULL);
/* The GENERIC body is no longer needed once the GIMPLE form exists. */
DECL_SAVED_TREE (fndecl) = NULL_TREE;
/* If we had callee-copies statements, insert them at the beginning
of the function and clear DECL_VALUE_EXPR_P on the parameters. */
if (!gimple_seq_empty_p (parm_stmts))
{
tree parm;
gimplify_seq_add_seq (&parm_stmts, gimple_bind_body (outer_bind));
gimple_bind_set_body (outer_bind, parm_stmts);
for (parm = DECL_ARGUMENTS (current_function_decl);
parm; parm = DECL_CHAIN (parm))
if (DECL_HAS_VALUE_EXPR_P (parm))
{
DECL_HAS_VALUE_EXPR_P (parm) = 0;
DECL_IGNORED_P (parm) = 0;
}
}
if (nonlocal_vlas)
{
pointer_set_destroy (nonlocal_vlas);
nonlocal_vlas = NULL;
}
pop_gimplify_context (outer_bind);
gcc_assert (gimplify_ctxp == NULL);
/* Sanity-check the generated GIMPLE unless errors were already seen. */
if (!seen_error ())
verify_gimple_in_seq (gimple_bind_body (outer_bind));
timevar_pop (TV_TREE_GIMPLIFY);
input_location = saved_location;
return outer_bind;
}
typedef char *char_p; /* For DEF_VEC_P. */
DEF_VEC_P(char_p);
DEF_VEC_ALLOC_P(char_p,heap);
/* Return whether we should exclude FNDECL from instrumentation. */
static bool
flag_instrument_functions_exclude_p (tree fndecl)
{
VEC(char_p,heap) *vec;
vec = (VEC(char_p,heap) *) flag_instrument_functions_exclude_functions;
if (VEC_length (char_p, vec) > 0)
{
const char *name;
int i;
char *s;
name = lang_hooks.decl_printable_name (fndecl, 0);
FOR_EACH_VEC_ELT (char_p, vec, i, s)
if (strstr (name, s) != NULL)
return true;
}
vec = (VEC(char_p,heap) *) flag_instrument_functions_exclude_files;
if (VEC_length (char_p, vec) > 0)
{
const char *name;
int i;
char *s;
name = DECL_SOURCE_FILE (fndecl);
FOR_EACH_VEC_ELT (char_p, vec, i, s)
if (strstr (name, s) != NULL)
return true;
}
return false;
}
/* Entry point to the gimplification pass. FNDECL is the FUNCTION_DECL
node for the function we want to gimplify.
Return the sequence of GIMPLE statements corresponding to the body
of FNDECL. */
void
gimplify_function_tree (tree fndecl)
{
tree oldfn, parm, ret;
gimple_seq seq;
gimple bind;
/* The function must not have been gimplified already. */
gcc_assert (!gimple_body (fndecl));
oldfn = current_function_decl;
current_function_decl = fndecl;
if (DECL_STRUCT_FUNCTION (fndecl))
push_cfun (DECL_STRUCT_FUNCTION (fndecl));
else
push_struct_function (fndecl);
for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = DECL_CHAIN (parm))
{
/* Preliminarily mark non-addressed complex variables as eligible
for promotion to gimple registers. We'll transform their uses
as we find them. */
if ((TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (parm)) == VECTOR_TYPE)
&& !TREE_THIS_VOLATILE (parm)
&& !needs_to_live_in_memory (parm))
DECL_GIMPLE_REG_P (parm) = 1;
}
/* Likewise for a complex/vector result value. */
ret = DECL_RESULT (fndecl);
if ((TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (ret)) == VECTOR_TYPE)
&& !needs_to_live_in_memory (ret))
DECL_GIMPLE_REG_P (ret) = 1;
bind = gimplify_body (fndecl, true);
/* The tree body of the function is no longer needed, replace it
with the new GIMPLE body. */
seq = gimple_seq_alloc ();
gimple_seq_add_stmt (&seq, bind);
gimple_set_body (fndecl, seq);
/* If we're instrumenting function entry/exit, then prepend the call to
the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to
catch the exit hook. */
/* ??? Add some way to ignore exceptions for this TFE. */
if (flag_instrument_function_entry_exit
&& !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl)
&& !flag_instrument_functions_exclude_p (fndecl))
{
tree x;
gimple new_bind;
gimple tf;
gimple_seq cleanup = NULL, body = NULL;
tree tmp_var;
gimple call;
/* Build the cleanup (exit-hook) sequence:
return_addr = __builtin_return_address (0);
__cyg_profile_func_exit (fn, return_addr); */
x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
call = gimple_build_call (x, 1, integer_zero_node);
tmp_var = create_tmp_var (ptr_type_node, "return_addr");
gimple_call_set_lhs (call, tmp_var);
gimplify_seq_add_stmt (&cleanup, call);
x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_EXIT);
call = gimple_build_call (x, 2,
build_fold_addr_expr (current_function_decl),
tmp_var);
gimplify_seq_add_stmt (&cleanup, call);
/* Wrap the original body so the exit hook runs however we leave. */
tf = gimple_build_try (seq, cleanup, GIMPLE_TRY_FINALLY);
/* Build the body (entry-hook) sequence that precedes the try:
return_addr = __builtin_return_address (0);
__cyg_profile_func_enter (fn, return_addr); */
x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
call = gimple_build_call (x, 1, integer_zero_node);
tmp_var = create_tmp_var (ptr_type_node, "return_addr");
gimple_call_set_lhs (call, tmp_var);
gimplify_seq_add_stmt (&body, call);
x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_ENTER);
call = gimple_build_call (x, 2,
build_fold_addr_expr (current_function_decl),
tmp_var);
gimplify_seq_add_stmt (&body, call);
gimplify_seq_add_stmt (&body, tf);
new_bind = gimple_build_bind (NULL, body, gimple_bind_block (bind));
/* Clear the block for BIND, since it is no longer directly inside
the function, but within a try block. */
gimple_bind_set_block (bind, NULL);
/* Replace the current function body with the body
wrapped in the try/finally TF. */
seq = gimple_seq_alloc ();
gimple_seq_add_stmt (&seq, new_bind);
gimple_set_body (fndecl, seq);
}
DECL_SAVED_TREE (fndecl) = NULL_TREE;
cfun->curr_properties = PROP_gimple_any;
/* Restore the caller's function context. */
current_function_decl = oldfn;
pop_cfun ();
}
/* Some transformations like inlining may invalidate the GIMPLE form
for operands. This function traverses all the operands in STMT and
gimplifies anything that is not a valid gimple operand. Any new
GIMPLE statements are inserted before *GSI_P. */
void
gimple_regimplify_operands (gimple stmt, gimple_stmt_iterator *gsi_p)
{
size_t i, num_ops;
tree orig_lhs = NULL_TREE, lhs, t;
gimple_seq pre = NULL;
gimple post_stmt = NULL;
struct gimplify_ctx gctx;
push_gimplify_context (&gctx);
gimplify_ctxp->into_ssa = gimple_in_ssa_p (cfun);
switch (gimple_code (stmt))
{
case GIMPLE_COND:
/* Both operands of a condition must be gimple values. */
gimplify_expr (gimple_cond_lhs_ptr (stmt), &pre, NULL,
is_gimple_val, fb_rvalue);
gimplify_expr (gimple_cond_rhs_ptr (stmt), &pre, NULL,
is_gimple_val, fb_rvalue);
break;
case GIMPLE_SWITCH:
gimplify_expr (gimple_switch_index_ptr (stmt), &pre, NULL,
is_gimple_val, fb_rvalue);
break;
case GIMPLE_OMP_ATOMIC_LOAD:
gimplify_expr (gimple_omp_atomic_load_rhs_ptr (stmt), &pre, NULL,
is_gimple_val, fb_rvalue);
break;
case GIMPLE_ASM:
{
/* Asm operands are regimplified against the predicate dictated by
their parsed constraints, mirroring gimplify_asm_expr. */
size_t i, noutputs = gimple_asm_noutputs (stmt);
const char *constraint, **oconstraints;
bool allows_mem, allows_reg, is_inout;
oconstraints
= (const char **) alloca ((noutputs) * sizeof (const char *));
for (i = 0; i < noutputs; i++)
{
tree op = gimple_asm_output_op (stmt, i);
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op)));
oconstraints[i] = constraint;
parse_output_constraint (&constraint, i, 0, 0, &allows_mem,
&allows_reg, &is_inout);
gimplify_expr (&TREE_VALUE (op), &pre, NULL,
is_inout ? is_gimple_min_lval : is_gimple_lvalue,
fb_lvalue | fb_mayfail);
}
for (i = 0; i < gimple_asm_ninputs (stmt); i++)
{
tree op = gimple_asm_input_op (stmt, i);
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (op)));
parse_input_constraint (&constraint, 0, 0, noutputs, 0,
oconstraints, &allows_mem, &allows_reg);
/* An addressable operand that the constraint lets live in memory
must stay in memory. */
if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (op))) && allows_mem)
allows_reg = 0;
if (!allows_reg && allows_mem)
gimplify_expr (&TREE_VALUE (op), &pre, NULL,
is_gimple_lvalue, fb_lvalue | fb_mayfail);
else
gimplify_expr (&TREE_VALUE (op), &pre, NULL,
is_gimple_asm_val, fb_rvalue);
}
}
break;
default:
/* NOTE: We start gimplifying operands from last to first to
make sure that side-effects on the RHS of calls, assignments
and ASMs are executed before the LHS. The ordering is not
important for other statements. */
num_ops = gimple_num_ops (stmt);
orig_lhs = gimple_get_lhs (stmt);
for (i = num_ops; i > 0; i--)
{
tree op = gimple_op (stmt, i - 1);
if (op == NULL_TREE)
continue;
/* Operand 0 of a call or assignment is the LHS: an lvalue. */
if (i == 1 && (is_gimple_call (stmt) || is_gimple_assign (stmt)))
gimplify_expr (&op, &pre, NULL, is_gimple_lvalue, fb_lvalue);
else if (i == 2
&& is_gimple_assign (stmt)
&& num_ops == 2
&& get_gimple_rhs_class (gimple_expr_code (stmt))
== GIMPLE_SINGLE_RHS)
/* The single RHS of an assignment: use the predicate the LHS
requires (aggregates allow more than registers do). */
gimplify_expr (&op, &pre, NULL,
rhs_predicate_for (gimple_assign_lhs (stmt)),
fb_rvalue);
else if (i == 2 && is_gimple_call (stmt))
{
/* Operand 1 of a call is the callee address. */
if (TREE_CODE (op) == FUNCTION_DECL)
continue;
gimplify_expr (&op, &pre, NULL, is_gimple_call_addr, fb_rvalue);
}
else
gimplify_expr (&op, &pre, NULL, is_gimple_val, fb_rvalue);
gimple_set_op (stmt, i - 1, op);
}
lhs = gimple_get_lhs (stmt);
/* If the LHS changed it in a way that requires a simple RHS,
create temporary. */
if (lhs && !is_gimple_reg (lhs))
{
bool need_temp = false;
if (is_gimple_assign (stmt)
&& num_ops == 2
&& get_gimple_rhs_class (gimple_expr_code (stmt))
== GIMPLE_SINGLE_RHS)
gimplify_expr (gimple_assign_rhs1_ptr (stmt), &pre, NULL,
rhs_predicate_for (gimple_assign_lhs (stmt)),
fb_rvalue);
/* NOTE(review): this branch appears unreachable -- the enclosing
condition already requires !is_gimple_reg (lhs). Confirm against
upstream history before relying on it. */
else if (is_gimple_reg (lhs))
{
if (is_gimple_reg_type (TREE_TYPE (lhs)))
{
if (is_gimple_call (stmt))
{
i = gimple_call_flags (stmt);
if ((i & ECF_LOOPING_CONST_OR_PURE)
|| !(i & (ECF_CONST | ECF_PURE)))
need_temp = true;
}
if (stmt_can_throw_internal (stmt))
need_temp = true;
}
}
else
{
if (is_gimple_reg_type (TREE_TYPE (lhs)))
need_temp = true;
else if (TYPE_MODE (TREE_TYPE (lhs)) != BLKmode)
{
if (is_gimple_call (stmt))
{
/* A call whose non-BLKmode result is returned in registers
cannot store straight into memory: go through a temp,
unless the result is actually returned by reference. */
tree fndecl = gimple_call_fndecl (stmt);
if (!aggregate_value_p (TREE_TYPE (lhs), fndecl)
&& !(fndecl && DECL_RESULT (fndecl)
&& DECL_BY_REFERENCE (DECL_RESULT (fndecl))))
need_temp = true;
}
else
need_temp = true;
}
}
if (need_temp)
{
/* Store into a fresh register temporary, then copy the temporary
into the original LHS after the statement. */
tree temp = create_tmp_reg (TREE_TYPE (lhs), NULL);
if (TREE_CODE (orig_lhs) == SSA_NAME)
orig_lhs = SSA_NAME_VAR (orig_lhs);
if (gimple_in_ssa_p (cfun))
temp = make_ssa_name (temp, NULL);
gimple_set_lhs (stmt, temp);
post_stmt = gimple_build_assign (lhs, temp);
if (TREE_CODE (lhs) == SSA_NAME)
SSA_NAME_DEF_STMT (lhs) = post_stmt;
}
}
break;
}
/* Register any temporaries the gimplifier created. */
if (gimple_referenced_vars (cfun))
for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t))
add_referenced_var (t);
/* Emit the pre-side-effects before the statement, the copy-back (if
any) after it. */
if (!gimple_seq_empty_p (pre))
{
if (gimple_in_ssa_p (cfun))
{
gimple_stmt_iterator i;
for (i = gsi_start (pre); !gsi_end_p (i); gsi_next (&i))
mark_symbols_for_renaming (gsi_stmt (i));
}
gsi_insert_seq_before (gsi_p, pre, GSI_SAME_STMT);
}
if (post_stmt)
gsi_insert_after (gsi_p, post_stmt, GSI_NEW_STMT);
pop_gimplify_context (NULL);
}
/* Expand EXPR to list of gimple statements STMTS. GIMPLE_TEST_F specifies
the predicate that will hold for the result. If VAR is not NULL, make the
base variable of the final destination be VAR if suitable. */
tree
force_gimple_operand_1 (tree expr, gimple_seq *stmts,
gimple_predicate gimple_test_f, tree var)
{
tree t;
enum gimplify_status ret;
struct gimplify_ctx gctx;
*stmts = NULL;
/* gimple_test_f might be more strict than is_gimple_val, make
sure we pass both. Just checking gimple_test_f doesn't work
because most gimple predicates do not work recursively. */
if (is_gimple_val (expr)
&& (*gimple_test_f) (expr))
return expr;
push_gimplify_context (&gctx);
gimplify_ctxp->into_ssa = gimple_in_ssa_p (cfun);
gimplify_ctxp->allow_rhs_cond_expr = true;
/* If a destination was requested, gimplify an assignment to it. */
if (var)
expr = build2 (MODIFY_EXPR, TREE_TYPE (var), var, expr);
if (TREE_CODE (expr) != MODIFY_EXPR
&& TREE_TYPE (expr) == void_type_node)
{
/* A void expression has no value: gimplify it purely for its
side effects and return no tree. */
gimplify_and_add (expr, stmts);
expr = NULL_TREE;
}
else
{
ret = gimplify_expr (&expr, stmts, NULL, gimple_test_f, fb_rvalue);
gcc_assert (ret != GS_ERROR);
}
/* Register any temporaries the gimplifier created. */
if (gimple_referenced_vars (cfun))
for (t = gimplify_ctxp->temps; t ; t = DECL_CHAIN (t))
add_referenced_var (t);
pop_gimplify_context (NULL);
return expr;
}
/* Expand EXPR to the list of gimple statements STMTS.  When SIMPLE is
   true, force the result to be either an ssa_name or an invariant;
   otherwise any valid RHS expression is accepted.  If VAR is not NULL,
   make the base variable of the final destination be VAR if suitable.  */
tree
force_gimple_operand (tree expr, gimple_seq *stmts, bool simple, tree var)
{
  gimple_predicate pred;

  /* Pick the predicate matching the strictness the caller asked for.  */
  if (simple)
    pred = is_gimple_val;
  else
    pred = is_gimple_reg_rhs;

  return force_gimple_operand_1 (expr, stmts, pred, var);
}
/* Invoke force_gimple_operand_1 for EXPR with parameters GIMPLE_TEST_F
   and VAR.  If some statements are produced, emit them at GSI: before
   GSI when BEFORE is true, after it otherwise.  M specifies the way GSI
   moves after insertion (GSI_SAME_STMT or GSI_CONTINUE_LINKING are the
   usual values).  */
tree
force_gimple_operand_gsi_1 (gimple_stmt_iterator *gsi, tree expr,
			    gimple_predicate gimple_test_f,
			    tree var, bool before,
			    enum gsi_iterator_update m)
{
  gimple_seq seq;

  expr = force_gimple_operand_1 (expr, &seq, gimple_test_f, var);

  /* Nothing to emit: the expression was already acceptable.  */
  if (gimple_seq_empty_p (seq))
    return expr;

  /* In SSA form the new statements' symbols must be queued for
     renaming before insertion.  */
  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator it;

      for (it = gsi_start (seq); !gsi_end_p (it); gsi_next (&it))
	mark_symbols_for_renaming (gsi_stmt (it));
    }

  if (before)
    gsi_insert_seq_before (gsi, seq, m);
  else
    gsi_insert_seq_after (gsi, seq, m);

  return expr;
}
/* Invoke force_gimple_operand_gsi_1 for EXPR with parameter VAR.
   If SIMPLE_P is true, force the result to be either an ssa_name or an
   invariant; otherwise just force it to be a rhs expression.  If some
   statements are produced, emit them at GSI: before GSI when BEFORE is
   true, after it otherwise.  M specifies the way GSI moves after
   insertion (GSI_SAME_STMT or GSI_CONTINUE_LINKING are the usual
   values).  */
tree
force_gimple_operand_gsi (gimple_stmt_iterator *gsi, tree expr,
			  bool simple_p, tree var, bool before,
			  enum gsi_iterator_update m)
{
  gimple_predicate pred = simple_p ? is_gimple_val : is_gimple_reg_rhs;

  return force_gimple_operand_gsi_1 (gsi, expr, pred, var, before, m);
}
#include "gt-gimplify.h"
|
GB_unop__expm1_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__expm1_fp32_fp32)
// op(A') function: GB (_unop_tran__expm1_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = expm1f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = expm1f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = expm1f (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXPM1 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply cij = expm1f (aij) elementwise over the anz entries of Ax,
// writing into Cx (which may alias Ax).  Ab, if non-NULL, is the bitmap
// marking which entries of a bitmap matrix are present.  Returns
// GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__expm1_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense/sparse case: every entry 0..anz-1 is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a straight memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = expm1f (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = expm1f (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The transpose-and-apply loop body lives in the shared template
// GB_unop_transpose.c, which is specialized here via the GB_* macros
// defined above (GB_ATYPE, GB_CTYPE, GB_CAST_OP, ...).  Workspaces and
// A_slice carry the parallel transpose partitioning.  Returns
// GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_tran__expm1_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
taskdep_if0_2.c | // RUN: %libomp-compile-and-run
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "omp_my_sleep.h"
int a = 0, b = 0;
int task_grabbed = 0, task_can_proceed = 0;
int task2_grabbed = 0, task2_can_proceed = 0;
// Poll *flag with an atomic read, once per second, until it becomes
// nonzero.  Aborts the whole test with EXIT_FAILURE if the flag is not
// raised within the 30-second time limit.
//
// Fix vs. the original do/while shape: the flag is now checked *before*
// sleeping, so a flag that is already set no longer costs an extra
// one-second delay per call.  The ~30 s timeout budget is unchanged.
static void wait_on_flag(int *flag) {
  int flag_value;
  int timelimit = 30;
  int secs = 0;
  for (;;) {
#pragma omp atomic read
    flag_value = *flag;
    if (flag_value != 0)
      return;
    my_sleep(1.0);
    secs++;
    if (secs == timelimit) {
      fprintf(stderr, "error: timeout in wait_on_flag()\n");
      exit(EXIT_FAILURE);
    }
  }
}
// Atomically bump *flag by one, releasing any thread polling it in
// wait_on_flag().
static void signal_flag(int *flag) {
#pragma omp atomic
  *flag += 1;
}
// Regression test: "#pragma omp task if(0) depend(...)" (an included,
// i.e. undeferred, task used as a taskwait-with-dependences) must wait
// only for previously generated sibling tasks whose dependences overlap
// its own -- not for unrelated or dependence-free tasks.
int main(int argc, char** argv) {
// Ensure two threads are running
int num_threads = omp_get_max_threads();
if (num_threads < 2)
omp_set_num_threads(2);
#pragma omp parallel shared(a)
{
int a_value;
// Let us be extra safe here
if (omp_get_num_threads() > 1) {
#pragma omp single nowait
{
// Schedule independent child task that
// waits to be flagged after subsequent taskwait depend()
#pragma omp task
{
signal_flag(&task_grabbed);
wait_on_flag(&task_can_proceed);
}
// Let another worker thread grab the task to execute
wait_on_flag(&task_grabbed);
// This should be ignored since the task above has
// no dependency information
#pragma omp task if(0) depend(inout: a)
{}
// Signal the independent task to proceed
signal_flag(&task_can_proceed);
// Schedule child task with dependencies that taskwait does
// not care about
#pragma omp task depend(inout: b)
{
signal_flag(&task2_grabbed);
wait_on_flag(&task2_can_proceed);
#pragma omp atomic
b++;
}
// Let another worker thread grab the task to execute
wait_on_flag(&task2_grabbed);
// This should be ignored since the task above has
// dependency information on b instead of a
#pragma omp task if(0) depend(inout: a)
{}
// Signal the task to proceed
signal_flag(&task2_can_proceed);
// Generate one child task for taskwait
#pragma omp task shared(a) depend(inout: a)
{
my_sleep(1.0);
#pragma omp atomic
a++;
}
// This undeferred task DOES depend (inout: a) on the sibling
// above, so it must not complete until a has been incremented.
#pragma omp task if(0) depend(inout: a)
{}
#pragma omp atomic read
a_value = a;
if (a_value != 1) {
fprintf(stderr, "error: dependent task was not executed before "
"taskwait finished\n");
exit(EXIT_FAILURE);
}
} // #pragma omp single
} // if (num_threads > 1)
} // #pragma omp parallel
return EXIT_SUCCESS;
}
|
lw_vector.h | /* lw_vector.h, part of the Global Epidemic Simulation v1.0 BETA
/* Lightweight vector class
/*
/* Copyright 2012, MRC Centre for Outbreak Analysis and Modelling
/*
/* Licensed under the Apache License, Version 2.0 (the "License");
/* you may not use this file except in compliance with the License.
/* You may obtain a copy of the License at
/*
/* http://www.apache.org/licenses/LICENSE-2.0
/*
/* Unless required by applicable law or agreed to in writing, software
/* distributed under the License is distributed on an "AS IS" BASIS,
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/* See the License for the specific language governing permissions and
/* limitations under the License.
*/
#ifndef LW_VECTOR
#define LW_VECTOR
#include "simINT64.h"
// Recent VS/Intel combinations have incorrectly left _OPENMP undefined. Enable OPENMP_ENABLED below if necessary.
#define OPENMP_ENABLED
#ifdef _OPENMP
#define OPENMP_ENABLED
//#pragma message("Vector class (lw_vector.h) - OpenMP parallelisation enabled")
#endif
/* enables thread safety while inserting or deleting elements from the vector */
/* can be switched off in a thread safe code to make it more efficient */
#ifdef OPENMP_ENABLED
//#define THREAD_SAFE_OPS
#endif
#ifndef THREAD_SAFE_OPS
//#pragma message("Warning: Vector class (lw_vector.h) - inserting or deleting elements from the vector is not thread safe!")
#endif
/* enables parallelisation of loops */
#ifdef OPENMP_ENABLED
#define LOOPS_IN_PARALLEL
#endif
#ifdef LOOPS_IN_PARALLEL
//#define SCHED_TYPE dynamic // schedule type: dynamic
#define SCHED_TYPE static // schedule type: static
#define NUM_PROCS_OUT 0 // number of processors excluded from parallelisation
#endif
/* enables index range check useful while debugging and testing */
//#define ENABLE_INDX_RANGE_CHECK
#ifndef ENABLE_INDX_RANGE_CHECK
//#pragma message("Warning: Vector class (lw_vector.h) - index range check disabled!")
#endif
/* exception codes */
#define OUT_OF_RANGE 1
#pragma pack(push,_CRT_PACKING)
/* lw_vector: a lightweight std::vector-like container.
 * Storage is raw bytes obtained with new char[]; elements are created with
 * placement new, destroyed explicitly, and relocated BYTEWISE on reallocation,
 * so element types must tolerate bitwise moves (no self-referential members).
 * Optional thread safety (THREAD_SAFE_OPS) serialises mutating operations with
 * one OpenMP lock; optional loop parallelisation via LOOPS_IN_PARALLEL. */
template <class lwvType> class lw_vector
{
private:
#ifdef THREAD_SAFE_OPS
/* lock guarding all mutating operations; see portability note at destroy_lock() */
omp_lock_t ob__lock;
void init_lock();
void destroy_lock();
void set_lock();
void unset_lock();
bool is_lock_valid();
#endif
#ifdef OPENMP_ENABLED
/* number of threads used by the parallel loops */
int num_procs;
#endif
SIM_I64 alloc_size; // allocated vector size
SIM_I64 indx_flast; // index of a memory location that follows the last element of a vector ( indx_flast <= alloc_size )
lwvType *v; // the vector itself
/* methods that do bytewise data copying */
void copy_frwrd(char* dest, char* src, SIM_I64 count);
void copy_frwrd(char* dest, char* src, SIM_I64 count, int num_procs);
void copy_bckwrd(char* dest, char* src, SIM_I64 count);
public:
/* no proper definition of the iterator class */
typedef lwvType* iterator;
/* default constructor; allocates empty vector */
lw_vector();
/* constructor; allocates memory for a vector but the elements of this vector remain undefined */
/* num_els: number of elements */
/* NOTE(review): this reserves capacity only -- size() stays 0 afterwards,
   unlike std::vector(n); confirm callers expect that */
lw_vector(SIM_I64 num_els);
/* constructor; allocates memory for a vector and initialises its elements with copies of el */
/* num_els: number of elements */
lw_vector(SIM_I64 num_els, const lwvType& el);
/* copy constructor */
lw_vector(const lw_vector& lwv);
/* destructor */
~lw_vector();
/* assignment operator */
/* NOTE(review): returns by value, so every use of the result deep-copies the
   whole vector; returning a reference would be cheaper but changes the ABI */
lw_vector<lwvType> operator=(const lw_vector& lwv2);
/* provides access to vector elements via index indx; not thread safe */
lwvType& operator[](SIM_I64 indx);
/* returns a reference to the element at the position indx; not thread safe */
lwvType& at(SIM_I64 indx);
/* returns a reference to the first element of the vector */
lwvType& front();
/* returns a reference to the last element of the vector */
lwvType& back();
/* returns random-access iterator to the first element of the vector */
iterator begin();
/* returns random-access iterator that points just beyond the end of the vector */
iterator end();
/* returns the size of the vector (number of elements) */
SIM_I64 size();
/* tests if there are any elements in the vector */
bool empty();
/* adds an element el to the end of a vector */
void push_back(const lwvType& el);
/* inserts an element el into a vector at the position indx */
/* returns an index that points to the position of the inserted element */
SIM_I64 insert(SIM_I64 indx, const lwvType& el);
/* inserts count copies of el into a vector starting from the position indx_start */
void insert(SIM_I64 indx_start, SIM_I64 count, const lwvType& el);
/* inserts an element el into a vector at the position specified by iterator it */
/* returns an iterator that points to the position of the inserted element */
iterator insert(iterator it, const lwvType& el);
/* inserts count copies of el into a vector starting from the position specified by iterator it_start */
void insert(iterator it_start, SIM_I64 count, const lwvType& el);
/* deletes the element at the end of the vector without reducing its capacity */
void pop_back();
/* erases an element at the position specified by indx */
/* returns an index pointing at the first element beyond the removed one or to the end of the vector if there are no such elements */
SIM_I64 erase(SIM_I64 indx);
/* erases elements in the range starting from index indx_start and finishing just before the position defined by indx_end */
/* returns an index pointing at the first element beyond those removed or to the end of the vector if there are no such elements */
SIM_I64 erase(SIM_I64 indx_start, SIM_I64 indx_end);
/* erases an element at the position specified by iterator it */
/* returns an iterator pointing at the first element beyond the removed one or to the end of the vector if there are no such elements */
iterator erase(iterator it);
/* erases elements in the range starting from iterator it_start and finishing just before the position defined by it_end */
/* returns an iterator pointing at the first element beyond those removed or to the end of the vector if there are no such elements */
iterator erase(iterator it_start, iterator it_end);
/* erases all elements of the vector without reducing its capacity */
void clear();
/* specifies a new size for the vector */
/* if the vector size is less than new size new_size, new (default) elements are added to the end of the vector until it reaches the requested size */
/* otherwise elements of the vector are deleted starting from its end until until it reaches the requested size */
void resize(SIM_I64 new_size);
/* specifies a new size for the vector */
/* if the vector size is less than new size new_size, new (el) elements are added to the end of the vector until it reaches the requested size */
/* otherwise elements of the vector are deleted starting from its end until until it reaches the requested size */
void resize(SIM_I64 new_size, const lwvType& el);
/* compacts the vector reducing the allocated memory */
void compact();
};
/* vector size grows exponentially */
#define DEF_INI_SIZE 1 // default initial size
#define EXP_GROWTH_COEFF 3 //3
#ifdef THREAD_SAFE_OPS
/* thin wrappers around the OpenMP lock that serialises mutating operations */
template <class lwvType> inline void lw_vector<lwvType>::init_lock()
{
omp_init_lock(&ob__lock);
}
template <class lwvType> inline void lw_vector<lwvType>::destroy_lock()
{
omp_destroy_lock(&ob__lock);
/* NOTE(review): omp_lock_t is an opaque type in the OpenMP specification;
   assigning an integer to it here (and comparing it against 0 in
   is_lock_valid() below) only compiles and behaves as intended on
   implementations whose lock type is integer-like -- confirm on every
   supported compiler */
ob__lock = 0;
}
template <class lwvType> inline void lw_vector<lwvType>::set_lock()
{
omp_set_lock(&ob__lock);
}
template <class lwvType> inline void lw_vector<lwvType>::unset_lock()
{
omp_unset_lock(&ob__lock);
}
/* heuristically reports whether the lock has been initialised by checking the
   raw representation against 0 (see portability note in destroy_lock) */
template <class lwvType> inline bool lw_vector<lwvType>::is_lock_valid()
{
if( ob__lock != 0 )
return true;
else
return false;
}
#endif
/* bytewise forward copy of count bytes from src to dest;
   safe for overlapping ranges only when dest < src (used that way by erase) */
template <class lwvType> inline void lw_vector<lwvType>::copy_frwrd(char* dest, char* src, SIM_I64 count)
{
for(SIM_I64 i=0; i < count; i++)
*(dest++) = *(src++);
}
/* bytewise forward copy, optionally parallelised with OpenMP;
   the parallel form must only be used for NON-overlapping buffers, since the
   iteration order across threads is unspecified */
template <class lwvType> inline void lw_vector<lwvType>::copy_frwrd(char* dest, char* src, SIM_I64 count, int num_procs)
{
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = count > num_procs ? count / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i=0; i < count; i++)
dest[i] = src[i];
}
/* bytewise backward copy of count bytes; dest and src are PAST-THE-END pointers
   of their respective byte ranges; safe for overlapping ranges when dest > src
   (used to shift elements up when inserting) */
template <class lwvType> inline void lw_vector<lwvType>::copy_bckwrd(char* dest, char* src, SIM_I64 count)
{
for(SIM_I64 i = count; i > 0 ; i--)
*(--dest) = *(--src);
}
/* default constructor; allocates empty vector */
template <class lwvType> lw_vector<lwvType>::lw_vector() : alloc_size(0), indx_flast(0), v(0)
{
#ifdef THREAD_SAFE_OPS
init_lock();
#endif
#ifdef OPENMP_ENABLED
/* NOTE(review): only this constructor subtracts NUM_PROCS_OUT from the
   processor count; the other constructors use omp_get_num_procs() directly --
   confirm whether this asymmetry is intentional */
num_procs = omp_get_num_procs() - NUM_PROCS_OUT; // TESTING!
#endif
}
/* constructor; allocates memory for a vector but the elements of this vector remain undefined */
/* num_els: number of elements */
/* note: this reserves raw capacity only -- indx_flast stays 0, so size() == 0
   afterwards (capacity semantics, unlike std::vector(n)) */
template <class lwvType> lw_vector<lwvType>::lw_vector(SIM_I64 num_els) : alloc_size(num_els), indx_flast(0)
{
#ifdef THREAD_SAFE_OPS
init_lock();
#endif
#ifdef OPENMP_ENABLED
num_procs = omp_get_num_procs();
#endif
if( alloc_size != 0 )
v = (lwvType*)(new char[alloc_size * (SIM_I64)sizeof(lwvType)]);
else
v = 0;
}
/* constructor; allocates memory for a vector and initialises its elements with copies of el */
/* num_els: number of elements */
/* note: indx_flast(alloc_size) in the initialiser list relies on alloc_size
   being declared before indx_flast in the class (members initialise in
   declaration order) */
template <class lwvType> lw_vector<lwvType>::lw_vector(SIM_I64 num_els, const lwvType& el) : alloc_size(num_els), indx_flast(alloc_size)
{
#ifdef THREAD_SAFE_OPS
init_lock();
#endif
#ifdef OPENMP_ENABLED
num_procs = omp_get_num_procs();
#endif
if( alloc_size != 0 )
v = (lwvType*)(new char[alloc_size * (SIM_I64)sizeof(lwvType)]);
else
v = 0;
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = indx_flast > num_procs ? indx_flast / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
/* the buffer is raw memory, so every element is copy-constructed in place */
for(SIM_I64 i=0; i < indx_flast; i++)
::new(&v[i]) lwvType(el);
}
/* copy constructor */
/* deep copy: allocates a buffer of the same capacity and copy-constructs each
   element of lwv into it */
template <class lwvType> lw_vector<lwvType>::lw_vector(const lw_vector& lwv)
{
#ifdef THREAD_SAFE_OPS
init_lock();
#endif
#ifdef OPENMP_ENABLED
num_procs = lwv.num_procs;
#endif
alloc_size = lwv.alloc_size;
indx_flast = lwv.indx_flast;
if( alloc_size != 0 )
v = (lwvType*)(new char[alloc_size * (SIM_I64)sizeof(lwvType)]);
else
v = 0;
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = indx_flast > num_procs ? indx_flast / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i=0; i < indx_flast; i++)
::new(&v[i]) lwvType(lwv.v[i]);
}
/* destructor */
/* explicitly destroys the live elements, then releases the raw byte buffer */
template <class lwvType> lw_vector<lwvType>::~lw_vector()
{
if( v != 0 )
{
for(SIM_I64 i=0; i < indx_flast; i++)
v[i].~lwvType();
delete [] (char*)v;
alloc_size = 0;
indx_flast = 0;
}
v = 0;
#ifdef THREAD_SAFE_OPS
destroy_lock();
#endif
}
/* assignment operator */
/* deep-copies the contents of lwv2 into this vector, releasing the old buffer */
/* NOTE(review): the declared signature returns by value, so every use of the
   result deep-copies the vector again; kept as-is to match the in-class
   declaration */
template <class lwvType> lw_vector<lwvType> lw_vector<lwvType>::operator=(const lw_vector& lwv2)
{
#ifdef THREAD_SAFE_OPS
	if( !is_lock_valid() )
		init_lock();
	set_lock();
#endif
	/* self-assignment guard: without it the destruction below would be followed
	   by reads from the just-freed buffer */
	if( this == &lwv2 )
	{
#ifdef THREAD_SAFE_OPS
		unset_lock();
#endif
		return *this;
	}
#ifdef OPENMP_ENABLED
	num_procs = lwv2.num_procs;
#endif
	/* destroy the current elements and release the old raw buffer */
	if( v != 0 )
	{
#ifdef LOOPS_IN_PARALLEL
		SIM_I64 chunk_size = indx_flast > num_procs ? indx_flast / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
		for(SIM_I64 i=0; i < indx_flast; i++)
			v[i].~lwvType();
		delete [] (char*)v;
	}
	alloc_size = lwv2.alloc_size;
	indx_flast = lwv2.indx_flast;
	if( alloc_size != 0 )
		v = (lwvType*)(new char[alloc_size * (SIM_I64)sizeof(lwvType)]);
	else
		v = 0;
	/* the new buffer is raw, uninitialised memory, so elements must be
	   copy-CONSTRUCTED in place (placement new); the previous code copy-assigned
	   into uninitialised storage, which is undefined behaviour for non-trivial
	   element types */
#ifdef LOOPS_IN_PARALLEL
	SIM_I64 chunk_size = indx_flast > num_procs ? indx_flast / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
	for(SIM_I64 i=0; i < indx_flast; i++)
		::new(&v[i]) lwvType(lwv2.v[i]);
#ifdef THREAD_SAFE_OPS
	unset_lock();
#endif
	return *this;
}
/* provides access to vector elements via index indx; not thread safe */
template <class lwvType> inline lwvType& lw_vector<lwvType>::operator[](SIM_I64 indx)
{
#ifdef ENABLE_INDX_RANGE_CHECK
if( indx < 0 || indx >= indx_flast )
throw OUT_OF_RANGE;
#endif
return v[indx];
}
/* returns a reference to the element at the position indx; not thread safe */
/* NOTE(review): unlike std::vector::at, this only range-checks when
   ENABLE_INDX_RANGE_CHECK is defined -- otherwise it is identical to
   operator[] */
template <class lwvType> inline lwvType& lw_vector<lwvType>::at(SIM_I64 indx)
{
#ifdef ENABLE_INDX_RANGE_CHECK
if( indx < 0 || indx >= indx_flast )
throw OUT_OF_RANGE;
#endif
return v[indx];
}
/* returns a reference to the first element of the vector */
/* undefined on an empty vector (v may be a null pointer) */
template <class lwvType> inline lwvType& lw_vector<lwvType>::front()
{
return v[0];
}
/* returns a reference to the last element of the vector */
/* undefined on an empty vector */
template <class lwvType> inline lwvType& lw_vector<lwvType>::back()
{
return v[indx_flast - 1];
}
/* returns random-access iterator to the first element of the vector */
template <class lwvType> inline typename lw_vector<lwvType>::iterator lw_vector<lwvType>::begin()
{
return &v[0];
}
/* returns random-access iterator that points just beyond the end of the vector */
template <class lwvType> inline typename lw_vector<lwvType>::iterator lw_vector<lwvType>::end()
{
return &v[indx_flast];
}
/* returns the size of the vector (number of elements) */
template <class lwvType> inline SIM_I64 lw_vector<lwvType>::size()
{
return indx_flast;
}
/* tests if there are any elements in the vector */
template <class lwvType> inline bool lw_vector<lwvType>::empty()
{
if( indx_flast == 0 )
return true;
else
return false;
}
/* adds an element el to the end of a vector */
/* grows the capacity geometrically (EXP_GROWTH_COEFF) when full; existing
   elements are relocated BYTEWISE into the new buffer (no move/copy
   constructors are run), which assumes bitwise-relocatable element types */
template <class lwvType> void lw_vector<lwvType>::push_back(const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx_flast >= alloc_size ) // the vector is full, reallocation needed
{
SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
if( v != 0 )
{
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType), num_procs);
#endif
delete [] (char*)v;
}
v = v_temp;
alloc_size = new_alloc_size;
}
/* construct the new element in place at the end and bump the size */
::new(&v[indx_flast++]) lwvType(el);
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* inserts an element el into a vector at the position indx */
/* elements from indx onwards are shifted up one slot bytewise; the element is
   then constructed in place over the vacated (bitwise-moved-from) slot */
template <class lwvType> SIM_I64 lw_vector<lwvType>::insert(SIM_I64 indx, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx < 0 || indx > indx_flast )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
if( indx_flast < alloc_size ) // there is still memory available in the vector
{
/* copy_bckwrd takes past-the-end pointers of both byte ranges */
copy_bckwrd((char*)(&v[indx_flast + 1]), (char*)(&v[indx_flast]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType));
::new(&v[indx]) lwvType(el);
}
else // the vector is full, reallocation needed
{
SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
/* bytewise relocation: prefix [0, indx), then the new element, then the tail */
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType), num_procs);
#endif
::new(&v_temp[indx]) lwvType(el);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)(&v_temp[indx + 1]), (char*)(&v[indx]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)(&v_temp[indx + 1]), (char*)(&v[indx]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast++;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return indx;
}
/* inserts count copies of el into a vector starting from the position indx_start */
/* elements from indx_start onwards are shifted up count slots bytewise, then
   count copies of el are constructed in place */
template <class lwvType> void lw_vector<lwvType>::insert(SIM_I64 indx_start, SIM_I64 count, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
	set_lock();
#endif
	if( indx_start < 0 || indx_start > indx_flast || count < 0 )
	{
#ifdef THREAD_SAFE_OPS
		unset_lock();
#endif
		throw OUT_OF_RANGE;
	}
	if( (indx_flast + count) <= alloc_size ) // allocated size of the vector remains unchanged
	{
		/* shift the tail [indx_start, indx_flast) up by count slots; copy_bckwrd
		   expects PAST-THE-END pointers of both byte ranges, so the source end is
		   &v[indx_flast] -- the previous code passed &v[indx_flast + count - 1],
		   which shifted the wrong bytes whenever count > 1 */
		copy_bckwrd((char*)(&v[indx_flast + count]), (char*)(&v[indx_flast]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType));
#ifdef LOOPS_IN_PARALLEL
		SIM_I64 chunk_size = count > num_procs ? count / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
		for(SIM_I64 i = indx_start; i < indx_start + count; i++)
			::new(&v[i]) lwvType(el);
	}
	else // allocated vector size is insufficient; reallocation and expansion needed
	{
		/* grow geometrically, but keep growing until the buffer actually fits the
		   old elements plus count new ones -- a single EXP_GROWTH_COEFF step was
		   not guaranteed to be enough for a large count (heap overflow) */
		SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
		while( new_alloc_size < indx_flast + count )
			new_alloc_size *= EXP_GROWTH_COEFF;
		lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType), num_procs);
#endif
		for(SIM_I64 i = indx_start; i < indx_start + count; i++)
			::new(&v_temp[i]) lwvType(el);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)(&v_temp[indx_start + count]), (char*)(&v[indx_start]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)(&v_temp[indx_start + count]), (char*)(&v[indx_start]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
		if( v != 0 )
			delete [] (char*)v;
		v = v_temp;
		alloc_size = new_alloc_size;
	}
	indx_flast += count;
#ifdef THREAD_SAFE_OPS
	unset_lock();
#endif
}
/* inserts an element el into a vector at the position specified by iterator it */
/* returns an iterator that points to the position of the inserted element */
template <class lwvType> typename lw_vector<lwvType>::iterator lw_vector<lwvType>::insert(iterator it, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
	set_lock();
#endif
	/* element index of the insertion point (plain pointer difference replaces
	   the old cast-to-integer arithmetic) */
	SIM_I64 indx = (SIM_I64)(it - begin());
	if( indx < 0 || indx > indx_flast )
	{
#ifdef THREAD_SAFE_OPS
		unset_lock();
#endif
		throw OUT_OF_RANGE;
	}
	if( indx_flast < alloc_size ) // there is still memory available in the vector
	{
		copy_bckwrd((char*)(&v[indx_flast + 1]), (char*)(&v[indx_flast]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType));
		::new(&v[indx]) lwvType(el);
	}
	else // the vector is full, reallocation needed
	{
		SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
		lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType), num_procs);
#endif
		::new(&v_temp[indx]) lwvType(el);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)(&v_temp[indx + 1]), (char*)(&v[indx]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)(&v_temp[indx + 1]), (char*)(&v[indx]), (indx_flast - indx) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
		if( v != 0 )
			delete [] (char*)v;
		v = v_temp;
		alloc_size = new_alloc_size;
	}
	indx_flast++;
#ifdef THREAD_SAFE_OPS
	unset_lock();
#endif
	/* return a pointer into the CURRENT buffer: the previous code returned the
	   caller's iterator `it`, which dangles after a reallocation (it pointed
	   into the freed buffer) */
	return &v[indx];
}
/* inserts count copies of el into a vector starting from the position specified by iterator it_start */
template <class lwvType> void lw_vector<lwvType>::insert(iterator it_start, SIM_I64 count, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
	set_lock();
#endif
	/* element index of the insertion point (plain pointer difference) */
	SIM_I64 indx_start = (SIM_I64)(it_start - begin());
	if( indx_start < 0 || indx_start > indx_flast || count < 0 )
	{
#ifdef THREAD_SAFE_OPS
		unset_lock();
#endif
		throw OUT_OF_RANGE;
	}
	if( (indx_flast + count) <= alloc_size ) // allocated size of the vector remains unchanged
	{
		/* shift the tail up by count slots; copy_bckwrd takes PAST-THE-END
		   pointers, so the source end is &v[indx_flast] -- the previous code
		   passed &v[indx_flast + count - 1], wrong whenever count > 1 */
		copy_bckwrd((char*)(&v[indx_flast + count]), (char*)(&v[indx_flast]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType));
#ifdef LOOPS_IN_PARALLEL
		SIM_I64 chunk_size = count > num_procs ? count / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
		for(SIM_I64 i = indx_start; i < indx_start + count; i++)
			::new(&v[i]) lwvType(el);
	}
	else // allocated vector size is insufficient; reallocation and expansion needed
	{
		/* keep growing until count new elements actually fit (one growth step
		   could overflow the new buffer for a large count) */
		SIM_I64 new_alloc_size = alloc_size != 0 ? alloc_size * EXP_GROWTH_COEFF : DEF_INI_SIZE;
		while( new_alloc_size < indx_flast + count )
			new_alloc_size *= EXP_GROWTH_COEFF;
		lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType), num_procs);
#endif
		for(SIM_I64 i = indx_start; i < indx_start + count; i++)
			::new(&v_temp[i]) lwvType(el);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)(&v_temp[indx_start + count]), (char*)(&v[indx_start]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)(&v_temp[indx_start + count]), (char*)(&v[indx_start]), (indx_flast - indx_start) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
		if( v != 0 )
			delete [] (char*)v;
		v = v_temp;
		alloc_size = new_alloc_size;
	}
	indx_flast += count;
#ifdef THREAD_SAFE_OPS
	unset_lock();
#endif
}
/* deletes the element at the end of the vector without reducing its capacity */
/* no-op on an empty vector */
template <class lwvType> void lw_vector<lwvType>::pop_back()
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx_flast == 0 )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return;
}
v[--indx_flast].~lwvType(); // explicitly call the destructor for the erased object (virtual call)
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* erases an element at the position specified by indx */
/* returns an index pointing at the first element beyond the removed one or to the end of the vector if there are no such elements */
/* the capacity shrinks by a factor of EXP_GROWTH_COEFF once the remaining
   element count drops low enough for the smaller buffer to hold it */
template <class lwvType> SIM_I64 lw_vector<lwvType>::erase(SIM_I64 indx)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx < 0 || indx >= indx_flast )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
v[indx].~lwvType(); // explicitly call the destructor for the erased object (virtual call)
if( EXP_GROWTH_COEFF * (indx_flast - 1) > alloc_size ) // allocated size of the vector remains unchanged
/* close the gap in place: forward bytewise copy is safe here (dest < src) */
copy_frwrd((char*)(&v[indx]), (char*)(&v[indx + 1]), (indx_flast -indx - 1) * (SIM_I64)sizeof(lwvType));
else // reallocate the vector and decrease its size
{
SIM_I64 new_alloc_size = alloc_size / EXP_GROWTH_COEFF;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType));
copy_frwrd((char*)(&v_temp[indx]), (char*)(&v[indx + 1]), (indx_flast - indx - 1) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType), num_procs);
copy_frwrd((char*)(&v_temp[indx]), (char*)(&v[indx + 1]), (indx_flast - indx - 1) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast--;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return indx;
}
/* erases elements in the range starting from index indx_start and finishing just before the position defined by indx_end */
/* returns an index pointing at the first element beyond those removed or to the end of the vector if there are no such elements */
/* indx_end is clamped to the vector size; a negative indx_end is impossible
   because indx_start <= indx_end and indx_start >= 0 are enforced */
template <class lwvType> SIM_I64 lw_vector<lwvType>::erase(SIM_I64 indx_start, SIM_I64 indx_end)
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx_start > indx_end || indx_start < 0 || indx_start >= indx_flast )
{
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
throw OUT_OF_RANGE;
}
if( indx_end > indx_flast )
indx_end = indx_flast;
for(SIM_I64 i = indx_start; i < indx_end; i++)
v[i].~lwvType(); // explicitly call the destructor for the erased objects (virtual call)
if( EXP_GROWTH_COEFF * (indx_flast - (indx_end - indx_start)) > alloc_size ) // allocated size of the vector remains unchanged
copy_frwrd((char*)(&v[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType));
else // reallocate the vector and decrease its size
{
SIM_I64 new_alloc_size = alloc_size / EXP_GROWTH_COEFF;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType));
copy_frwrd((char*)(&v_temp[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType), num_procs);
copy_frwrd((char*)(&v_temp[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
v = v_temp;
alloc_size = new_alloc_size;
}
indx_flast -= indx_end - indx_start;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
return indx_start;
}
/* erases an element at the position specified by iterator it */
/* returns an iterator pointing at the first element beyond the removed one or to the end of the vector if there are no such elements */
template <class lwvType> typename lw_vector<lwvType>::iterator lw_vector<lwvType>::erase(iterator it)
{
#ifdef THREAD_SAFE_OPS
	set_lock();
#endif
	/* element index of the erased position (plain pointer difference replaces
	   the old cast-to-integer arithmetic) */
	SIM_I64 indx = (SIM_I64)(it - begin());
	if( indx < 0 || indx >= indx_flast )
	{
#ifdef THREAD_SAFE_OPS
		unset_lock();
#endif
		throw OUT_OF_RANGE;
	}
	v[indx].~lwvType(); // explicitly call the destructor for the erased object (virtual call)
	if( EXP_GROWTH_COEFF * (indx_flast - 1) > alloc_size ) // allocated size of the vector remains unchanged
		copy_frwrd((char*)(&v[indx]), (char*)(&v[indx + 1]), (indx_flast -indx - 1) * (SIM_I64)sizeof(lwvType));
	else // reallocate the vector and decrease its size
	{
		SIM_I64 new_alloc_size = alloc_size / EXP_GROWTH_COEFF;
		lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType));
		copy_frwrd((char*)(&v_temp[indx]), (char*)(&v[indx + 1]), (indx_flast - indx - 1) * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)v_temp, (char*)v, indx * (SIM_I64)sizeof(lwvType), num_procs);
		copy_frwrd((char*)(&v_temp[indx]), (char*)(&v[indx + 1]), (indx_flast - indx - 1) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
		if( v != 0 )
			delete [] (char*)v;
		v = v_temp;
		alloc_size = new_alloc_size;
	}
	indx_flast--;
#ifdef THREAD_SAFE_OPS
	unset_lock();
#endif
	/* return a pointer into the CURRENT buffer: the previous code returned the
	   caller's iterator `it`, which dangles whenever the erase reallocated */
	return &v[indx];
}
/* erases elements in the range starting from iterator it_start and finishing just before the position defined by it_end */
/* returns an iterator pointing at the first element beyond those removed or to the end of the vector if there are no such elements */
template <class lwvType> typename lw_vector<lwvType>::iterator lw_vector<lwvType>::erase(iterator it_start, iterator it_end)
{
#ifdef THREAD_SAFE_OPS
	set_lock();
#endif
	/* element indices of the erased range (plain pointer differences) */
	SIM_I64 indx_start = (SIM_I64)(it_start - begin());
	SIM_I64 indx_end = (SIM_I64)(it_end - begin());
	if( indx_start > indx_end || indx_start < 0 || indx_start >= indx_flast )
	{
#ifdef THREAD_SAFE_OPS
		unset_lock();
#endif
		throw OUT_OF_RANGE;
	}
	if( indx_end > indx_flast )
		indx_end = indx_flast;
	for(SIM_I64 i = indx_start; i < indx_end; i++)
		v[i].~lwvType(); // explicitly call the destructor for the erased objects (virtual call)
	if( EXP_GROWTH_COEFF * (indx_flast - (indx_end - indx_start)) > alloc_size ) // allocated size of the vector remains unchanged
		copy_frwrd((char*)(&v[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType));
	else // reallocate the vector and decrease its size
	{
		SIM_I64 new_alloc_size = alloc_size / EXP_GROWTH_COEFF;
		lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType));
		copy_frwrd((char*)(&v_temp[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)v_temp, (char*)v, indx_start * (SIM_I64)sizeof(lwvType), num_procs);
		copy_frwrd((char*)(&v_temp[indx_start]), (char*)(&v[indx_end]), (indx_flast - indx_end) * (SIM_I64)sizeof(lwvType), num_procs);
#endif
		if( v != 0 )
			delete [] (char*)v;
		v = v_temp;
		alloc_size = new_alloc_size;
	}
	indx_flast -= indx_end - indx_start;
#ifdef THREAD_SAFE_OPS
	unset_lock();
#endif
	/* return a pointer into the CURRENT buffer: the previous code returned the
	   caller's it_start, which dangles whenever the erase reallocated */
	return &v[indx_start];
}
/* erases all elements of the vector without reducing its capacity */
/* runs every element's destructor, then resets the size to 0; the raw buffer
   and alloc_size are kept for reuse */
template <class lwvType> void lw_vector<lwvType>::clear()
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
#ifdef LOOPS_IN_PARALLEL
SIM_I64 chunk_size = indx_flast > num_procs ? (indx_flast * (SIM_I64)sizeof(lwvType)) / num_procs : (SIM_I64)sizeof(lwvType);
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
for(SIM_I64 i = 0; i < indx_flast; i++)
v[i].~lwvType(); // explicitly call the destructor for the erased objects (virtual call)
indx_flast = 0;
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
/* specifies a new size for the vector */
/* if the vector size is less than new size new_size, new (default) elements are added to the end of the vector until it reaches the requested size */
/* otherwise elements of the vector are deleted starting from its end until it reaches the requested size */
/* note: the capacity is always set exactly to new_size */
template <class lwvType> void lw_vector<lwvType>::resize(SIM_I64 new_size)
{
#ifdef THREAD_SAFE_OPS
	set_lock();
#endif
	SIM_I64 new_alloc_size = new_size;
	/* when shrinking, destroy the elements that fall beyond the new size --
	   the previous code released the old buffer without running their
	   destructors, leaking anything a non-trivial element type owned */
	for(SIM_I64 i = new_alloc_size; i < indx_flast; i++)
		v[i].~lwvType();
	if( indx_flast > new_alloc_size )
		indx_flast = new_alloc_size;
	if( new_alloc_size != alloc_size )
	{
		lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType), num_procs);
#endif
#ifdef LOOPS_IN_PARALLEL
		SIM_I64 chunk_size = (new_alloc_size - indx_flast) > num_procs ? (new_alloc_size - indx_flast) / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
		for(SIM_I64 i = indx_flast; i < new_alloc_size; i++)
			::new(&v_temp[i]) lwvType();
		if( v != 0 )
			delete [] (char*)v;
		v = v_temp;
	}
	else // capacity already matches: default-construct the new tail in place
	{
		for(SIM_I64 i = indx_flast; i < new_alloc_size; i++)
			::new(&v[i]) lwvType();
	}
	indx_flast = new_alloc_size;
	alloc_size = new_alloc_size;
#ifdef THREAD_SAFE_OPS
	unset_lock();
#endif
}
/* specifies a new size for the vector */
/* if the vector size is less than new size new_size, new (el) elements are added to the end of the vector until it reaches the requested size */
/* otherwise elements of the vector are deleted starting from its end until it reaches the requested size */
/* note: the capacity is always set exactly to new_size */
template <class lwvType> void lw_vector<lwvType>::resize(SIM_I64 new_size, const lwvType& el)
{
#ifdef THREAD_SAFE_OPS
	set_lock();
#endif
	SIM_I64 new_alloc_size = new_size;
	/* when shrinking, destroy the elements that fall beyond the new size --
	   the previous code released the old buffer without running their
	   destructors, leaking anything a non-trivial element type owned */
	for(SIM_I64 i = new_alloc_size; i < indx_flast; i++)
		v[i].~lwvType();
	if( indx_flast > new_alloc_size )
		indx_flast = new_alloc_size;
	if( new_alloc_size != alloc_size )
	{
		lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
		copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType));
#else
		copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType), num_procs);
#endif
#ifdef LOOPS_IN_PARALLEL
		SIM_I64 chunk_size = (new_alloc_size - indx_flast) > num_procs ? (new_alloc_size - indx_flast) / num_procs : 1;
#pragma omp parallel for schedule(SCHED_TYPE, chunk_size) num_threads(num_procs)
#endif
		for(SIM_I64 i = indx_flast; i < new_alloc_size; i++)
			::new(&v_temp[i]) lwvType(el);
		if( v != 0 )
			delete [] (char*)v;
		v = v_temp;
	}
	else // capacity already matches: copy-construct the new tail in place
	{
		for(SIM_I64 i = indx_flast; i < new_alloc_size; i++)
			::new(&v[i]) lwvType(el);
	}
	indx_flast = new_alloc_size;
	alloc_size = new_alloc_size;
#ifdef THREAD_SAFE_OPS
	unset_lock();
#endif
}
/* compacts the vector reducing the allocated memory */
/* shrinks the capacity to exactly size(); elements are relocated bytewise */
/* NOTE(review): when the vector is empty this still allocates a zero-length
   buffer instead of freeing v and setting it to null -- harmless but wasteful */
template <class lwvType> void lw_vector<lwvType>::compact()
{
#ifdef THREAD_SAFE_OPS
set_lock();
#endif
if( indx_flast < alloc_size )
{
SIM_I64 new_alloc_size = indx_flast;
lwvType *v_temp = (lwvType*)(new char[new_alloc_size * (SIM_I64)sizeof(lwvType)]);
#ifndef LOOPS_IN_PARALLEL
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType));
#else
copy_frwrd((char*)v_temp, (char*)v, indx_flast * (SIM_I64)sizeof(lwvType), num_procs);
#endif
if( v != 0 )
delete [] (char*)v;
alloc_size = new_alloc_size;
v = v_temp;
}
#ifdef THREAD_SAFE_OPS
unset_lock();
#endif
}
#pragma pack(pop)
#endif
|
im2col.c | void im2col(double *img, double *col, int width, int height, int channels,
int kernel_w, int kernel_h, int pad_w, int pad_h, int stride_w, int stride_h)
{
int height_col = (height + 2 * pad_h - kernel_h) / stride_h + 1;
int width_col = (width + 2 * pad_w - kernel_w) / stride_w + 1;
int channels_col = channels * kernel_h * kernel_w;
// This makes performance much worse
// #pragma omp parallel for
for (int c = 0; c < channels_col; ++c) {
int w_offset = c % kernel_w;
int h_offset = (c / kernel_w) % kernel_h;
int c_im = c / (kernel_h * kernel_w);
for (int h = 0; h < height_col; ++h) {
for (int w = 0; w < width_col; ++w) {
int h_pad = h*stride_h - pad_h + h_offset;
int w_pad = w*stride_w - pad_w + w_offset;
if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) {
col[(c*height_col+h) * width_col + w] =
img[(c_im * height + h_pad) * width + w_pad];
} else {
col[(c*height_col+h) * width_col + w] = 0;
}
}
}
}
}
|
flexProxDualDataL2.h | #ifndef flexProxDualL2_H
#define flexProxDualL2_H
#include "flexProx.h"
//! represents prox for a L2 data term
/*!
\f$ \frac{\alpha}{2}\|\cdot-f\|_2^2 \f$
*/
/* prox operator for the dual of an L2 data term; element-wise it computes
   y = alpha / (sigma + alpha) * (yTilde - sigma * f)
   with a thrust path under CUDA and an OpenMP CPU path otherwise */
template<typename T>
class flexProxDualDataL2 : public flexProx<T>
{
#ifdef __CUDACC__
typedef thrust::device_vector<T> Tdata;
#else
typedef std::vector<T> Tdata;
#endif
public:
flexProxDualDataL2() : flexProx<T>(dualL2DataProx)
{
}
~flexProxDualDataL2()
{
/* NOTE(review): the '!' lands after the newline ("prox\n!"); presumably
   the intended message was "Destructor prox!\n" -- left unchanged here */
if (VERBOSE > 0) printf("Destructor prox\n!");
}
/* overload without the data vectors f; intentionally empty -- an L2 data term
   needs f, so all the work happens in the fList overload below.
   TODO confirm this no-op is the intended contract for this prox type */
void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers)
{
}
#ifdef __CUDACC__
/* element-wise functor applied over (y, yTilde, sigma, f) tuples on the GPU */
struct flexProxDualDataL2Functor
{
__host__ __device__
flexProxDualDataL2Functor(T _alpha) : alpha(_alpha){};
template <typename Tuple>
__host__ __device__
void operator()(Tuple t)
{
/* y = alpha/(sigma+alpha) * (yTilde - sigma*f) */
thrust::get<0>(t) = alpha / (thrust::get<2>(t) + alpha) * (thrust::get<1>(t) - thrust::get<2>(t) * thrust::get<3>(t));
}
const T alpha;
};
#endif
/* applies the prox to every listed dual variable, using fList[i] as the data
   term for dualNumbers[i] (the two containers are matched by position) */
void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers, std::vector<Tdata> &fList)
{
#ifdef __CUDACC__
for (int i = 0; i < dualNumbers.size(); i++)
{
auto startIterator = thrust::make_zip_iterator(thrust::make_tuple(data->y[dualNumbers[i]].begin(), data->yTilde[dualNumbers[i]].begin(), data->sigmaElt[dualNumbers[i]].begin(), fList[i].begin()));
auto endIterator = thrust::make_zip_iterator( thrust::make_tuple(data->y[dualNumbers[i]].end(), data->yTilde[dualNumbers[i]].end(), data->sigmaElt[dualNumbers[i]].end(), fList[i].end()));
thrust::for_each(startIterator,endIterator,flexProxDualDataL2Functor(alpha));
}
#else
/* CPU path: same formula evaluated through raw pointers so the inner loop
   can be parallelised with OpenMP (int loop index kept for MSVC OpenMP) */
for (int i = 0; i < dualNumbers.size(); i++)
{
T* ptrY = data->y[dualNumbers[i]].data();
T* ptrYtilde = data->yTilde[dualNumbers[i]].data();
T* ptrSigma = data->sigmaElt[dualNumbers[i]].data();
T* ptrF = fList[i].data();
int numElements = (int)data->yTilde[dualNumbers[i]].size();
#pragma omp parallel for
for (int j = 0; j < numElements; j++)
{
ptrY[j] = alpha / (ptrSigma[j] + alpha) * (ptrYtilde[j] - ptrSigma[j] * ptrF[j]);
}
}
#endif
}
};
#endif
|
relic_core.c | /*
* RELIC is an Efficient LIbrary for Cryptography
* Copyright (C) 2007-2017 RELIC Authors
*
* This file is part of RELIC. RELIC is legal property of its developers,
* whose names are not listed here. Please refer to the COPYRIGHT file
* for contact information.
*
* RELIC is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* RELIC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with RELIC. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file
*
* Implementation of the library basic functions.
*
* @ingroup relic
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "relic_core.h"
#include "relic_rand.h"
#include "relic_types.h"
#include "relic_err.h"
#include "relic_arch.h"
#include "relic_fp.h"
#include "relic_fb.h"
#include "relic_ep.h"
#include "relic_eb.h"
#include "relic_cp.h"
#include "relic_pp.h"
/*============================================================================*/
/* Public definitions */
/*============================================================================*/
/**
 * If multi-threading is enabled, assigns each thread a local copy of the data.
 * With PTHREAD the `thread` macro expands to __thread (thread-local storage);
 * otherwise it expands to nothing and the context is process-global.
 */
#if MULTI == PTHREAD
#define thread __thread
#else
#define thread /* */
#endif
/**
 * Default library context, statically allocated. Used as the fallback when
 * core_init() runs before any context has been installed via core_set().
 */
thread ctx_t first_ctx;
/**
 * Active library context. NULL until core_init() (or core_set()) runs in the
 * current thread.
 */
thread ctx_t *core_ctx = NULL;
#if MULTI != RELIC_NONE
/*
 * Initializer function to call for every thread's context, together with the
 * opaque argument passed to it. Registered via core_set_thread_initializer().
 */
void (*core_thread_initializer)(void* init_ptr) = NULL;
void* core_init_ptr = NULL;
#endif
#if MULTI == OPENMP
/* Give each OpenMP thread its own private copy of the context variables. */
#pragma omp threadprivate(first_ctx, core_ctx)
#endif
/**
 * Initializes the library: binds the default context on first use, installs
 * the error-reason message table, seeds the PRNG and initializes every
 * compiled-in arithmetic module (prime/binary fields, curves, pairings).
 *
 * @return STS_OK on success, STS_ERR if any module initializer throws.
 */
int core_init(void) {
	if (core_ctx == NULL) {
		/* First call in this thread: fall back to the static context. */
		core_ctx = &(first_ctx);
	}
#if defined(CHECK) && defined(TRACE)
	core_ctx->trace = 0;
#endif
#ifdef CHECK
	/* Map each error code to its human-readable message. */
	core_ctx->reason[ERR_NO_MEMORY] = MSG_NO_MEMORY;
	core_ctx->reason[ERR_NO_PRECI] = MSG_NO_PRECI;
	core_ctx->reason[ERR_NO_FILE] = MSG_NO_FILE;
	core_ctx->reason[ERR_NO_READ] = MSG_NO_READ;
	core_ctx->reason[ERR_NO_VALID] = MSG_NO_VALID;
	core_ctx->reason[ERR_NO_BUFFER] = MSG_NO_BUFFER;
	core_ctx->reason[ERR_NO_FIELD] = MSG_NO_FIELD;
	core_ctx->reason[ERR_NO_CURVE] = MSG_NO_CURVE;
	core_ctx->reason[ERR_NO_CONFIG] = MSG_NO_CONFIG;
	core_ctx->last = NULL;
#endif /* CHECK */
#if ALLOC == STATIC
	core_ctx->next = 0;
#endif
#ifdef OVERH
	core_ctx->over = 0;
#endif
	core_ctx->code = STS_OK;
	/* Initialize architecture support, randomness and every enabled module;
	 * any thrown error is translated into an STS_ERR return. */
	TRY {
		arch_init();
		rand_init();
#ifdef WITH_FP
		fp_prime_init();
#endif
#ifdef WITH_FB
		fb_poly_init();
#endif
#ifdef WITH_FT
		ft_poly_init();
#endif
#ifdef WITH_EP
		ep_curve_init();
#endif
#ifdef WITH_EB
		eb_curve_init();
#endif
#ifdef WITH_ED
		ed_curve_init();
#endif
#ifdef WITH_PP
		pp_map_init();
#endif
	}
	CATCH_ANY {
		return STS_ERR;
	}
	return STS_OK;
}
/**
 * Releases the module state acquired by core_init() and detaches the active
 * context. Architecture support is torn down last; after this call core_get()
 * returns NULL until the library is initialized again.
 *
 * @return STS_OK always.
 */
int core_clean(void) {
	rand_clean();
#ifdef WITH_FP
	fp_prime_clean();
#endif
#ifdef WITH_FB
	fb_poly_clean();
#endif
#ifdef WITH_FT
	ft_poly_clean();
#endif
#ifdef WITH_EP
	ep_curve_clean();
#endif
#ifdef WITH_EB
	eb_curve_clean();
#endif
#ifdef WITH_ED
	ed_curve_clean();
#endif
#ifdef WITH_PP
	pp_map_clean();
#endif
	arch_clean();
	core_ctx = NULL;
	return STS_OK;
}
/**
 * Returns the active library context, lazily invoking the registered
 * per-thread initializer callback when this thread has no context yet.
 */
ctx_t *core_get(void) {
#if MULTI != RELIC_NONE
	/* A fresh thread starts with a NULL context; give the user-supplied
	 * callback a chance to install one before we hand it out. */
	if (core_thread_initializer != NULL && core_ctx == NULL) {
		core_thread_initializer(core_init_ptr);
	}
#endif
	return core_ctx;
}
/**
 * Switches the active library context to the given one. The caller retains
 * ownership of the context object.
 */
void core_set(ctx_t *ctx) {
	core_ctx = ctx;
}
#if MULTI != RELIC_NONE
void core_set_thread_initializer(void(*init)(void *init_ptr), void* init_ptr) {
core_thread_initializer = init;
core_init_ptr = init_ptr;
}
#endif |
/*
minimum free energy
RNA secondary structure with
basepair distance d_1 to reference structure 1 and distance d_2 to reference structure 2
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "fold.h"
#include "pair_mat.h"
#include "loop_energies.h"
#include "mm.h"
#include "params.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "2Dfold.h"
/*
#################################
# GLOBAL VARIABLES              #
#################################
*/
/* Flag: when nonzero, get_TwoDfold_variables() also allocates the E_F3
 * matrices and their boundary helper arrays. */
int compute_2Dfold_F3 = 0;
/*
#################################
# PRIVATE VARIABLES             #
#################################
*/
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
/* Main dynamic programming recursions (linear and circular case). */
PRIVATE void mfe_linear(TwoDfold_vars *vars);
PRIVATE void mfe_circ(TwoDfold_vars *vars);
PRIVATE void initialize_TwoDfold_vars(TwoDfold_vars *vars);
PUBLIC void update_TwoDfold_params(TwoDfold_vars *vars);
PRIVATE void make_ptypes(TwoDfold_vars *vars);
/* Backtracking routines for the individual matrix families. */
PRIVATE void backtrack_f5(unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_c(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_m(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_m1(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_fc(int k, int l, char *structure, TwoDfold_vars *vars);
PRIVATE void backtrack_m2(unsigned int i, int k, int l, char *structure, TwoDfold_vars *vars);
/* Helpers that manage the per-entry (k,l) distance-class boundaries and the
 * corresponding 2D sub-arrays of the energy matrices. */
PRIVATE void adjustArrayBoundaries(int ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_real, int k_max_real, int *l_min_real, int *l_max_real);
INLINE PRIVATE void preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l);
INLINE PRIVATE void updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l);
INLINE PRIVATE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l);
INLINE PRIVATE void prepareArray(int ***array, int min_k, int max_k, int *min_l, int *max_l);
INLINE PRIVATE void prepareArray2(unsigned long ***array, int min_k, int max_k, int *min_l, int *max_l);
/**
 * Creates and initializes the data structure for a 2D fold computation.
 *
 * Allocates all dynamic programming matrices (C, M, M1, F5 and - if requested
 * via the global compute_2Dfold_F3 - F3, plus the circular-fold M2 matrices
 * when circ != 0) together with their per-entry k/l boundary helper arrays,
 * and precomputes the base-pair distance bookkeeping for both reference
 * structures.
 *
 * @param seq        the RNA sequence (must be non-empty)
 * @param structure1 reference structure 1 in dot-bracket notation
 * @param structure2 reference structure 2 in dot-bracket notation
 * @param circ       nonzero to prepare for circular RNA folding
 * @return a freshly allocated TwoDfold_vars; release it with
 *         destroy_TwoDfold_variables()
 */
PUBLIC TwoDfold_vars *get_TwoDfold_variables(const char *seq, const char *structure1, const char *structure2, int circ){
	unsigned int size, length, i;
	int *index;
	TwoDfold_vars *vars;
	length = strlen(seq);
	vars = (TwoDfold_vars *)malloc(sizeof(TwoDfold_vars));
	vars->sequence = (char *)space(length + 1);
	strcpy(vars->sequence, seq);
	vars->seq_length = length;
	if(vars->seq_length < 1) nrerror("get_TwoDfold_variables: sequence must be longer than 0");
	/* triangular matrix size for all (i,j) pairs with i <= j */
	size = ((length + 1) * (length + 2)/2);
	vars->reference_pt1 = make_pair_table(structure1);
	vars->reference_pt2 = make_pair_table(structure2);
	vars->referenceBPs1 = make_referenceBP_array(vars->reference_pt1, TURN);
	vars->referenceBPs2 = make_referenceBP_array(vars->reference_pt2, TURN);
	vars->bpdist = compute_BPdifferences(vars->reference_pt1, vars->reference_pt2, TURN);
	vars->do_backtrack = 1;
	vars->dangles = dangles;
	vars->circ = circ;
	vars->temperature = temperature;
	vars->ptype = space(sizeof(char) * size);
	vars->P = NULL;
	vars->S = NULL;
	vars->S1 = NULL;
	vars->my_iindx = get_iindx(length);
	index = vars->my_iindx;
	/* compute maximum matching with reference structure 1 disallowed */
	vars->mm1 = maximumMatchingConstraint(vars->sequence, vars->reference_pt1);
	/* compute maximum matching with reference structure 2 disallowed */
	vars->mm2 = maximumMatchingConstraint(vars->sequence, vars->reference_pt2);
	/* maximal distance to a reference = pairs removable from it plus pairs
	 * that can still be introduced */
	vars->maxD1 = vars->mm1[index[1]-length] + vars->referenceBPs1[index[1]-length];
	vars->maxD2 = vars->mm2[index[1]-length] + vars->referenceBPs2[index[1]-length];
	/* allocate memory for the energy matrices and min-/max-index helper arrays */
	vars->E_C = (int ***) space(sizeof(int **) * size);
	vars->l_min_values = (int **) space(sizeof(int *) * size);
	vars->l_max_values = (int **) space(sizeof(int *) * size);
	vars->k_min_values = (int *) space(sizeof(int) * size);
	vars->k_max_values = (int *) space(sizeof(int) * size);
	vars->E_F5 = (int ***) space(sizeof(int **) * (length + 1));
	vars->l_min_values_f = (int **) space(sizeof(int *) * (length + 1));
	vars->l_max_values_f = (int **) space(sizeof(int *) * (length + 1));
	vars->k_min_values_f = (int *) space(sizeof(int) * (length + 1));
	vars->k_max_values_f = (int *) space(sizeof(int) * (length + 1));
	if(compute_2Dfold_F3){
		vars->E_F3 = (int ***) space(sizeof(int **) * (length + 1));
		vars->l_min_values_f3 = (int **) space(sizeof(int *) * (length + 1));
		vars->l_max_values_f3 = (int **) space(sizeof(int *) * (length + 1));
		vars->k_min_values_f3 = (int *) space(sizeof(int) * (length + 1));
		vars->k_max_values_f3 = (int *) space(sizeof(int) * (length + 1));
	}
	else{
		/* BUGFIX: the struct comes from malloc(), so the F3 companion arrays
		 * must be NULLed explicitly as well - previously only E_F3 was set,
		 * leaving the boundary pointers as uninitialized garbage. */
		vars->E_F3 = NULL;
		vars->l_min_values_f3 = NULL;
		vars->l_max_values_f3 = NULL;
		vars->k_min_values_f3 = NULL;
		vars->k_max_values_f3 = NULL;
	}
	vars->E_M = (int ***) space(sizeof(int **) * size);
	vars->l_min_values_m = (int **) space(sizeof(int *) * size);
	vars->l_max_values_m = (int **) space(sizeof(int *) * size);
	vars->k_min_values_m = (int *) space(sizeof(int) * size);
	vars->k_max_values_m = (int *) space(sizeof(int) * size);
	vars->E_M1 = (int ***) space(sizeof(int **) * size);
	vars->l_min_values_m1 = (int **) space(sizeof(int *) * size);
	vars->l_max_values_m1 = (int **) space(sizeof(int *) * size);
	vars->k_min_values_m1 = (int *) space(sizeof(int) * size);
	vars->k_max_values_m1 = (int *) space(sizeof(int) * size);
#ifdef COUNT_STATES
	vars->N_C = (unsigned long ***) space(sizeof(unsigned long **) * size);
	vars->N_F5 = (unsigned long ***) space(sizeof(unsigned long **) * (length + 1));
	vars->N_M = (unsigned long ***) space(sizeof(unsigned long **) * size);
	vars->N_M1 = (unsigned long ***) space(sizeof(unsigned long **) * size);
#endif
	if(circ){
		vars->E_M2_rem = (int *) space(sizeof(int) * (length + 1));
		vars->E_M2 = (int ***) space(sizeof(int **) * (length + 1));
		vars->l_min_values_m2 = (int **) space(sizeof(int *) * (length + 1));
		vars->l_max_values_m2 = (int **) space(sizeof(int *) * (length + 1));
		vars->k_min_values_m2 = (int *) space(sizeof(int) * (length + 1));
		vars->k_max_values_m2 = (int *) space(sizeof(int) * (length + 1));
	}
	else{
		vars->E_M2_rem = NULL;
		vars->E_M2 = NULL;
		vars->l_min_values_m2 = NULL;
		vars->l_max_values_m2 = NULL;
		vars->k_min_values_m2 = NULL;
		vars->k_max_values_m2 = NULL;
	}
	vars->E_Fc = NULL;
	vars->E_FcH = NULL;
	vars->E_FcI = NULL;
	vars->E_FcM = NULL;
	vars->E_Fc_rem = INF;
	vars->E_FcH_rem = INF;
	vars->E_FcI_rem = INF;
	vars->E_FcM_rem = INF;
	/* "rem" arrays collect contributions whose distance classes exceed
	 * maxD1/maxD2; initialize them to INF (i.e. "no contribution yet") */
	vars->E_C_rem = (int *) space(sizeof(int) * size);
	vars->E_M_rem = (int *) space(sizeof(int) * size);
	vars->E_M1_rem = (int *) space(sizeof(int) * size);
	vars->E_F5_rem = (int *) space(sizeof(int) * (length+1));
	/* init rest arrays */
	for(i=0;i<size;i++){
		vars->E_C_rem[i] = vars->E_M_rem[i] = vars->E_M1_rem[i] = INF;
	}
	for(i=0;i<=length;i++)
		vars->E_F5_rem[i] = INF;
	if(vars->E_M2_rem)
		for(i=0;i<=length;i++)
			vars->E_M2_rem[i] = INF;
	return vars;
}
/**
 * Releases every matrix and helper array held by a TwoDfold_vars structure.
 *
 * The 2D distance-class matrices are stored with their row/column pointers
 * shifted by the per-entry k_min/l_min offsets, so before each free() the
 * pointer is shifted back ("ptr += min") to recover the address originally
 * returned by the allocator. When compiled with OpenMP, the independent
 * matrix families are torn down in parallel sections.
 *
 * @param vars the structure to destroy; NULL is tolerated (no-op)
 */
PUBLIC void destroy_TwoDfold_variables(TwoDfold_vars *vars){
	unsigned int i, j, ij;
	int cnt1;
	if(vars == NULL) return;
	/* flat "remaining distance class" arrays need no offset correction */
	free(vars->E_C_rem);
	free(vars->E_M_rem);
	free(vars->E_M1_rem);
	free(vars->E_F5_rem);
	if(vars->E_M2_rem) free(vars->E_M2_rem);
#ifdef _OPENMP
#pragma omp sections private(i,j,ij,cnt1)
	{
#pragma omp section
		{
#endif
#ifdef COUNT_STATES
			/* state-count matrix for C (only with COUNT_STATES) */
			if(vars->N_C != NULL){
				for(i = 1; i < vars->seq_length; i++){
					for(j = i; j <= vars->seq_length; j++){
						ij = vars->my_iindx[i] - j;
						if(!vars->N_C[ij]) continue;
						for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
							if(vars->l_min_values[ij][cnt1] < INF){
								vars->N_C[ij][cnt1] += vars->l_min_values[ij][cnt1]/2;
								free(vars->N_C[ij][cnt1]);
							}
						if(vars->k_min_values[ij] < INF){
							vars->N_C[ij] += vars->k_min_values[ij];
							free(vars->N_C[ij]);
						}
					}
				}
				free(vars->N_C);
			}
#endif
			/* energy matrix C (closed structures) and its k/l boundaries */
			if(vars->E_C != NULL){
				for(i = 1; i < vars->seq_length; i++){
					for(j = i; j <= vars->seq_length; j++){
						ij = vars->my_iindx[i] - j;
						if(!vars->E_C[ij]) continue;
						for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
							if(vars->l_min_values[ij][cnt1] < INF){
								/* undo the l_min/2 shift before freeing the row */
								vars->E_C[ij][cnt1] += vars->l_min_values[ij][cnt1]/2;
								free(vars->E_C[ij][cnt1]);
							}
						if(vars->k_min_values[ij] < INF){
							/* undo the k_min shift before freeing the row array */
							vars->E_C[ij] += vars->k_min_values[ij];
							free(vars->E_C[ij]);
							vars->l_min_values[ij] += vars->k_min_values[ij];
							vars->l_max_values[ij] += vars->k_min_values[ij];
							free(vars->l_min_values[ij]);
							free(vars->l_max_values[ij]);
						}
					}
				}
				free(vars->E_C);
				free(vars->l_min_values);
				free(vars->l_max_values);
				free(vars->k_min_values);
				free(vars->k_max_values);
			}
#ifdef _OPENMP
		}
#pragma omp section
		{
#endif
#ifdef COUNT_STATES
			if(vars->N_M != NULL){
				for(i = 1; i < vars->seq_length; i++){
					for(j = i; j <= vars->seq_length; j++){
						ij = vars->my_iindx[i] - j;
						if(!vars->N_M[ij]) continue;
						for(cnt1 = vars->k_min_values_m[ij]; cnt1 <= vars->k_max_values_m[ij]; cnt1++)
							if(vars->l_min_values_m[ij][cnt1] < INF){
								vars->N_M[ij][cnt1] += vars->l_min_values_m[ij][cnt1]/2;
								free(vars->N_M[ij][cnt1]);
							}
						if(vars->k_min_values_m[ij] < INF){
							vars->N_M[ij] += vars->k_min_values_m[ij];
							free(vars->N_M[ij]);
						}
					}
				}
				free(vars->N_M);
			}
#endif
			/* multiloop matrix M and its boundaries */
			if(vars->E_M != NULL){
				for(i = 1; i < vars->seq_length; i++){
					for(j = i; j <= vars->seq_length; j++){
						ij = vars->my_iindx[i] - j;
						if(!vars->E_M[ij]) continue;
						for(cnt1 = vars->k_min_values_m[ij]; cnt1 <= vars->k_max_values_m[ij]; cnt1++)
							if(vars->l_min_values_m[ij][cnt1] < INF){
								vars->E_M[ij][cnt1] += vars->l_min_values_m[ij][cnt1]/2;
								free(vars->E_M[ij][cnt1]);
							}
						if(vars->k_min_values_m[ij] < INF){
							vars->E_M[ij] += vars->k_min_values_m[ij];
							free(vars->E_M[ij]);
							vars->l_min_values_m[ij] += vars->k_min_values_m[ij];
							vars->l_max_values_m[ij] += vars->k_min_values_m[ij];
							free(vars->l_min_values_m[ij]);
							free(vars->l_max_values_m[ij]);
						}
					}
				}
				free(vars->E_M);
				free(vars->l_min_values_m);
				free(vars->l_max_values_m);
				free(vars->k_min_values_m);
				free(vars->k_max_values_m);
			}
#ifdef _OPENMP
		}
#pragma omp section
		{
#endif
#ifdef COUNT_STATES
			if(vars->N_M1 != NULL){
				for(i = 1; i < vars->seq_length; i++){
					for(j = i; j <= vars->seq_length; j++){
						ij = vars->my_iindx[i] - j;
						if(!vars->N_M1[ij]) continue;
						for(cnt1 = vars->k_min_values_m1[ij]; cnt1 <= vars->k_max_values_m1[ij]; cnt1++)
							if(vars->l_min_values_m1[ij][cnt1] < INF){
								vars->N_M1[ij][cnt1] += vars->l_min_values_m1[ij][cnt1]/2;
								free(vars->N_M1[ij][cnt1]);
							}
						if(vars->k_min_values_m1[ij] < INF){
							vars->N_M1[ij] += vars->k_min_values_m1[ij];
							free(vars->N_M1[ij]);
						}
					}
				}
				free(vars->N_M1);
			}
#endif
			/* multiloop matrix M1 (single branch) and its boundaries */
			if(vars->E_M1 != NULL){
				for(i = 1; i < vars->seq_length; i++){
					for(j = i; j <= vars->seq_length; j++){
						ij = vars->my_iindx[i] - j;
						if(!vars->E_M1[ij]) continue;
						for(cnt1 = vars->k_min_values_m1[ij]; cnt1 <= vars->k_max_values_m1[ij]; cnt1++)
							if(vars->l_min_values_m1[ij][cnt1] < INF){
								vars->E_M1[ij][cnt1] += vars->l_min_values_m1[ij][cnt1]/2;
								free(vars->E_M1[ij][cnt1]);
							}
						if(vars->k_min_values_m1[ij] < INF){
							vars->E_M1[ij] += vars->k_min_values_m1[ij];
							free(vars->E_M1[ij]);
							vars->l_min_values_m1[ij] += vars->k_min_values_m1[ij];
							vars->l_max_values_m1[ij] += vars->k_min_values_m1[ij];
							free(vars->l_min_values_m1[ij]);
							free(vars->l_max_values_m1[ij]);
						}
					}
				}
				free(vars->E_M1);
				free(vars->l_min_values_m1);
				free(vars->l_max_values_m1);
				free(vars->k_min_values_m1);
				free(vars->k_max_values_m1);
			}
#ifdef _OPENMP
		}
#pragma omp section
		{
#endif
			/* circular-fold matrix M2 (indexed by position only) */
			if(vars->E_M2 != NULL){
				for(i = 1; i < vars->seq_length-TURN-1; i++){
					if(!vars->E_M2[i]) continue;
					for(cnt1 = vars->k_min_values_m2[i]; cnt1 <= vars->k_max_values_m2[i]; cnt1++)
						if(vars->l_min_values_m2[i][cnt1] < INF){
							vars->E_M2[i][cnt1] += vars->l_min_values_m2[i][cnt1]/2;
							free(vars->E_M2[i][cnt1]);
						}
					if(vars->k_min_values_m2[i] < INF){
						vars->E_M2[i] += vars->k_min_values_m2[i];
						free(vars->E_M2[i]);
						vars->l_min_values_m2[i] += vars->k_min_values_m2[i];
						vars->l_max_values_m2[i] += vars->k_min_values_m2[i];
						free(vars->l_min_values_m2[i]);
						free(vars->l_max_values_m2[i]);
					}
				}
				free(vars->E_M2);
				free(vars->l_min_values_m2);
				free(vars->l_max_values_m2);
				free(vars->k_min_values_m2);
				free(vars->k_max_values_m2);
			}
#ifdef _OPENMP
		}
#pragma omp section
		{
#endif
#ifdef COUNT_STATES
			if(vars->N_F5 != NULL){
				for(i = 1; i <= vars->seq_length; i++){
					if(!vars->N_F5[i]) continue;
					for(cnt1 = vars->k_min_values_f[i]; cnt1 <= vars->k_max_values_f[i]; cnt1++)
						if(vars->l_min_values_f[i][cnt1] < INF){
							vars->N_F5[i][cnt1] += vars->l_min_values_f[i][cnt1]/2;
							free(vars->N_F5[i][cnt1]);
						}
					if(vars->k_min_values_f[i] < INF){
						vars->N_F5[i] += vars->k_min_values_f[i];
						free(vars->N_F5[i]);
					}
				}
				free(vars->N_F5);
			}
#endif
			/* exterior-loop matrix F5 and its boundaries */
			if(vars->E_F5 != NULL){
				for(i = 1; i <= vars->seq_length; i++){
					if(!vars->E_F5[i]) continue;
					for(cnt1 = vars->k_min_values_f[i]; cnt1 <= vars->k_max_values_f[i]; cnt1++)
						if(vars->l_min_values_f[i][cnt1] < INF){
							vars->E_F5[i][cnt1] += vars->l_min_values_f[i][cnt1]/2;
							free(vars->E_F5[i][cnt1]);
						}
					if(vars->k_min_values_f[i] < INF){
						vars->E_F5[i] += vars->k_min_values_f[i];
						free(vars->E_F5[i]);
						vars->l_min_values_f[i] += vars->k_min_values_f[i];
						vars->l_max_values_f[i] += vars->k_min_values_f[i];
						free(vars->l_min_values_f[i]);
						free(vars->l_max_values_f[i]);
					}
				}
				free(vars->E_F5);
				free(vars->l_min_values_f);
				free(vars->l_max_values_f);
				free(vars->k_min_values_f);
				free(vars->k_max_values_f);
			}
			/* optional F3 matrix (only allocated when compute_2Dfold_F3 was set) */
			if(vars->E_F3 != NULL){
				for(i = 1; i <= vars->seq_length; i++){
					if(!vars->E_F3[i]) continue;
					for(cnt1 = vars->k_min_values_f3[i]; cnt1 <= vars->k_max_values_f3[i]; cnt1++)
						if(vars->l_min_values_f3[i][cnt1] < INF){
							vars->E_F3[i][cnt1] += vars->l_min_values_f3[i][cnt1]/2;
							free(vars->E_F3[i][cnt1]);
						}
					if(vars->k_min_values_f3[i] < INF){
						vars->E_F3[i] += vars->k_min_values_f3[i];
						free(vars->E_F3[i]);
						vars->l_min_values_f3[i] += vars->k_min_values_f3[i];
						vars->l_max_values_f3[i] += vars->k_min_values_f3[i];
						free(vars->l_min_values_f3[i]);
						free(vars->l_max_values_f3[i]);
					}
				}
				free(vars->E_F3);
				free(vars->l_min_values_f3);
				free(vars->l_max_values_f3);
				free(vars->k_min_values_f3);
				free(vars->k_max_values_f3);
			}
#ifdef _OPENMP
		}
#pragma omp section
		{
#endif
			/* circular exterior matrix Fc (single 2D array, not per-position) */
			if(vars->E_Fc != NULL){
				for(cnt1 = vars->k_min_values_fc; cnt1 <= vars->k_max_values_fc; cnt1++)
					if(vars->l_min_values_fc[cnt1] < INF){
						vars->E_Fc[cnt1] += vars->l_min_values_fc[cnt1]/2;
						free(vars->E_Fc[cnt1]);
					}
				if(vars->k_min_values_fc < INF){
					vars->E_Fc += vars->k_min_values_fc;
					free(vars->E_Fc);
					vars->l_min_values_fc += vars->k_min_values_fc;
					vars->l_max_values_fc += vars->k_min_values_fc;
					free(vars->l_min_values_fc);
					free(vars->l_max_values_fc);
				}
			}
#ifdef _OPENMP
		}
#pragma omp section
		{
#endif
			/* circular interior-loop decomposition matrix FcI */
			if(vars->E_FcI != NULL){
				for(cnt1 = vars->k_min_values_fcI; cnt1 <= vars->k_max_values_fcI; cnt1++)
					if(vars->l_min_values_fcI[cnt1] < INF){
						vars->E_FcI[cnt1] += vars->l_min_values_fcI[cnt1]/2;
						free(vars->E_FcI[cnt1]);
					}
				if(vars->k_min_values_fcI < INF){
					vars->E_FcI += vars->k_min_values_fcI;
					free(vars->E_FcI);
					vars->l_min_values_fcI += vars->k_min_values_fcI;
					vars->l_max_values_fcI += vars->k_min_values_fcI;
					free(vars->l_min_values_fcI);
					free(vars->l_max_values_fcI);
				}
			}
#ifdef _OPENMP
		}
#pragma omp section
		{
#endif
			/* circular hairpin decomposition matrix FcH */
			if(vars->E_FcH != NULL){
				for(cnt1 = vars->k_min_values_fcH; cnt1 <= vars->k_max_values_fcH; cnt1++)
					if(vars->l_min_values_fcH[cnt1] < INF){
						vars->E_FcH[cnt1] += vars->l_min_values_fcH[cnt1]/2;
						free(vars->E_FcH[cnt1]);
					}
				if(vars->k_min_values_fcH < INF){
					vars->E_FcH += vars->k_min_values_fcH;
					free(vars->E_FcH);
					vars->l_min_values_fcH += vars->k_min_values_fcH;
					vars->l_max_values_fcH += vars->k_min_values_fcH;
					free(vars->l_min_values_fcH);
					free(vars->l_max_values_fcH);
				}
			}
#ifdef _OPENMP
		}
#pragma omp section
		{
#endif
			/* circular multiloop decomposition matrix FcM */
			if(vars->E_FcM != NULL){
				for(cnt1 = vars->k_min_values_fcM; cnt1 <= vars->k_max_values_fcM; cnt1++)
					if(vars->l_min_values_fcM[cnt1] < INF){
						vars->E_FcM[cnt1] += vars->l_min_values_fcM[cnt1]/2;
						free(vars->E_FcM[cnt1]);
					}
				if(vars->k_min_values_fcM < INF){
					vars->E_FcM += vars->k_min_values_fcM;
					free(vars->E_FcM);
					vars->l_min_values_fcM += vars->k_min_values_fcM;
					vars->l_max_values_fcM += vars->k_min_values_fcM;
					free(vars->l_min_values_fcM);
					free(vars->l_max_values_fcM);
				}
			}
#ifdef _OPENMP
		}
#pragma omp section
		{
#endif
			/* scalar members and lookup tables */
			if(vars->P != NULL) free(vars->P);
			if(vars->sequence != NULL) free(vars->sequence);
			if(vars->reference_pt1 != NULL) free(vars->reference_pt1);
			if(vars->reference_pt2 != NULL) free(vars->reference_pt2);
			if(vars->referenceBPs1 != NULL) free(vars->referenceBPs1);
			if(vars->referenceBPs2 != NULL) free(vars->referenceBPs2);
			if(vars->ptype != NULL) free(vars->ptype);
			if(vars->S != NULL) free(vars->S);
			if(vars->S1 != NULL) free(vars->S1);
			if(vars->mm1 != NULL) free(vars->mm1);
			if(vars->mm2 != NULL) free(vars->mm2);
			if(vars->bpdist != NULL) free(vars->bpdist);
#ifdef _OPENMP
		}
	}
#endif
	/* freed after the parallel sections: my_iindx is read inside them */
	if(vars->my_iindx != NULL) free(vars->my_iindx);
	free(vars);
}
/**
 * Refreshes the energy parameters stored in vars and in the (global) fold
 * module before a 2D fold computation.
 */
PRIVATE void initialize_TwoDfold_vars(TwoDfold_vars *vars){
	update_TwoDfold_params(vars);
	/* this call updates the params in the ViennaRNA fold.o which is a global, so be careful
	 * whith calling it parallel... need a workarround or fix of ViennaRNA fold stuff
	 */
	update_fold_params();
}
/**
 * Computes the MFE structure for every reachable distance class (d1, d2) to
 * the two reference structures and returns them as a 2D matrix indexed by
 * [d1][d2].
 *
 * @param vars      prepared via get_TwoDfold_variables()
 * @param distance1 upper limit for distance d1, or negative for no limit
 * @param distance2 upper limit for distance d2, or negative for no limit
 * @return matrix of solutions; entries for unreachable classes have
 *         en == INF/100 and s == NULL
 */
PUBLIC TwoDfold_solution **TwoDfold(TwoDfold_vars *vars, int distance1, int distance2){
	unsigned int i, d1, d2;
	unsigned int maxD1;
	unsigned int maxD2;
	unsigned int length;
	TwoDfold_solution **output;
	initialize_TwoDfold_vars(vars);
	/* re-derive parameters if the global temperature changed since setup */
	if(fabs(vars->P->temperature - temperature)>1e-6) update_TwoDfold_params(vars);
	vars->S = encode_sequence(vars->sequence, 0);
	vars->S1 = encode_sequence(vars->sequence, 1);
	make_ptypes(vars);
	maxD1 = vars->maxD1;
	maxD2 = vars->maxD2;
	/* clip user-supplied limits to what is actually reachable */
	if(distance1 >= 0){
		if((unsigned int)distance1 > maxD1)
			fprintf(stderr,
					"limiting maximum basepair distance 1 to %u\n",
					maxD1);
		else
			maxD1 = (unsigned int)distance1;
	}
	if(distance2 >= 0){
		if((unsigned int)distance2 > maxD2)
			fprintf(stderr,
					"limiting maximum basepair distance 2 to %u\n",
					maxD2);
		else
			maxD2 = (unsigned int)distance2;
	}
	vars->maxD1 = maxD1;
	vars->maxD2 = maxD2;
	output = (TwoDfold_solution **)space((vars->maxD1+1) * sizeof(TwoDfold_solution *));
	/* fill the DP matrices (linear recursions; circular extension if needed) */
	mfe_linear(vars);
	if(vars->circ) mfe_circ(vars);
	length = vars->seq_length;
	for(d1=0; d1<=maxD1;d1++){
		output[d1] = (TwoDfold_solution *)space((vars->maxD2+1)*sizeof(TwoDfold_solution));
#ifdef _OPENMP
#pragma omp parallel for private(d2)
#endif
		for(d2=0; d2<=maxD2;d2++){
			output[d1][d2].en = (float)INF/(float)100.;
			output[d1][d2].s = NULL;
		}
		/* only distance classes within the stored k (= d1) range have entries;
		 * l (= d2) iterates in steps of 2 because d1+d2 has fixed parity */
		if( (d1 >= ((vars->circ) ? vars->k_min_values_fc : vars->k_min_values_f[length]))
			&& (d1 <= ((vars->circ) ? vars->k_max_values_fc : vars->k_max_values_f[length]))){
#ifdef _OPENMP
#pragma omp parallel for private(d2, i)
#endif
			for(	d2 = ((vars->circ) ? vars->l_min_values_fc[d1] : vars->l_min_values_f[length][d1]);
					d2 <= ((vars->circ) ? vars->l_max_values_fc[d1] : vars->l_max_values_f[length][d1]);
					d2 += 2){
				output[d1][d2].en = (float)((vars->circ) ? vars->E_Fc[d1][d2/2] : vars->E_F5[length][d1][d2/2])/(float)100.;
				if(vars->do_backtrack && (output[d1][d2].en != (float)INF/(float)100.)){
					char *mfe_structure = (char *)space(length+1);
					for(i=0;i<length;i++) mfe_structure[i] = '.';
					mfe_structure[i] = '\0';
					(vars->circ) ? backtrack_fc(d1, d2, mfe_structure, vars) : backtrack_f5(length, d1, d2, mfe_structure, vars);
					output[d1][d2].s = mfe_structure;
				}
			}
		}
	}
	return output;
}
/**
 * Like TwoDfold(), but returns a flat list containing only the reachable
 * distance classes, followed by an optional entry with k == l == -1 for
 * structures whose distances exceed (maxD1, maxD2), and terminated by an
 * entry with k == l == INF.
 *
 * @param vars      prepared via get_TwoDfold_variables()
 * @param distance1 upper limit for distance d1, or negative for no limit
 * @param distance2 upper limit for distance d2, or negative for no limit
 * @return NULL-structure-terminated list (end marker: k == l == INF)
 */
PUBLIC TwoDfold_solution *TwoDfoldList(TwoDfold_vars *vars, int distance1, int distance2){
	unsigned int i, d1, d2;
	unsigned int maxD1;
	unsigned int maxD2;
	unsigned int length;
	unsigned int counter = 0;
	int en = 0;
	TwoDfold_solution *output;
	initialize_TwoDfold_vars(vars);
	/* re-derive parameters if the global temperature changed since setup */
	if(fabs(vars->P->temperature - temperature)>1e-6) update_TwoDfold_params(vars);
	vars->S = encode_sequence(vars->sequence, 0);
	vars->S1 = encode_sequence(vars->sequence, 1);
	make_ptypes(vars);
	maxD1 = vars->maxD1;
	maxD2 = vars->maxD2;
	/* clip user-supplied limits to what is actually reachable */
	if(distance1 >= 0){
		if((unsigned int)distance1 > maxD1)
			fprintf(stderr,
					"TwoDfoldList@2Dfold.c: limiting maximum basepair distance 1 to %u\n",
					maxD1);
		else
			maxD1 = (unsigned int)distance1;
	}
	if(distance2 >= 0){
		if((unsigned int)distance2 > maxD2)
			fprintf(stderr,
					"TwoDfoldList@2Dfold.c: limiting maximum basepair distance 2 to %u\n",
					maxD2);
		else
			maxD2 = (unsigned int)distance2;
	}
	vars->maxD1 = maxD1;
	vars->maxD2 = maxD2;
	/* upper bound on entries: only every second (d1,d2) has matching parity */
	output = (TwoDfold_solution *)space((((vars->maxD1+1)*(vars->maxD2+2))/2 + 2) * sizeof(TwoDfold_solution));
	mfe_linear(vars);
	if(vars->circ) mfe_circ(vars);
	length = vars->seq_length;
	for(d1=0; d1<=maxD1;d1++){
		if((d1 >= ((vars->circ) ? vars->k_min_values_fc : vars->k_min_values_f[length]))
			&& (d1 <= ((vars->circ) ? vars->k_max_values_fc : vars->k_max_values_f[length]))){
			for(d2 = ((vars->circ) ? vars->l_min_values_fc[d1] : vars->l_min_values_f[length][d1]);
					d2 <= ((vars->circ) ? vars->l_max_values_fc[d1] : vars->l_max_values_f[length][d1]);
					d2 += 2){
				en = ((vars->circ) ? vars->E_Fc[d1][d2/2] : vars->E_F5[length][d1][d2/2]);
				if(en == INF) continue;
				output[counter].k = d1;
				output[counter].l = d2;
				output[counter].en = (float)en/(float)100.;
				if(vars->do_backtrack){
					char *mfe_structure = (char *)space(length+1);
					for(i=0;i<length;i++) mfe_structure[i] = '.';
					mfe_structure[i] = '\0';
					(vars->circ) ? backtrack_fc((int)d1, (int)d2, mfe_structure, vars) : backtrack_f5(length, (int)d1, (int)d2, mfe_structure, vars);
					output[counter].s = mfe_structure;
				}
				else output[counter].s = NULL;
				counter++;
			}
		}
	}
	/* store entry for remaining partition if it exists */
	en = ((vars->circ) ? vars->E_Fc_rem : vars->E_F5_rem[length]);
	if(en != INF){
		output[counter].k = -1;
		output[counter].l = -1;
		output[counter].en = (float)en/(float)100.;
		if(vars->do_backtrack){
			char *mfe_structure = (char *)space(length+1);
			for(i=0;i<length;i++) mfe_structure[i] = '.';
			mfe_structure[i] = '\0';
			(vars->circ) ? backtrack_fc(-1, -1, mfe_structure, vars) : backtrack_f5(length, -1, -1, mfe_structure, vars);
			output[counter].s = mfe_structure;
		}
		else output[counter].s = NULL;
		counter++;
	}
	/* insert end-marker entry */
	output[counter].k = output[counter].l = INF;
	counter++;
	/* resize to actual dataset amount */
	output = (TwoDfold_solution*)xrealloc(output, sizeof(TwoDfold_solution) * counter);
	return output;
}
/**
 * Backtracks the MFE structure of distance class (k, l) ending at position j
 * in the exterior-loop (F5) matrix.
 *
 * @param j    last sequence position of the structure to backtrack
 * @param k    distance class to reference structure 1 (-1 for "remaining")
 * @param l    distance class to reference structure 2 (-1 for "remaining")
 * @param vars filled 2Dfold data structures
 * @return newly allocated dot-bracket string, or NULL if j is too small to
 *         form any base pair
 */
PUBLIC char *TwoDfold_backtrack_f5(unsigned int j, int k, int l, TwoDfold_vars *vars){
	unsigned int i;
	char *mfe_structure;
	/* BUGFIX: check j BEFORE allocating; the original called space(j+1)
	 * first and leaked the buffer on this early return */
	if(j < TURN + 2) return NULL;
	mfe_structure = (char *)space(j+1);
	for(i=0; i < j; i++) mfe_structure[i] = '.';
	mfe_structure[i] = '\0';
	backtrack_f5(j, k, l, mfe_structure, vars);
	return mfe_structure;
}
PRIVATE void mfe_linear(TwoDfold_vars *vars){
unsigned int d, i, j, ij, maxD1, maxD2, seq_length, dia, dib, dja, djb, *referenceBPs1, *referenceBPs2, *mm1, *mm2, *bpdist;
int cnt1, cnt2, cnt3, cnt4, d1, d2, energy, dangles, temp2, type, additional_en, *my_iindx, circ;
short *S1, *reference_pt1, *reference_pt2;
char *sequence, *ptype;
paramT *P;
/* dereferenciate things we often need */
P = vars->P;
sequence = vars->sequence;
seq_length = vars->seq_length;
maxD1 = vars->maxD1;
maxD2 = vars->maxD2;
S1 = vars->S1;
ptype = vars->ptype;
reference_pt1 = vars->reference_pt1;
reference_pt2 = vars->reference_pt2;
my_iindx = vars->my_iindx;
referenceBPs1 = vars->referenceBPs1;
referenceBPs2 = vars->referenceBPs2;
dangles = vars->dangles;
mm1 = vars->mm1;
mm2 = vars->mm2;
bpdist = vars->bpdist;
circ = vars->circ;
for (d = TURN+2; d <= seq_length; d++) { /* i,j in [1..length] */
#ifdef _OPENMP
#pragma omp parallel for private(additional_en, j, energy, temp2, i, ij, dia,dib,dja,djb,cnt1,cnt2,cnt3,cnt4, d1, d2)
#endif
for (j = d; j <= seq_length; j++) {
unsigned int p, q, pq, u, maxp, dij;
int type_2, type, tt, no_close, base_d1, base_d2;
i = j-d+1;
dij = j - i - 1;
ij = my_iindx[i]-j;
type = ptype[ij];
no_close = (((type==3)||(type==4))&&no_closingGU);
if (type) { /* we have a pair */
/* increase or decrease distance-to-reference value depending whether (i,j) is included in
* reference or has to be introduced
*/
base_d1 = ((unsigned int)reference_pt1[i] != j) ? 1 : -1;
base_d2 = ((unsigned int)reference_pt2[i] != j) ? 1 : -1;
/* HAIRPIN STRUCTURES */
/* get distance to reference if closing the hairpin
* d = dbp(T_{i,j}, {i,j})
*/
d1 = base_d1 + referenceBPs1[ij];
d2 = base_d2 + referenceBPs2[ij];
int min_k, max_k, min_l, max_l;
int real_min_k, real_max_k, *min_l_real, *max_l_real;
min_l = min_k = 0;
max_k = mm1[ij] + referenceBPs1[ij];
max_l = mm2[ij] + referenceBPs2[ij];
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[ij],
&vars->k_min_values[ij],
&vars->k_max_values[ij],
&vars->l_min_values[ij],
&vars->l_max_values[ij]
);
preparePosteriorBoundaries( vars->k_max_values[ij] - vars->k_min_values[ij] + 1,
vars->k_min_values[ij],
&real_min_k,
&real_max_k,
&min_l_real,
&max_l_real
);
prepareArray( &vars->E_C[ij],
vars->k_min_values[ij],
vars->k_max_values[ij],
vars->l_min_values[ij],
vars->l_max_values[ij]
);
#ifdef COUNT_STATES
prepareArray2( &vars->N_C[ij],
vars->k_min_values[ij],
vars->k_max_values[ij],
vars->l_min_values[ij],
vars->l_max_values[ij]
);
#endif
/* d1 and d2 are the distancies to both references introduced by closing a hairpin structure at (i,j) */
if((d1 >= 0) && (d2 >= 0)){
if(((unsigned int)d1<=maxD1) && ((unsigned int)d2 <= maxD2)){
vars->E_C[ij][d1][d2/2] = (no_close) ? FORBIDDEN : E_Hairpin(dij, type, S1[i+1], S1[j-1], sequence+i-1, P);
updatePosteriorBoundaries(d1,
d2,
&real_min_k,
&real_max_k,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
vars->N_C[ij][d1][d2/2] = 1;
#endif
}
else{
vars->E_C_rem[ij] = (no_close) ? FORBIDDEN : E_Hairpin(dij, type, S1[i+1], S1[j-1], sequence+i-1, P);
}
}
/* INTERIOR LOOP STRUCTURES */
maxp = MIN2(j-2-TURN,i+MAXLOOP+1);
for(p = i+1; p <= maxp; p++){
unsigned int minq = p + TURN + 1;
unsigned int ln_pre = dij + p;
if(ln_pre > minq + MAXLOOP) minq = ln_pre - MAXLOOP - 1;
for(q = minq; q < j; q++){
pq = my_iindx[p]-q;
/* set distance to reference structure... */
type_2 = ptype[pq];
if (type_2==0) continue;
type_2 = rtype[type_2];
/* get distance to reference if closing the interior loop
* d2 = dbp(S_{i,j}, S_{p.q} + {i,j})
*/
d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[pq];
d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[pq];
if(no_closingGU)
if(no_close||(type_2==3)||(type_2==4))
if((p>i+1)||(q<j-1)) continue; /* continue unless stack */
energy = E_IntLoop(p-i-1, j-q-1, type, type_2, S1[i+1], S1[j-1], S1[p-1], S1[q+1], P);
if(vars->E_C[pq] != NULL){
for(cnt1 = vars->k_min_values[pq]; cnt1 <= vars->k_max_values[pq]; cnt1++){
for(cnt2 = vars->l_min_values[pq][cnt1]; cnt2 <= vars->l_max_values[pq][cnt1]; cnt2+=2){
if(vars->E_C[pq][cnt1][cnt2/2] != INF){
if(((cnt1 + d1) <= maxD1) && ((cnt2+d2) <= maxD2)){
vars->E_C[ij][cnt1 + d1][(cnt2 + d2)/2] = MIN2( vars->E_C[ij][cnt1 + d1][(cnt2 + d2)/2],
vars->E_C[pq][cnt1][cnt2/2] + energy
);
updatePosteriorBoundaries(cnt1 + d1,
cnt2 + d2,
&real_min_k,
&real_max_k,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
vars->N_C[ij][cnt1 + d1][(cnt2 + d2)/2] += vars->N_C[pq][cnt1][cnt2/2];
#endif
}
/* collect all cases where d1+cnt1 or d2+cnt2 exceeds maxD1, maxD2, respectively */
else{
vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij], vars->E_C[pq][cnt1][cnt2/2] + energy);
}
}
}
}
}
/* collect all contributions where C[pq] already lies outside k_max, l_max boundary */
if(vars->E_C_rem[pq] != INF){
vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij], vars->E_C_rem[pq] + energy);
}
} /* end q-loop */
} /* end p-loop */
/* MULTI LOOP STRUCTURES */
if(!no_close){
/* dangle energies for multiloop closing stem */
tt = rtype[type];
temp2 = P->MLclosing;
if(dangles == 2)
temp2 += E_MLstem(tt, S1[j-1], S1[i+1], P);
else
temp2 += E_MLstem(tt, -1, -1, P);
for(u=i+TURN+2; u<j-TURN-2;u++){
int i1u = my_iindx[i+1]-u;
int u1j1 = my_iindx[u+1]-j+1;
/* check all cases where either M or M1 are already out of scope of maxD1 and/or maxD2 */
if(vars->E_M_rem[i1u] != INF){
for(cnt3 = vars->k_min_values_m1[u1j1];
cnt3 <= vars->k_max_values_m1[u1j1];
cnt3++)
for(cnt4 = vars->l_min_values_m1[u1j1][cnt3];
cnt4 <= vars->l_max_values_m1[u1j1][cnt3];
cnt4+=2){
if(vars->E_M1[u1j1][cnt3][cnt4/2]!= INF){
vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij],
vars->E_M_rem[i1u]
+ vars->E_M1[u1j1][cnt3][cnt4/2]
+ temp2
);
}
}
if(vars->E_M1_rem[u1j1] != INF){
vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij],
vars->E_M_rem[i1u]
+ vars->E_M1_rem[u1j1]
+ temp2
);
}
}
if(vars->E_M1_rem[u1j1] != INF){
for(cnt1 = vars->k_min_values_m[i1u];
cnt1 <= vars->k_max_values_m[i1u];
cnt1++)
for(cnt2 = vars->l_min_values_m[i1u][cnt1];
cnt2 <= vars->l_max_values_m[i1u][cnt1];
cnt2+=2)
if(vars->E_M[i1u][cnt1][cnt2/2] != INF){
vars->E_C_rem[ij] = MIN2(vars->E_C_rem[ij],
vars->E_M[i1u][cnt1][cnt2/2]
+ vars->E_M1_rem[u1j1]
+ temp2
);
}
}
/* get distance to reference if closing the multiloop
* d = dbp(S_{i,j}, {i,j} + S_{i+1,u} + S_{u+1,j-1})
*/
if(!vars->E_M[i1u]) continue;
if(!vars->E_M1[u1j1]) continue;
d1 = base_d1 + referenceBPs1[ij] - referenceBPs1[i1u] - referenceBPs1[u1j1];
d2 = base_d2 + referenceBPs2[ij] - referenceBPs2[i1u] - referenceBPs2[u1j1];
for(cnt1 = vars->k_min_values_m[i1u];
cnt1 <= vars->k_max_values_m[i1u];
cnt1++)
for(cnt2 = vars->l_min_values_m[i1u][cnt1];
cnt2 <= vars->l_max_values_m[i1u][cnt1];
cnt2+=2)
for(cnt3 = vars->k_min_values_m1[u1j1];
cnt3 <= vars->k_max_values_m1[u1j1];
cnt3++)
for(cnt4 = vars->l_min_values_m1[u1j1][cnt3];
cnt4 <= vars->l_max_values_m1[u1j1][cnt3];
cnt4+=2){
if((vars->E_M[i1u][cnt1][cnt2/2] != INF) && (vars->E_M1[u1j1][cnt3][cnt4/2]!= INF)){
if(((cnt1+cnt3+d1) <= maxD1) && ((cnt2+cnt4+d2) <= maxD2)){
vars->E_C[ij][cnt1+cnt3+d1][(cnt2+cnt4+d2)/2] = MIN2( vars->E_C[ij][cnt1+cnt3+d1][(cnt2+cnt4+d2)/2],
vars->E_M[i1u][cnt1][cnt2/2]
+ vars->E_M1[u1j1][cnt3][cnt4/2]
+ temp2
);
updatePosteriorBoundaries(cnt1 + cnt3 + d1,
cnt2 + cnt4 + d2,
&real_min_k,
&real_max_k,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
vars->N_C[ij][cnt1+cnt3+d1][(cnt2+cnt4+d2)/2] += vars->N_M[i1u][cnt1][cnt2/2] * vars->N_M1[u1j1][cnt3][cnt4/2];
#endif
}
/* collect all cases where d1+cnt1+cnt3 or d2+cnt2+cnt4 exceeds maxD1, maxD2, respectively */
else{
vars->E_C_rem[ij] = MIN2( vars->E_C_rem[ij],
vars->E_M[i1u][cnt1][cnt2/2]
+ vars->E_M1[u1j1][cnt3][cnt4/2]
+ temp2
);
}
}
}
}
}
/* resize and move memory portions of energy matrix E_C */
adjustArrayBoundaries(&vars->E_C[ij],
&vars->k_min_values[ij],
&vars->k_max_values[ij],
&vars->l_min_values[ij],
&vars->l_max_values[ij],
real_min_k,
real_max_k,
min_l_real,
max_l_real
);
#ifdef COUNT_STATES
/* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/
#endif
} /* end >> if (pair) << */
/* done with c[i,j], now compute fML[i,j] */
/* free ends ? -----------------------------------------*/
dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i+1]-j];
dib = referenceBPs2[ij] - referenceBPs2[my_iindx[i+1]-j];
dja = referenceBPs1[ij] - referenceBPs1[ij+1];
djb = referenceBPs2[ij] - referenceBPs2[ij+1];
if(dangles==2)
temp2 = E_MLstem(type, ((i > 1) || circ) ? S1[i-1] : -1, ((j < seq_length) || circ) ? S1[j+1] : -1, P);
else
temp2 = E_MLstem(type, -1, -1, P);
int min_k_guess, max_k_guess, min_l_guess, max_l_guess;
int min_k_real_m, max_k_real_m, *min_l_real_m, *max_l_real_m;
int min_k_real_m1, max_k_real_m1, *min_l_real_m1, *max_l_real_m1;
min_k_guess = min_l_guess = 0;
max_k_guess = mm1[ij] + referenceBPs1[ij];
max_l_guess = mm2[ij] + referenceBPs2[ij];
prepareBoundaries(min_k_guess,
max_k_guess,
min_l_guess,
max_l_guess,
bpdist[ij],
&vars->k_min_values_m[ij],
&vars->k_max_values_m[ij],
&vars->l_min_values_m[ij],
&vars->l_max_values_m[ij]
);
prepareBoundaries(min_k_guess,
max_k_guess,
min_l_guess,
max_l_guess,
bpdist[ij],
&vars->k_min_values_m1[ij],
&vars->k_max_values_m1[ij],
&vars->l_min_values_m1[ij],
&vars->l_max_values_m1[ij]
);
preparePosteriorBoundaries( vars->k_max_values_m[ij] - vars->k_min_values_m[ij] + 1,
vars->k_min_values_m[ij],
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
preparePosteriorBoundaries( vars->k_max_values_m1[ij] - vars->k_min_values_m1[ij] + 1,
vars->k_min_values_m1[ij],
&min_k_real_m1,
&max_k_real_m1,
&min_l_real_m1,
&max_l_real_m1
);
prepareArray( &vars->E_M[ij],
vars->k_min_values_m[ij],
vars->k_max_values_m[ij],
vars->l_min_values_m[ij],
vars->l_max_values_m[ij]
);
prepareArray( &vars->E_M1[ij],
vars->k_min_values_m1[ij],
vars->k_max_values_m1[ij],
vars->l_min_values_m1[ij],
vars->l_max_values_m1[ij]
);
#ifdef COUNT_STATES
prepareArray2( &vars->N_M[ij],
vars->k_min_values_m[ij],
vars->k_max_values_m[ij],
vars->l_min_values_m[ij],
vars->l_max_values_m[ij]
);
prepareArray2( &vars->N_M1[ij],
vars->k_min_values_m1[ij],
vars->k_max_values_m1[ij],
vars->l_min_values_m1[ij],
vars->l_max_values_m1[ij]
);
#endif
/* now to the actual computations... */
/* 1st E_M[ij] = E_M1[ij] = E_C[ij] + b */
if(vars->E_C_rem[ij] != INF){
vars->E_M_rem[ij] = vars->E_M1_rem[ij] = temp2 + vars->E_C_rem[ij];
}
if(vars->E_C[ij])
for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++){
for(cnt2 = vars->l_min_values[ij][cnt1]; cnt2 <= vars->l_max_values[ij][cnt1]; cnt2+=2){
if(vars->E_C[ij][cnt1][cnt2/2] != INF){
vars->E_M[ij][cnt1][cnt2/2] = vars->E_M1[ij][cnt1][cnt2/2] = temp2 + vars->E_C[ij][cnt1][cnt2/2];
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real_m1,
&max_k_real_m1,
&min_l_real_m1,
&max_l_real_m1
);
#ifdef COUNT_STATES
vars->N_M[ij][cnt1][cnt2/2] = vars->N_M1[ij][cnt1][cnt2/2] = vars->N_C[ij][cnt1][cnt2/2];
#endif
}
}
}
/* 2nd E_M[ij] = MIN(E_M[ij], E_M[i+1,j] + c) */
if(vars->E_M_rem[my_iindx[i+1]-j] != INF){
vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
vars->E_M_rem[my_iindx[i+1]-j] + P->MLbase
);
}
if(vars->E_M[my_iindx[i+1]-j])
for(cnt1 = vars->k_min_values_m[my_iindx[i+1]-j];
cnt1 <= vars->k_max_values_m[my_iindx[i+1]-j];
cnt1++){
for(cnt2 = vars->l_min_values_m[my_iindx[i+1]-j][cnt1];
cnt2 <= vars->l_max_values_m[my_iindx[i+1]-j][cnt1];
cnt2+=2){
if(vars->E_M[my_iindx[i+1]-j][cnt1][cnt2/2] != INF){
if(((cnt1 + dia) <= maxD1) && ((cnt2 + dib) <= maxD2)){
vars->E_M[ij][cnt1+dia][(cnt2+dib)/2] = MIN2( vars->E_M[ij][cnt1+dia][(cnt2+dib)/2],
vars->E_M[my_iindx[i+1]-j][cnt1][cnt2/2] + P->MLbase
);
updatePosteriorBoundaries(cnt1 + dia,
cnt2 + dib,
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
#ifdef COUNT_STATES
vars->N_M[ij][cnt1+dia][(cnt2+dib)/2] += vars->N_M[my_iindx[i+1]-j][cnt1][cnt2/2];
#endif
}
/* collect all cases where dia+cnt1 or dib+cnt2 exceeds maxD1, maxD2, respectively */
else{
vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
vars->E_M[my_iindx[i+1]-j][cnt1][cnt2/2] + P->MLbase
);
}
}
}
}
/* 3rd E_M[ij] = MIN(E_M[ij], E_M[i,j-1] + c) */
if(vars->E_M_rem[ij+1] != INF){
vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
vars->E_M_rem[ij+1] + P->MLbase
);
}
if(vars->E_M[ij+1])
for(cnt1 = vars->k_min_values_m[ij+1];
cnt1 <= vars->k_max_values_m[ij+1];
cnt1++){
for(cnt2 = vars->l_min_values_m[ij+1][cnt1];
cnt2 <= vars->l_max_values_m[ij+1][cnt1];
cnt2+=2){
if(vars->E_M[ij+1][cnt1][cnt2/2] != INF){
if(((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)){
vars->E_M[ij][cnt1+dja][(cnt2+djb)/2] = MIN2( vars->E_M[ij][cnt1+dja][(cnt2+djb)/2],
vars->E_M[ij+1][cnt1][cnt2/2] + P->MLbase
);
updatePosteriorBoundaries(cnt1 + dja,
cnt2 + djb,
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
#ifdef COUNT_STATES
vars->N_M[ij][cnt1+dja][(cnt2+djb)/2] += vars->N_M[ij+1][cnt1][cnt2/2];
#endif
}
/* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */
else{
vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
vars->E_M[ij+1][cnt1][cnt2/2] + P->MLbase
);
}
}
}
}
/* 4th E_M1[ij] = MIN(E_M1[ij], E_M1[i,j-1] + c) */
if(vars->E_M1_rem[ij+1] != INF){
vars->E_M1_rem[ij] = MIN2( vars->E_M1_rem[ij],
vars->E_M1_rem[ij+1] + P->MLbase
);
}
if(vars->E_M1[ij+1])
for(cnt1 = vars->k_min_values_m1[ij+1];
cnt1 <= vars->k_max_values_m1[ij+1];
cnt1++){
for(cnt2 = vars->l_min_values_m1[ij+1][cnt1];
cnt2 <= vars->l_max_values_m1[ij+1][cnt1];
cnt2+=2){
if(vars->E_M1[ij+1][cnt1][cnt2/2] != INF){
if(((cnt1 + dja) <= maxD1) && ((cnt2 + djb) <= maxD2)){
vars->E_M1[ij][cnt1+dja][(cnt2+djb)/2] = MIN2( vars->E_M1[ij][cnt1+dja][(cnt2+djb)/2],
vars->E_M1[ij+1][cnt1][cnt2/2] + P->MLbase
);
updatePosteriorBoundaries(cnt1 + dja,
cnt2 + djb,
&min_k_real_m1,
&max_k_real_m1,
&min_l_real_m1,
&max_l_real_m1
);
#ifdef COUNT_STATES
vars->N_M1[ij][cnt1+dja][(cnt2+djb)/2] += vars->N_M1[ij+1][cnt1][cnt2/2];
#endif
}
/* collect all cases where dja+cnt1 or djb+cnt2 exceeds maxD1, maxD2, respectively */
else{
vars->E_M1_rem[ij] = MIN2( vars->E_M1_rem[ij],
vars->E_M1[ij+1][cnt1][cnt2/2] + P->MLbase
);
}
}
}
}
/* 5th E_M[ij] = MIN(E_M[ij], min(E_M[i,k] + E_M[k+1,j])) */
if(j > TURN + 2)
for (u = i+1+TURN; u <= j-2-TURN; u++){
/* check all cases where M(i,u) and/or M(u+1,j) are already out of scope of maxD1 and/or maxD2 */
if(vars->E_M_rem[my_iindx[i]-u] != INF){
for(cnt3 = vars->k_min_values_m[my_iindx[u+1]-j];
cnt3 <= vars->k_max_values_m[my_iindx[u+1]-j];
cnt3++){
for(cnt4 = vars->l_min_values_m[my_iindx[u+1]-j][cnt3];
cnt4 <= vars->l_max_values_m[my_iindx[u+1]-j][cnt3];
cnt4+=2){
if(vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2] != INF){
vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
vars->E_M_rem[my_iindx[i]-u] + vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2]
);
}
}
}
if(vars->E_M_rem[my_iindx[u+1]-j] != INF){
vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
vars->E_M_rem[my_iindx[i]-u] + vars->E_M_rem[my_iindx[u+1]-j]
);
}
}
if(vars->E_M_rem[my_iindx[u+1]-j] != INF){
for(cnt1 = vars->k_min_values_m[my_iindx[i]-u];
cnt1 <= vars->k_max_values_m[my_iindx[i]-u];
cnt1++){
for(cnt2 = vars->l_min_values_m[my_iindx[i]-u][cnt1];
cnt2 <= vars->l_max_values_m[my_iindx[i]-u][cnt1];
cnt2+=2){
if(vars->E_M[my_iindx[i]-u][cnt1][cnt2/2] != INF){
vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
vars->E_M[my_iindx[i]-u][cnt1][cnt2/2] + vars->E_M_rem[my_iindx[u+1]-j]
);
}
}
}
}
if(!vars->E_M[my_iindx[i]-u]) continue;
if(!vars->E_M[my_iindx[u+1]-j]) continue;
dia = referenceBPs1[ij] - referenceBPs1[my_iindx[i]-u] - referenceBPs1[my_iindx[u+1]-j];
dib = referenceBPs2[ij] - referenceBPs2[my_iindx[i]-u] - referenceBPs2[my_iindx[u+1]-j];
for(cnt1 = vars->k_min_values_m[my_iindx[i]-u];
cnt1 <= vars->k_max_values_m[my_iindx[i]-u];
cnt1++){
for(cnt2 = vars->l_min_values_m[my_iindx[i]-u][cnt1];
cnt2 <= vars->l_max_values_m[my_iindx[i]-u][cnt1];
cnt2+=2){
for(cnt3 = vars->k_min_values_m[my_iindx[u+1]-j];
cnt3 <= vars->k_max_values_m[my_iindx[u+1]-j];
cnt3++){
for(cnt4 = vars->l_min_values_m[my_iindx[u+1]-j][cnt3];
cnt4 <= vars->l_max_values_m[my_iindx[u+1]-j][cnt3];
cnt4+=2){
if((vars->E_M[my_iindx[i]-u][cnt1][cnt2/2] != INF) && (vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2] != INF)){
if(((cnt1 + cnt3 + dia) <= maxD1) && ((cnt2 + cnt4 + dib) <= maxD2)){
vars->E_M[ij][cnt1+cnt3+dia][(cnt2+cnt4+dib)/2] = MIN2( vars->E_M[ij][cnt1+cnt3+dia][(cnt2+cnt4+dib)/2],
vars->E_M[my_iindx[i]-u][cnt1][cnt2/2]
+ vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2]
);
updatePosteriorBoundaries(cnt1 + cnt3 + dia,
cnt2 + cnt4 + dib,
&min_k_real_m,
&max_k_real_m,
&min_l_real_m,
&max_l_real_m
);
#ifdef COUNT_STATES
vars->N_M[ij][cnt1+cnt3+dia][(cnt2+cnt4+dib)/2] += vars->N_M[my_iindx[i]-u][cnt1][cnt2/2] * vars->N_M1[my_iindx[u+1]-j][cnt3][cnt4/2];
#endif
}
/* collect all cases where dia+cnt1+cnt3 or dib+cnt2+cnt4 exceeds maxD1, maxD2, respectively */
else{
vars->E_M_rem[ij] = MIN2(vars->E_M_rem[ij],
vars->E_M[my_iindx[i]-u][cnt1][cnt2/2] + vars->E_M[my_iindx[u+1]-j][cnt3][cnt4/2]
);
}
}
}
}
}
}
}
/* thats all folks for the multiloop decomposition... */
adjustArrayBoundaries(&vars->E_M[ij],
&vars->k_min_values_m[ij],
&vars->k_max_values_m[ij],
&vars->l_min_values_m[ij],
&vars->l_max_values_m[ij],
min_k_real_m,
max_k_real_m,
min_l_real_m,
max_l_real_m
);
adjustArrayBoundaries(&vars->E_M1[ij],
&vars->k_min_values_m1[ij],
&vars->k_max_values_m1[ij],
&vars->l_min_values_m1[ij],
&vars->l_max_values_m1[ij],
min_k_real_m1,
max_k_real_m1,
min_l_real_m1,
max_l_real_m1
);
#ifdef COUNT_STATES
/* actually we should adjust the array boundaries here but we might never use the count states option more than once so what....*/
#endif
} /* end of j-loop */
}
/* calculate energies of 5' and 3' fragments */
/* prepare first entries in E_F5 */
for(cnt1 = 1; cnt1 <= TURN+1; cnt1++){
vars->E_F5[cnt1] = (int **)space(sizeof(int *));
vars->E_F5[cnt1][0] = (int *)space(sizeof(int));
vars->E_F5[cnt1][0][0] = 0;
vars->E_F5_rem[cnt1] = INF;
vars->k_min_values_f[cnt1] = vars->k_max_values_f[cnt1] = 0;
vars->l_min_values_f[cnt1] = (int *)space(sizeof(int));
vars->l_max_values_f[cnt1] = (int *)space(sizeof(int));
vars->l_min_values_f[cnt1][0] = vars->l_max_values_f[cnt1][0] = 0;
#ifdef COUNT_STATES
vars->N_F5[cnt1] = (unsigned long **)space(sizeof(unsigned long *));
vars->N_F5[cnt1][0] = (unsigned long *)space(sizeof(unsigned long));
vars->N_F5[cnt1][0][0] = 1;
#endif
}
for (j=TURN+2; j <= seq_length; j++) {
unsigned int da = referenceBPs1[my_iindx[1]-j] - referenceBPs1[my_iindx[1]-j+1];
unsigned int db = referenceBPs2[my_iindx[1]-j] - referenceBPs2[my_iindx[1]-j+1];
type=ptype[my_iindx[1]-j];
additional_en = 0;
if(type){
if(dangles == 2)
additional_en += E_ExtLoop(type, -1, j < seq_length ? S1[j+1] : -1, P);
else
additional_en += E_ExtLoop(type, -1, -1, P);
}
/* make min and max k guess for memory allocation */
int min_k_guess, max_k_guess, min_l_guess, max_l_guess;
int *min_l_real, *max_l_real, min_k_real, max_k_real;
min_k_guess = min_l_guess = 0;
max_k_guess = referenceBPs1[my_iindx[1]-j] + mm1[my_iindx[1]-j];
max_l_guess = referenceBPs2[my_iindx[1]-j] + mm2[my_iindx[1]-j];
prepareBoundaries(min_k_guess,
max_k_guess,
min_l_guess,
max_l_guess,
bpdist[my_iindx[1]-j],
&vars->k_min_values_f[j],
&vars->k_max_values_f[j],
&vars->l_min_values_f[j],
&vars->l_max_values_f[j]
);
preparePosteriorBoundaries( vars->k_max_values_f[j] - vars->k_min_values_f[j] + 1,
vars->k_min_values_f[j],
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
prepareArray( &vars->E_F5[j],
vars->k_min_values_f[j],
vars->k_max_values_f[j],
vars->l_min_values_f[j],
vars->l_max_values_f[j]
);
#ifdef COUNT_STATES
prepareArray2( &vars->N_F5[j],
vars->k_min_values_f[j],
vars->k_max_values_f[j],
vars->l_min_values_f[j],
vars->l_max_values_f[j]
);
#endif
/* begin the actual computation of 5' end energies */
/* j-1 is unpaired ... */
vars->E_F5_rem[j] = vars->E_F5_rem[j-1];
for(cnt1 = vars->k_min_values_f[j-1]; cnt1 <= vars->k_max_values_f[j-1]; cnt1++){
for(cnt2 = vars->l_min_values_f[j-1][cnt1]; cnt2 <= vars->l_max_values_f[j-1][cnt1]; cnt2+=2){
if(((cnt1 + da) <= maxD1) && ((cnt2 + db) <= maxD2)){
vars->E_F5[j][cnt1+da][(cnt2+db)/2] = MIN2( vars->E_F5[j][cnt1+da][(cnt2+db)/2],
vars->E_F5[j-1][cnt1][cnt2/2]
);
updatePosteriorBoundaries(cnt1 + da,
cnt2 + db,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
vars->N_F5[j][cnt1+da][(cnt2+db)/2] += vars->N_F5[j-1][cnt1][cnt2/2];
#endif
}
/* collect all cases where da+cnt1 or db+cnt2 exceeds maxD1, maxD2, respectively */
else{
vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j], vars->E_F5[j-1][cnt1][cnt2/2]);
}
}
}
/* j pairs with 1 */
if(vars->E_C_rem[my_iindx[1]-j] != INF){
vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j], vars->E_C_rem[my_iindx[1]-j] + additional_en);
}
if(vars->E_C[my_iindx[1]-j])
for(cnt1 = vars->k_min_values[my_iindx[1]-j]; cnt1 <= vars->k_max_values[my_iindx[1]-j]; cnt1++)
for(cnt2 = vars->l_min_values[my_iindx[1]-j][cnt1]; cnt2 <= vars->l_max_values[my_iindx[1]-j][cnt1]; cnt2+=2){
if(vars->E_C[my_iindx[1]-j][cnt1][cnt2/2] != INF){
vars->E_F5[j][cnt1][cnt2/2] = MIN2( vars->E_F5[j][cnt1][cnt2/2],
vars->E_C[my_iindx[1]-j][cnt1][cnt2/2]+ additional_en
);
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
vars->N_F5[j][cnt1][cnt2/2] += vars->N_C[my_iindx[1]-j][cnt1][cnt2/2];
#endif
}
}
/* j pairs with some other nucleotide -> see below */
for (i=j-TURN-1; i>1; i--) {
ij = my_iindx[i]-j;
type = ptype[ij];
if (type) {
if(dangles == 2)
additional_en = E_ExtLoop(type, S1[i-1], j < seq_length ? S1[j+1] : -1, P);
else
additional_en = E_ExtLoop(type, -1, -1, P);
if(vars->E_C_rem[ij] != INF){
for(cnt3 = vars->k_min_values_f[i-1]; cnt3 <= vars->k_max_values_f[i-1]; cnt3++)
for(cnt4 = vars->l_min_values_f[i-1][cnt3]; cnt4 <= vars->l_max_values_f[i-1][cnt3]; cnt4+=2){
if(vars->E_F5[i-1][cnt3][cnt4/2] != INF){
vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j],
vars->E_F5[i-1][cnt3][cnt4/2] + vars->E_C_rem[ij] + additional_en
);
}
}
if(vars->E_F5_rem[i-1] != INF){
vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j],
vars->E_F5_rem[i-1] + vars->E_C_rem[ij] + additional_en
);
}
}
if((vars->E_F5_rem[i-1] != INF) && (vars->E_C[ij])){
for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
for(cnt2 = vars->l_min_values[ij][cnt1]; cnt2 <= vars->l_max_values[ij][cnt1]; cnt2+=2)
if(vars->E_C[ij][cnt1][cnt2/2]!= INF){
vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j],
vars->E_F5_rem[i-1] + vars->E_C[ij][cnt1][cnt2/2] + additional_en
);
}
}
if(!vars->E_C[ij]) continue;
unsigned int d1a = referenceBPs1[my_iindx[1]-j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1]-i+1];
unsigned int d1b = referenceBPs2[my_iindx[1]-j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1]-i+1];
for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
for(cnt2 = vars->l_min_values[ij][cnt1]; cnt2 <= vars->l_max_values[ij][cnt1]; cnt2+=2)
for(cnt3 = vars->k_min_values_f[i-1]; cnt3 <= vars->k_max_values_f[i-1]; cnt3++)
for(cnt4 = vars->l_min_values_f[i-1][cnt3]; cnt4 <= vars->l_max_values_f[i-1][cnt3]; cnt4+=2){
if(vars->E_F5[i-1][cnt3][cnt4/2] != INF && vars->E_C[ij][cnt1][cnt2/2]!= INF){
if(((cnt1 + cnt3 + d1a) <= maxD1) && ((cnt2 + cnt4 + d1b) <= maxD2)){
vars->E_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2] = MIN2( vars->E_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2],
vars->E_F5[i-1][cnt3][cnt4/2] + vars->E_C[ij][cnt1][cnt2/2] + additional_en
);
updatePosteriorBoundaries(cnt1 + cnt3 + d1a,
cnt2 + cnt4 + d1b,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
vars->N_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2] += vars->N_F5[i-1][cnt3][cnt4/2] * vars->N_C[ij][cnt1][cnt2/2];
#endif
}
/* collect all cases where d1a+cnt1+cnt3 or d1b+cnt2+cnt4 exceeds maxD1, maxD2, respectively */
else{
vars->E_F5_rem[j] = MIN2(vars->E_F5_rem[j],
vars->E_F5[i-1][cnt3][cnt4/2] + vars->E_C[ij][cnt1][cnt2/2] + additional_en
);
}
}
}
}
}
/* resize and move memory portions of energy matrix E_F5 */
adjustArrayBoundaries(&vars->E_F5[j],
&vars->k_min_values_f[j],
&vars->k_max_values_f[j],
&vars->l_min_values_f[j],
&vars->l_max_values_f[j],
min_k_real,
max_k_real,
min_l_real,
max_l_real
);
} /* end of j-loop */
if(compute_2Dfold_F3){
/* prepare first entries in E_F3 */
for(cnt1 = seq_length; cnt1 >= seq_length-TURN-1; cnt1--){
vars->E_F3[cnt1] = (int **)space(sizeof(int *));
vars->E_F3[cnt1][0] = (int *) space(sizeof(int));
vars->E_F3[cnt1][0][0] = 0;
vars->k_min_values_f3[cnt1] = vars->k_max_values_f3[cnt1] = 0;
vars->l_min_values_f3[cnt1] = (int *)space(sizeof(int));
vars->l_max_values_f3[cnt1] = (int *)space(sizeof(int));
vars->l_min_values_f3[cnt1][0] = vars->l_max_values_f3[cnt1][0] = 0;
}
/* begin calculations */
for (j=seq_length-TURN-2; j >= 1; j--){
unsigned int da = referenceBPs1[my_iindx[j]-seq_length] - referenceBPs1[my_iindx[j+1]-seq_length];
unsigned int db = referenceBPs2[my_iindx[j]-seq_length] - referenceBPs2[my_iindx[j+1]-seq_length];
type=ptype[my_iindx[j]-seq_length];
additional_en = 0;
if(type){
if(dangles == 2)
additional_en += E_ExtLoop(type, j > 1 ? S1[j-1] : -1, -1, P);
else
additional_en += E_ExtLoop(type, -1, -1, P);
}
/* make min and max k guess for memory allocation */
int min_k_guess, max_k_guess, min_l_guess, max_l_guess;
int *min_l_real, *max_l_real, min_k_real, max_k_real;
min_k_guess = min_l_guess = 0;
max_k_guess = referenceBPs1[my_iindx[j]-seq_length] + mm1[my_iindx[j]-seq_length];
max_l_guess = referenceBPs2[my_iindx[j]-seq_length] + mm2[my_iindx[j]-seq_length];
prepareBoundaries(min_k_guess,
max_k_guess,
min_l_guess,
max_l_guess,
bpdist[my_iindx[j]-seq_length],
&vars->k_min_values_f3[j],
&vars->k_max_values_f3[j],
&vars->l_min_values_f3[j],
&vars->l_max_values_f3[j]
);
preparePosteriorBoundaries( vars->k_max_values_f3[j] - vars->k_min_values_f3[j] + 1,
vars->k_min_values_f3[j],
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
prepareArray( &vars->E_F3[j],
vars->k_min_values_f3[j],
vars->k_max_values_f3[j],
vars->l_min_values_f3[j],
vars->l_max_values_f3[j]
);
/* begin the actual computation of 5' end energies */
/* j is unpaired ... */
for(cnt1 = vars->k_min_values_f3[j+1]; cnt1 <= vars->k_max_values_f3[j+1]; cnt1++){
for(cnt2 = vars->l_min_values_f3[j+1][cnt1]; cnt2 <= vars->l_max_values_f3[j+1][cnt1]; cnt2+=2){
vars->E_F3[j][cnt1+da][(cnt2+db)/2] = MIN2( vars->E_F3[j][cnt1+da][(cnt2+db)/2],
vars->E_F3[j+1][cnt1][cnt2/2]
);
updatePosteriorBoundaries(cnt1 + da,
cnt2 + db,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
}
/* j pairs with n */
if(vars->E_C[my_iindx[j]-seq_length])
for(cnt1 = vars->k_min_values[my_iindx[j]-seq_length]; cnt1 <= vars->k_max_values[my_iindx[j]-seq_length]; cnt1++)
for(cnt2 = vars->l_min_values[my_iindx[j]-seq_length][cnt1]; cnt2 <= vars->l_max_values[my_iindx[j]-seq_length][cnt1]; cnt2+=2){
if(vars->E_C[my_iindx[j]-seq_length][cnt1][cnt2/2] != INF){
vars->E_F3[j][cnt1][cnt2/2] = MIN2( vars->E_F3[j][cnt1][cnt2/2],
vars->E_C[my_iindx[j]-seq_length][cnt1][cnt2/2]+ additional_en
);
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
}
/* j pairs with some other nucleotide -> see below */
for (i=j-TURN-1; i>1; i--) {
ij = my_iindx[i]-j;
if(!vars->E_C[ij]) continue;
type = ptype[ij];
if (type) {
unsigned int d1a = referenceBPs1[my_iindx[1]-j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1]-i+1];
unsigned int d1b = referenceBPs2[my_iindx[1]-j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1]-i+1];
if(dangles == 2)
additional_en = E_ExtLoop(type, S1[i-1], j < seq_length ? S1[j+1] : -1, P);
else
additional_en = E_ExtLoop(type, -1, -1, P);
for(cnt1 = vars->k_min_values[ij]; cnt1 <= vars->k_max_values[ij]; cnt1++)
for(cnt2 = vars->l_min_values[ij][cnt1]; cnt2 <= vars->l_max_values[ij][cnt1]; cnt2+=2)
for(cnt3 = vars->k_min_values_f[i-1]; cnt3 <= vars->k_max_values_f[i-1]; cnt3++)
for(cnt4 = vars->l_min_values_f[i-1][cnt3]; cnt4 <= vars->l_max_values_f[i-1][cnt3]; cnt4+=2){
if(vars->E_F5[i-1][cnt3][cnt4/2] != INF && vars->E_C[ij][cnt1][cnt2/2]!= INF){
vars->E_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2] = MIN2( vars->E_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2],
vars->E_F5[i-1][cnt3][cnt4/2] + vars->E_C[ij][cnt1][cnt2/2] + additional_en
);
updatePosteriorBoundaries(cnt1 + cnt3 + d1a,
cnt2 + cnt4 + d1b,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
#ifdef COUNT_STATES
vars->N_F5[j][cnt1+cnt3+d1a][(cnt2+cnt4+d1b)/2] += vars->N_F5[i-1][cnt3][cnt4/2] * vars->N_C[ij][cnt1][cnt2/2];
#endif
}
}
}
}
/* resize and move memory portions of energy matrix E_F5 */
adjustArrayBoundaries(&vars->E_F5[j],
&vars->k_min_values_f[j],
&vars->k_max_values_f[j],
&vars->l_min_values_f[j],
&vars->l_max_values_f[j],
min_k_real,
max_k_real,
min_l_real,
max_l_real
);
} /* end of j-loop */
}
}
/*---------------------------------------------------------------------------*/
/*
 * Refresh the energy parameter set stored in @p vars.
 *
 * Discards any previously scaled parameter set, re-derives the parameters
 * for the current model settings via scale_parameters(), and rebuilds the
 * global base-pair compatibility matrix.
 */
PUBLIC void update_TwoDfold_params(TwoDfold_vars *vars){
  paramT *stale = vars->P;
  free(stale);                    /* free(NULL) is a well-defined no-op */
  vars->P = scale_parameters();   /* freshly scaled energy parameters   */
  make_pair_matrix();             /* keep pair[][] in sync              */
}
/*---------------------------------------------------------------------------*/
/*
 * Fill vars->ptype with the pair type of every (i,j) combination,
 * indexed through vars->my_iindx.  When the global noLonelyPairs flag
 * is set, pairs that could only ever occur isolated (no stacked pair
 * either inside or outside) are zeroed out.
 */
PRIVATE void make_ptypes(TwoDfold_vars *vars) {
  int n,i,j,k,l;
  n=vars->S[0];  /* encoded sequence stores its length in S[0] */
  /* each (k,l) seeds a minimal-span candidate pair (i, i+TURN+l); the inner
   * while-loop then slides outward (i--, j++), so every (i,j) with
   * j-i > TURN is visited exactly once */
  for (k=1; k<n-TURN; k++)
    for (l=1; l<=2; l++) {
      int type,ntype=0,otype=0;
      i=k; j = i+TURN+l; if (j>n) continue;
      type = pair[vars->S[i]][vars->S[j]];
      while ((i>=1)&&(j<=n)) {
        /* ntype: pair type of the enclosing candidate (i-1, j+1), if in range */
        if ((i>1)&&(j<n)) ntype = pair[vars->S[i-1]][vars->S[j+1]];
        if (noLonelyPairs && (!otype) && (!ntype))
          type = 0; /* i.j can only form isolated pairs */
        vars->ptype[vars->my_iindx[i]-j] = (char) type;
        /* shift the sliding window: current pair becomes the "inner"
         * neighbour of the next, larger-span pair */
        otype = type;
        type = ntype;
        i--; j++;
      }
    }
}
/*
 * Backtrack through the 5' fragment matrix E_F5 to recover the structure
 * of the interval [1..j] for distance class (k, l).
 *
 * k / l are the base-pair distances to reference structure 1 / 2 (l is
 * stored in steps of 2, hence the pervasive "l/2" indexing).  The special
 * value k == -1 selects the "remainder" entry E_F5_rem[j], which collects
 * all states whose distances exceed maxD1/maxD2.
 *
 * Writes matching '(' / ')' via the recursive calls into backtrack_c();
 * aborts with nrerror() if no producing decomposition is found.
 */
PRIVATE void backtrack_f5(unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars){
  int *my_iindx, energy, type, dangles, cnt1, cnt2, cnt3, cnt4;
  int **l_min_values, **l_max_values,**l_min_values_f, **l_max_values_f;
  int *k_min_values, *k_max_values,*k_min_values_f, *k_max_values_f;
  int ***E_C, ***E_F5;
  int *E_C_rem, *E_F5_rem;
  unsigned int i, ij, seq_length, maxD1, maxD2;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *ptype;
  paramT *P;
  unsigned int da, db;

  /* pull frequently used members into locals */
  P = vars->P;
  seq_length = vars->seq_length;
  S1 = vars->S1;
  ptype = vars->ptype;
  my_iindx = vars->my_iindx;
  referenceBPs1 = vars->referenceBPs1;
  referenceBPs2 = vars->referenceBPs2;
  dangles = vars->dangles;
  E_F5 = vars->E_F5;
  l_min_values_f = vars->l_min_values_f;
  l_max_values_f = vars->l_max_values_f;
  k_min_values_f = vars->k_min_values_f;
  k_max_values_f = vars->k_max_values_f;
  E_C = vars->E_C;
  l_min_values = vars->l_min_values;
  l_max_values = vars->l_max_values;
  k_min_values = vars->k_min_values;
  k_max_values = vars->k_max_values;
  E_F5_rem = vars->E_F5_rem;
  E_C_rem = vars->E_C_rem;
  maxD1 = vars->maxD1;
  maxD2 = vars->maxD2;

  /* distance shift caused by leaving nucleotide j unpaired */
  da = referenceBPs1[my_iindx[1]-j] - referenceBPs1[my_iindx[1]-j+1];
  db = referenceBPs2[my_iindx[1]-j] - referenceBPs2[my_iindx[1]-j+1];

  if(j<TURN+2) return;  /* interval too short to contain any pair */

  /* case 1: F5[j] == F5[j-1] (j unpaired)? */
  if(k == -1){
    if(E_F5_rem[j]==INF)
      return;
    else if(E_F5_rem[j] == E_F5_rem[j-1]){
      backtrack_f5(j-1,k,l,structure, vars);
      return;
    }
    else if(E_F5[j-1]){
      for(cnt1 = k_min_values_f[j-1];
          cnt1 <= k_max_values_f[j-1];
          cnt1++){
        for(cnt2 = l_min_values_f[j-1][cnt1];
            cnt2 <= l_max_values_f[j-1][cnt1];
            cnt2+=2){
          /* only entries shifted out of the [0..maxD1]x[0..maxD2] window
           * can have contributed to the remainder value */
          if(((cnt1 + da) > maxD1) || ((cnt2 + db) > maxD2)){
            if(E_F5_rem[j] == E_F5[j-1][cnt1][cnt2/2]){
              backtrack_f5(j-1, cnt1, cnt2, structure, vars);
              return;
            }
          }
        }
      }
    }
  }
  else if((k >= da) && (l >= db)){
    if(E_F5[j-1]){
      if((k - da >= k_min_values_f[j-1]) && (k - da <= k_max_values_f[j-1])){
        if((l - db >= l_min_values_f[j-1][k-da]) && (l - db <= l_max_values_f[j-1][k-da]))
          if(E_F5[j-1][k-da][(l-db)/2] == E_F5[j][k][l/2]){
            backtrack_f5(j-1, k-da, l-db, structure, vars);
            return;
          }
      }
    }
  }

  /* case 2: j pairs with nucleotide 1 -> F5[j] = C[1,j] + ext-loop energy */
  type = ptype[my_iindx[1]-j];
  if(type){
    if(dangles == 2)
      energy = E_ExtLoop(type, -1, j < seq_length ? S1[j+1] : -1, P);
    else
      energy = E_ExtLoop(type, -1, -1, P);

    if(k == -1){
      if(E_C_rem[my_iindx[1]-j] + energy == E_F5_rem[j]){
        backtrack_c(1, j, -1, -1, structure, vars);
        return;
      }
    }
    else if(k >= k_min_values[my_iindx[1]-j] && (k <= k_max_values[my_iindx[1]-j])){
      if((l >= l_min_values[my_iindx[1]-j][k]) && (l <= l_max_values[my_iindx[1]-j][k]))
        if(E_C[my_iindx[1]-j][k][l/2] + energy == E_F5[j][k][l/2]){
          backtrack_c(1, j, k, l, structure, vars);
          return;
        }
    }
  }

  /* case 3: j pairs with some i > 1 -> F5[j] = F5[i-1] + C[i,j] + ext-loop energy */
  for (i=j-TURN-1; i>1; i--) {
    ij = my_iindx[i]-j;
    type = ptype[ij];
    if (type) {
      /* distance offset of splitting [1..j] into [1..i-1] and the pair (i,j) */
      unsigned int d1a = referenceBPs1[my_iindx[1]-j] - referenceBPs1[ij] - referenceBPs1[my_iindx[1]-i+1];
      unsigned int d1b = referenceBPs2[my_iindx[1]-j] - referenceBPs2[ij] - referenceBPs2[my_iindx[1]-i+1];
      if(dangles == 2)
        energy = E_ExtLoop(type, S1[i-1], j < seq_length ? S1[j+1] : -1, P);
      else
        energy = E_ExtLoop(type, -1, -1, P);

      if(k == -1){
        /* remainder entry: one (or both) of the two parts is itself a
         * remainder, or their combined distance leaves the window */
        if(E_C_rem[ij] != INF){
          for(cnt1 = k_min_values_f[i-1];
              cnt1 <= k_max_values_f[i-1];
              cnt1++){
            for(cnt2 = l_min_values_f[i-1][cnt1];
                cnt2 <= l_max_values_f[i-1][cnt1];
                cnt2+=2){
              if(E_F5_rem[j] == (E_F5[i-1][cnt1][cnt2/2] + E_C_rem[ij] + energy)){
                backtrack_f5(i-1, cnt1, cnt2, structure, vars);
                backtrack_c(i,j,-1,-1,structure, vars);
                return;
              }
            }
          }
          if(E_F5_rem[j] == (E_F5_rem[i-1] + E_C_rem[ij] + energy)){
            backtrack_f5(i-1, -1, -1, structure, vars);
            backtrack_c(i,j,-1,-1,structure,vars);
            return;
          }
        }
        if(E_F5_rem[i-1] != INF){
          for(cnt1 = k_min_values[ij];
              cnt1 <= k_max_values[ij];
              cnt1++){
            for(cnt2 = l_min_values[ij][cnt1];
                cnt2 <= l_max_values[ij][cnt1];
                cnt2 += 2){
              if(E_F5_rem[j] == (E_F5_rem[i-1] + E_C[ij][cnt1][cnt2/2] + energy)){
                backtrack_f5(i-1,-1,-1,structure,vars);
                backtrack_c(i,j,cnt1,cnt2,structure,vars);
                return;
              }
            }
          }
        }
        /* both parts are regular entries but their sum exceeds the window */
        for(cnt1 = k_min_values_f[i-1];
            cnt1 <= k_max_values_f[i-1];
            cnt1++)
          for(cnt2 = l_min_values_f[i-1][cnt1];
              cnt2 <= l_max_values_f[i-1][cnt1];
              cnt2 += 2)
            for(cnt3 = k_min_values[ij];
                cnt3 <= k_max_values[ij];
                cnt3++)
              for(cnt4 = l_min_values[ij][cnt3];
                  cnt4 <= l_max_values[ij][cnt3];
                  cnt4 += 2){
                if(((cnt1 + cnt3 + d1a)>maxD1) || ((cnt2+cnt4+d1b)>maxD2)){
                  if(E_F5_rem[j] == (E_F5[i-1][cnt1][cnt2/2] + E_C[ij][cnt3][cnt4/2] + energy)){
                    backtrack_f5(i-1,cnt1,cnt2,structure,vars);
                    backtrack_c(i,j,cnt3,cnt4,structure,vars);
                    return;
                  }
                }
              }
      }
      else if((k >= d1a) && (l >= d1b)){
        /* regular entry: enumerate splits of the remaining (k-d1a, l-d1b)
         * distance budget between F5[i-1] and C[i,j] */
        int k_f_max = MIN2(k-d1a, k_max_values_f[i-1]);
        for(cnt1 = k_min_values_f[i-1]; cnt1 <= k_f_max; cnt1++){
          int l_f_max = MIN2(l - d1b, l_max_values_f[i-1][cnt1]);
          for(cnt2 = l_min_values_f[i-1][cnt1]; cnt2 <= l_f_max; cnt2+=2){
            int k_c = k - d1a - cnt1;
            if((k_c >= k_min_values[ij]) && (k_c <= k_max_values[ij])){
              int l_c = l - d1b - cnt2;
              if((l_c >= l_min_values[ij][k_c]) && (l_c <= l_max_values[ij][k_c])){
                if(E_F5[j][k][l/2] == (E_F5[i-1][cnt1][cnt2/2] + E_C[ij][k_c][l_c/2] + energy)){
                  backtrack_f5(i-1, cnt1, cnt2, structure, vars);
                  backtrack_c(i, j, k_c, l_c, structure, vars);
                  return;
                }
              }
            }
          }
        }
      }
    }
  }
  nrerror("backtracking failed in f5");
}
/*
 * Backtrack through the "closed" matrix E_C: recover the structure enclosed
 * by the base pair (i,j) for distance class (k, l).
 *
 * k / l are the base-pair distances to reference structure 1 / 2 (l stored
 * in steps of 2).  k == -1 selects the remainder entry E_C_rem[ij], which
 * holds states whose distances exceed maxD1/maxD2.
 *
 * The pair (i,j) is written into @p structure immediately; then the three
 * loop types are tried in order: hairpin, interior loop (recursing into
 * backtrack_c), and multiloop (recursing into backtrack_m / backtrack_m1).
 * Aborts with nrerror() if no decomposition reproduces the stored energy.
 */
PRIVATE void backtrack_c(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars){
  unsigned int p, q, pq, ij, maxp, maxD1, maxD2;
  int *my_iindx, type, type_2, energy, no_close, dangles, base_d1, base_d2, d1, d2, cnt1, cnt2, cnt3, cnt4;
  int **l_min_values, **l_max_values,**l_min_values_m, **l_max_values_m,**l_min_values_m1, **l_max_values_m1;
  int *k_min_values, *k_max_values,*k_min_values_m, *k_max_values_m,*k_min_values_m1, *k_max_values_m1;
  int ***E_C, ***E_M, ***E_M1, *E_C_rem, *E_M_rem, *E_M1_rem;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *ptype, *sequence;
  paramT *P;

  /* pull frequently used members into locals */
  P = vars->P;
  sequence = vars->sequence;
  S1 = vars->S1;
  ptype = vars->ptype;
  my_iindx = vars->my_iindx;
  referenceBPs1 = vars->referenceBPs1;
  referenceBPs2 = vars->referenceBPs2;
  dangles = vars->dangles;
  E_C = vars->E_C;
  l_min_values = vars->l_min_values;
  l_max_values = vars->l_max_values;
  k_min_values = vars->k_min_values;
  k_max_values = vars->k_max_values;
  E_M = vars->E_M;
  l_min_values_m = vars->l_min_values_m;
  l_max_values_m = vars->l_max_values_m;
  k_min_values_m = vars->k_min_values_m;
  k_max_values_m = vars->k_max_values_m;
  E_M1 = vars->E_M1;
  l_min_values_m1 = vars->l_min_values_m1;
  l_max_values_m1 = vars->l_max_values_m1;
  k_min_values_m1 = vars->k_min_values_m1;
  k_max_values_m1 = vars->k_max_values_m1;
  E_C_rem = vars->E_C_rem;
  E_M_rem = vars->E_M_rem;
  E_M1_rem = vars->E_M1_rem;
  maxD1 = vars->maxD1;
  maxD2 = vars->maxD2;

  ij = my_iindx[i]-j;
  /* e: the target energy we must reproduce by some decomposition */
  int e = (k==-1) ? E_C_rem[ij] : E_C[ij][k][l/2];

  type = ptype[ij];
  no_close = (((type==3)||(type==4))&&no_closingGU);  /* GU closing forbidden? */

  /* record the pair (i,j) in the output structure */
  structure[i-1] = '(';
  structure[j-1] = ')';

  /* base distance of the hairpin state: forming (i,j) moves us one pair
   * closer (-1) if (i,j) is in the reference, one further (+1) otherwise */
  base_d1 = ((unsigned int)vars->reference_pt1[i] != j) ? 1 : -1;
  base_d2 = ((unsigned int)vars->reference_pt2[i] != j) ? 1 : -1;
  base_d1 += referenceBPs1[ij];
  base_d2 += referenceBPs2[ij];

  /* --- hairpin loop ------------------------------------------------ */
  if(k == -1){
    /* remainder entry: only valid if the hairpin distance leaves the window */
    if(((unsigned int)base_d1 > maxD1) || ((unsigned int)base_d2 > maxD2)){
      if(e == E_Hairpin(j-i-1, type, S1[i+1], S1[j-1], sequence+i-1, P)) return;
    }
  }
  else{
    if((unsigned int)base_d1 == k)
      if((unsigned int)base_d2 == l)
        if(E_Hairpin(j-i-1, type, S1[i+1], S1[j-1], sequence+i-1, P) == e) return;
  }

  /* --- interior loop: (i,j) stacks on inner pair (p,q) ------------- */
  maxp = MIN2(j-2-TURN,i+MAXLOOP+1);
  for(p = i+1; p <= maxp; p++){
    unsigned int minq, ln_pre;
    minq = p + TURN + 1;
    ln_pre = j - i - 1;
    if(ln_pre > minq + MAXLOOP) minq = ln_pre - MAXLOOP - 1;  /* cap total loop size at MAXLOOP */
    for (q = minq; q < j; q++) {
      pq = my_iindx[p]-q;
      type_2 = ptype[pq];
      if (type_2==0) continue;
      type_2 = rtype[type_2];  /* inner pair is seen from the other side */
      /* d2 = dbp(S_{i,j}, S_{p.q} + {i,j}) */
      d1 = base_d1 - referenceBPs1[pq];
      d2 = base_d2 - referenceBPs2[pq];
      energy = E_IntLoop(p-i-1, j-q-1, type, type_2, S1[i+1], S1[j-1], S1[p-1], S1[q+1], P);
      if(k == -1){
        /* remainder: either the inner part is a remainder itself ... */
        if(E_C_rem[pq] != INF)
          if(e == (E_C_rem[pq] + energy)){
            backtrack_c(p,q,-1,-1,structure,vars);
            return;
          }
        /* ... or a regular inner entry pushed past the window */
        if(E_C[pq])
          for(cnt1 = k_min_values[pq];
              cnt1 <= k_max_values[pq];
              cnt1++)
            for(cnt2 = l_min_values[pq][cnt1];
                cnt2 <= l_max_values[pq][cnt1];
                cnt2 += 2){
              if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)){
                if(e == (E_C[pq][cnt1][cnt2/2] + energy)){
                  backtrack_c(p,q,cnt1,cnt2,structure,vars);
                  return;
                }
              }
            }
      }
      else{
        if(!E_C[pq]) continue;
        if(d1 <= k && d2 <= l){
          if((k-d1 >= k_min_values[pq]) && (k-d1) <= k_max_values[pq])
            if((l - d2 >= l_min_values[pq][k-d1]) && (l-d2 <= l_max_values[pq][k-d1]))
              if(E_C[pq][k-d1][(l-d2)/2] + energy == e){
                backtrack_c(p, q, k-d1, l-d2, structure, vars);
                return;
              }
        }
      }
    } /* end q-loop */
  } /* end p-loop */

  /* --- multi-loop decomposition: C[i,j] = M[i+1,u] + M1[u+1,j-1] --- */
  if(!no_close){
    unsigned int u;
    int tt;
    if(k==-1){
      for(u=i+TURN+2; u<j-TURN-2;u++){
        int i1u, u1j1;
        i1u = my_iindx[i+1]-u;
        u1j1 = my_iindx[u+1]-j+1;
        tt = rtype[type];  /* closing pair seen from inside the loop */
        energy = P->MLclosing;
        if(dangles == 2)
          energy += E_MLstem(tt, S1[j-1], S1[i+1], P);
        else
          energy += E_MLstem(tt, -1, -1, P);
        /* remainder combinations: M-part is a remainder ... */
        if(E_M_rem[i1u] != INF){
          if(E_M1[u1j1])
            for(cnt1 = k_min_values_m1[u1j1];
                cnt1 <= k_max_values_m1[u1j1];
                cnt1++)
              for(cnt2 = l_min_values_m1[u1j1][cnt1];
                  cnt2 <= l_max_values_m1[u1j1][cnt1];
                  cnt2 += 2){
                if(e == (E_M_rem[i1u] + E_M1[u1j1][cnt1][cnt2/2] + energy)){
                  backtrack_m(i+1,u,-1,-1,structure,vars);
                  backtrack_m1(u+1,j-1,cnt1,cnt2,structure,vars);
                  return;
                }
              }
          if(E_M1_rem[u1j1] != INF){
            if(e == (E_M_rem[i1u] + E_M1_rem[u1j1] + energy)){
              backtrack_m(i+1, u, -1, -1, structure, vars);
              backtrack_m1(u+1, j-1, -1, -1, structure, vars);
              return;
            }
          }
        }
        /* ... or M1-part is a remainder ... */
        if(E_M1_rem[u1j1] != INF){
          if(E_M[i1u])
            for(cnt1 = k_min_values_m[i1u];
                cnt1 <= k_max_values_m[i1u];
                cnt1++)
              for(cnt2 = l_min_values_m[i1u][cnt1];
                  cnt2 <= l_max_values_m[i1u][cnt1];
                  cnt2 += 2)
                if(e == (E_M[i1u][cnt1][cnt2/2] + E_M1_rem[u1j1] + energy)){
                  backtrack_m(i+1,u,cnt1,cnt2,structure,vars);
                  backtrack_m1(u+1,j-1,-1,-1,structure,vars);
                  return;
                }
        }
        /* now all cases where we exceed the maxD1/D2 scope by combination of E_M and E_M1 */
        if(!E_M[i1u]) continue;
        if(!E_M1[u1j1]) continue;
        /* get distance to reference if closing this multiloop
         * dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1})
         */
        d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1];
        d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1];
        for(cnt1 = vars->k_min_values_m[i1u];
            cnt1 <= vars->k_max_values_m[i1u];
            cnt1++)
          for(cnt2 = vars->l_min_values_m[i1u][cnt1];
              cnt2 <= vars->l_max_values_m[i1u][cnt1];
              cnt2+=2)
            for(cnt3 = vars->k_min_values_m1[u1j1];
                cnt3 <= vars->k_max_values_m1[u1j1];
                cnt3++)
              for(cnt4 = vars->l_min_values_m1[u1j1][cnt3];
                  cnt4 <= vars->l_max_values_m1[u1j1][cnt3];
                  cnt4+=2){
                if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)){
                  if(e == (E_M[i1u][cnt1][cnt2/2] + E_M1[u1j1][cnt3][cnt4/2] + energy)){
                    backtrack_m(i+1,u,cnt1,cnt2,structure,vars);
                    backtrack_m1(u+1,j-1,cnt3,cnt4,structure,vars);
                    return;
                  }
                }
              }
      }
    }
    else{
      /* regular entry: split the (k-d1, l-d2) distance budget between
       * the M part [i+1..u] and the M1 part [u+1..j-1] */
      for(u=i+TURN+2; u<j-TURN-2;u++){
        int i1u, u1j1;
        i1u = my_iindx[i+1]-u;
        u1j1 = my_iindx[u+1]-j+1;
        if(!E_M[i1u]) continue;
        if(!E_M1[u1j1]) continue;
        /* get distance to reference if closing this multiloop
         * dist3 = dbp(S_{i,j}, {i,j} + S_{i+1.u} + S_{u+1,j-1})
         */
        d1 = base_d1 - referenceBPs1[i1u] - referenceBPs1[u1j1];
        d2 = base_d2 - referenceBPs2[i1u] - referenceBPs2[u1j1];
        tt = rtype[type];
        energy = P->MLclosing;
        if(dangles == 2)
          energy += E_MLstem(tt, S1[j-1], S1[i+1], P);
        else
          energy += E_MLstem(tt, -1, -1, P);
        if((d1 <= k) && (d2 <= l))
          for(cnt1 = k_min_values_m[i1u];
              cnt1 <= MIN2(k-d1, k_max_values_m[i1u]);
              cnt1++)
            for(cnt2 = l_min_values_m[i1u][cnt1];
                cnt2 <= MIN2(l-d2, l_max_values_m[i1u][cnt1]);
                cnt2+=2)
              if( ((k-d1-cnt1) >= k_min_values_m1[u1j1])
                  && ((k-d1-cnt1) <= k_max_values_m1[u1j1]))
                if( ((l-d2-cnt2) >= l_min_values_m1[u1j1][k-d1-cnt1])
                    && ((l-d2-cnt2) <= l_max_values_m1[u1j1][k-d1-cnt1]))
                  if(e == (energy + E_M[i1u][cnt1][cnt2/2] + E_M1[u1j1][k-d1-cnt1][(l-d2-cnt2)/2])){
                    backtrack_m(i+1, u, cnt1, cnt2, structure, vars);
                    backtrack_m1(u+1, j-1, k-d1-cnt1, l-d2-cnt2, structure, vars);
                    return;
                  }
      }
    }
  }
  nrerror("backtracking failed in c");
}
/*
 * Backtrack one optimal decomposition of the multiloop-component array M
 * for the segment [i,j] in the distance-class ("2D") MFE recursion.
 *
 * k, l ....... base-pair distance classes to reference structure 1 and 2;
 *              k == -1 selects the collective "remainder" solutions
 *              (E_M_rem) whose distance exceeds the maxD1/maxD2 window.
 * structure .. dot-bracket buffer; pairs are written by the backtrack_c
 *              calls reached from here.
 *
 * The forward recursion cases re-evaluated below are
 *   M(i,j) = M(i+1,j)+MLbase | M(i,j-1)+MLbase | C(i,j)+MLstem
 *          | min_u { M(i,u) + C(u+1,j) + MLstem }
 * The first case whose energy reproduces e triggers recursion and return;
 * falling through every case is a hard error (nrerror).
 */
PRIVATE void backtrack_m(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars){
  unsigned int u, ij, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2;
  int *my_iindx, type, energy, dangles,circ, cnt1, cnt2, cnt3, cnt4;
  int **l_min_values, **l_max_values,**l_min_values_m, **l_max_values_m;
  int *k_min_values, *k_max_values,*k_min_values_m, *k_max_values_m;
  int ***E_C, ***E_M, *E_C_rem, *E_M_rem;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *ptype;
  paramT *P;

  /* unpack DP state from vars */
  P               = vars->P;
  seq_length      = vars->seq_length;
  S1              = vars->S1;
  circ            = vars->circ;
  ptype           = vars->ptype;
  my_iindx        = vars->my_iindx;
  referenceBPs1   = vars->referenceBPs1;
  referenceBPs2   = vars->referenceBPs2;
  dangles         = vars->dangles;
  E_C             = vars->E_C;
  l_min_values    = vars->l_min_values;
  l_max_values    = vars->l_max_values;
  k_min_values    = vars->k_min_values;
  k_max_values    = vars->k_max_values;
  E_M             = vars->E_M;
  l_min_values_m  = vars->l_min_values_m;
  l_max_values_m  = vars->l_max_values_m;
  k_min_values_m  = vars->k_min_values_m;
  k_max_values_m  = vars->k_max_values_m;
  E_C_rem         = vars->E_C_rem;
  E_M_rem         = vars->E_M_rem;
  maxD1           = vars->maxD1;
  maxD2           = vars->maxD2;

  ij = my_iindx[i]-j;
  /* energy value we must reproduce by one of the recursion cases below */
  int e = (k == -1) ? E_M_rem[ij] : E_M[ij][k][l/2];
  /* distances of the reference structures restricted to [i,j] */
  base_d1 = referenceBPs1[ij];
  base_d2 = referenceBPs2[ij];

  if(k == -1){
    /* ---- remainder class: any case whose distance leaves the window, or
       that combines with another remainder entry, may apply ---- */

    /* new_fML = ML(i+1,j)+c */
    d1 = base_d1 - referenceBPs1[my_iindx[i+1]-j];
    d2 = base_d2 - referenceBPs2[my_iindx[i+1]-j];
    if(E_M_rem[my_iindx[i+1]-j] != INF){
      if(e == (E_M_rem[my_iindx[i+1]-j] + P->MLbase)){
        backtrack_m(i+1,j,-1,-1,structure,vars);
        return;
      }
    }
    /* in-window M(i+1,j) entries that fall out of the window once the
       shift d1/d2 of the unpaired base i is added */
    if(E_M[my_iindx[i+1]-j])
      for(cnt1 = k_min_values_m[my_iindx[i+1]-j];
          cnt1 <= k_max_values_m[my_iindx[i+1]-j];
          cnt1++)
        for(cnt2 = l_min_values_m[my_iindx[i+1]-j][cnt1];
            cnt2 <= l_max_values_m[my_iindx[i+1]-j][cnt1];
            cnt2 += 2)
          if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)){
            if(e == (E_M[my_iindx[i+1]-j][cnt1][cnt2/2] + P->MLbase)){
              backtrack_m(i+1,j,cnt1,cnt2,structure,vars);
              return;
            }
          }

    /* new_fML = min(ML(i,j-1) + c, new_fML) */
    d1 = base_d1 - referenceBPs1[ij+1];
    d2 = base_d2 - referenceBPs2[ij+1];
    if(E_M_rem[ij+1] != INF){
      if(e == (E_M_rem[ij+1] + P->MLbase)){
        backtrack_m(i,j-1,-1,-1,structure,vars);
        return;
      }
    }
    if(E_M[ij+1])
      for(cnt1 = k_min_values_m[ij+1];
          cnt1 <= k_max_values_m[ij+1];
          cnt1++)
        for(cnt2 = l_min_values_m[ij+1][cnt1];
            cnt2 <= l_max_values_m[ij+1][cnt1];
            cnt2 += 2)
          if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)){
            if(e == (E_M[ij+1][cnt1][cnt2/2] + P->MLbase)){
              backtrack_m(i,j-1,cnt1,cnt2,structure,vars);
              return;
            }
          }

    /* new_fML = min(new_fML, C(i,j)+b) */
    if(E_C_rem[ij] != INF){
      type = ptype[ij];
      /* dangles==2: both neighboring bases stack onto the stem; at the
         sequence ends they only exist for circular RNA (circ) */
      if(dangles == 2)
        energy = E_MLstem(type, ((i > 1) || circ) ? S1[i-1] : -1, ((j < seq_length) || circ) ? S1[j+1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);
      if(e == (E_C_rem[ij] + energy)){
        backtrack_c(i,j,-1,-1,structure,vars);
        return;
      }
    }

    /* modular decomposition -------------------------------*/
    /* split point u: M(i,u) + C(u+1,j); either part may come from the
       remainder class, or both in-window parts may jointly exceed it */
    for(u = i+1+TURN; u <= j-2-TURN; u++){
      int iu, uj;
      iu = my_iindx[i]-u;
      uj = my_iindx[u+1]-j;
      type = ptype[uj];
      d1 = base_d1 - referenceBPs1[iu] - referenceBPs1[uj];
      d2 = base_d2 - referenceBPs2[iu] - referenceBPs2[uj];
      if(dangles == 2)
        energy = E_MLstem(type, S1[u], (j < seq_length) || circ ? S1[j+1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);
      /* case: left part is remainder */
      if(E_M_rem[iu] != INF){
        if(E_C[uj])
          for(cnt1 = k_min_values[uj];
              cnt1 <= k_max_values[uj];
              cnt1++)
            for(cnt2 = l_min_values[uj][cnt1];
                cnt2 <= l_max_values[uj][cnt1];
                cnt2 += 2)
              if(e == (E_M_rem[iu] + E_C[uj][cnt1][cnt2/2] + energy)){
                backtrack_m(i,u,-1,-1,structure,vars);
                backtrack_c(u+1,j,cnt1,cnt2,structure, vars);
                return;
              }
        if(E_C_rem[uj] != INF){
          if(e == (E_M_rem[iu] + E_C_rem[uj] + energy)){
            backtrack_m(i,u,-1,-1,structure,vars);
            backtrack_c(u+1,j,-1,-1,structure,vars);
            return;
          }
        }
      }
      /* case: right part is remainder */
      if(E_C_rem[uj] != INF){
        if(E_M[iu])
          for(cnt1 = k_min_values_m[iu];
              cnt1 <= k_max_values_m[iu];
              cnt1++)
            for(cnt2 = l_min_values_m[iu][cnt1];
                cnt2 <= l_max_values_m[iu][cnt1];
                cnt2 += 2)
              if(e == (E_M[iu][cnt1][cnt2/2] + E_C_rem[uj] + energy)){
                backtrack_m(i,u,cnt1,cnt2,structure,vars);
                backtrack_c(u+1,j,-1,-1,structure,vars);
                return;
              }
      }
      /* case: both parts in-window, combined classes exceed the window */
      if(!E_M[iu]) continue;
      if(!E_C[uj]) continue;
      for(cnt1 = k_min_values_m[iu];
          cnt1 <= k_max_values_m[iu];
          cnt1++)
        for(cnt2 = l_min_values_m[iu][cnt1];
            cnt2 <= l_max_values_m[iu][cnt1];
            cnt2 += 2)
          for(cnt3 = k_min_values[uj];
              cnt3 <= k_max_values[uj];
              cnt3++){
            for(cnt4 = l_min_values[uj][cnt3];
                cnt4 <= l_max_values[uj][cnt3];
                cnt4 += 2)
              if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2))
                if(e == (E_M[iu][cnt1][cnt2/2] + E_C[uj][cnt3][cnt4/2] + energy)){
                  backtrack_m(i, u, cnt1, cnt2, structure, vars);
                  backtrack_c(u+1, j, cnt3, cnt4, structure, vars);
                  return;
                }
          }
    }
  } /* end if (k == -1) */
  else{
    /* ---- in-window class (k,l): each sub-solution must contribute
       exactly the residual classes k-d1 / l-d2 ---- */
    d1 = base_d1 - referenceBPs1[my_iindx[i+1]-j];
    d2 = base_d2 - referenceBPs2[my_iindx[i+1]-j];
    /* new_fML = ML(i+1,j)+c */
    if(d1 <= k && d2 <= l)
      if((k-d1 >= k_min_values_m[my_iindx[i+1]-j]) && (k-d1 <= k_max_values_m[my_iindx[i+1]-j]))
        if((l-d2 >= l_min_values_m[my_iindx[i+1]-j][k-d1]) && (l-d2 <= l_max_values_m[my_iindx[i+1]-j][k-d1])){
          if(E_M[my_iindx[i+1]-j][k-d1][(l-d2)/2] + P->MLbase == e){
            backtrack_m(i+1, j, k-d1, l-d2, structure, vars);
            return;
          }
        }
    d1 = base_d1 - referenceBPs1[ij+1];
    d2 = base_d2 - referenceBPs2[ij+1];
    /* new_fML = min(ML(i,j-1) + c, new_fML) */
    if(E_M[ij+1])
      if(d1 <= k && d2 <= l)
        if((k-d1 >= k_min_values_m[ij+1]) && (k-d1 <= k_max_values_m[ij+1]))
          if((l-d2 >= l_min_values_m[ij+1][k-d1]) && (l-d2 <= l_max_values_m[ij+1][k-d1]))
            if(E_M[ij+1][k-d1][(l-d2)/2] + P->MLbase == e){
              backtrack_m(i, j-1, k-d1, l-d2, structure, vars);
              return;
            }
    /* new_fML = min(new_fML, C(i,j)+b) */
    if(E_C[ij]){
      type = ptype[ij];
      if(dangles == 2)
        energy = E_MLstem(type, ((i > 1) || circ) ? S1[i-1] : -1, ((j < seq_length) || circ) ? S1[j+1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);
      if((k >= k_min_values[ij]) && (k <= k_max_values[ij]))
        if((l >= l_min_values[ij][k]) && (l <= l_max_values[ij][k])){
          if(E_C[ij][k][l/2] + energy == e){
            backtrack_c(i, j, k, l, structure, vars);
            return;
          }
        }
    }
    /* modular decomposition -------------------------------*/
    for(u = i+1+TURN; u <= j-2-TURN; u++){
      if(!E_M[my_iindx[i]-u]) continue;
      if(!E_C[my_iindx[u+1]-j]) continue;
      type = ptype[my_iindx[u+1]-j];
      d1 = base_d1 - referenceBPs1[my_iindx[i]-u] - referenceBPs1[my_iindx[u+1]-j];
      d2 = base_d2 - referenceBPs2[my_iindx[i]-u] - referenceBPs2[my_iindx[u+1]-j];
      if(dangles == 2)
        energy = E_MLstem(type, S1[u], ((j < seq_length) || circ) ? S1[j+1] : -1, P);
      else
        energy = E_MLstem(type, -1, -1, P);
      /* partition the residual classes between M(i,u) and C(u+1,j) */
      if(d1 <= k && d2 <= l)
        for(cnt1 = k_min_values_m[my_iindx[i]-u]; cnt1 <= MIN2(k-d1, k_max_values_m[my_iindx[i]-u]); cnt1++)
          for(cnt2 = l_min_values_m[my_iindx[i]-u][cnt1]; cnt2 <= MIN2(l-d2, l_max_values_m[my_iindx[i]-u][cnt1]); cnt2+=2)
            if((k-d1-cnt1 >= k_min_values[my_iindx[u+1]-j]) && (k-d1-cnt1 <= k_max_values[my_iindx[u+1]-j]))
              if((l-d2-cnt2 >= l_min_values[my_iindx[u+1]-j][k-d1-cnt1]) && (l-d2-cnt2 <= l_max_values[my_iindx[u+1]-j][k-d1-cnt1]))
                if(E_M[my_iindx[i]-u][cnt1][cnt2/2] + E_C[my_iindx[u+1]-j][k-d1-cnt1][(l-d2-cnt2)/2] + energy == e){
                  backtrack_m(i, u, cnt1, cnt2, structure, vars);
                  backtrack_c(u+1, j, k-d1-cnt1, l-d2-cnt2, structure, vars);
                  return;
                }
    }
  }
  nrerror("backtracking failed in fML\n");
}
/*
 * Backtrack the M1 array (multiloop component that contains exactly one
 * stem, closed by pair (i,j') with trailing unpaired bases) for segment
 * [i,j] in the distance-class ("2D") MFE recursion.
 *
 * k, l ....... distance classes to reference structure 1 and 2;
 *              k == -1 selects the remainder class (E_M1_rem) outside
 *              the maxD1/maxD2 window.
 * Recursion cases: M1(i,j) = C(i,j)+MLstem | M1(i,j-1)+MLbase.
 * On the first energy match the function recurses (backtrack_c /
 * backtrack_m1) and returns; exhausting all cases is a hard error.
 *
 * FIX: both dereferences of E_M1[ij+1] are now guarded by a NULL check,
 * matching the guards used everywhere else for these sparse 3D arrays
 * (cf. the E_M/E_M1 guards in backtrack_m and backtrack_m2); previously
 * the k == -1 branch iterated k_min/k_max of a potentially unallocated
 * entry and dereferenced E_M1[ij+1] unconditionally.
 */
PRIVATE void backtrack_m1(unsigned int i, unsigned int j, int k, int l, char *structure, TwoDfold_vars *vars){
  unsigned int ij, seq_length, d1, d2, *referenceBPs1, *referenceBPs2, maxD1, maxD2;
  int *my_iindx, **l_min_values, **l_max_values,**l_min_values_m1, **l_max_values_m1;
  int *k_min_values, *k_max_values,*k_min_values_m1, *k_max_values_m1, cnt1, cnt2;
  int ***E_C, ***E_M1, *E_C_rem, *E_M1_rem, type, dangles, circ, energy, e_m1;
  short *S1;
  char *ptype;
  paramT *P;

  /* unpack DP state from vars */
  P               = vars->P;
  seq_length      = vars->seq_length;
  S1              = vars->S1;
  ptype           = vars->ptype;
  circ            = vars->circ;
  my_iindx        = vars->my_iindx;
  referenceBPs1   = vars->referenceBPs1;
  referenceBPs2   = vars->referenceBPs2;
  dangles         = vars->dangles;
  E_C             = vars->E_C;
  l_min_values    = vars->l_min_values;
  l_max_values    = vars->l_max_values;
  k_min_values    = vars->k_min_values;
  k_max_values    = vars->k_max_values;
  E_M1            = vars->E_M1;
  l_min_values_m1 = vars->l_min_values_m1;
  l_max_values_m1 = vars->l_max_values_m1;
  k_min_values_m1 = vars->k_min_values_m1;
  k_max_values_m1 = vars->k_max_values_m1;
  E_C_rem         = vars->E_C_rem;
  E_M1_rem        = vars->E_M1_rem;
  maxD1           = vars->maxD1;
  maxD2           = vars->maxD2;

  ij    = my_iindx[i]-j;
  /* energy value we must reproduce by one of the recursion cases below */
  e_m1  = (k == -1) ? E_M1_rem[ij] : E_M1[ij][k][l/2];
  type  = ptype[ij];
  /* d1/d2: distance shift caused by removing the unpaired base j */
  d1    = referenceBPs1[ij] - referenceBPs1[ij+1];
  d2    = referenceBPs2[ij] - referenceBPs2[ij+1];

  if(dangles == 2)
    energy = E_MLstem(type, (i > 1) || circ ? S1[i-1] : -1, (j < seq_length) || circ ? S1[j+1] : -1, P);
  else
    energy = E_MLstem(type, -1, -1, P);

  if(k == -1){
    /* remainder class: M1(i,j) = C(i,j)+stem with C from remainder */
    if(E_C_rem[ij] != INF){
      if(e_m1 == (E_C_rem[ij] + energy)){
        backtrack_c(i,j,-1,-1,structure,vars);
        return;
      }
    }
    /* M1(i,j) = M1(i,j-1)+MLbase with M1 from remainder */
    if(E_M1_rem[ij+1] != INF){
      if(e_m1 == (E_M1_rem[ij+1] + P->MLbase)){
        backtrack_m1(i,j-1,-1,-1,structure,vars);
        return;
      }
    }
    /* in-window M1(i,j-1) entries pushed out of the window by d1/d2 */
    if(E_M1[ij+1])  /* guard: sparse entry may be unallocated */
      for(cnt1 = k_min_values_m1[ij+1];
          cnt1 <= k_max_values_m1[ij+1];
          cnt1++)
        for(cnt2 = l_min_values_m1[ij+1][cnt1];
            cnt2 <= l_max_values_m1[ij+1][cnt1];
            cnt2 += 2)
          if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2)){
            if(e_m1 == (E_M1[ij+1][cnt1][cnt2/2] + P->MLbase)){
              backtrack_m1(i,j-1,cnt1,cnt2,structure,vars);
              return;
            }
          }
  }
  else{
    /* in-window class (k,l): M1(i,j) = C(i,j)+stem */
    if(E_C[ij])
      if((k >= k_min_values[ij]) && (k <= k_max_values[ij]))
        if((l >= l_min_values[ij][k]) && (l <= l_max_values[ij][k]))
          if(E_C[ij][k][l/2] + energy == e_m1){
            backtrack_c(i, j, k, l, structure, vars);
            return;
          }
    /* M1(i,j) = M1(i,j-1)+MLbase at residual classes k-d1 / l-d2 */
    if(E_M1[ij+1])  /* guard: sparse entry may be unallocated */
      if(d1 <= k && d2 <= l)
        if((k-d1 >= k_min_values_m1[ij+1]) && (k-d1 <= k_max_values_m1[ij+1]))
          if((l-d2 >= l_min_values_m1[ij+1][k-d1]) && (l-d2 <= l_max_values_m1[ij+1][k-d1]))
            if(E_M1[ij+1][k-d1][(l-d2)/2] + P->MLbase == e_m1){
              backtrack_m1(i, j-1, k-d1, l-d2, structure, vars);
              return;
            }
  }
  nrerror("backtack failed in m1\n");
}
/*
 * Backtrack the exterior loop of a CIRCULAR RNA (array Fc) in the
 * distance-class ("2D") MFE recursion.
 *
 * k, l ... distance classes to the two reference structures;
 *          k == -1 selects the remainder class (E_Fc_rem etc.) outside
 *          the maxD1/maxD2 window.
 * The circular exterior loop is either the open chain, or closed by a
 * hairpin (FcH), an interior loop (FcI), or a multiloop (FcM); the
 * matching decomposition is re-discovered and backtracked via
 * backtrack_c / backtrack_m / backtrack_m2.
 */
PRIVATE void backtrack_fc(int k, int l, char *structure, TwoDfold_vars *vars){
  unsigned int d, i, j, seq_length, base_d1, base_d2, d1, d2, maxD1, maxD2;
  int *my_iindx, energy, cnt1, cnt2, cnt3, cnt4;
  short *S1;
  unsigned int *referenceBPs1, *referenceBPs2;
  char *sequence, *ptype;
  int **E_Fc, **E_FcH, **E_FcI, **E_FcM, ***E_C, ***E_M, ***E_M2;
  int *E_C_rem, *E_M_rem, *E_M2_rem, E_Fc_rem, E_FcH_rem, E_FcI_rem, E_FcM_rem;
  int **l_min_values, **l_max_values, *k_min_values, *k_max_values;
  int **l_min_values_m, **l_max_values_m, *k_min_values_m, *k_max_values_m;
  int **l_min_values_m2, **l_max_values_m2, *k_min_values_m2, *k_max_values_m2;
  int *l_min_values_fcH, *l_max_values_fcH, k_min_values_fcH, k_max_values_fcH;
  int *l_min_values_fcI, *l_max_values_fcI, k_min_values_fcI, k_max_values_fcI;
  int *l_min_values_fcM, *l_max_values_fcM, k_min_values_fcM, k_max_values_fcM;
  paramT *P;

  /* unpack DP state from vars */
  P                 = vars->P;
  sequence          = vars->sequence;
  seq_length        = vars->seq_length;
  S1                = vars->S1;
  ptype             = vars->ptype;
  my_iindx          = vars->my_iindx;
  referenceBPs1     = vars->referenceBPs1;
  referenceBPs2     = vars->referenceBPs2;
  /* distances of the full references on [1,n] */
  base_d1           = referenceBPs1[my_iindx[1]-seq_length];
  base_d2           = referenceBPs2[my_iindx[1]-seq_length];
  E_C               = vars->E_C;
  l_min_values      = vars->l_min_values;
  l_max_values      = vars->l_max_values;
  k_min_values      = vars->k_min_values;
  k_max_values      = vars->k_max_values;
  E_M               = vars->E_M;
  l_min_values_m    = vars->l_min_values_m;
  l_max_values_m    = vars->l_max_values_m;
  k_min_values_m    = vars->k_min_values_m;
  k_max_values_m    = vars->k_max_values_m;
  E_M2              = vars->E_M2;
  l_min_values_m2   = vars->l_min_values_m2;
  l_max_values_m2   = vars->l_max_values_m2;
  k_min_values_m2   = vars->k_min_values_m2;
  k_max_values_m2   = vars->k_max_values_m2;
  E_Fc              = vars->E_Fc;
  E_FcI             = vars->E_FcI;
  l_min_values_fcI  = vars->l_min_values_fcI;
  l_max_values_fcI  = vars->l_max_values_fcI;
  k_min_values_fcI  = vars->k_min_values_fcI;
  k_max_values_fcI  = vars->k_max_values_fcI;
  E_FcH             = vars->E_FcH;
  l_min_values_fcH  = vars->l_min_values_fcH;
  l_max_values_fcH  = vars->l_max_values_fcH;
  k_min_values_fcH  = vars->k_min_values_fcH;
  k_max_values_fcH  = vars->k_max_values_fcH;
  E_FcM             = vars->E_FcM;
  l_min_values_fcM  = vars->l_min_values_fcM;
  l_max_values_fcM  = vars->l_max_values_fcM;
  k_min_values_fcM  = vars->k_min_values_fcM;
  k_max_values_fcM  = vars->k_max_values_fcM;
  E_C_rem           = vars->E_C_rem;
  E_M_rem           = vars->E_M_rem;
  E_M2_rem          = vars->E_M2_rem;
  E_Fc_rem          = vars->E_Fc_rem;
  E_FcH_rem         = vars->E_FcH_rem;
  E_FcI_rem         = vars->E_FcI_rem;
  E_FcM_rem         = vars->E_FcM_rem;
  maxD1             = vars->maxD1;
  maxD2             = vars->maxD2;

  if(k==-1){
    /* ==== remainder class ==== */
    /* check if mfe might be open chain */
    /* energy 0 and reference distance outside the window: nothing to
       backtrack, structure stays unpaired */
    if(E_Fc_rem == 0)
      if((referenceBPs1[my_iindx[1]-seq_length] > maxD1) || (referenceBPs2[my_iindx[1]-seq_length] > maxD2))
        return;
    /* check for hairpin configurations */
    if(E_Fc_rem == E_FcH_rem){
      for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
        for (j = d; j <= seq_length; j++) {
          unsigned int u, ij;
          int type, no_close;
          char loopseq[10];
          i = j-d+1;
          ij = my_iindx[i]-j;
          /* u: size of the exterior (wrap-around) hairpin loop */
          u = seq_length-j + i-1;
          if (u<TURN) continue;
          type = ptype[ij];
          no_close = (((type==3)||(type==4))&&no_closingGU);
          type=rtype[type];
          if (!type) continue;
          if(no_close) continue;
          d1 = base_d1 - referenceBPs1[ij];
          d2 = base_d2 - referenceBPs2[ij];
          /* loop sequence (wrapping the origin) only needed for small
             loops; NOTE(review): loopseq stays uninitialized for u >= 7 —
             presumably E_Hairpin ignores it then; confirm */
          if (u<7) {
            strcpy(loopseq , sequence+j-1);
            strncat(loopseq, sequence, i);
          }
          energy = E_Hairpin(u, type, S1[j+1], S1[i-1], loopseq, P);
          if(E_C_rem[ij] != INF){
            if(E_Fc_rem == (E_C_rem[ij] + energy)){
              backtrack_c(i,j,-1,-1,structure,vars);
              return;
            }
          }
          /* in-window C(i,j) entries pushed out of the window */
          if(E_C[ij])
            for(cnt1 = k_min_values[ij];
                cnt1 <= k_max_values[ij];
                cnt1++)
              for(cnt2 = l_min_values[ij][cnt1];
                  cnt2 <= l_max_values[ij][cnt1];
                  cnt2 += 2)
                if(((cnt1 + d1) > maxD1) || ((cnt2 + d2) > maxD2))
                  if(E_Fc_rem == (E_C[ij][cnt1][cnt2/2] + energy)){
                    backtrack_c(i,j,cnt1,cnt2,structure,vars);
                    return;
                  }
        }
    }
    /* check for interior loop configurations */
    if(E_Fc_rem == E_FcI_rem){
      for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
        for (j = d; j <= seq_length; j++) {
          unsigned int u, ij, p, q, pq;
          int type, type_2;
          i = j-d+1;
          ij = my_iindx[i]-j;
          u = seq_length-j + i-1;
          if (u<TURN) continue;
          type = rtype[(unsigned int)ptype[ij]];
          if (!type) continue;
          /* second pair (p,q) on the far side of the circle */
          for(p = j+1; p < seq_length ; p++){
            unsigned int u1, qmin, ln_pre;
            u1 = p-j-1;            /* unpaired between j and p */
            if (u1+i-1>MAXLOOP) break;
            qmin = p + TURN + 1;
            ln_pre = u1 + i + seq_length;
            if(ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1;
            for(q = qmin; q <= seq_length; q++){
              unsigned int u2;
              pq = my_iindx[p]-q;
              type_2 = rtype[(unsigned int)ptype[pq]];
              if (type_2==0) continue;
              u2 = i-1 + seq_length-q;   /* unpaired between q and i (wrap) */
              if (u1+u2>MAXLOOP) continue;
              energy = E_IntLoop(u1, u2, type, type_2, S1[j+1], S1[i-1], S1[p-1], S1[q+1], P);
              /* remainder(ij) combined with in-window or remainder (pq) */
              if(E_C_rem[ij] != INF){
                if(E_C[pq])
                  for(cnt1 = k_min_values[pq];
                      cnt1 <= k_max_values[pq];
                      cnt1++)
                    for(cnt2 = l_min_values[pq][cnt1];
                        cnt2 <= l_max_values[pq][cnt1];
                        cnt2 += 2)
                      if(E_Fc_rem == (E_C_rem[ij] + E_C[pq][cnt1][cnt2/2] + energy)){
                        backtrack_c(i,j,-1,-1,structure,vars);
                        backtrack_c(p,q,cnt1,cnt2,structure,vars);
                        return;
                      }
                if(E_C_rem[pq] != INF){
                  if(E_Fc_rem == (E_C_rem[ij] + E_C_rem[pq] + energy)){
                    backtrack_c(i,j,-1,-1,structure,vars);
                    backtrack_c(p,q,-1,-1,structure,vars);
                    return;
                  }
                }
              }
              /* in-window (ij) combined with remainder (pq) */
              if(E_C_rem[pq] != INF){
                if(E_C[ij])
                  for(cnt1 = k_min_values[ij];
                      cnt1 <= k_max_values[ij];
                      cnt1++)
                    for(cnt2 = l_min_values[ij][cnt1];
                        cnt2 <= l_max_values[ij][cnt1];
                        cnt2 += 2)
                      if(E_Fc_rem == (E_C[ij][cnt1][cnt2/2] + E_C_rem[pq] + energy)){
                        backtrack_c(i,j,cnt1,cnt2,structure,vars);
                        backtrack_c(p,q,-1,-1,structure,vars);
                        return;
                      }
              }
              if(!(E_C[ij])) continue;
              if(!(E_C[pq])) continue;
              /* get distance to reference if closing the interior loop
               * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
               * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
               */
              d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
              d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
              /* both in-window, combined classes exceed the window */
              for(cnt1 = k_min_values[ij];
                  cnt1 <= k_max_values[ij];
                  cnt1++)
                for(cnt2 = l_min_values[ij][cnt1];
                    cnt2 <= l_max_values[ij][cnt1];
                    cnt2 += 2)
                  for(cnt3 = k_min_values[pq];
                      cnt3 <= k_max_values[pq];
                      cnt3++)
                    for(cnt4 = l_min_values[pq][cnt3];
                        cnt4 <= l_max_values[pq][cnt3];
                        cnt4 += 2)
                      if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2))
                        if(E_Fc_rem == (E_C[ij][cnt1][cnt2/2] + E_C[pq][cnt3][cnt4/2] + energy)){
                          backtrack_c(i, j, cnt1, cnt2, structure, vars);
                          backtrack_c(p, q, cnt3, cnt4, structure, vars);
                          return;
                        }
            } /* end for q */
          } /* end for p */
        }
    }
    /* check for multi loop configurations */
    if(E_Fc_rem == E_FcM_rem){
      if(seq_length > 2*TURN)
        for (i=TURN+1; i<seq_length-2*TURN; i++) {
          /* get distancies to references
           * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
           * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
           */
          /* split: M(1,i) + M2(i+1); either part from the remainder class */
          if(E_M_rem[my_iindx[1]-i] != INF){
            if(E_M2[i+1])
              for(cnt1 = k_min_values_m2[i+1];
                  cnt1 <= k_max_values_m2[i+1];
                  cnt1++)
                for(cnt2 = l_min_values_m2[i+1][cnt1];
                    cnt2 <= l_max_values_m2[i+1][cnt1];
                    cnt2 += 2)
                  if(E_Fc_rem == (E_M_rem[my_iindx[1]-i] + E_M2[i+1][cnt1][cnt2/2] + P->MLclosing)){
                    backtrack_m(1,i,-1,-1,structure,vars);
                    backtrack_m2(i+1,cnt1,cnt2,structure,vars);
                    return;
                  }
            if(E_M2_rem[i+1] != INF){
              if(E_Fc_rem == (E_M_rem[my_iindx[1]-i] + E_M2_rem[i+1] + P->MLclosing)){
                backtrack_m(1,i,-1,-1,structure,vars);
                backtrack_m2(i+1,-1,-1,structure,vars);
                return;
              }
            }
          }
          if(E_M2_rem[i+1] != INF){
            if(E_M[my_iindx[1]-i])
              for(cnt1 = k_min_values_m[my_iindx[1]-i];
                  cnt1 <= k_max_values_m[my_iindx[1]-i];
                  cnt1++)
                for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1];
                    cnt2 <= l_max_values_m[my_iindx[1]-i][cnt1];
                    cnt2 += 2)
                  if(E_Fc_rem == (E_M[my_iindx[1]-i][cnt1][cnt2/2] + E_M2_rem[i+1] + P->MLclosing)){
                    backtrack_m(1,i,cnt1,cnt2,structure,vars);
                    backtrack_m2(i+1,-1,-1,structure,vars);
                    return;
                  }
          }
          if(!(E_M[my_iindx[1]-i])) continue;
          if(!(E_M2[i+1])) continue;
          d1 = base_d1 - referenceBPs1[my_iindx[1]-i] - referenceBPs1[my_iindx[i+1]-seq_length];
          d2 = base_d2 - referenceBPs2[my_iindx[1]-i] - referenceBPs2[my_iindx[i+1]-seq_length];
          /* both in-window, combined classes exceed the window */
          for(cnt1 = k_min_values_m[my_iindx[1]-i];
              cnt1 <= k_max_values_m[my_iindx[1]-i];
              cnt1++)
            for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1];
                cnt2 <= l_max_values_m[my_iindx[1]-i][cnt1];
                cnt2 += 2)
              for(cnt3 = k_min_values_m2[i+1];
                  cnt3 <= k_max_values_m2[i+1];
                  cnt3++)
                for(cnt4 = l_min_values_m2[i+1][cnt3];
                    cnt4 <= l_max_values_m2[i+1][cnt3];
                    cnt4 += 2)
                  if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)){
                    if(E_Fc_rem == (E_M[my_iindx[1]-i][cnt1][cnt2/2] + E_M2[i+1][cnt3][cnt4/2] + P->MLclosing)){
                      backtrack_m(1, i, cnt1, cnt2, structure, vars);
                      backtrack_m2(i+1, cnt3, cnt4, structure, vars);
                      return;
                    }
                  }
        }
    }
  }
  else{
    /* ==== in-window class (k,l) ==== */
    /* open chain ? */
    if(E_Fc[k][l/2] == 0)
      if((k == referenceBPs1[my_iindx[1]-seq_length]) && (l == referenceBPs2[my_iindx[1]-seq_length])){
        return;
      }
    /* exterior hairpin (FcH) */
    if((k >= k_min_values_fcH) && (k <= k_max_values_fcH)){
      if((l >= l_min_values_fcH[k]) && (l <= l_max_values_fcH[k]))
        if(E_Fc[k][l/2] == E_FcH[k][l/2]){
          for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
            for (j = d; j <= seq_length; j++) {
              unsigned int u, ij;
              int type, no_close;
              char loopseq[10];
              i = j-d+1;
              ij = my_iindx[i]-j;
              if (!E_C[ij]) continue;
              u = seq_length-j + i-1;
              if (u<TURN) continue;
              type = ptype[ij];
              no_close = (((type==3)||(type==4))&&no_closingGU);
              type=rtype[type];
              if (!type) continue;
              if(no_close) continue;
              d1 = base_d1 - referenceBPs1[ij];
              d2 = base_d2 - referenceBPs2[ij];
              /* NOTE(review): as above, loopseq uninitialized for u >= 7 */
              if (u<7) {
                strcpy(loopseq , sequence+j-1);
                strncat(loopseq, sequence, i);
              }
              energy = E_Hairpin(u, type, S1[j+1], S1[i-1], loopseq, P);
              if((k >= d1) && (l >= d2))
                if((k-d1 >= k_min_values[ij]) && (k-d1 <= k_max_values[ij]))
                  if((l-d2 >= l_min_values[ij][k-d1]) && (l-d2 <= l_max_values[ij][k-d1])){
                    if(E_Fc[k][l/2] == E_C[ij][k-d1][(l-d2)/2] + energy){
                      backtrack_c(i, j, k-d1, l-d2, structure, vars);
                      return;
                    }
                  }
            }
        }
    }
    /* exterior interior loop (FcI) */
    if((k >= k_min_values_fcI) && (k <= k_max_values_fcI)){
      if((l >= l_min_values_fcI[k]) && (l <= l_max_values_fcI[k]))
        if(E_Fc[k][l/2] == E_FcI[k][l/2]){
          for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
            for (j = d; j <= seq_length; j++) {
              unsigned int u, ij, p, q, pq;
              int type, type_2;
              i = j-d+1;
              ij = my_iindx[i]-j;
              if(!E_C[ij]) continue;
              u = seq_length-j + i-1;
              if (u<TURN) continue;
              type = ptype[ij];
              type=rtype[type];
              if (!type) continue;
              for(p = j+1; p < seq_length ; p++){
                unsigned int u1, qmin, ln_pre;
                u1 = p-j-1;
                if (u1+i-1>MAXLOOP) break;
                qmin = p + TURN + 1;
                ln_pre = u1 + i + seq_length;
                if(ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1;
                for(q = qmin; q <= seq_length; q++){
                  unsigned int u2;
                  pq = my_iindx[p]-q;
                  if(!E_C[pq]) continue;
                  type_2 = rtype[(unsigned int)ptype[pq]];
                  if (type_2==0) continue;
                  u2 = i-1 + seq_length-q;
                  if (u1+u2>MAXLOOP) continue;
                  /* get distance to reference if closing the interior loop
                   * d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
                   * d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
                   */
                  d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
                  d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
                  energy = E_IntLoop(u1, u2, type, type_2, S1[j+1], S1[i-1], S1[p-1], S1[q+1], P);
                  /* partition residual classes between (i,j) and (p,q) */
                  if((k >= d1) && (l >= d2))
                    for(cnt1 = k_min_values[ij]; cnt1 <= MIN2(k_max_values[ij], k - d1); cnt1++)
                      for(cnt2 = l_min_values[ij][cnt1]; cnt2 <= MIN2(l_max_values[ij][cnt1], l - d2); cnt2+=2)
                        if((k - d1 - cnt1 >= k_min_values[pq]) && (k - d1 - cnt1 <= k_max_values[pq]))
                          if((l - d2 - cnt2 >= l_min_values[pq][k-d1-cnt1]) && (l - d2 - cnt2 <= l_max_values[pq][k-d1-cnt1])){
                            if((E_C[ij][cnt1][cnt2/2] + E_C[pq][k-d1-cnt1][(l-d2-cnt2)/2] + energy) == E_Fc[k][l/2]){
                              backtrack_c(i, j, cnt1, cnt2, structure, vars);
                              backtrack_c(p, q, k - d1 - cnt1, l - d2 - cnt2, structure, vars);
                              return;
                            }
                          }
                }
              }
            }
        }
    }
    /* exterior multiloop (FcM) */
    if((k >= k_min_values_fcM) && (k <= k_max_values_fcM)){
      if((l >= l_min_values_fcM[k]) && (l <= l_max_values_fcM[k]))
        if(E_Fc[k][l/2] == E_FcM[k][l/2]){
          if(seq_length > 2*TURN)
            for (i=TURN+1; i<seq_length-2*TURN; i++) {
              /* get distancies to references
               * d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
               * d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
               */
              if(!E_M[my_iindx[1]-i]) continue;
              if(!E_M2[i+1]) continue;
              d1 = base_d1 - referenceBPs1[my_iindx[1]-i] - referenceBPs1[my_iindx[i+1]-seq_length];
              d2 = base_d2 - referenceBPs2[my_iindx[1]-i] - referenceBPs2[my_iindx[i+1]-seq_length];
              /* partition residual classes between M(1,i) and M2(i+1) */
              if((k >= d1) && (l >= d2))
                for(cnt1 = k_min_values_m[my_iindx[1]-i]; cnt1 <= MIN2(k_max_values_m[my_iindx[1]-i], k-d1); cnt1++)
                  for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1]; cnt2 <= MIN2(l_max_values_m[my_iindx[1]-i][cnt1], l-d2); cnt2+=2)
                    if((k - d1 - cnt1 >= k_min_values_m2[i+1]) && (k - d1 - cnt1 <= k_max_values_m2[i+1]))
                      if((l - d2 - cnt2 >= l_min_values_m2[i+1][k-d1-cnt1]) && (l - d2 - cnt2 <= l_max_values_m2[i+1][k-d1-cnt1]))
                        if((E_M[my_iindx[1]-i][cnt1][cnt2/2] + E_M2[i+1][k-d1-cnt1][(l-d2-cnt2)/2] + P->MLclosing) == E_FcM[k][l/2]){
                          backtrack_m(1, i, cnt1, cnt2, structure, vars);
                          backtrack_m2(i+1, k - d1 - cnt1, l - d2 - cnt2, structure, vars);
                          return;
                        }
            }
        }
    }
  }
  nrerror("backtack failed in fc\n");
}
/*
 * Backtrack the M2 array (two multiloop components M1(i,j) + M1(j+1,n),
 * used for the circular-exterior multiloop case) for the suffix starting
 * at i, in the distance-class ("2D") MFE recursion.
 *
 * k, l ... distance classes to reference structure 1 and 2;
 *          k == -1 selects the remainder class (E_M2_rem) outside the
 *          maxD1/maxD2 window.
 * Recurses into backtrack_m1 for both halves of the first split point j
 * that reproduces the stored energy; exhausting all splits is an error.
 *
 * FIX: the first l-class loop in the k == -1 branch stepped cnt2 by 1
 * instead of 2.  The l distance classes of a (cnt1) row have a fixed
 * parity (every other loop over l_min..l_max in this file steps by 2):
 * an odd cnt2 aliased the neighbouring even entry through cnt2/2 and,
 * on an energy match, passed an l class of the wrong parity to
 * backtrack_m1.  The loop now steps cnt2 += 2 like all its siblings.
 */
PRIVATE void backtrack_m2(unsigned int i, int k, int l, char *structure, TwoDfold_vars *vars){
  unsigned int j, ij, j3, n;
  unsigned int *referenceBPs1, *referenceBPs2;
  unsigned int d1, d2, base_d1, base_d2, maxD1, maxD2;
  int *my_iindx, cnt1, cnt2, cnt3, cnt4;
  int ***E_M1, ***E_M2, *E_M2_rem, *E_M1_rem, e;
  int **l_min_values_m1, **l_max_values_m1, *k_min_values_m1, *k_max_values_m1;

  /* unpack DP state from vars */
  n               = vars->seq_length;
  my_iindx        = vars->my_iindx;
  referenceBPs1   = vars->referenceBPs1;
  referenceBPs2   = vars->referenceBPs2;
  E_M1            = vars->E_M1;
  l_min_values_m1 = vars->l_min_values_m1;
  l_max_values_m1 = vars->l_max_values_m1;
  k_min_values_m1 = vars->k_min_values_m1;
  k_max_values_m1 = vars->k_max_values_m1;
  E_M1_rem        = vars->E_M1_rem;
  E_M2            = vars->E_M2;
  E_M2_rem        = vars->E_M2_rem;
  maxD1           = vars->maxD1;
  maxD2           = vars->maxD2;

  /* distances of the references restricted to [i,n] */
  base_d1 = referenceBPs1[my_iindx[i]-n];
  base_d2 = referenceBPs2[my_iindx[i]-n];

  if(k == -1){
    e = E_M2_rem[i];
    for (j=i+TURN+1; j<n-TURN-1; j++){
      /* case: left half M1(i,j) is remainder */
      if(E_M1_rem[my_iindx[i]-j] != INF){
        if(E_M1[my_iindx[j+1]-n])
          for(cnt1 = k_min_values_m1[my_iindx[j+1]-n];
              cnt1 <= k_max_values_m1[my_iindx[j+1]-n];
              cnt1++)
            for(cnt2 = l_min_values_m1[my_iindx[j+1]-n][cnt1];
                cnt2 <= l_max_values_m1[my_iindx[j+1]-n][cnt1];
                cnt2 += 2)  /* was cnt2++: l classes have fixed parity */
              if(e == E_M1_rem[my_iindx[i]-j] + E_M1[my_iindx[j+1]-n][cnt1][cnt2/2]){
                backtrack_m1(i, j, -1, -1, structure, vars);
                backtrack_m1(j+1, n, cnt1, cnt2, structure, vars);
                return;
              }
        if(E_M1_rem[my_iindx[j+1]-n] != INF){
          if(e == E_M1_rem[my_iindx[i]-j] + E_M1_rem[my_iindx[j+1]-n]){
            backtrack_m1(i, j, -1, -1, structure, vars);
            backtrack_m1(j+1, n, -1, -1, structure, vars);
            return;
          }
        }
      }
      /* case: right half M1(j+1,n) is remainder */
      if(E_M1_rem[my_iindx[j+1]-n] != INF){
        if(E_M1[my_iindx[i]-j])
          for(cnt1 = k_min_values_m1[my_iindx[i]-j];
              cnt1 <= k_max_values_m1[my_iindx[i]-j];
              cnt1++)
            for(cnt2 = l_min_values_m1[my_iindx[i]-j][cnt1];
                cnt2 <= l_max_values_m1[my_iindx[i]-j][cnt1];
                cnt2 += 2)
              if(e == E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1_rem[my_iindx[j+1]-n]){
                backtrack_m1(i, j, cnt1, cnt2, structure, vars);
                backtrack_m1(j+1, n, -1, -1, structure, vars);
                return;
              }
      }
      /* case: both halves in-window, combined classes exceed the window */
      if(!E_M1[my_iindx[i]-j]) continue;
      if(!E_M1[my_iindx[j+1]-n]) continue;
      d1 = referenceBPs1[my_iindx[i]-n] - referenceBPs1[my_iindx[i]-j] - referenceBPs1[my_iindx[j+1]-n];
      d2 = referenceBPs2[my_iindx[i]-n] - referenceBPs2[my_iindx[i]-j] - referenceBPs2[my_iindx[j+1]-n];
      for(cnt1 = k_min_values_m1[my_iindx[i]-j]; cnt1 <= k_max_values_m1[my_iindx[i]-j]; cnt1++)
        for(cnt2 = l_min_values_m1[my_iindx[i]-j][cnt1]; cnt2 <= l_max_values_m1[my_iindx[i]-j][cnt1]; cnt2+=2){
          for(cnt3 = k_min_values_m1[my_iindx[j+1]-n]; cnt3 <= k_max_values_m1[my_iindx[j+1]-n]; cnt3++)
            for(cnt4 = l_min_values_m1[my_iindx[j+1]-n][cnt3]; cnt4 <= l_max_values_m1[my_iindx[j+1]-n][cnt3]; cnt4+=2){
              if(((cnt1 + cnt3 + d1) > maxD1) || ((cnt2 + cnt4 + d2) > maxD2)){
                if(e == E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1[my_iindx[j+1]-n][cnt3][cnt4/2]){
                  backtrack_m1(i, j, cnt1, cnt2, structure, vars);
                  backtrack_m1(j+1, n, cnt3, cnt4, structure, vars);
                  return;
                }
              }
            }
        }
    }
  }
  else{
    /* in-window class (k,l): partition residual classes between halves */
    for(j=i+TURN+1; j<n-TURN-1; j++){
      if(!E_M1[my_iindx[i]-j]) continue;
      if(!E_M1[my_iindx[j+1]-n]) continue;
      ij = my_iindx[i]-j;
      j3 = my_iindx[j+1]-n;
      d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[j3];
      d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[j3];
      for(cnt1 = k_min_values_m1[ij]; cnt1 <= MIN2(k_max_values_m1[ij], k - d1); cnt1++)
        for(cnt2 = l_min_values_m1[ij][cnt1]; cnt2 <= MIN2(l_max_values_m1[ij][cnt1], l-d2); cnt2+=2)
          if((k - d1 - cnt1 >= k_min_values_m1[j3]) && (k - d1 - cnt1 <= k_max_values_m1[j3]))
            if((l - d2 - cnt2 >= l_min_values_m1[j3][k - d1 - cnt1]) && (l - d2 - cnt2 <= l_max_values_m1[j3][k-d1-cnt1]))
              if(E_M1[ij][cnt1][cnt2/2] + E_M1[j3][k-d1-cnt1][(l-d2-cnt2)/2] == E_M2[i][k][l/2]){
                backtrack_m1(i, j, cnt1, cnt2, structure, vars);
                backtrack_m1(j+1, n, k-d1-cnt1, l-d2-cnt2, structure, vars);
                return;
              }
    }
  }
  nrerror("backtack failed in m2\n");
}
PRIVATE void mfe_circ(TwoDfold_vars *vars){
unsigned int d, i, j, maxD1, maxD2, seq_length, *referenceBPs1, *referenceBPs2, d1, d2, base_d1, base_d2, *mm1, *mm2, *bpdist;
int *my_iindx, energy, cnt1, cnt2, cnt3, cnt4;
short *S1;
char *sequence, *ptype;
int ***E_C, ***E_M, ***E_M1;
int *E_C_rem, *E_M_rem, *E_M1_rem;
int **l_min_values, **l_max_values, **l_min_values_m, **l_max_values_m, **l_min_values_m1, **l_max_values_m1;
int *k_min_values, *k_max_values,*k_min_values_m, *k_max_values_m,*k_min_values_m1, *k_max_values_m1;
paramT *P;
P = vars->P;
sequence = vars->sequence;
seq_length = vars->seq_length;
maxD1 = vars->maxD1;
maxD2 = vars->maxD2;
S1 = vars->S1;
ptype = vars->ptype;
my_iindx = vars->my_iindx;
referenceBPs1 = vars->referenceBPs1;
referenceBPs2 = vars->referenceBPs2;
mm1 = vars->mm1;
mm2 = vars->mm2;
bpdist = vars->bpdist;
E_C = vars->E_C;
l_min_values = vars->l_min_values;
l_max_values = vars->l_max_values;
k_min_values = vars->k_min_values;
k_max_values = vars->k_max_values;
E_M = vars->E_M;
l_min_values_m = vars->l_min_values_m;
l_max_values_m = vars->l_max_values_m;
k_min_values_m = vars->k_min_values_m;
k_max_values_m = vars->k_max_values_m;
E_M1 = vars->E_M1;
l_min_values_m1 = vars->l_min_values_m1;
l_max_values_m1 = vars->l_max_values_m1;
k_min_values_m1 = vars->k_min_values_m1;
k_max_values_m1 = vars->k_max_values_m1;
E_C_rem = vars->E_C_rem;
E_M_rem = vars->E_M_rem;
E_M1_rem = vars->E_M1_rem;
#ifdef _OPENMP
#pragma omp parallel for private(d1,d2,cnt1,cnt2,cnt3,cnt4,j, i)
#endif
for(i=1; i<seq_length-TURN-1; i++){
/* guess memory requirements for M2 */
int min_k, max_k, max_l, min_l;
int min_k_real, max_k_real, *min_l_real, *max_l_real;
min_k = min_l = 0;
max_k = mm1[my_iindx[i]-seq_length] + referenceBPs1[my_iindx[i] - seq_length];
max_l = mm2[my_iindx[i]-seq_length] + referenceBPs2[my_iindx[i] - seq_length];
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[my_iindx[i] - seq_length],
&vars->k_min_values_m2[i],
&vars->k_max_values_m2[i],
&vars->l_min_values_m2[i],
&vars->l_max_values_m2[i]
);
prepareArray( &vars->E_M2[i],
vars->k_min_values_m2[i],
vars->k_max_values_m2[i],
vars->l_min_values_m2[i],
vars->l_max_values_m2[i]
);
preparePosteriorBoundaries( vars->k_max_values_m2[i] - vars->k_min_values_m2[i] + 1,
vars->k_min_values_m2[i],
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
/* begin filling of M2 array */
for (j=i+TURN+1; j<seq_length-TURN-1; j++){
if(E_M1_rem[my_iindx[i]-j] != INF){
if(E_M1[my_iindx[j+1]-seq_length])
for(cnt1 = k_min_values_m1[my_iindx[j+1]-seq_length];
cnt1 <= k_max_values_m1[my_iindx[j+1]-seq_length];
cnt1++)
for(cnt2 = l_min_values_m1[my_iindx[j+1]-seq_length][cnt1];
cnt2 <= l_max_values_m1[my_iindx[j+1]-seq_length][cnt1];
cnt2++)
vars->E_M2_rem[i] = MIN2(vars->E_M2_rem[i],
E_M1_rem[my_iindx[i]-j] + E_M1[my_iindx[j+1]-seq_length][cnt1][cnt2/2]
);
if(E_M1_rem[my_iindx[j+1]-seq_length] != INF)
vars->E_M2_rem[i] = MIN2(vars->E_M2_rem[i], E_M1_rem[my_iindx[i]-j] + E_M1_rem[my_iindx[j+1]-seq_length]);
}
if(E_M1_rem[my_iindx[j+1]-seq_length] != INF){
if(E_M1[my_iindx[i]-j])
for(cnt1 = k_min_values_m1[my_iindx[i]-j];
cnt1 <= k_max_values_m1[my_iindx[i]-j];
cnt1++)
for(cnt2 = l_min_values_m1[my_iindx[i]-j][cnt1];
cnt2 <= l_max_values_m1[my_iindx[i]-j][cnt1];
cnt2 += 2)
vars->E_M2_rem[i] = MIN2(vars->E_M2_rem[i],
E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1_rem[my_iindx[j+1]-seq_length]
);
}
if(!E_M1[my_iindx[i]-j]) continue;
if(!E_M1[my_iindx[j+1]-seq_length]) continue;
d1 = referenceBPs1[my_iindx[i]-seq_length] - referenceBPs1[my_iindx[i]-j] - referenceBPs1[my_iindx[j+1]-seq_length];
d2 = referenceBPs2[my_iindx[i]-seq_length] - referenceBPs2[my_iindx[i]-j] - referenceBPs2[my_iindx[j+1]-seq_length];
for(cnt1 = k_min_values_m1[my_iindx[i]-j]; cnt1 <= k_max_values_m1[my_iindx[i]-j]; cnt1++)
for(cnt2 = l_min_values_m1[my_iindx[i]-j][cnt1]; cnt2 <= l_max_values_m1[my_iindx[i]-j][cnt1]; cnt2+=2){
for(cnt3 = k_min_values_m1[my_iindx[j+1]-seq_length]; cnt3 <= k_max_values_m1[my_iindx[j+1]-seq_length]; cnt3++)
for(cnt4 = l_min_values_m1[my_iindx[j+1]-seq_length][cnt3]; cnt4 <= l_max_values_m1[my_iindx[j+1]-seq_length][cnt3]; cnt4+=2){
if(((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)){
vars->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2] = MIN2( vars->E_M2[i][cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2],
E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1[my_iindx[j+1]-seq_length][cnt3][cnt4/2]
);
updatePosteriorBoundaries(cnt1+cnt3+d1,
cnt2+cnt4+d2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
else{
vars->E_M2_rem[i] = MIN2(vars->E_M2_rem[i],
E_M1[my_iindx[i]-j][cnt1][cnt2/2] + E_M1[my_iindx[j+1]-seq_length][cnt3][cnt4/2]
);
}
}
}
}
/* resize and move memory portions of energy matrix E_M2 */
adjustArrayBoundaries(&vars->E_M2[i],
&vars->k_min_values_m2[i],
&vars->k_max_values_m2[i],
&vars->l_min_values_m2[i],
&vars->l_max_values_m2[i],
min_k_real,
max_k_real,
min_l_real,
max_l_real
);
} /* end for i */
base_d1 = referenceBPs1[my_iindx[1]-seq_length];
base_d2 = referenceBPs2[my_iindx[1]-seq_length];
/* guess memory requirements for E_FcH, E_FcI and E_FcM */
int min_k, max_k, max_l, min_l;
int min_k_real, max_k_real, min_k_real_fcH, max_k_real_fcH, min_k_real_fcI, max_k_real_fcI, min_k_real_fcM, max_k_real_fcM;
int *min_l_real, *max_l_real, *min_l_real_fcH, *max_l_real_fcH, *min_l_real_fcI, *max_l_real_fcI,*min_l_real_fcM, *max_l_real_fcM;
min_k = min_l = 0;
max_k = mm1[my_iindx[1] - seq_length] + referenceBPs1[my_iindx[1] - seq_length];
max_l = mm2[my_iindx[1] - seq_length] + referenceBPs2[my_iindx[1] - seq_length];
#ifdef _OPENMP
#pragma omp sections
{
#pragma omp section
{
#endif
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[my_iindx[1] - seq_length],
&vars->k_min_values_fc,
&vars->k_max_values_fc,
&vars->l_min_values_fc,
&vars->l_max_values_fc
);
prepareArray( &vars->E_Fc,
vars->k_min_values_fc,
vars->k_max_values_fc,
vars->l_min_values_fc,
vars->l_max_values_fc
);
#ifdef _OPENMP
}
#pragma omp section
{
#endif
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[my_iindx[1] - seq_length],
&vars->k_min_values_fcH,
&vars->k_max_values_fcH,
&vars->l_min_values_fcH,
&vars->l_max_values_fcH
);
prepareArray( &vars->E_FcH,
vars->k_min_values_fcH,
vars->k_max_values_fcH,
vars->l_min_values_fcH,
vars->l_max_values_fcH
);
#ifdef _OPENMP
}
#pragma omp section
{
#endif
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[my_iindx[1] - seq_length],
&vars->k_min_values_fcI,
&vars->k_max_values_fcI,
&vars->l_min_values_fcI,
&vars->l_max_values_fcI
);
prepareArray( &vars->E_FcI,
vars->k_min_values_fcI,
vars->k_max_values_fcI,
vars->l_min_values_fcI,
vars->l_max_values_fcI
);
#ifdef _OPENMP
}
#pragma omp section
{
#endif
prepareBoundaries(min_k,
max_k,
min_l,
max_l,
bpdist[my_iindx[1] - seq_length],
&vars->k_min_values_fcM,
&vars->k_max_values_fcM,
&vars->l_min_values_fcM,
&vars->l_max_values_fcM
);
prepareArray( &vars->E_FcM,
vars->k_min_values_fcM,
vars->k_max_values_fcM,
vars->l_min_values_fcM,
vars->l_max_values_fcM
);
#ifdef _OPENMP
}
#pragma omp section
{
#endif
preparePosteriorBoundaries( max_k - min_k + 1,
min_k,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
#ifdef _OPENMP
}
#pragma omp section
{
#endif
preparePosteriorBoundaries( max_k - min_k + 1,
min_k,
&min_k_real_fcH,
&max_k_real_fcH,
&min_l_real_fcH,
&max_l_real_fcH
);
#ifdef _OPENMP
}
#pragma omp section
{
#endif
preparePosteriorBoundaries( max_k - min_k + 1,
min_k,
&min_k_real_fcI,
&max_k_real_fcI,
&min_l_real_fcI,
&max_l_real_fcI
);
#ifdef _OPENMP
}
#pragma omp section
{
#endif
preparePosteriorBoundaries( max_k - min_k + 1,
min_k,
&min_k_real_fcM,
&max_k_real_fcM,
&min_l_real_fcM,
&max_l_real_fcM
);
#ifdef _OPENMP
}
}
#endif
/* begin actual energy calculations */
#ifdef _OPENMP
#pragma omp sections private(d, d1,d2,cnt1,cnt2,cnt3,cnt4,j, i, energy)
{
#pragma omp section
{
#endif
for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
for (j = d; j <= seq_length; j++) {
unsigned int u, ij;
int type, no_close;
char loopseq[10];
i = j-d+1;
ij = my_iindx[i]-j;
u = seq_length-j + i-1;
if (u<TURN) continue;
type = ptype[ij];
no_close = (((type==3)||(type==4))&&no_closingGU);
type=rtype[type];
if (!type) continue;
if(no_close) continue;
d1 = base_d1 - referenceBPs1[ij];
d2 = base_d2 - referenceBPs2[ij];
if (u<7) {
strcpy(loopseq , sequence+j-1);
strncat(loopseq, sequence, i);
}
energy = E_Hairpin(u, type, S1[j+1], S1[i-1], loopseq, P);
if(E_C_rem[ij] != INF)
vars->E_FcH_rem = MIN2(vars->E_FcH_rem, E_C_rem[ij] + energy);
if (!E_C[ij]) continue;
for(cnt1 = k_min_values[ij]; cnt1 <= k_max_values[ij]; cnt1++)
for(cnt2 = l_min_values[ij][cnt1]; cnt2 <= l_max_values[ij][cnt1]; cnt2 += 2){
if(((cnt1 + d1) <= maxD1) && ((cnt2 + d2) <= maxD2)){
vars->E_FcH[cnt1 + d1][(cnt2+d2)/2] = MIN2( vars->E_FcH[cnt1 + d1][(cnt2+d2)/2],
energy + E_C[ij][cnt1][cnt2/2]
);
updatePosteriorBoundaries(cnt1 + d1,
cnt2 + d2,
&min_k_real_fcH,
&max_k_real_fcH,
&min_l_real_fcH,
&max_l_real_fcH
);
}
else
vars->E_FcH_rem = MIN2(vars->E_FcH_rem, energy + E_C[ij][cnt1][cnt2/2]);
}
}
/* end of i-j loop */
/* resize and move memory portions of energy matrix E_FcH */
adjustArrayBoundaries(&vars->E_FcH,
&vars->k_min_values_fcH,
&vars->k_max_values_fcH,
&vars->l_min_values_fcH,
&vars->l_max_values_fcH,
min_k_real_fcH,
max_k_real_fcH,
min_l_real_fcH,
max_l_real_fcH
);
#ifdef _OPENMP
}
#pragma omp section
{
#endif
for (d = TURN+2; d <= seq_length; d++) /* i,j in [1..length] */
for (j = d; j <= seq_length; j++) {
unsigned int u, ij, p, q, pq;
int type, type_2, no_close;
i = j-d+1;
ij = my_iindx[i]-j;
u = seq_length-j + i-1;
if (u<TURN) continue;
type = ptype[ij];
no_close = (((type==3)||(type==4))&&no_closingGU);
type=rtype[type];
if (!type) continue;
if(no_close) continue;
if(E_C_rem[ij] != INF){
for(p = j+1; p < seq_length ; p++){
unsigned int u1, qmin, ln_pre;
u1 = p-j-1;
if (u1+i-1>MAXLOOP) break;
qmin = p + TURN + 1;
ln_pre = u1 + i + seq_length;
if(ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1;
for(q = qmin; q <= seq_length; q++){
unsigned int u2;
pq = my_iindx[p]-q;
type_2 = rtype[(unsigned int)ptype[pq]];
if (type_2==0) continue;
u2 = i-1 + seq_length-q;
if (u1+u2>MAXLOOP) continue;
/* get distance to reference if closing the interior loop
* d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
* d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
*/
d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
energy = E_IntLoop(u1, u2, type, type_2, S1[j+1], S1[i-1], S1[p-1], S1[q+1], P);
if(E_C_rem[pq] != INF)
vars->E_FcI_rem = MIN2(vars->E_FcI_rem, E_C_rem[ij] + E_C_rem[pq] + energy);
if(E_C[pq])
for(cnt1 = k_min_values[pq];
cnt1 <= k_max_values[pq];
cnt1++)
for(cnt2 = l_min_values[pq][cnt1];
cnt2 <= l_max_values[pq][cnt1];
cnt2 += 2)
vars->E_FcI_rem = MIN2(vars->E_FcI_rem, E_C_rem[ij] + E_C[pq][cnt1][cnt2/2] + energy);
}
}
}
if(E_C[ij]){
for(p = j+1; p < seq_length ; p++){
unsigned int u1, qmin, ln_pre;
u1 = p-j-1;
if (u1+i-1>MAXLOOP) break;
qmin = p + TURN + 1;
ln_pre = u1 + i + seq_length;
if(ln_pre > qmin + MAXLOOP) qmin = ln_pre - MAXLOOP - 1;
for(q = qmin; q <= seq_length; q++){
unsigned int u2;
pq = my_iindx[p]-q;
type_2 = rtype[(unsigned int)ptype[pq]];
if (type_2==0) continue;
u2 = i-1 + seq_length-q;
if (u1+u2>MAXLOOP) continue;
/* get distance to reference if closing the interior loop
* d2a = dbp(T1_[1,n}, T1_{p,q} + T1_{i,j})
* d2b = dbp(T2_[1,n}, T2_{p,q} + T2_{i,j})
*/
d1 = base_d1 - referenceBPs1[ij] - referenceBPs1[pq];
d2 = base_d2 - referenceBPs2[ij] - referenceBPs2[pq];
energy = E_IntLoop(u1, u2, type, type_2, S1[j+1], S1[i-1], S1[p-1], S1[q+1], P);
if(E_C_rem[pq] != INF){
for(cnt1 = k_min_values[ij];
cnt1 <= k_max_values[ij];
cnt1++)
for(cnt2 = l_min_values[ij][cnt1];
cnt2 <= l_max_values[ij][cnt1];
cnt2 += 2)
vars->E_FcI_rem = MIN2(vars->E_FcI_rem, E_C[ij][cnt1][cnt2/2] + E_C_rem[pq] + energy);
}
if(E_C[pq])
for(cnt1 = k_min_values[ij];
cnt1 <= k_max_values[ij];
cnt1++)
for(cnt2 = l_min_values[ij][cnt1];
cnt2 <= l_max_values[ij][cnt1];
cnt2 += 2)
for(cnt3 = k_min_values[pq];
cnt3 <= k_max_values[pq];
cnt3++)
for(cnt4 = l_min_values[pq][cnt3];
cnt4 <= l_max_values[pq][cnt3];
cnt4 += 2){
if(((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)){
vars->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2] = MIN2(
vars->E_FcI[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2],
E_C[ij][cnt1][cnt2/2]
+ E_C[pq][cnt3][cnt4/2]
+ energy
);
updatePosteriorBoundaries(cnt1 + cnt3 + d1,
cnt2 + cnt4 + d2,
&min_k_real_fcI,
&max_k_real_fcI,
&min_l_real_fcI,
&max_l_real_fcI
);
}
else{
vars->E_FcI_rem = MIN2(
vars->E_FcI_rem,
E_C[ij][cnt1][cnt2/2]
+ E_C[pq][cnt3][cnt4/2]
+ energy
);
}
}
}
}
}
}
/* end of i-j loop */
/* resize and move memory portions of energy matrix E_FcI */
adjustArrayBoundaries(&vars->E_FcI,
&vars->k_min_values_fcI,
&vars->k_max_values_fcI,
&vars->l_min_values_fcI,
&vars->l_max_values_fcI,
min_k_real_fcI,
max_k_real_fcI,
min_l_real_fcI,
max_l_real_fcI
);
#ifdef _OPENMP
}
#pragma omp section
{
#endif
if(seq_length > 2*TURN){
for (i=TURN+1; i<seq_length-2*TURN; i++) {
/* get distancies to references
* d3a = dbp(T1_[1,n}, T1_{1,k} + T1_{k+1, n})
* d3b = dbp(T2_[1,n}, T2_{1,k} + T2_{k+1, n})
*/
d1 = base_d1 - referenceBPs1[my_iindx[1]-i] - referenceBPs1[my_iindx[i+1]-seq_length];
d2 = base_d2 - referenceBPs2[my_iindx[1]-i] - referenceBPs2[my_iindx[i+1]-seq_length];
if(E_M_rem[my_iindx[1]-i] != INF){
if(vars->E_M2[i+1])
for(cnt1 = vars->k_min_values_m2[i+1];
cnt1 <= vars->k_max_values_m2[i+1];
cnt1++)
for(cnt2 = vars->l_min_values_m2[i+1][cnt1];
cnt2 <= vars->l_max_values_m2[i+1][cnt1];
cnt2 += 2)
vars->E_FcM_rem = MIN2(vars->E_FcM_rem, E_M_rem[my_iindx[1]-i] + vars->E_M2[i+1][cnt1][cnt2/2] + P->MLclosing);
if(vars->E_M2_rem[i+1] != INF)
vars->E_FcM_rem = MIN2(vars->E_FcM_rem, E_M_rem[my_iindx[1]-i] + vars->E_M2_rem[i+1] + P->MLclosing);
}
if(vars->E_M2_rem[i+1] != INF){
if(E_M[my_iindx[1]-i])
for(cnt1 = k_min_values_m[my_iindx[1]-i];
cnt1 <= k_max_values_m[my_iindx[1]-i];
cnt1++)
for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1];
cnt2 <= l_max_values_m[my_iindx[1]-i][cnt1];
cnt2 += 2)
vars->E_FcM_rem = MIN2(vars->E_FcM_rem, E_M[my_iindx[1]-i][cnt1][cnt2/2] + vars->E_M2_rem[i+1] + P->MLclosing);
}
if(!E_M[my_iindx[1]-i]) continue;
if(!vars->E_M2[i+1]) continue;
for(cnt1 = k_min_values_m[my_iindx[1]-i]; cnt1 <= k_max_values_m[my_iindx[1]-i]; cnt1++)
for(cnt2 = l_min_values_m[my_iindx[1]-i][cnt1]; cnt2 <= l_max_values_m[my_iindx[1]-i][cnt1]; cnt2 += 2)
for(cnt3 = vars->k_min_values_m2[i+1]; cnt3 <= vars->k_max_values_m2[i+1]; cnt3++)
for(cnt4 = vars->l_min_values_m2[i+1][cnt3]; cnt4 <= vars->l_max_values_m2[i+1][cnt3]; cnt4 += 2){
if(((cnt1 + cnt3 + d1) <= maxD1) && ((cnt2 + cnt4 + d2) <= maxD2)){
vars->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2] = MIN2(
vars->E_FcM[cnt1 + cnt3 + d1][(cnt2 + cnt4 + d2)/2],
E_M[my_iindx[1]-i][cnt1][cnt2/2]
+ vars->E_M2[i+1][cnt3][cnt4/2]
+ P->MLclosing
);
updatePosteriorBoundaries(cnt1 + cnt3 + d1,
cnt2 + cnt4 + d2,
&min_k_real_fcM,
&max_k_real_fcM,
&min_l_real_fcM,
&max_l_real_fcM
);
}
else{
vars->E_FcM_rem = MIN2(
vars->E_FcM_rem,
E_M[my_iindx[1]-i][cnt1][cnt2/2]
+ vars->E_M2[i+1][cnt3][cnt4/2]
+ P->MLclosing
);
}
}
}
}
/* resize and move memory portions of energy matrix E_FcM */
adjustArrayBoundaries(&vars->E_FcM,
&vars->k_min_values_fcM,
&vars->k_max_values_fcM,
&vars->l_min_values_fcM,
&vars->l_max_values_fcM,
min_k_real_fcM,
max_k_real_fcM,
min_l_real_fcM,
max_l_real_fcM
);
#ifdef _OPENMP
}
}
#endif
/* compute E_Fc_rem */
vars->E_Fc_rem = MIN2(vars->E_FcH_rem, vars->E_FcI_rem);
vars->E_Fc_rem = MIN2(vars->E_Fc_rem, vars->E_FcM_rem);
/* add the case were structure is unfolded chain */
if((referenceBPs1[my_iindx[1]-seq_length] > maxD1) || (referenceBPs2[my_iindx[1]-seq_length] > maxD2))
vars->E_Fc_rem = MIN2(vars->E_Fc_rem, 0);
/* compute all E_Fc */
for(cnt1 = vars->k_min_values_fcH; cnt1 <= vars->k_max_values_fcH; cnt1++)
for(cnt2 = vars->l_min_values_fcH[cnt1]; cnt2 <= vars->l_max_values_fcH[cnt1]; cnt2 += 2){
vars->E_Fc[cnt1][cnt2/2] = MIN2(vars->E_Fc[cnt1][cnt2/2],
vars->E_FcH[cnt1][cnt2/2]
);
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
for(cnt1 = vars->k_min_values_fcI; cnt1 <= vars->k_max_values_fcI; cnt1++)
for(cnt2 = vars->l_min_values_fcI[cnt1]; cnt2 <= vars->l_max_values_fcI[cnt1]; cnt2 += 2){
vars->E_Fc[cnt1][cnt2/2] = MIN2(vars->E_Fc[cnt1][cnt2/2],
vars->E_FcI[cnt1][cnt2/2]
);
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
for(cnt1 = vars->k_min_values_fcM; cnt1 <= vars->k_max_values_fcM; cnt1++)
for(cnt2 = vars->l_min_values_fcM[cnt1]; cnt2 <= vars->l_max_values_fcM[cnt1]; cnt2 += 2){
vars->E_Fc[cnt1][cnt2/2] = MIN2(vars->E_Fc[cnt1][cnt2/2],
vars->E_FcM[cnt1][cnt2/2]
);
updatePosteriorBoundaries(cnt1,
cnt2,
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
}
/* add the case were structure is unfolded chain */
vars->E_Fc[referenceBPs1[my_iindx[1]-seq_length]][referenceBPs2[my_iindx[1]-seq_length]/2] = MIN2(vars->E_Fc[referenceBPs1[my_iindx[1]-seq_length]][referenceBPs2[my_iindx[1]-seq_length]/2],
0);
updatePosteriorBoundaries(referenceBPs1[my_iindx[1]-seq_length],
referenceBPs2[my_iindx[1]-seq_length],
&min_k_real,
&max_k_real,
&min_l_real,
&max_l_real
);
adjustArrayBoundaries(&vars->E_Fc,
&vars->k_min_values_fc,
&vars->k_max_values_fc,
&vars->l_min_values_fc,
&vars->l_max_values_fc,
min_k_real,
max_k_real,
min_l_real,
max_l_real
);
}
/*
 * Shrink a 2D distance-class energy matrix to its actually-used (posterior)
 * bounds.
 *
 * The matrix uses offset-pointer addressing: (*array)[k][l/2] is valid for
 * k in [*k_min, *k_max] and l in [(*l_min)[k], (*l_max)[k]] (l values share
 * one parity per k, hence the /2 packing).  k_min_post/k_max_post and
 * l_min_post/l_max_post are the bounds that were actually touched.
 *
 * Side effects: rows outside the posterior bounds are freed, the row-pointer
 * and l-bound arrays are compacted and realloc'd, and *k_min/*k_max and the
 * per-k l bounds are overwritten.  The function takes ownership of
 * l_min_post/l_max_post (offset by the old *k_min) and frees them.
 * A k_min_post of INF means "nothing was used": everything is freed and
 * *array is set to NULL.
 */
PRIVATE void adjustArrayBoundaries(int ***array, int *k_min, int *k_max, int **l_min, int **l_max, int k_min_post, int k_max_post, int *l_min_post, int *l_max_post){
  int cnt1;
  /* how far the lower k bound moves up */
  int k_diff_pre = k_min_post - *k_min;
  int mem_size = k_max_post - k_min_post + 1;

  if(k_min_post < INF){
    /* free all the unused memory behind actual data */
    for(cnt1 = k_max_post + 1; cnt1 <= *k_max; cnt1++){
      /* undo the l-offset before handing the row to free() */
      (*array)[cnt1] += (*l_min)[cnt1]/2;
      free((*array)[cnt1]);
    }
    /* free unused memory before actual data */
    for(cnt1 = *k_min; cnt1 < k_min_post; cnt1++){
      (*array)[cnt1] += (*l_min)[cnt1]/2;
      free((*array)[cnt1]);
    }
    /* move data to front and thereby eliminating unused memory in front of actual data.
     * NOTE(review): the memmove destination is the offset pointer itself, which
     * equals the allocation base only when *k_min == 0 -- presumably an
     * invariant of the callers; confirm before reuse elsewhere. */
    if(k_diff_pre > 0){
      memmove((int **)(*array),((int **)(*array)) + k_diff_pre, sizeof(int *) * mem_size);
      memmove((int *) (*l_min),((int *) (*l_min)) + k_diff_pre, sizeof(int) * mem_size);
      memmove((int *) (*l_max),((int *) (*l_max)) + k_diff_pre, sizeof(int) * mem_size);
    }
    /* reallocating memory to actual size used.
     * NOTE(review): realloc return values are not checked here. */
    *array += *k_min;
    *array = (int **)realloc(*array, sizeof(int *) * mem_size);
    *array -= k_min_post;

    *l_min += *k_min;
    *l_min = (int *)realloc(*l_min, sizeof(int) * mem_size);
    *l_min -= k_min_post;

    *l_max += *k_min;
    *l_max = (int *)realloc(*l_max, sizeof(int) * mem_size);
    *l_max -= k_min_post;

    /* adjust l dimension of array */
    for(cnt1 = k_min_post; cnt1 <= k_max_post; cnt1++){
      if(l_min_post[cnt1] < INF){
        /* new memsize (l values are parity-packed: two l steps per slot) */
        mem_size = (l_max_post[cnt1] - l_min_post[cnt1] + 1)/2 + 1;
        /* reshift the pointer back to the allocation base */
        (*array)[cnt1] += (*l_min)[cnt1]/2;
        /* extra slot when old and new lower l bounds differ in parity */
        int shift = (l_min_post[cnt1]%2 == (*l_min)[cnt1]%2) ? 0 : 1;
        /* eliminate unused memory in front of actual data */
        unsigned int start = (l_min_post[cnt1] - (*l_min)[cnt1])/2 + shift;
        if(start > 0)
          memmove((int *)((*array)[cnt1]), (int *)((*array)[cnt1])+start, sizeof(int) * mem_size);
        (*array)[cnt1] = (int *) realloc((*array)[cnt1], sizeof(int) * mem_size);
        (*array)[cnt1] -= l_min_post[cnt1]/2;
      }
      else{
        /* no l value used for this k: free according memory */
        (*array)[cnt1] += (*l_min)[cnt1]/2;
        free((*array)[cnt1]);
      }
      (*l_min)[cnt1] = l_min_post[cnt1];
      (*l_max)[cnt1] = l_max_post[cnt1];
    }
  }
  else{
    /* nothing was used at all: we have to free all unused memory */
    for(cnt1 = *k_min; cnt1 <= *k_max; cnt1++){
      (*array)[cnt1] += (*l_min)[cnt1]/2;
      free((*array)[cnt1]);
    }
    (*l_min) += *k_min;
    (*l_max) += *k_min;
    free(*l_min);
    free(*l_max);
    (*array) += *k_min;
    free(*array);
    *array = NULL;
  }
  /* the posterior l-bound arrays were allocated offset by the OLD *k_min
   * (see preparePosteriorBoundaries); restore their base and release them */
  l_min_post += *k_min;
  l_max_post += *k_min;
  free(l_min_post);
  free(l_max_post);
  *k_min = k_min_post;
  *k_max = k_max_post;
}
/* Allocate and reset the posterior boundary trackers: the scalar k range is
 * set to the "empty" state (min = INF, max = 0) and two arrays of `size`
 * per-k l bounds are created, likewise emptied, and offset by `shift` so the
 * caller can index them directly with k values starting at `shift`. */
INLINE PRIVATE void preparePosteriorBoundaries(int size, int shift, int *min_k, int *max_k, int **min_l, int **max_l){
  int idx;
  int *lo = (int *)space(sizeof(int) * size);
  int *hi = (int *)space(sizeof(int) * size);

  *min_k = INF;
  *max_k = 0;

  for(idx = 0; idx < size; idx++){
    lo[idx] = INF;  /* "no l seen yet" */
    hi[idx] = 0;
  }

  /* rebase so that index `shift` maps to slot 0 */
  *min_l = lo - shift;
  *max_l = hi - shift;
}
/* Record that the distance pair (d1, d2) was actually used: widen the l
 * range tracked for this d1 and the global k range accordingly. */
INLINE PRIVATE void updatePosteriorBoundaries(int d1, int d2, int *min_k, int *max_k, int **min_l, int **max_l){
  if((*min_l)[d1] > d2) (*min_l)[d1] = d2;
  if((*max_l)[d1] < d2) (*max_l)[d1] = d2;
  if(*min_k > d1)       *min_k = d1;
  if(*max_k < d1)       *max_k = d1;
}
/* Initialize a (k, l) boundary pair from the prior k/l ranges.  For every k
 * the minimum l is raised until k + l >= bpdist and then bumped once more if
 * needed so that (k + l) has the same parity as bpdist; the maximum l is the
 * prior upper bound unchanged.  The bound arrays are offset by min_k_pre so
 * they can be indexed directly with k. */
INLINE PRIVATE void prepareBoundaries(int min_k_pre, int max_k_pre, int min_l_pre, int max_l_pre, int bpdist, int *min_k, int *max_k, int **min_l, int **max_l){
  int k;
  int span = max_k_pre - min_k_pre + 1;

  *min_k = min_k_pre;
  *max_k = max_k_pre;

  *min_l = ((int *) space(sizeof(int) * span)) - min_k_pre;
  *max_l = ((int *) space(sizeof(int) * span)) - min_k_pre;

  /* for each k guess the according minimum l */
  for(k = min_k_pre; k <= max_k_pre; k++){
    int lo = min_l_pre;
    while(lo + k < bpdist)
      lo++;
    /* match the parity of bpdist */
    if((bpdist % 2) != ((lo + k) % 2))
      lo++;
    (*min_l)[k] = lo;
    (*max_l)[k] = max_l_pre;
  }
}
/* Allocate a 2D energy matrix over [min_k..max_k] x [min_l[k]..max_l[k]]
 * with offset-pointer addressing ((*array)[k][l/2]) and every cell set to
 * INF.  l values are parity-packed, so each row holds one slot per two l. */
INLINE PRIVATE void prepareArray(int ***array, int min_k, int max_k, int *min_l, int *max_l){
  int k, idx, cells;

  *array = ((int **)space(sizeof(int *) * (max_k - min_k + 1))) - min_k;

  for(k = min_k; k <= max_k; k++){
    cells = (max_l[k] - min_l[k] + 1)/2 + 1;
    int *row = (int *)space(sizeof(int) * cells);
    for(idx = 0; idx < cells; idx++)
      row[idx] = INF;
    /* offset so that the row is indexed by l/2 directly */
    (*array)[k] = row - min_l[k]/2;
  }
}
/* Allocate a 2D counter matrix with the same offset-pointer layout as
 * prepareArray ((*array)[k][l/2]), but for unsigned long cells and without
 * the INF fill -- rows keep whatever space() hands back (presumably
 * zero-initialized; confirm against space()'s definition). */
INLINE PRIVATE void prepareArray2(unsigned long ***array, int min_k, int max_k, int *min_l, int *max_l){
  int k, cells;

  *array = ((unsigned long **)space(sizeof(unsigned long *) * (max_k - min_k + 1))) - min_k;

  for(k = min_k; k <= max_k; k++){
    cells = (max_l[k] - min_l[k] + 1)/2 + 1;
    (*array)[k] = ((unsigned long *)space(sizeof(unsigned long) * cells)) - min_l[k]/2;
  }
}
|
GB_unop__atan_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__atan_fc64_fc64)
// op(A') function: GB (_unop_tran__atan_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = catan (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = catan (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = catan (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATAN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = catan (Ax): apply the complex arctangent entrywise.
// Full/dense case when Ab is NULL; otherwise Ab is A's bitmap and only the
// entries with Ab [p] != 0 are present (C->b was already copied from A->b).
GrB_Info GB (_unop_apply__atan_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry of A is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = catan (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = catan (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The entire transpose kernel lives in GB_unop_transpose.c, which is
// specialized by the GB_* macros defined above (GB_ATYPE, GB_CTYPE,
// GB_CAST_OP, ...) for the FC64 atan operator.
GrB_Info GB (_unop_tran__atan_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,   // scratch, consumed by GB_unop_transpose.c
    const int64_t *restrict A_slice, // task partition of A, see GB_unop_transpose.c
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
calcv.c | #include <stddef.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <math.h>
#ifdef _FOR_R
#include <R_ext/Print.h>
#define fprintf(f, message) REprintf(message)
#else
#include <stdio.h>
#endif
/* TODO: use qsort_s for argsorting, or switch to C++ std::sort */
/* A value paired with its original position, for argsorting via qsort. */
typedef struct indexed_double {double x; size_t ix;} indexed_double;

/* qsort comparator for indexed_double, ascending by value.
 * Fix: the original returned only 1/-1 and never 0, so for equal keys
 * cmp(a,b) and cmp(b,a) both returned -1 -- an inconsistent comparator,
 * which is undefined behavior per C11 7.22.5.  The (gt) - (lt) idiom
 * returns 0 on ties. */
int comp_for_argsort_dbl(const void *a, const void *b)
{
    const double xa = ((const indexed_double*)a)->x;
    const double xb = ((const indexed_double*)b)->x;
    return (xa > xb) - (xa < xb);
}
/* qsort comparator for (value, index) size_t pairs, ascending by value.
 * Fix: like comp_for_argsort_dbl, the original never returned 0 and was
 * therefore inconsistent on equal keys (UB per C11 7.22.5). */
int comp_for_argsort_szt(const void *a, const void *b)
{
    const size_t va = ((const size_t*)a)[0];
    const size_t vb = ((const size_t*)b)[0];
    return (va > vb) - (va < vb);
}
/* Argsort: out[i] receives the original position of the i-th smallest
 * element of a.  Pairs each value with its index in `buffer`, sorts the
 * pairs, and reads the permutation back.
 * Note: this is a rather inefficient procedure and can be improved with
 * e.g. C++'s sort. */
void argsort_naive_dbl(double a[], size_t out[], indexed_double buffer[], size_t n)
{
    indexed_double *pair = buffer;
    for (size_t i = 0; i < n; i++, pair++) {
        pair->x = a[i];
        pair->ix = i;
    }
    qsort(buffer, n, sizeof(*buffer), comp_for_argsort_dbl);
    for (size_t i = 0; i < n; i++)
        out[i] = buffer[i].ix;
}
/* Argsort for size_t arrays: out[i] is the original position of the i-th
 * smallest element of a.  `buffer` (length 2n) stores interleaved
 * (value, index) pairs that are sorted as two-word records. */
void argsort_naive_szt(size_t a[], size_t out[], size_t buffer[], size_t n)
{
    for (size_t i = 0; i < n; i++) {
        size_t *pair = buffer + 2 * i;
        pair[0] = a[i];
        pair[1] = i;
    }
    qsort(buffer, n, 2 * sizeof(size_t), comp_for_argsort_szt);
    for (size_t i = 0; i < n; i++)
        out[i] = buffer[2 * i + 1];
}
/* Smallest element of a[0..n-1]; HUGE_VAL when n == 0. */
double find_min(double a[], size_t n)
{
    double best = HUGE_VAL;
    for (size_t i = 0; i < n; i++) {
        if (a[i] < best)
            best = a[i];
    }
    return best;
}
/* out[i] = row value at sorted position inner_order[i], rebased so that the
 * row minimum maps to zero. */
void calc_cost(double row_C[], double out[], size_t inner_order[], size_t ncol)
{
    const double base = find_min(row_C, ncol);
    for (size_t i = 0; i < ncol; i++)
        out[i] = row_C[inner_order[i]] - base;
}
/* out[i] = cost[i+1] - cost[i], the gaps between consecutive sorted costs
 * (ncol - 1 values written).
 * Fix: the original loop condition `i < ncol - 1` underflowed when
 * ncol == 0 (size_t wrap-around), turning the loop into a huge
 * out-of-bounds scan; `i + 1 < ncol` is safe for every ncol. */
void calc_rectangle_width(double cost[], double out[], size_t ncol)
{
    for (size_t i = 0; i + 1 < ncol; i++)
        out[i] = cost[i + 1] - cost[i];
}
/* Permute a in place so that a[i] becomes a[ix[i]], using `buffer` (length
 * n) as scratch space. */
void sort_by_ix(double a[], size_t ix[], double buffer[], size_t n)
{
    for (size_t i = 0; i < n; i++)
        buffer[i] = a[ix[i]];
    for (size_t i = 0; i < n; i++)
        a[i] = buffer[i];
}
/* Per-thread scratch buffers for calculate_V.  File-scope and marked
 * threadprivate so that, with OpenMP enabled, every thread owns its own
 * copy of each pointer. */
size_t *inner_order;
size_t *out_order;
indexed_double *buffer_argsort_dbl;
size_t *buffer_argsort_szt;
double *cost_buffer;
double *rectangle_width_arr;
#pragma omp threadprivate(inner_order, out_order, buffer_argsort_dbl, buffer_argsort_szt, cost_buffer, rectangle_width_arr)

/* Fill V (nrow x ncol, row-major) from cost matrix C.  Per row: argsort the
 * costs, take the gaps between consecutive sorted costs, accumulate
 * gap/(rank+1) into a running sum, then scatter the sums back to the
 * original column order.  Returns 0 on success, 1 on allocation failure.
 *
 * Fix: the cleanup section previously freed only four of the six scratch
 * buffers -- out_order and buffer_argsort_szt leaked on every call. */
int calculate_V(double C[], double V[], size_t nrow, size_t ncol, int nthreads)
{
    int out_of_mem = 0;
    /* Note: MSVC is stuck with an older version of OpenMP (17 years old at the time or writing this)
       which does not support 'max' reductions */
    #ifdef _OPENMP
    #if !defined(_MSC_VER) && _OPENMP>20080101
    #pragma omp parallel reduction(max:out_of_mem)
    #endif
    #endif
    {
        /* allocate each thread's scratch space (runs serially when the
           guarded pragma above is compiled out) */
        inner_order = (size_t*) malloc(sizeof(size_t) * ncol);
        out_order = (size_t*) malloc(sizeof(size_t) * ncol);
        buffer_argsort_dbl = (indexed_double*) malloc(sizeof(indexed_double) * ncol);
        buffer_argsort_szt = (size_t*) malloc(sizeof(size_t) * ncol * 2);
        cost_buffer = (double*) malloc(sizeof(double) * ncol);
        rectangle_width_arr = (double*) malloc(sizeof(double) * (ncol - 1));
        if (inner_order == NULL || out_order == NULL || buffer_argsort_dbl == NULL || buffer_argsort_szt == NULL ||
            cost_buffer == NULL || rectangle_width_arr == NULL) {
            out_of_mem = 1;
        }
    }

    if (out_of_mem) {
        fprintf(stderr, "Error: Could not allocate memory for the procedure.\n");
        goto cleanup;
    }

    /* NOTE(review): when the guarded allocation pragma is compiled out
       (MSVC / pre-3.0 OpenMP) only the master thread's buffers exist, yet
       this loop still requests nthreads threads -- confirm those builds are
       restricted to nthreads == 1. */
    #pragma omp parallel for schedule(static) num_threads(nthreads) firstprivate(C, V, nrow, ncol)
    for (size_t row = 0; row < nrow; row++) {
        argsort_naive_dbl(C + row * ncol, inner_order, buffer_argsort_dbl, ncol);
        calc_cost(C + row * ncol, cost_buffer, inner_order, ncol);
        argsort_naive_szt(inner_order, out_order, buffer_argsort_szt, ncol);
        calc_rectangle_width(cost_buffer, rectangle_width_arr, ncol);
        /* prefix-sum of gap/(rank+1) over sorted positions */
        V[row * ncol] = 0;
        for (size_t col = 0; col < ncol - 1; col++) { V[row * ncol + col + 1] = V[row * ncol + col] + rectangle_width_arr[col] / ((double) col + 1); }
        /* scatter back to the row's original column order */
        sort_by_ix(V + row * ncol, out_order, cost_buffer, ncol);
    }

    cleanup:
    /* release every thread's scratch space; free(NULL) is a no-op, so this
       is safe even when some allocations failed */
    #pragma omp parallel
    {
        free(inner_order);
        free(out_order);
        free(buffer_argsort_dbl);
        free(buffer_argsort_szt);
        free(cost_buffer);
        free(rectangle_width_arr);
    }
    return out_of_mem;
}
|
pooling_3x3_pack8.h |
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 3x3 max pooling with stride 2 over 8-packed (__m256) float channels.
// top_blob must already be sized to outw x outh x inch; every output element
// is the elementwise max of a 3x3 neighborhood of packed input elements.
static void pooling3x3s2_max_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;

    // floats to skip after finishing an output row: the remainder of the
    // current input row (w - 2*outw columns) plus one whole extra input row
    // (vertical stride 2), times 8 floats per packed element
    const int tailstep = (w - 2 * outw + w) * 8;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // the three input rows feeding the current output row
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // unrolled: 4 output columns per iteration; adjacent stride-2
            // windows overlap by one column, so edge registers are reused
            for (; j + 3 < outw; j += 4)
            {
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                // output j: max over input columns 0..2 of rows 0..2
                __m256 _max00 = _mm256_max_ps(_r00, _r01);
                _max00 = _mm256_max_ps(_max00, _r02);
                _max00 = _mm256_max_ps(_max00, _r10);
                _max00 = _mm256_max_ps(_max00, _r11);
                __m256 _max01 = _mm256_max_ps(_r12, _r20);
                _max01 = _mm256_max_ps(_max01, _r21);
                _max01 = _mm256_max_ps(_max01, _r22);

                // preload columns 3..4 for the next window
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);

                _mm256_storeu_ps(outptr, _mm256_max_ps(_max00, _max01));

                // output j+1: columns 2..4 (column 2 shared with output j)
                __m256 _max10 = _mm256_max_ps(_r03, _r04);
                _max10 = _mm256_max_ps(_max10, _r02);
                _max10 = _mm256_max_ps(_max10, _r13);
                _max10 = _mm256_max_ps(_max10, _r14);
                __m256 _max11 = _mm256_max_ps(_r12, _r23);
                _max10 = _mm256_max_ps(_max10, _r24);
                _max10 = _mm256_max_ps(_max10, _r22);

                // preload columns 5..6
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r06 = _mm256_loadu_ps(r0 + 48);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r16 = _mm256_loadu_ps(r1 + 48);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);
                __m256 _r26 = _mm256_loadu_ps(r2 + 48);

                _mm256_storeu_ps(outptr + 8, _mm256_max_ps(_max10, _max11));

                // output j+2: columns 4..6 (column 4 shared)
                __m256 _max20 = _mm256_max_ps(_r05, _r06);
                _max20 = _mm256_max_ps(_max20, _r04);
                _max20 = _mm256_max_ps(_max20, _r15);
                _max20 = _mm256_max_ps(_max20, _r16);
                __m256 _max21 = _mm256_max_ps(_r14, _r25);
                _max20 = _mm256_max_ps(_max20, _r26);
                _max20 = _mm256_max_ps(_max20, _r24);

                // preload columns 7..8
                __m256 _r07 = _mm256_loadu_ps(r0 + 56);
                __m256 _r08 = _mm256_loadu_ps(r0 + 64);
                __m256 _r17 = _mm256_loadu_ps(r1 + 56);
                __m256 _r18 = _mm256_loadu_ps(r1 + 64);
                __m256 _r27 = _mm256_loadu_ps(r2 + 56);
                __m256 _r28 = _mm256_loadu_ps(r2 + 64);

                _mm256_storeu_ps(outptr + 16, _mm256_max_ps(_max20, _max21));

                // output j+3: columns 6..8 (column 6 shared)
                __m256 _max30 = _mm256_max_ps(_r07, _r08);
                _max30 = _mm256_max_ps(_max30, _r06);
                _max30 = _mm256_max_ps(_max30, _r17);
                _max30 = _mm256_max_ps(_max30, _r18);
                __m256 _max31 = _mm256_max_ps(_r16, _r27);
                _max30 = _mm256_max_ps(_max30, _r28);
                _max30 = _mm256_max_ps(_max30, _r26);

                _mm256_storeu_ps(outptr + 24, _mm256_max_ps(_max30, _max31));

                // advance 4 output columns = 8 input columns of 8 floats
                r0 += 64;
                r1 += 64;
                r2 += 64;
                outptr += 32;
            }
            // 2 output columns per iteration
            for (; j + 1 < outw; j += 2)
            {
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                // output j: columns 0..2
                __m256 _max00 = _mm256_max_ps(_r00, _r01);
                _max00 = _mm256_max_ps(_max00, _r02);
                _max00 = _mm256_max_ps(_max00, _r10);
                _max00 = _mm256_max_ps(_max00, _r11);
                __m256 _max01 = _mm256_max_ps(_r12, _r20);
                _max01 = _mm256_max_ps(_max01, _r21);
                _max01 = _mm256_max_ps(_max01, _r22);

                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);

                _mm256_storeu_ps(outptr, _mm256_max_ps(_max00, _max01));

                // output j+1: columns 2..4 (column 2 shared)
                __m256 _max10 = _mm256_max_ps(_r03, _r04);
                _max10 = _mm256_max_ps(_max10, _r02);
                _max10 = _mm256_max_ps(_max10, _r13);
                _max10 = _mm256_max_ps(_max10, _r14);
                __m256 _max11 = _mm256_max_ps(_r12, _r23);
                _max10 = _mm256_max_ps(_max10, _r24);
                _max10 = _mm256_max_ps(_max10, _r22);

                _mm256_storeu_ps(outptr + 8, _mm256_max_ps(_max10, _max11));

                r0 += 32;
                r1 += 32;
                r2 += 32;
                outptr += 16;
            }
            // remaining single output columns
            for (; j < outw; j++)
            {
                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                // max of the full 3x3 neighborhood
                __m256 _max0 = _mm256_max_ps(_r00, _r01);
                _max0 = _mm256_max_ps(_max0, _r02);
                _max0 = _mm256_max_ps(_max0, _r10);
                _max0 = _mm256_max_ps(_max0, _r11);
                __m256 _max1 = _mm256_max_ps(_r12, _r20);
                _max1 = _mm256_max_ps(_max1, _r21);
                _max1 = _mm256_max_ps(_max1, _r22);

                _mm256_storeu_ps(outptr, _mm256_max_ps(_max0, _max1));

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr += 8;
            }

            // skip to the start of the next pair of input rows
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
THZTensorMath.c | /**
* Copyright (c) 2015-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
# include <complex.h>
#ifndef THZ_GENERIC_FILE
#define THZ_GENERIC_FILE "generic/THZTensorMath.c"
#else
#define THZ_OMP_OVERHEAD_THZRESHOLD 100000
/* Set every element of r_ to `value`.  TH_TENSOR_APPLY iterates over the
 * tensor in contiguous chunks; THZVector_(fill) handles r__size elements at
 * once and `break` exits the macro's loop afterwards -- the standard TH
 * idiom (presumably valid because r__size spans the whole iteration here;
 * confirm against TH_TENSOR_APPLY's definition). */
void THZTensor_(fill)(THZTensor *r_, real value)
{
  TH_TENSOR_APPLY(real, r_,
                  THZVector_(fill)(r__data, value, r__size); break;);
}
/* Set every element of r_ to zero; same vectorized-fill-and-break idiom as
 * THZTensor_(fill) above. */
void THZTensor_(zero)(THZTensor *r_)
{
  TH_TENSOR_APPLY(real, r_,
                  THZVector_(fill)(r__data, 0, r__size); break;);
}
/* Write `value` into every element of `tensor` whose corresponding `mask`
 * entry is 1.  Mask entries other than 0 and 1 raise an error. */
void THZTensor_(maskedFill)(THZTensor *tensor, THByteTensor *mask, real value)
{
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
                   if (*mask_data > 1) THError("Mask tensor can take 0 and 1 values only");
                   else if (*mask_data == 1) *tensor_data = value;);
}
/* Copy elements of `src` (consumed in linear order from a contiguous copy)
 * into the positions of `tensor` where `mask` is 1.  Errors if mask holds
 * values other than 0/1, or if the number of ones in the mask does not
 * match the number of elements in src. */
void THZTensor_(maskedCopy)(THZTensor *tensor, THByteTensor *mask, THZTensor* src )
{
  /* contiguous copy so src_data can be walked with a flat pointer */
  THZTensor *srct = THZTensor_(newContiguous)(src);
  real *src_data = THZTensor_(data)(srct);
  long cntr = 0;                           /* elements consumed from src */
  long nelem = THZTensor_(nElement)(srct);
  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
                   if (*mask_data > 1)
                   {
                     THError("Mask tensor can take 0 and 1 values only");
                   }
                   else if (*mask_data == 1)
                   {
                     *tensor_data = *src_data;
                     src_data++;
                     cntr++;
                     /* mask has more ones than src has elements */
                     if (cntr > nelem)
                       THError("Number of elements of src != mask");
                   });
  /* mask has fewer ones than src has elements */
  if (cntr != nelem)
    THError("Number of elements of src != mask");
  THZTensor_(free)(srct);
}
/* Gather the elements of `src` where `mask` is 1 into `tensor`, which is
 * resized to a 1-D tensor of length sum(mask).  Mask entries other than
 * 0 and 1 raise an error. */
void THZTensor_(maskedSelect)(THZTensor *tensor, THZTensor *src, THByteTensor *mask)
{
  long numel = THByteTensor_sumall(mask);
  real *tensor_data;
  THZTensor_(resize1d)(tensor,numel);
  tensor_data = THZTensor_(data)(tensor);
  TH_TENSOR_APPLY2(real, src, unsigned char, mask,
                   if (*mask_data > 1)
                   {
                     THError("Mask tensor can take 0 and 1 values only");
                   }
                   else if (*mask_data == 1)
                   {
                     *tensor_data = *src_data;
                     tensor_data++;
                   });
}
/* tensor = src indexed along `dim` by the 1-D `index` tensor.
   Indices are 1-based (Lua convention — note the index_data[i]-1 below).
   The output takes src's shape with size numel(index) along `dim`. */
void THZTensor_(indexSelect)(THZTensor *tensor, THZTensor *src, int dim, THLongTensor *index)
{
  long i, numel;
  THLongStorage *newSize;
  THZTensor *tSlice, *sSlice;
  long *index_data;
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension,4,"Indexing dim is out of bounds");
  THArgCheck(src->nDimension > 0,2,"Source tensor is empty");
  numel = THLongTensor_nElement(index);
  newSize = THLongStorage_newWithSize(src->nDimension);
  THLongStorage_rawCopy(newSize,src->size);
  newSize->data[dim] = numel;  /* output size along dim = number of indices */
  THZTensor_(resize)(tensor,newSize,NULL);
  THLongStorage_free(newSize);
  index = THLongTensor_newContiguous(index);  /* shadows caller's handle; freed below */
  index_data = THLongTensor_data(index);
  for (i=0; i<numel; i++)
  {
    if (src->nDimension > 1)
    {
      /* copy one whole slice per index */
      tSlice = THZTensor_(new)();
      sSlice = THZTensor_(new)();
      THZTensor_(select)(tSlice, tensor, dim, i);
      THZTensor_(select)(sSlice, src, dim, index_data[i]-1);
      THZTensor_(copy)(tSlice, sSlice);
      THZTensor_(free)(tSlice);
      THZTensor_(free)(sSlice);
    }
    else
    {
      /* 1-D case: element-wise gather */
      THZTensor_(set1d)(tensor,i,THZTensor_(get1d)(src,index_data[i]-1));
    }
  }
  THLongTensor_free(index);
}
/* Scatter-copy: tensor[..., index[i], ...] = src[..., i, ...] along `dim`.
   Indices are 1-based. numel(index) must equal src->size[dim]. */
void THZTensor_(indexCopy)(THZTensor *tensor, int dim, THLongTensor *index, THZTensor *src)
{
  long i, numel;
  THZTensor *tSlice, *sSlice;
  long *index_data;
  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < src->nDimension,4,"Indexing dim is out of bounds");
  THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)");
  index = THLongTensor_newContiguous(index);  /* local handle; freed below */
  index_data = THLongTensor_data(index);
  for (i=0; i<numel; i++)
  {
    if (tensor->nDimension > 1 )
    {
      /* copy a full slice of src into the indexed slice of tensor */
      tSlice = THZTensor_(new)();
      sSlice = THZTensor_(new)();
      THZTensor_(select)(tSlice, tensor, dim, index_data[i]-1);
      THZTensor_(select)(sSlice, src, dim, i);
      THZTensor_(copy)(tSlice, sSlice);
      THZTensor_(free)(tSlice);
      THZTensor_(free)(sSlice);
    }
    else
    {
      THZTensor_(set1d)(tensor,index_data[i]-1,THZTensor_(get1d)(src,i));
    }
  }
  THLongTensor_free(index);
}
/* Fill the slices of `tensor` selected by the 1-based `index` vector along
   `dim` with the constant `val`. */
void THZTensor_(indexFill)(THZTensor *tensor, int dim, THLongTensor *index, real val)
{
  long i, numel;
  THZTensor *tSlice;
  long *index_data;
  numel = THLongTensor_nElement(index);
  THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector");
  THArgCheck(dim < tensor->nDimension,4,"Indexing dim is out of bounds");
  index = THLongTensor_newContiguous(index);  /* local handle; freed below */
  index_data = THLongTensor_data(index);
  for (i=0; i<numel; i++)
  {
    if (tensor->nDimension > 1 )
    {
      tSlice = THZTensor_(new)();
      THZTensor_(select)(tSlice, tensor,dim,index_data[i]-1);
      THZTensor_(fill)(tSlice, val);
      THZTensor_(free)(tSlice);
    }
    else
    {
      THZTensor_(set1d)(tensor,index_data[i]-1,val);
    }
  }
  THLongTensor_free(index);
}
/* Dot product of two tensors, computed by handing each maximal contiguous
   run to BLAS. The "trick": the loop counters/pointers that the APPLY macro
   maintains are advanced by hand over the whole run and `break` skips the
   macro's own per-element loop.
   NOTE(review): whether THZBlas_(dot) conjugates its first argument (dotc
   vs dotu semantics) is not visible here — check THZBlas before relying on
   a specific complex inner-product convention. */
accreal THZTensor_(dot)(THZTensor *tensor, THZTensor *src)
{
  accreal sum = 0;
  /* we use a trick here. careful with that. */
  TH_TENSOR_APPLY2(real, tensor, real, src,
                   long sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i);
                   sum += THZBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride);
                   tensor_i += sz;
                   src_i += sz;
                   tensor_data += sz*tensor_stride;
                   src_data += sz*src_stride;
                   break;);
  return sum;
}
/* Return the element with the smallest complex magnitude (|z| via CABS).
   The returned value is the element itself, not its magnitude. */
real THZTensor_(minall)(THZTensor *tensor)
{
  real theMin;
  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMin = THZTensor_(data)(tensor)[0];
  TH_TENSOR_APPLY(real, tensor, if(CABS(*tensor_data) < CABS(theMin)) theMin = *tensor_data;);
  return theMin;
}
/* Return the element with the largest complex magnitude (|z| via CABS). */
real THZTensor_(maxall)(THZTensor *tensor)
{
  real theMax;
  THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension");
  theMax = THZTensor_(data)(tensor)[0];
  TH_TENSOR_APPLY(real, tensor, if(CABS(*tensor_data) > CABS(theMax)) theMax = *tensor_data;);
  return theMax;
}
/* Sum of all elements, accumulated in accreal. */
accreal THZTensor_(sumall)(THZTensor *tensor)
{
  accreal sum = 0;
  TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;);
  return sum;
}
/* r_ = t + value (elementwise scalar add).
   Contiguous tensors take an OpenMP-parallel flat loop; otherwise fall back
   to the strided APPLY macro. */
void THZTensor_(add)(THZTensor *r_, THZTensor *t, real value)
{
  THZTensor_(resizeAs)(r_, t);
  if (THZTensor_(isContiguous)(r_) && THZTensor_(isContiguous)(t) && THZTensor_(nElement)(r_) == THZTensor_(nElement)(t)) {
    real *tp = THZTensor_(data)(t);
    real *rp = THZTensor_(data)(r_);
    long sz = THZTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > THZ_OMP_OVERHEAD_THZRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = tp[i] + value;
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
  }
}
/* r_ = t * value (elementwise scalar multiply); same fast/slow paths as add. */
void THZTensor_(mul)(THZTensor *r_, THZTensor *t, real value)
{
  THZTensor_(resizeAs)(r_, t);
  if (THZTensor_(isContiguous)(r_) && THZTensor_(isContiguous)(t) && THZTensor_(nElement)(r_) == THZTensor_(nElement)(t)) {
    real *tp = THZTensor_(data)(t);
    real *rp = THZTensor_(data)(r_);
    long sz = THZTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > THZ_OMP_OVERHEAD_THZRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = tp[i] * value;
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
  }
}
/* r_ = t / value (elementwise scalar divide); no check for value == 0. */
void THZTensor_(div)(THZTensor *r_, THZTensor *t, real value)
{
  THZTensor_(resizeAs)(r_, t);
  if (THZTensor_(isContiguous)(r_) && THZTensor_(isContiguous)(t) && THZTensor_(nElement)(r_) == THZTensor_(nElement)(t)) {
    real *tp = THZTensor_(data)(t);
    real *rp = THZTensor_(data)(r_);
    long sz = THZTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > THZ_OMP_OVERHEAD_THZRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = tp[i] / value;
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
  }
}
/* r_ = t + value * src (elementwise).
   When r_ aliases t and everything is contiguous, this is exactly BLAS axpy;
   otherwise a parallel flat loop, or the strided APPLY fallback. */
void THZTensor_(cadd)(THZTensor *r_, THZTensor *t, real value, THZTensor *src)
{
  THZTensor_(resizeAs)(r_, t);
  if (THZTensor_(isContiguous)(r_) && THZTensor_(isContiguous)(t) && THZTensor_(isContiguous)(src) && THZTensor_(nElement)(r_) == THZTensor_(nElement)(src)) {
    if(r_ == t) {
      /* in-place update: r_ += value * src */
      THZBlas_(axpy)(THZTensor_(nElement)(t), value, THZTensor_(data)(src), 1, THZTensor_(data)(r_), 1);
    } else {
      real *tp = THZTensor_(data)(t);
      real *sp = THZTensor_(data)(src);
      real *rp = THZTensor_(data)(r_);
      long sz = THZTensor_(nElement)(t);
      long i;
      #pragma omp parallel for if(sz > THZ_OMP_OVERHEAD_THZRESHOLD) private(i)
      for (i=0; i< sz; i++)
        rp[i] = tp[i] + value * sp[i];
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
  }
}
/* Build complex values from polar coordinates:
   r_[i] = abso[i] * (cos(angle[i]) + I*sin(angle[i])).
   `abso` and `angle` are real-valued (realscalar) tensors.
   NOTE(review): r_ is not resized here — caller is expected to pass a
   correctly-sized result tensor (unlike most siblings, which resizeAs). */
THZ_API void THZTensor_(polar)(THZTensor *r_, THTensor *abso, THTensor *angle)
{
  if (THZTensor_(isContiguous)(r_) && THTensor_(isContiguous)(abso) && THTensor_(isContiguous)(angle) && THZTensor_(nElement)(r_) == THTensor_(nElement)(angle)) {
    realscalar *tp = THTensor_(data)(abso);
    realscalar *sp = THTensor_(data)(angle);
    real *rp = THZTensor_(data)(r_);
    long sz = THTensor_(nElement)(abso);
    long i;
    #pragma omp parallel for if(sz > THZ_OMP_OVERHEAD_THZRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = tp[i] * cos(sp[i]) + tp[i] * sin(sp[i]) * I;
  } else {
    TH_TENSOR_APPLY3(real, r_, realscalar, abso, realscalar, angle, *r__data = *abso_data * cos(*angle_data) + *abso_data * sin(*angle_data) * I;);
  }
  //*out = ccx(*abs*cos(*arg),*abs*sin(*arg));
}
/* r_ = t * src (elementwise, Hadamard product). */
void THZTensor_(cmul)(THZTensor *r_, THZTensor *t, THZTensor *src)
{
  THZTensor_(resizeAs)(r_, t);
  if (THZTensor_(isContiguous)(r_) && THZTensor_(isContiguous)(t) && THZTensor_(isContiguous)(src) && THZTensor_(nElement)(r_) == THZTensor_(nElement)(src)) {
    real *tp = THZTensor_(data)(t);
    real *sp = THZTensor_(data)(src);
    real *rp = THZTensor_(data)(r_);
    long sz = THZTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > THZ_OMP_OVERHEAD_THZRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = tp[i] * sp[i];
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;);
  }
}
/* r_ = t / src (elementwise division; no zero-divisor check). */
void THZTensor_(cdiv)(THZTensor *r_, THZTensor *t, THZTensor *src)
{
  THZTensor_(resizeAs)(r_, t);
  if (THZTensor_(isContiguous)(r_) && THZTensor_(isContiguous)(t) && THZTensor_(isContiguous)(src) && THZTensor_(nElement)(r_) == THZTensor_(nElement)(src)) {
    real *tp = THZTensor_(data)(t);
    real *sp = THZTensor_(data)(src);
    real *rp = THZTensor_(data)(r_);
    long sz = THZTensor_(nElement)(t);
    long i;
    #pragma omp parallel for if(sz > THZ_OMP_OVERHEAD_THZRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = tp[i] / sp[i];
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;);
  }
}
/* r_ = t + value * src1 * src2 (elementwise fused multiply-add).
   When r_ != t, t is first copied into r_, then the update is in-place. */
void THZTensor_(addcmul)(THZTensor *r_, THZTensor *t, real value, THZTensor *src1, THZTensor *src2)
{
  if(r_ != t)
  {
    THZTensor_(resizeAs)(r_, t);
    THZTensor_(copy)(r_, t);
  }
  TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;);
}
/* r_ = t + value * src1 / src2 (elementwise; no zero-divisor check). */
void THZTensor_(addcdiv)(THZTensor *r_, THZTensor *t, real value, THZTensor *src1, THZTensor *src2)
{
  if(r_ != t)
  {
    THZTensor_(resizeAs)(r_, t);
    THZTensor_(copy)(r_, t);
  }
  TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;);
}
/* r_ = beta*t + alpha*mat*vec (matrix-vector product via BLAS gemv).
   The three branches pick the gemv transpose flag from mat's memory layout:
   column-contiguous ('n'), row-contiguous ('t'), or neither (copy to a
   contiguous temporary first). */
void THZTensor_(addmv)(THZTensor *r_, real beta, THZTensor *t, real alpha, THZTensor *mat, THZTensor *vec)
{
  if( (mat->nDimension != 2) || (vec->nDimension != 1) )
    THError("matrix and vector expected");
  if( mat->size[1] != vec->size[0] )
    THError("size mismatch");
  if(t->nDimension != 1)
    THError("size mismatch");
  if(t->size[0] != mat->size[0])
    THError("size mismatch");
  if(r_ != t)
  {
    THZTensor_(resizeAs)(r_, t);
    THZTensor_(copy)(r_, t);
  }
  if(mat->stride[0] == 1)
  {
    /* column-major mat: no transpose, leading dim = stride[1] */
    THZBlas_(gemv)('n', mat->size[0], mat->size[1],
                   alpha, THZTensor_(data)(mat), mat->stride[1],
                   THZTensor_(data)(vec), vec->stride[0],
                   beta, THZTensor_(data)(r_), r_->stride[0]);
  }
  else if(mat->stride[1] == 1)
  {
    /* row-major mat: feed it transposed */
    THZBlas_(gemv)('t', mat->size[1], mat->size[0],
                   alpha, THZTensor_(data)(mat), mat->stride[0],
                   THZTensor_(data)(vec), vec->stride[0],
                   beta, THZTensor_(data)(r_), r_->stride[0]);
  }
  else
  {
    /* arbitrary strides: make a contiguous copy, then use the 't' path */
    THZTensor *cmat = THZTensor_(newContiguous)(mat);
    THZBlas_(gemv)('t', mat->size[1], mat->size[0],
                   alpha, THZTensor_(data)(cmat), cmat->stride[0],
                   THZTensor_(data)(vec), vec->stride[0],
                   beta, THZTensor_(data)(r_), r_->stride[0]);
    THZTensor_(free)(cmat);
  }
}
/* r_ = beta*t + alpha*m1*m2 (matrix-matrix product via BLAS gemm).
   Layout dispatch: for each of r_, m1, m2 choose 'n'/'t' depending on which
   stride is 1; a tensor with neither unit stride is copied contiguous.
   If r_ is row-contiguous, the whole product is computed as the transpose
   (m1 and m2 are swapped), which is why the dim-index expressions below all
   flip on transpose_r. The exact order of these stride checks is
   load-bearing — do not reorder. */
void THZTensor_(addmm)(THZTensor *r_, real beta, THZTensor *t, real alpha, THZTensor *m1, THZTensor *m2)
{
  char transpose_r, transpose_m1, transpose_m2;
  THZTensor *r__, *m1_, *m2_;
  if( (m1->nDimension != 2) || (m2->nDimension != 2) )
    THError("matrix and matrix expected");
  if(t->nDimension != 2)
    THError("size mismatch");
  if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) || (m1->size[1] != m2->size[0]) )
    THError("size mismatch");
  if(t != r_)
  {
    THZTensor_(resizeAs)(r_, t);
    THZTensor_(copy)(r_, t);
  }
  /* printf("%ldx%ld = %ldx%ld X %ldx%ld\n", r_->size[0], r_->size[1], m1->size[0], m1->size[1], m2->size[0], m2->size[1]); */
  /* r_ */
  if(r_->stride[0] == 1)
  {
    transpose_r = 'n';
    r__ = r_;
  }
  else if(r_->stride[1] == 1)
  {
    /* compute (m2^T m1^T)^T by swapping operands */
    THZTensor *swap = m2;
    m2 = m1;
    m1 = swap;
    transpose_r = 't';
    r__ = r_;
  }
  else
  {
    /* no unit stride: accumulate into a fresh transposed buffer */
    transpose_r = 'n';
    r__ = THZTensor_(newWithSize2d)(r_->size[1], r_->size[0]);
    THZTensor_(copy)(r__, r_);
    THZTensor_(transpose)(r__, NULL, 0, 1);
  }
  /* m1 */
  if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1)
  {
    transpose_m1 = 'n';
    m1_ = m1;
  }
  else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1)
  {
    transpose_m1 = 't';
    m1_ = m1;
  }
  else
  {
    transpose_m1 = (transpose_r == 'n' ? 't' : 'n');
    m1_ = THZTensor_(newContiguous)(m1);
  }
  /* m2 */
  if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1)
  {
    transpose_m2 = 'n';
    m2_ = m2;
  }
  else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1)
  {
    transpose_m2 = 't';
    m2_ = m2;
  }
  else
  {
    transpose_m2 = (transpose_r == 'n' ? 't' : 'n');
    m2_ = THZTensor_(newContiguous)(m2);
  }
  /* do the operation */
  THZBlas_(gemm)(transpose_m1,
                 transpose_m2,
                 r__->size[(transpose_r == 'n' ? 0 : 1)],
                 r__->size[(transpose_r == 'n' ? 1 : 0)],
                 m1_->size[(transpose_r == 'n' ? 1 : 0)],
                 alpha,
                 THZTensor_(data)(m1_),
                 (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]),
                 THZTensor_(data)(m2_),
                 (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]),
                 beta,
                 THZTensor_(data)(r__),
                 r__->stride[(transpose_r == 'n' ? 1 : 0)]);
  /* free intermediate variables */
  if(m1_ != m1)
    THZTensor_(free)(m1_);
  if(m2_ != m2)
    THZTensor_(free)(m2_);
  if(r__ != r_)
    THZTensor_(freeCopyTo)(r__, r_);
}
/* Outer-product update r_ = beta*t + alpha * vec1 (x) vec2, using the
   conjugating rank-1 BLAS update (gerc) — i.e. vec2 is conjugated, matching
   the usual complex outer product v1 v2^H.
   Column-contiguous r_ goes straight to gerc; otherwise the transposed
   update is built with geru on an explicitly conjugated copy of vec2
   (since (v1 v2^H)^T = conj(v2) v1^T), cloning r_ first if it has no unit
   stride at all. See addru for the non-conjugating variant. */
void THZTensor_(addr)(THZTensor *r_, real beta, THZTensor *t, real alpha, THZTensor *vec1, THZTensor *vec2)
{
  if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
    THError("vector and vector expected");
  if(t->nDimension != 2)
    THError("size mismatch");
  if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) )
    THError("size mismatch");
  if(r_ != t)
  {
    THZTensor_(resizeAs)(r_, t);
    THZTensor_(copy)(r_, t);
  }
  if(beta != 1)
    THZTensor_(mul)(r_, r_, beta);
  if(r_->stride[0] == 1)
  {
    THZBlas_(gerc)(vec1->size[0], vec2->size[0],
                   alpha, THZTensor_(data)(vec1), vec1->stride[0],
                   THZTensor_(data)(vec2), vec2->stride[0],
                   THZTensor_(data)(r_), r_->stride[1]);
  }
  else
  {
    THZTensor *cr = r_;
    if(r_->stride[1] != 1)
      cr = THZTensor_(newClone)(r_);  /* clone is contiguous */
    THZTensor *cvec2 = THZTensor_(new)();
    THZTensor_(conj)(cvec2, vec2);    /* geru has no conjugation, do it by hand */
    THZBlas_(geru)(cvec2->size[0], vec1->size[0],
                   alpha, THZTensor_(data)(cvec2), cvec2->stride[0],
                   THZTensor_(data)(vec1), vec1->stride[0],
                   THZTensor_(data)(cr), cr->stride[0]);
    THZTensor_(free)(cvec2);
    if (cr != r_)
      THZTensor_(freeCopyTo)(cr, r_);
  }
}
/* Unconjugated outer-product update r_ = beta*t + alpha * vec1 (x) vec2^T,
 * using BLAS geru (no conjugation) — the companion of addr/gerc above.
 *
 * BUGFIX: the second branch tested r_->stride[0] == 1 again (identical to
 * the first branch's condition), so the row-contiguous fast path was dead
 * and such tensors always fell through to the clone-and-copy path. It now
 * tests stride[1] == 1, mirroring the addmv/addmm layout dispatch. */
void THZTensor_(addru)(THZTensor *r_, real beta, THZTensor *t, real alpha, THZTensor *vec1, THZTensor *vec2)
{
  if( (vec1->nDimension != 1) || (vec2->nDimension != 1) )
    THError("vector and vector expected");
  if(t->nDimension != 2)
    THError("size mismatch");
  if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) )
    THError("size mismatch");
  if(r_ != t)
  {
    THZTensor_(resizeAs)(r_, t);
    THZTensor_(copy)(r_, t);
  }
  if(beta != 1)
    THZTensor_(mul)(r_, r_, beta);
  if(r_->stride[0] == 1)
  {
    /* column-contiguous result: update in place */
    THZBlas_(geru)(vec1->size[0], vec2->size[0],
                   alpha, THZTensor_(data)(vec1), vec1->stride[0],
                   THZTensor_(data)(vec2), vec2->stride[0],
                   THZTensor_(data)(r_), r_->stride[1]);
  }
  else if(r_->stride[1] == 1)  /* BUGFIX: was stride[0], making this branch dead */
  {
    /* row-contiguous result: apply the transposed update, swapping vectors */
    THZBlas_(geru)(vec2->size[0], vec1->size[0],
                   alpha, THZTensor_(data)(vec2), vec2->stride[0],
                   THZTensor_(data)(vec1), vec1->stride[0],
                   THZTensor_(data)(r_), r_->stride[0]);
  }
  else
  {
    /* no unit stride: update a contiguous clone, then copy back */
    THZTensor *cr = THZTensor_(newClone)(r_);
    THZBlas_(geru)(vec2->size[0], vec1->size[0],
                   alpha, THZTensor_(data)(vec2), vec2->stride[0],
                   THZTensor_(data)(vec1), vec1->stride[0],
                   THZTensor_(data)(cr), cr->stride[0]);
    THZTensor_(freeCopyTo)(cr, r_);
  }
}
/* Total number of elements in t (alias for nElement). */
long THZTensor_(numel)(THZTensor *t)
{
  return THZTensor_(nElement)(t);
}
/* Along `dimension`, find per-slice the element of largest complex
   magnitude. values_ receives the element itself, indices_ its 0-based
   position within the slice; both are sized like t with that dim set to 1. */
void THZTensor_(max)(THZTensor *values_, THLongTensor *indices_, THZTensor *t, int dimension)
{
  THLongStorage *dim;
  long i;
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 2, "dimension out of range");
  dim = THZTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THZTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);
  TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                       long theIndex = 0;
                       real theMax = t_data[0];
                       for(i = 1; i < t_size; i++)
                       {
                         if(CABS(t_data[i*t_stride]) > CABS(theMax))
                         {
                           theIndex = i;
                           theMax = t_data[i*t_stride];
                         }
                       }
                       *indices__data = theIndex;
                       *values__data = theMax;);
}
/* Along `dimension`, find per-slice the element of smallest complex
   magnitude; same output conventions as max above. */
void THZTensor_(min)(THZTensor *values_, THLongTensor *indices_, THZTensor *t, int dimension)
{
  THLongStorage *dim;
  long i;
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 2, "dimension out of range");
  dim = THZTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THZTensor_(resize)(values_, dim, NULL);
  THLongTensor_resize(indices_, dim, NULL);
  THLongStorage_free(dim);
  TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension,
                       long theIndex = 0;
                       real theMin = t_data[0];
                       for(i = 1; i < t_size; i++)
                       {
                         if(CABS(t_data[i*t_stride]) < CABS(theMin))
                         {
                           theIndex = i;
                           theMin = t_data[i*t_stride];
                         }
                       }
                       *indices__data = theIndex;
                       *values__data = theMin;);
}
/* Sum over `dimension`; r_ keeps t's shape with that dim collapsed to 1. */
void THZTensor_(sum)(THZTensor *r_, THZTensor *t, int dimension)
{
  THLongStorage *dim;
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 2, "dimension out of range");
  dim = THZTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THZTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                         sum += t_data[i*t_stride];
                       *r__data = (real)sum;);
}
/* Product over `dimension`; r_ keeps t's shape with that dim collapsed to 1. */
void THZTensor_(prod)(THZTensor *r_, THZTensor *t, int dimension)
{
  THLongStorage *dim;
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 2, "dimension out of range");
  dim = THZTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THZTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal prod = 1;
                       long i;
                       for(i = 0; i < t_size; i++)
                         prod *= t_data[i*t_stride];
                       *r__data = (real)prod;);
}
/* Cumulative sum along `dimension`; r_ has the same shape as t. */
void THZTensor_(cumsum)(THZTensor *r_, THZTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 2, "dimension out of range");
  THZTensor_(resizeAs)(r_, t);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumsum = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumsum += t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumsum;
                       });
}
/* Cumulative product along `dimension`; r_ has the same shape as t. */
void THZTensor_(cumprod)(THZTensor *r_, THZTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 2, "dimension out of range");
  THZTensor_(resizeAs)(r_, t);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumprod = 1;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         cumprod *= t_data[i*t_stride];
                         r__data[i*r__stride] = (real)cumprod;
                       });
}
/* Matrix trace: sum of the main diagonal of a 2-D tensor.
   Walks the diagonal directly via stride0+stride1 per step. */
accreal THZTensor_(trace)(THZTensor *t)
{
  real *t_data = THZTensor_(data)(t);
  accreal sum = 0;
  long i = 0;
  long t_stride_0, t_stride_1, t_diag_size;
  THArgCheck(THZTensor_(nDimension)(t) == 2, 1, "not a matrix");
  t_stride_0 = THZTensor_(stride)(t, 0);
  t_stride_1 = THZTensor_(stride)(t, 1);
  t_diag_size = THMin(THZTensor_(size)(t, 0), THZTensor_(size)(t, 1));
  while(i < t_diag_size)
  {
    sum += t_data[i*(t_stride_0+t_stride_1)];
    i++;
  }
  return sum;
}
/* 3-D cross product of a and b along `dimension` (size must be 3 there).
 * If dimension < 0, the first dimension of size 3 is used.
 *
 * BUGFIX: the first error message read "inconsitent tensor sizes" — fixed
 * the typo so both messages match. */
void THZTensor_(cross)(THZTensor *r_, THZTensor *a, THZTensor *b, int dimension)
{
  int i;
  if(THZTensor_(nDimension)(a) != THZTensor_(nDimension)(b))
    THError("inconsistent tensor sizes");
  for(i = 0; i < THZTensor_(nDimension)(a); i++)
  {
    if(THZTensor_(size)(a, i) != THZTensor_(size)(b, i))
      THError("inconsistent tensor sizes");
  }
  if(dimension < 0)
  {
    /* auto-detect: first dimension of extent 3 */
    for(i = 0; i < THZTensor_(nDimension)(a); i++)
    {
      if(THZTensor_(size)(a, i) == 3)
      {
        dimension = i;
        break;
      }
    }
    if(dimension < 0)
      THError("no dimension of size 3");
  }
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(a), 3, "dimension out of range");
  THArgCheck(THZTensor_(size)(a, dimension) == 3, 3, "dimension size is not 3");
  THZTensor_(resizeAs)(r_, a);
  /* classic component formulas: r = a x b */
  TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
                       r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
                       r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
                       r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}
/* Resize r_ to `size` and fill with zeros. */
void THZTensor_(zeros)(THZTensor *r_, THLongStorage *size)
{
  THZTensor_(resize)(r_, size, NULL);
  THZTensor_(zero)(r_);
}
/* Resize r_ to `size` and fill with ones. */
void THZTensor_(ones)(THZTensor *r_, THLongStorage *size)
{
  THZTensor_(resize)(r_, size, NULL);
  THZTensor_(fill)(r_, 1);
}
/* diag with offset k:
   - vector input: build a square matrix with the vector on the k-th
     diagonal (k>0 above the main diagonal, k<0 below), zeros elsewhere;
   - matrix input: extract the k-th diagonal into a vector. */
void THZTensor_(diag)(THZTensor *r_, THZTensor *t, int k)
{
  THArgCheck(THZTensor_(nDimension)(t) == 1 || THZTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected");
  if(THZTensor_(nDimension)(t) == 1)
  {
    real *t_data = THZTensor_(data)(t);
    long t_stride_0 = THZTensor_(stride)(t, 0);
    long t_size = THZTensor_(size)(t, 0);
    long sz = t_size + (k >= 0 ? k : -k);  /* output is (n+|k|) x (n+|k|) */
    real *r__data;
    long r__stride_0;
    long r__stride_1;
    long i;
    THZTensor_(resize2d)(r_, sz, sz);
    THZTensor_(zero)(r_);
    r__data = THZTensor_(data)(r_);
    r__stride_0 = THZTensor_(stride)(r_, 0);
    r__stride_1 = THZTensor_(stride)(r_, 1);
    /* shift the write pointer to the start of the k-th diagonal */
    r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0);
    for(i = 0; i < t_size; i++)
      r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0];
  }
  else
  {
    real *t_data = THZTensor_(data)(t);
    long t_stride_0 = THZTensor_(stride)(t, 0);
    long t_stride_1 = THZTensor_(stride)(t, 1);
    long sz;
    real *r__data;
    long r__stride_0;
    long i;
    /* length of the k-th diagonal, clipped by the matrix extents */
    if(k >= 0)
      sz = THMin(THZTensor_(size)(t, 0), THZTensor_(size)(t, 1)-k);
    else
      sz = THMin(THZTensor_(size)(t, 0)+k, THZTensor_(size)(t, 1));
    THZTensor_(resize1d)(r_, sz);
    r__data = THZTensor_(data)(r_);
    r__stride_0 = THZTensor_(stride)(r_, 0);
    /* shift the read pointer to the start of the k-th diagonal */
    t_data += (k >= 0 ? k*t_stride_1 : -k*t_stride_0);
    for(i = 0; i < sz; i++)
      r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)];
  }
}
/* Resize r_ to an n x m identity matrix (m defaults to n when m <= 0):
 * zeros everywhere, ones on the main diagonal.
 *
 * Cleanup: removed the dead store `i = 0;` that immediately preceded the
 * for loop re-initializing i. */
void THZTensor_(eye)(THZTensor *r_, long n, long m)
{
  real *r__data;
  long i, sz;
  THArgCheck(n > 0, 1, "invalid argument");
  if(m <= 0)
    m = n;
  THZTensor_(resize2d)(r_, n, m);
  THZTensor_(zero)(r_);
  r__data = THZTensor_(data)(r_);
  sz = THMin(THZTensor_(size)(r_, 0), THZTensor_(size)(r_, 1));
  for(i = 0; i < sz; i++)
    r__data[i*(r_->stride[0]+r_->stride[1])] = 1;  /* walk the diagonal */
}
/* Copy t into r_ resized to `size` (element count must be compatible;
   checked by the underlying copy). */
void THZTensor_(reshape)(THZTensor *r_, THZTensor *t, THLongStorage *size)
{
  THZTensor_(resize)(r_, size, NULL);
  THZTensor_(copy)(r_, t);
}
/* I cut and pasted (slightly adapted) the quicksort code from
http://www.alienryderflex.com/quicksort/
This public-domain C implementation by Darel Rex Finley.
Thanks man :)
Updated Oct 16 2013: change choice of pivot to avoid worst-case being a pre-sorted input - Daniel and Julien
Updated Oct 24 2013: change pivot comparison to strict inequality to avoid worst-case on constant input, see Sedgewick, Algorithms in C, Addison Wesley, 1990, p. 120 - Julien
*/
/* Maximum recursion depth emulated by the explicit stack below. */
#define MAX_LEVELS 300
/* In-place iterative quicksort of a strided array, ascending by complex
   magnitude (CABS), permuting `idx` in lockstep with `arr`. Uses an explicit
   beg/end stack instead of recursion; pivot = middle element; strict
   comparisons avoid worst-case behavior on constant input (see the header
   comment above). The smaller partition is always pushed deeper (the
   swap at the end of each round), bounding stack depth. */
static void THZTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride)
{
  long beg[MAX_LEVELS], end[MAX_LEVELS], i=0, L, R, P, swap, pid;
  real rswap, piv;
  beg[0]=0; end[0]=elements;
  while (i>=0) {
    L=beg[i]; R=end[i]-1;
    if (L<R) {
      P=(L+R)>>1; /* Choose pivot as middle element of the current block */
      piv=arr[P*stride];
      pid=idx[P*stride];
      /* move pivot to the left edge */
      rswap=arr[L*stride];
      swap=idx[L*stride];
      arr[L*stride]=piv;
      idx[L*stride]=pid;
      arr[P*stride]=rswap;
      idx[P*stride]=swap;
      while (L<R) {
        while (CABS(arr[R*stride])>CABS(piv) && L<R)
          R--;
        if (L<R) {
          idx[L*stride]=idx[R*stride];
          arr[L*stride]=arr[R*stride];
          L++;
        }
        while (CABS(arr[L*stride])<CABS(piv) && L<R)
          L++;
        if (L<R) {
          idx[R*stride]=idx[L*stride];
          arr[R*stride]=arr[L*stride];
          R--;
        }
      }
      /* pivot settles at position L */
      idx[L*stride]=pid;
      arr[L*stride]=piv;
      beg[i+1]=L+1;
      end[i+1]=end[i];
      end[i++]=L;
      /* process the smaller partition first to bound stack depth */
      if (end[i]-beg[i]>end[i-1]-beg[i-1]) {
        swap=beg[i]; beg[i]=beg[i-1]; beg[i-1]=swap;
        swap=end[i]; end[i]=end[i-1]; end[i-1]=swap;
      }
    }
    else {
      i--;
    }
  }
}
/* Mirror of quicksortascend with the CABS comparisons reversed: sorts
   descending by complex magnitude, permuting `idx` in lockstep. */
static void THZTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride)
{
  long beg[MAX_LEVELS], end[MAX_LEVELS], i=0, L, R, P, swap, pid;
  real rswap, piv;
  beg[0]=0; end[0]=elements;
  while (i>=0) {
    L=beg[i]; R=end[i]-1;
    if (L<R) {
      P=(L+R)>>1; /* Choose pivot as middle element of the current block */
      piv=arr[P*stride];
      pid=idx[P*stride];
      rswap=arr[L*stride];
      swap=idx[L*stride];
      arr[L*stride]=piv;
      idx[L*stride]=pid;
      arr[P*stride]=rswap;
      idx[P*stride]=swap;
      while (L<R) {
        while (CABS(arr[R*stride])<CABS(piv) && L<R)
          R--;
        if (L<R) {
          idx[L*stride]=idx[R*stride];
          arr[L*stride]=arr[R*stride];
          L++;
        }
        while (CABS(arr[L*stride])>CABS(piv) && L<R)
          L++;
        if (L<R) {
          idx[R*stride]=idx[L*stride];
          arr[R*stride]=arr[L*stride];
          R--;
        }
      }
      idx[L*stride]=pid;
      arr[L*stride]=piv;
      beg[i+1]=L+1;
      end[i+1]=end[i];
      end[i++]=L;
      /* keep the smaller partition on top of the stack */
      if (end[i]-beg[i]>end[i-1]-beg[i-1]) {
        swap=beg[i]; beg[i]=beg[i-1]; beg[i-1]=swap;
        swap=end[i]; end[i]=end[i-1]; end[i-1]=swap;
      }
    }
    else {
      i--;
    }
  }
}
/* Sort t along `dimension` by complex magnitude. rt_ receives the sorted
   values, ri_ the 0-based source positions within each slice. */
void THZTensor_(sort)(THZTensor *rt_, THLongTensor *ri_, THZTensor *t, int dimension, int descendingOrder)
{
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 2, "invalid dimension");
  THZTensor_(resizeAs)(rt_, t);
  THZTensor_(copy)(rt_, t);  /* sort a copy in place */
  {
    THLongStorage *size = THZTensor_(newSizeOf)(t);
    THLongTensor_resize(ri_, size, NULL);
    THLongStorage_free(size);
  }
  if(descendingOrder)
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
                         long i;
                         /* identity permutation, then sort both in lockstep */
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THZTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
  }
  else
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension,
                         long i;
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THZTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
  }
}
/* Lower-triangular part of a matrix with diagonal offset k:
   r_[r][c] = t[r][c] for c <= r+k, zero above. */
void THZTensor_(tril)(THZTensor *r_, THZTensor *t, long k)
{
  long t_size_0, t_size_1;
  long t_stride_0, t_stride_1;
  long r__stride_0, r__stride_1;
  real *t_data, *r__data;
  long r, c;
  THArgCheck(THZTensor_(nDimension)(t) == 2, 1, "not a matrix");
  THZTensor_(resizeAs)(r_, t);
  t_size_0 = THZTensor_(size)(t, 0);
  t_size_1 = THZTensor_(size)(t, 1);
  t_stride_0 = THZTensor_(stride)(t, 0);
  t_stride_1 = THZTensor_(stride)(t, 1);
  r__stride_0 = THZTensor_(stride)(r_, 0);
  r__stride_1 = THZTensor_(stride)(r_, 1);
  r__data = THZTensor_(data)(r_);
  t_data = THZTensor_(data)(t);
  for(r = 0; r < t_size_0; r++)
  {
    long sz = THMin(r+k+1, t_size_1);  /* first kept columns: [0, sz) */
    for(c = THMax(0, r+k); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
  }
}
/* Upper-triangular part of a matrix with diagonal offset k:
   r_[r][c] = t[r][c] for c >= r+k, zero below. */
void THZTensor_(triu)(THZTensor *r_, THZTensor *t, long k)
{
  long t_size_0, t_size_1;
  long t_stride_0, t_stride_1;
  long r__stride_0, r__stride_1;
  real *t_data, *r__data;
  long r, c;
  THArgCheck(THZTensor_(nDimension)(t) == 2, 1, "not a matrix");
  THZTensor_(resizeAs)(r_, t);
  t_size_0 = THZTensor_(size)(t, 0);
  t_size_1 = THZTensor_(size)(t, 1);
  t_stride_0 = THZTensor_(stride)(t, 0);
  t_stride_1 = THZTensor_(stride)(t, 1);
  r__stride_0 = THZTensor_(stride)(r_, 0);
  r__stride_1 = THZTensor_(stride)(r_, 1);
  r__data = THZTensor_(data)(r_);
  t_data = THZTensor_(data)(t);
  for(r = 0; r < t_size_0; r++)
  {
    long sz = THMin(r+k, t_size_1);  /* zeroed columns: [0, sz) */
    for(c = THMax(0, r+k); c < t_size_1; c++)
      r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1];
    for(c = 0; c < sz; c++)
      r__data[r*r__stride_0+c*r__stride_1] = 0;
  }
}
/* Concatenate ta and tb along `dimension` into r_.
   Missing dimensions are treated as size 1; all other dimensions must match. */
void THZTensor_(cat)(THZTensor *r_, THZTensor *ta, THZTensor *tb, int dimension)
{
  THLongStorage *size;
  int i;
  int ndim = THMax(ta->nDimension, tb->nDimension);
  ndim = THMax(ndim, dimension+1);  /* output has at least dimension+1 dims */
  THArgCheck(dimension >= 0, 4, "invalid dimension");
  size = THLongStorage_newWithSize(ndim);
  for(i = 0; i < ndim; i++)
  {
    int tadi = (i < ta->nDimension ? ta->size[i] : 1);
    int tbdi = (i < tb->nDimension ? tb->size[i] : 1);
    if(i == dimension)
      size->data[i] = tadi+tbdi;
    else
    {
      if(tadi != tbdi)
      {
        THLongStorage_free(size);
        THError("inconsistent tensor sizes");
      }
      size->data[i] = tadi;
    }
  }
  THZTensor_(resize)(r_, size, NULL);
  THLongStorage_free(size);
  {
    /* copy ta into the leading section along dimension */
    THZTensor *nta = THZTensor_(newWithTensor)(r_);
    THZTensor_(narrow)(nta, NULL, dimension, 0, (dimension < ta->nDimension ? ta->size[dimension] : 1));
    THZTensor_(copy)(nta, ta);
    THZTensor_(free)(nta);
  }
  {
    /* copy tb into the trailing section */
    THZTensor *ntb = THZTensor_(newWithTensor)(r_);
    THZTensor_(narrow)(ntb, NULL, dimension, (dimension < ta->nDimension ? ta->size[dimension] : 1), (dimension < tb->nDimension ? tb->size[dimension] : 1));
    THZTensor_(copy)(ntb, tb);
    THZTensor_(free)(ntb);
  }
}
/* Generate tensor-vs-scalar and tensor-vs-tensor comparison functions
   (ltValue/ltTensor, gtValue/..., etc.) writing 0/1 into a byte tensor.
   For complex operands, all comparisons — including eq/ne — are performed
   on magnitudes via CABS, so eq means "equal modulus", not exact equality.
   NOTE(review): extraction appears to have stripped the blank line that
   originally terminated this backslash-continued macro; kept byte-identical. */
#define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \
void THZTensor_(NAME##Value)(THByteTensor *r_, THZTensor* t, real value) \
{ \
  THLongStorage *tsz = THZTensor_(newSizeOf)(t); \
  THByteTensor_resize(r_, tsz, NULL); \
  THLongStorage_free(tsz); \
  THByteTensor_zero(r_); \
  TH_TENSOR_APPLY2(unsigned char, r_, real, t, \
                   if (CABS(*t_data) OP CABS(value)) *r__data = 1;); \
} \
void THZTensor_(NAME##Tensor)(THByteTensor *r_, THZTensor *ta, THZTensor *tb) \
{ \
  THLongStorage *tsz = THZTensor_(newSizeOf)(ta); \
  THByteTensor_resize(r_, tsz, NULL); \
  THLongStorage_free(tsz); \
  THByteTensor_zero(r_); \
  TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \
                   if(CABS(*ta_data) OP CABS(*tb_data)) *r__data = 1;); \
} \
TENSOR_IMPLEMENT_LOGICAL(lt,<)
TENSOR_IMPLEMENT_LOGICAL(gt,>)
TENSOR_IMPLEMENT_LOGICAL(le,<=)
TENSOR_IMPLEMENT_LOGICAL(ge,>=)
TENSOR_IMPLEMENT_LOGICAL(eq,==)
TENSOR_IMPLEMENT_LOGICAL(ne,!=)
/* Generators for elementwise unary (and unary-with-scalar) math functions:
   r_ = CFUNC(t) resp. r_ = CFUNC(t, value), with r_ resized to match t.
   NOTE(review): extraction appears to have stripped the blank lines that
   originally terminated these backslash-continued macros; kept byte-identical. */
#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \
void THZTensor_(NAME)(THZTensor *r_, THZTensor *t) \
{ \
  THZTensor_(resizeAs)(r_, t); \
  TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
} \
#define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC) \
void THZTensor_(NAME)(THZTensor *r_, THZTensor *t, real value) \
{ \
  THZTensor_(resizeAs)(r_, t); \
  TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \
} \
/* Instantiate the elementwise complex math functions (C99 complex wrappers).
 * BUGFIX: cosh was instantiated with CACOSH (the complex *arc*-hyperbolic
 * cosine), so THZTensor_(cosh) actually computed acosh. It now uses CCOSH,
 * consistent with sinh->CSINH and tanh->CTANH below. */
LAB_IMPLEMENT_BASIC_FUNCTION(log,CLOG)
LAB_IMPLEMENT_BASIC_FUNCTION(exp,CEXP)
LAB_IMPLEMENT_BASIC_FUNCTION(cos,CCOS)
LAB_IMPLEMENT_BASIC_FUNCTION(acos,CACOS)
LAB_IMPLEMENT_BASIC_FUNCTION(cosh,CCOSH)
LAB_IMPLEMENT_BASIC_FUNCTION(sin,CSIN)
LAB_IMPLEMENT_BASIC_FUNCTION(asin,CASIN)
LAB_IMPLEMENT_BASIC_FUNCTION(sinh,CSINH)
LAB_IMPLEMENT_BASIC_FUNCTION(tan,CTAN)
LAB_IMPLEMENT_BASIC_FUNCTION(atan,CATAN)
LAB_IMPLEMENT_BASIC_FUNCTION(tanh,CTANH)
LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,CPOW)
LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,CSQRT)
LAB_IMPLEMENT_BASIC_FUNCTION(conj,CONJ)
LAB_IMPLEMENT_BASIC_FUNCTION(proj,CPROJ)
LAB_IMPLEMENT_BASIC_FUNCTION(zabs,CABS)
LAB_IMPLEMENT_BASIC_FUNCTION(zarg,CARG)
LAB_IMPLEMENT_BASIC_FUNCTION(zre,CREAL)
LAB_IMPLEMENT_BASIC_FUNCTION(zim,CIMAG)
/* Generators for unary functions mapping a complex tensor to a real-valued
   float/double tensor (abs, arg, real part, imaginary part). */
#define LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_FLOAT(NAME, CFUNC) \
void THZTensor_(NAME)(THFloatTensor *r, THZTensor *t) \
{ \
  THLongStorage *tsz = THZTensor_(newSizeOf)(t); \
  THFloatTensor_resize(r, tsz, NULL); \
  THLongStorage_free(tsz); \
  TH_TENSOR_APPLY2(real, t, float, r, *r_data = CFUNC(*t_data);); \
}
#define LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_DOUBLE(NAME, CFUNC) \
void THZTensor_(NAME)(THDoubleTensor *r, THZTensor *t) \
{ \
  THLongStorage *tsz = THZTensor_(newSizeOf)(t); \
  THDoubleTensor_resize(r, tsz, NULL); \
  THLongStorage_free(tsz); \
  TH_TENSOR_APPLY2(real, t, double, r, *r_data = CFUNC(*t_data);); \
}
/* float and double flavors of |z|, arg(z), Re(z), Im(z) */
LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_FLOAT(Float_abs,CABS)
LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_FLOAT(Float_arg,CARG)
LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_FLOAT(Float_re,CREAL)
LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_FLOAT(Float_im,CIMAG)
LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_DOUBLE(Double_abs,CABS)
LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_DOUBLE(Double_arg,CARG)
LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_DOUBLE(Double_re,CREAL)
LAB_IMPLEMENT_BASIC_FUNCTION_RETURN_DOUBLE(Double_im,CIMAG)
/* Mean over `dimension`; r_ keeps t's shape with that dim collapsed to 1. */
void THZTensor_(mean)(THZTensor *r_, THZTensor *t, int dimension)
{
  THLongStorage *dim;
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 2, "invalid dimension");
  dim = THZTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THZTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                         sum += t_data[i*t_stride];
                       *r__data = (real)sum/t_size;);
}
/* "Standard deviation" over `dimension`: csqrt(E[z^2] - E[z]^2), biased
   (divide by n) when flag != 0, otherwise the Bessel-corrected estimator.
   NOTE(review): z*z is the plain complex square, not |z|^2, so for complex
   data this is the pseudo-variance, and the result is complex in general.
   The cabs(sum2) < 0 clamp can never fire (cabs is non-negative) — it is a
   leftover from the real-valued template; left as-is. */
void THZTensor_(std)(THZTensor *r_, THZTensor *t, int dimension, int flag)
{
  THLongStorage *dim;
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 3, "invalid dimension");
  dim = THZTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THZTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       accreal sum2 = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         real z = t_data[i*t_stride];
                         sum += z;
                         sum2 += z*z;
                       }
                       if(flag)
                       {
                         /* biased: divide by n */
                         sum /= t_size;
                         sum2 /= t_size;
                         sum2 -= sum*sum;
                         sum2 = (cabs(sum2) < 0 ? 0 : sum2);
                         *r__data = (real)csqrt(sum2);
                       }
                       else
                       {
                         /* unbiased: divide by n-1 */
                         sum /= t_size;
                         sum2 /= t_size-1;
                         sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
                         sum2 = (cabs(sum2) < 0 ? 0 : sum2);
                         *r__data = (real)csqrt(sum2);
                       });
}
/* "Variance" over `dimension`: E[z^2] - E[z]^2, biased (divide by n) when
   flag != 0, otherwise Bessel-corrected. Same caveats as std above: this is
   the complex pseudo-variance (z*z, not |z|^2) and the cabs(...) < 0 clamp
   is dead code inherited from the real-valued template. */
void THZTensor_(var)(THZTensor *r_, THZTensor *t, int dimension, int flag)
{
  THLongStorage *dim;
  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 3, "invalid dimension");
  dim = THZTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THZTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);
  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal sum = 0;
                       accreal sum2 = 0;
                       long i;
                       for(i = 0; i < t_size; i++)
                       {
                         real z = t_data[i*t_stride];
                         sum += z;
                         sum2 += z*z;
                       }
                       if(flag)
                       {
                         sum /= t_size;
                         sum2 /= t_size;
                         sum2 -= sum*sum;
                         sum2 = (cabs(sum2) < 0 ? 0 : sum2);
                         *r__data = sum2;
                       }
                       else
                       {
                         sum /= t_size;
                         sum2 /= t_size-1;
                         sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum;
                         sum2 = (cabs(sum2) < 0 ? 0 : sum2);
                         *r__data = (real)sum2;
                       });
}
/* Reduces t along `dimension` by the `value`-norm into r_ (the reduced
 * dimension is kept with size 1).
 * value == 0 is treated as the "0-norm": the count of nonzero entries.
 * Otherwise computes ( sum_i |t_i|^value )^(1/value). */
void THZTensor_(norm)(THZTensor *r_, THZTensor *t, real value, int dimension)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(t), 3, "invalid dimension");

  /* Shape r_ like t with the reduced dimension collapsed to 1. */
  dim = THZTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THZTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  if(value == 0) {
    /* "0-norm": number of nonzero elements in each slice. */
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         long i;
                         for(i = 0; i < t_size; i++)
                           sum += t_data[i*t_stride] != 0.0;
                         *r__data = sum;)
  } else {
    /* Sum of |x|^value (real-valued pow of the magnitude), then the
     * value-th root via complex cpow on the accumulator. */
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         long i;
                         for(i = 0; i < t_size; i++)
                           sum += pow(CABS(t_data[i*t_stride]), value);
                         *r__data = (real)cpow(sum, 1.0/value);)
  }
}
/* Returns the `value`-norm of the whole tensor.
 * value == 0 : count of nonzero elements ("0-norm");
 * value == 1 : sum of magnitudes;
 * value == 2 : Euclidean norm (fast path, no pow in the loop);
 * otherwise  : ( sum |x|^value )^(1/value). */
accreal THZTensor_(normall)(THZTensor *tensor, real value)
{
  accreal sum = 0;
  if(value == 0) {
    TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;);
    return sum;
  } else if(value == 1) {
    TH_TENSOR_APPLY(real, tensor, sum += CABS(*tensor_data););
    return sum;
  } else if(value == 2) {
    /* NOTE(review): for complex `real`, z*z is the complex square, not
     * |z|^2, so this differs from sum(|z|^2) — confirm intent (the generic
     * branch below uses CABS). */
    TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;);
    return csqrt(sum);
  } else {
    TH_TENSOR_APPLY(real, tensor, sum += pow(CABS(*tensor_data), value););
    return cpow(sum, 1.0/value);
  }
}
/* Renormalizes every slice of src along `dimension` so that its `value`-norm
 * does not exceed maxnorm (by magnitude); the result is written to res.
 * Slices already within the bound are copied through unchanged, others are
 * scaled by maxnorm / (norm + 1e-7). */
void THZTensor_(renorm)(THZTensor *res, THZTensor *src, real value, int dimension, real maxnorm)
{
  int i;
  THZTensor *rowR, *rowS;

  THArgCheck(dimension >= 0 && dimension < THZTensor_(nDimension)(src), 3, "invalid dimension");
  THArgCheck(CABS(value) > 0, 2, "non-positive-norm not supported");
  THArgCheck(THZTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions");

  /* Reusable views onto the current slice of src and res. */
  rowR = THZTensor_(new)();
  rowS = THZTensor_(new)();
  THZTensor_(resizeAs)(res, src);

  for (i=0; i<src->size[dimension]; i++)
  {
    real norm = 0;
    real new_norm;

    THZTensor_(select)(rowS, src, dimension, i);
    THZTensor_(select)(rowR, res, dimension, i);

    /* Accumulate the slice's value-norm; 1- and 2-norms get fast paths. */
    if (value == 1) {
      TH_TENSOR_APPLY(real, rowS, norm += CABS(*rowS_data););
    } else if (value == 2) {
      /* NOTE(review): z*z is the complex square, not |z|^2 — consistent
       * with THZTensor_(normall)'s 2-norm branch; confirm intent. */
      TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;);
    } else {
      TH_TENSOR_APPLY(real, rowS, norm += pow(CABS(*rowS_data), value););
    }

    norm = CPOW(norm, 1/value);

    /* Only slices whose norm magnitude exceeds maxnorm's are rescaled;
     * the 1e-7 guards against division by a (near-)zero norm. */
    if (CABS(norm) > CABS(maxnorm))
    {
      new_norm = maxnorm / (norm + 1e-7);

      TH_TENSOR_APPLY2(
        real, rowR, real, rowS,
        *rowR_data = (*rowS_data) * new_norm;
      )
    }
    else
      THZTensor_(copy)(rowR, rowS);
  }

  THZTensor_(free)(rowR);
  THZTensor_(free)(rowS);
}
/* Returns the `value`-norm of the elementwise difference tensor - src:
 * ( sum_i |tensor_i - src_i|^value )^(1/value).
 * NOTE(review): the accumulator is declared `real` while the sibling
 * reductions in this file accumulate in `accreal` — possibly a precision
 * oversight; confirm upstream. */
accreal THZTensor_(dist)(THZTensor *tensor, THZTensor *src, real value)
{
  real sum = 0;
  TH_TENSOR_APPLY2(real, tensor, real, src,
                   sum += pow(CABS(*tensor_data - *src_data), value);)
  return CPOW(sum, 1.0/value);
}
/* Returns the mean of all elements; rejects empty tensors. */
accreal THZTensor_(meanall)(THZTensor *tensor)
{
  THArgCheck(tensor->nDimension > 0, 1, "empty Tensor");
  return THZTensor_(sumall)(tensor)/THZTensor_(nElement)(tensor);
}
/* Returns the unbiased (N-1 denominator) variance of all elements.
 * NOTE(review): for complex `real`, (z-mean)*(z-mean) is the complex square,
 * not |z-mean|^2 — consistent with THZTensor_(var); confirm intent. */
accreal THZTensor_(varall)(THZTensor *tensor)
{
  accreal mean = THZTensor_(meanall)(tensor);
  accreal sum = 0;
  TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean););
  sum /= (THZTensor_(nElement)(tensor)-1);
  return sum;
}
/* Returns the unbiased standard deviation of all elements (complex sqrt of
 * the unbiased variance). */
accreal THZTensor_(stdall)(THZTensor *tensor)
{
  return csqrt(THZTensor_(varall)(tensor));
}
#endif
|
j3d27pt.gold.h | #include <cstring>
using std::memcpy;
// 27-point Jacobi stencil, reference ("gold") implementation.
//
// Runs 10 sweeps of a weighted 27-point stencil over the L x M x N grid
// `fin`, ping-ponging between two scratch buffers, and writes the final
// state to `fout`.  Boundary cells (the outermost layer) are carried over
// from `fin` in the read buffer and never updated.
//
// Weights:  c = b * h2inv, split into d = c/2 (8 corner neighbours),
// e = c/8 (edge neighbours) and f = 0.3*c (6 face neighbours); the centre
// contributes a + 0.13.
//
// Fixes over the previous version: the two `new[]` buffers are now freed
// (they leaked), the unused VLA-cast locals are gone, and the buffers are
// indexed as T instead of being reinterpreted as double (which was wrong
// for T = float).
//
// NOTE(review): the e-group sums src[k][j+1][i] (a face neighbour, already
// in the f-group) instead of the 12th edge neighbour src[k+1][j+1][i].
// Both parity branches of the original did the same, so it is preserved
// here to stay bit-identical with the kernel this gold version validates —
// confirm against the optimized kernel.
template <class T>
void jacobi_gold(T *fout, const T *fin, double h2inv, double a, double b, int L, int M, int N) {
  const size_t cells = static_cast<size_t>(L) * M * N;

  // Two ping-pong buffers; buf1 starts as a copy of the input, buf2 as zero
  // (so odd-sweep boundary reads see 0, matching the original behaviour).
  T *buf1 = new T[cells];
  T *buf2 = new T[cells];
  memset(buf1, 0, sizeof(T) * cells);
  memset(buf2, 0, sizeof(T) * cells);
  memcpy(buf1, fin, sizeof(T) * cells);

  const double c = b * h2inv;
  const double d = c * 0.5;    // corner weight
  const double e = c * 0.125;  // edge weight
  const double f = c * 0.3;    // face weight

  // Flat index of cell (k, j, i) in row-major L x M x N storage.
  auto at = [M, N](int k, int j, int i) -> size_t {
    return (static_cast<size_t>(k) * M + j) * N + i;
  };

  for (int t = 0; t < 10; t++) {
    // Even sweeps read buf1 / write buf2; odd sweeps do the reverse.
    const T *src = (t % 2 == 0) ? buf1 : buf2;
    T *dst = (t % 2 == 0) ? buf2 : buf1;
#pragma omp parallel for
    for (int k = 1; k < L - 1; ++k) {
      for (int j = 1; j < M - 1; ++j) {
        for (int i = 1; i < N - 1; ++i) {
          dst[at(k, j, i)] =
              a * src[at(k, j, i)] -
              d * (src[at(k-1, j-1, i-1)] + src[at(k-1, j-1, i+1)] +
                   src[at(k-1, j+1, i-1)] + src[at(k-1, j+1, i+1)] +
                   src[at(k+1, j-1, i-1)] + src[at(k+1, j-1, i+1)] +
                   src[at(k+1, j+1, i-1)] + src[at(k+1, j+1, i+1)]) +
              e * (src[at(k-1, j-1, i)] + src[at(k-1, j, i-1)] +
                   src[at(k-1, j, i+1)] + src[at(k-1, j+1, i)] +
                   src[at(k, j-1, i-1)] + src[at(k, j-1, i+1)] +
                   src[at(k, j+1, i-1)] + src[at(k, j+1, i+1)] +
                   src[at(k+1, j-1, i)] + src[at(k+1, j, i-1)] +
                   src[at(k+1, j, i+1)] + src[at(k, j+1, i)]) +
              f * (src[at(k-1, j, i)] + src[at(k, j-1, i)] +
                   src[at(k, j, i-1)] + src[at(k, j, i+1)] +
                   src[at(k, j+1, i)] + src[at(k+1, j, i)]) +
              0.13 * src[at(k, j, i)];
        }
      }
    }
  }

  // After 10 sweeps (last one odd) the latest data lives in buf1.
  memcpy(fout, buf1, sizeof(T) * cells);
  delete[] buf1;
  delete[] buf2;
}
|
DenseMatrix.h | //=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseMatrix.h
// \brief Header file for the OpenMP-based dense matrix SMP implementation
//
// Copyright (C) 2012-2017 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEMATRIX_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/AlignmentFlag.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseMatrix.h>
#include <blaze/math/expressions/SparseMatrix.h>
#include <blaze/math/functors/AddAssign.h>
#include <blaze/math/functors/Assign.h>
#include <blaze/math/functors/MultAssign.h>
#include <blaze/math/functors/SchurAssign.h>
#include <blaze/math/functors/SubAssign.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/smp/ThreadMapping.h>
#include <blaze/math/StorageOrder.h>
#include <blaze/math/typetraits/IsDenseMatrix.h>
#include <blaze/math/typetraits/IsSIMDCombinable.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Submatrix.h>
#include <blaze/system/SMP.h>
#include <blaze/util/algorithms/Min.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
namespace blaze {
//=================================================================================================
//
// OPENMP-BASED ASSIGNMENT KERNELS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side dense matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side dense matrix
        , bool SO2       // Storage order of the right-hand side dense matrix
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const DenseMatrix<MT2,SO2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   // This kernel must be executed by every thread of an already running
   // OpenMP parallel region (spawned by the smpAssign() front ends).
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   using ET1 = ElementType_<MT1>;
   using ET2 = ElementType_<MT2>;

   // SIMD kernels are only usable if both matrix types are vectorizable and
   // their element types can be combined into a common SIMD type.
   constexpr bool simdEnabled( MT1::simdEnabled && MT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
   constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<MT1> >::size );

   const bool lhsAligned( (~lhs).isAligned() );
   const bool rhsAligned( (~rhs).isAligned() );

   // Split the matrix into a threadmap.first x threadmap.second grid of
   // chunks, one per thread.
   const int threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Rows per chunk: ceil( rows / grid rows ), additionally rounded up to a
   // multiple of SIMDSIZE when SIMD is enabled so that chunk boundaries
   // preserve alignment for the aligned kernels.
   const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t equalShare1( (~rhs).rows() / threadmap.first + addon1 );
   const size_t rest1 ( equalShare1 & ( SIMDSIZE - 1UL ) );
   const size_t rowsPerThread( ( simdEnabled && rest1 )?( equalShare1 - rest1 + SIMDSIZE ):( equalShare1 ) );

   // Columns per chunk, computed the same way.
   const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t equalShare2( (~rhs).columns() / threadmap.second + addon2 );
   const size_t rest2 ( equalShare2 & ( SIMDSIZE - 1UL ) );
   const size_t colsPerThread( ( simdEnabled && rest2 )?( equalShare2 - rest2 + SIMDSIZE ):( equalShare2 ) );

   // One loop iteration per chunk; dynamic scheduling balances uneven chunk
   // costs, and 'nowait' drops the implicit barrier (the enclosing parallel
   // region still synchronizes at its end).
   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      // Chunk i covers grid cell ( i / threadmap.second, i % threadmap.second ).
      const size_t row ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      // The SIMD round-up can push a chunk completely outside the matrix;
      // such chunks have no work.
      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      const size_t m( min( rowsPerThread, (~rhs).rows() - row ) );
      const size_t n( min( colsPerThread, (~rhs).columns() - column ) );

      // Select the aligned/unaligned submatrix views that match the operands
      // so that op() can dispatch to the fastest possible kernel.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
      else if( simdEnabled && lhsAligned ) {
         auto target( submatrix<aligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
      else if( simdEnabled && rhsAligned ) {
         auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<aligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
      else {
         auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
         const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );
         op( target, source );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse matrix to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side sparse matrix to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// matrix to a dense matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side sparse matrix
        , bool SO2       // Storage order of the right-hand side sparse matrix
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseMatrix<MT1,SO1>& lhs, const SparseMatrix<MT2,SO2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   // This kernel must be executed by every thread of an already running
   // OpenMP parallel region (spawned by the smpAssign() front ends).
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   // Split the matrix into a threadmap.first x threadmap.second grid of
   // chunks, one per thread.  No SIMD round-up is needed here since sparse
   // sources always use the unaligned (scalar) kernels.
   const size_t threads( omp_get_num_threads() );
   const ThreadMapping threadmap( createThreadMapping( threads, ~rhs ) );

   // Rows/columns per chunk: ceil( extent / grid extent ).
   const size_t addon1 ( ( ( (~rhs).rows() % threadmap.first ) != 0UL )? 1UL : 0UL );
   const size_t rowsPerThread( (~rhs).rows() / threadmap.first + addon1 );

   const size_t addon2 ( ( ( (~rhs).columns() % threadmap.second ) != 0UL )? 1UL : 0UL );
   const size_t colsPerThread( (~rhs).columns() / threadmap.second + addon2 );

   // One loop iteration per chunk; 'nowait' drops the implicit barrier (the
   // enclosing parallel region still synchronizes at its end).
   #pragma omp for schedule(dynamic,1) nowait
   for( size_t i=0; i<threads; ++i )
   {
      const size_t row ( ( i / threadmap.second ) * rowsPerThread );
      const size_t column( ( i % threadmap.second ) * colsPerThread );

      if( row >= (~rhs).rows() || column >= (~rhs).columns() )
         continue;

      // NOTE(review): the chunk extents are clamped against (~lhs) here,
      // while the dense overload clamps against (~rhs).  Equivalent only
      // when both operands have identical dimensions (which the smpAssign
      // front ends assert) — confirm.
      const size_t m( min( rowsPerThread, (~lhs).rows() - row ) );
      const size_t n( min( colsPerThread, (~lhs).columns() - column ) );

      auto target( submatrix<unaligned>( ~lhs, row, column, m, n ) );
      const auto source( submatrix<unaligned>( ~rhs, row, column, m, n ) );

      op( target, source );
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Serial fallback of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// SFINAE restricts this overload to the case where at least one of the two
// operands is not SMP-assignable. Since no parallel execution is possible in
// that situation, the operation is forwarded directly to the serial kernel.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side matrix
        , bool SO2 >     // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
                     , Or< Not< IsSMPAssignable<MT1> >
                         , Not< IsSMPAssignable<MT2> > > > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Dimensions have already been matched by the calling operator.
   BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Plain serial assignment; no thread management is involved.
   assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be assigned.
// \return void
//
// This function implements the OpenMP-based SMP assignment to a dense matrix. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side matrix
        , bool SO2 >     // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must not be SMP-assignable: nested
   // parallel assignment is not supported.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Marks entry into a parallel section (see ParallelSection.h).
   BLAZE_PARALLEL_SECTION
   {
      // Fall back to the serial kernel if a serial section is active or the
      // right-hand side expression reports it cannot be SMP-assigned.
      if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
         assign( ~lhs, ~rhs );
      }
      else {
         // Spawn the OpenMP team; each thread runs openmpAssign(), which
         // distributes submatrix chunks via '#pragma omp for'.
         #pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, Assign() );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Serial fallback of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// SFINAE restricts this overload to the case where at least one of the two
// operands is not SMP-assignable. Since no parallel execution is possible in
// that situation, the operation is forwarded directly to the serial kernel.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side matrix
        , bool SO2 >     // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
                     , Or< Not< IsSMPAssignable<MT1> >
                         , Not< IsSMPAssignable<MT2> > > > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Dimensions have already been matched by the calling operator.
   BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Plain serial addition assignment; no thread management is involved.
   addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense matrix. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side matrix
        , bool SO2 >     // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpAddAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must not be SMP-assignable: nested
   // parallel assignment is not supported.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Marks entry into a parallel section (see ParallelSection.h).
   BLAZE_PARALLEL_SECTION
   {
      // Fall back to the serial kernel if a serial section is active or the
      // right-hand side expression reports it cannot be SMP-assigned.
      if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
         addAssign( ~lhs, ~rhs );
      }
      else {
         // Spawn the OpenMP team; each thread runs openmpAssign() with an
         // AddAssign functor, which distributes chunks via '#pragma omp for'.
         #pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, AddAssign() );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Serial fallback of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// SFINAE restricts this overload to the case where at least one of the two
// operands is not SMP-assignable. Since no parallel execution is possible in
// that situation, the operation is forwarded directly to the serial kernel.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side matrix
        , bool SO2 >     // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
                     , Or< Not< IsSMPAssignable<MT1> >
                         , Not< IsSMPAssignable<MT2> > > > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Dimensions have already been matched by the calling operator.
   BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Plain serial subtraction assignment; no thread management is involved.
   subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment of a matrix to a
// dense matrix. Due to the explicit application of the SFINAE principle, this function can only
// be selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side matrix
        , bool SO2 >     // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpSubAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must not be SMP-assignable: nested
   // parallel assignment is not supported.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Marks entry into a parallel section (see ParallelSection.h).
   BLAZE_PARALLEL_SECTION
   {
      // Fall back to the serial kernel if a serial section is active or the
      // right-hand side expression reports it cannot be SMP-assigned.
      if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
         subAssign( ~lhs, ~rhs );
      }
      else {
         // Spawn the OpenMP team; each thread runs openmpAssign() with a
         // SubAssign functor, which distributes chunks via '#pragma omp for'.
         #pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, SubAssign() );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SCHUR PRODUCT ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Serial fallback of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// SFINAE restricts this overload to the case where at least one of the two
// operands is not SMP-assignable. Since no parallel execution is possible in
// that situation, the operation is forwarded directly to the serial kernel.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side matrix
        , bool SO2 >     // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>
                     , Or< Not< IsSMPAssignable<MT1> >
                         , Not< IsSMPAssignable<MT2> > > > >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Dimensions have already been matched by the calling operator.
   BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Plain serial Schur product assignment; no thread management is involved.
   schurAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP Schur product assignment to a dense matrix.
// \ingroup math
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix for the Schur product.
// \return void
//
// This function implements the OpenMP-based SMP Schur product assignment to a dense matrix. Due
// to the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1   // Type of the left-hand side dense matrix
        , bool SO1       // Storage order of the left-hand side dense matrix
        , typename MT2   // Type of the right-hand side matrix
        , bool SO2 >     // Storage order of the right-hand side matrix
inline EnableIf_< And< IsDenseMatrix<MT1>, IsSMPAssignable<MT1>, IsSMPAssignable<MT2> > >
smpSchurAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must not be SMP-assignable: nested
   // parallel assignment is not supported.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<MT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).rows() == (~rhs).rows() , "Invalid number of rows" );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Marks entry into a parallel section (see ParallelSection.h).
   BLAZE_PARALLEL_SECTION
   {
      // Fall back to the serial kernel if a serial section is active or the
      // right-hand side expression reports it cannot be SMP-assigned.
      if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
         schurAssign( ~lhs, ~rhs );
      }
      else {
         // Spawn the OpenMP team; each thread runs openmpAssign() with a
         // SchurAssign functor, which distributes chunks via '#pragma omp for'.
         #pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, SchurAssign() );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense matrix.
// \ingroup smp
//
// \param lhs The target left-hand side dense matrix.
// \param rhs The right-hand side matrix to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// matrix.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename MT1  // Type of the left-hand side dense matrix
        , bool SO1      // Storage order of the left-hand side matrix
        , typename MT2  // Type of the right-hand side matrix
        , bool SO2 >    // Storage order of the right-hand side matrix
inline EnableIf_< IsDenseMatrix<MT1> >
   smpMultAssign( Matrix<MT1,SO1>& lhs, const Matrix<MT2,SO2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).rows()    == (~rhs).rows()   , "Invalid number of rows"    );
   BLAZE_INTERNAL_ASSERT( (~lhs).columns() == (~rhs).columns(), "Invalid number of columns" );

   // Unlike the other smp*Assign overloads, no OpenMP section is opened here:
   // the serial multAssign kernel is invoked unconditionally (presumably any
   // parallelism is handled inside the multiplication expression -- confirm).
   multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
// Compile-time guard: this translation unit implements the OpenMP backend and
// must only be compiled when Blaze is configured with BLAZE_OPENMP_PARALLEL_MODE.
namespace {
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );
}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
GB_unaryop__minv_int8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_uint8
// op(A') function: GB_tran__minv_int8_uint8
// C type: int8_t
// A type: uint8_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CASTING(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = GB_IMINV_SIGNED ((int8_t) Ax [p], 8) to all anz entries,
// in parallel over nthreads threads.
GrB_Info GB_unop__minv_int8_uint8
(
    int8_t *Cx,         // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads
)
{
    #if GB_DISABLE
    // operator/type combination disabled at compile time; caller falls back
    // to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // static schedule: every entry costs the same, so equal chunks balance well
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // expands to: cast Ax [p] to int8_t, apply the minv operator,
        // store into Cx [p] (see GB_CAST_OP above)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint8_t -> int8_t, and apply the
// minv operator.  The actual loops live in the shared template below.
GrB_Info GB_tran__minv_int8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-thread row counts (phase 1 output)
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // partition of A across naslice slices
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 only: Rowcounts was computed by a prior phase-1 pass; the
    // template expands the GB_* macros defined at the top of this file
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
8516.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Fill A with the deterministic pattern (i + j) / nj so runs are
 * reproducible across platforms. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump B to stderr (prevents dead-code elimination and allows output
 * checking); a newline every 20 values keeps the dump diff-friendly. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
  for (i = 0; i < ni; i++) {
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      if ((i * NJ + j) % 20 == 0)
        fprintf(stderr, "\n");
    }
  }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* 2-D 3x3 convolution over the interior of A, writing B.
 * Border rows/columns of B are left untouched.
 *
 * BUG FIX: the original carried a bare "#pragma omp" (no directive name)
 * before the i-loop.  That is not a conforming OpenMP directive -- compilers
 * either reject it or silently ignore it -- so it has been removed.  The
 * inner j-loop keeps its original offload directive. */
static
void kernel_conv2d(int ni,
                   int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    /* j is the associated loop variable of the directive and therefore
       private on the device. */
    #pragma omp target teams distribute simd
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
              + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  /* Variable declaration/allocation (heap-backed via the POLYBENCH macros). */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
  /* Initialize input array A; B is produced by the kernel. */
  init_array (ni, nj, POLYBENCH_ARRAY(A));
  /* Start timer.  NOTE: this file uses polybench_timer_* directly instead of
     the usual polybench_start/stop_instruments pair (left commented out). */
  //polybench_start_instruments;
  polybench_timer_start();
  /* Run kernel (the whole call is timed). */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  return 0;
}
|
GB_unop__sin_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__sin_fp32_fp32
// op(A') function: GB_unop_tran__sin_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = sinf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sinf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = sinf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIN || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = sinf (Ax [p]) for all entries, in parallel over nthreads threads.
GrB_Info GB_unop_apply__sin_fp32_fp32
(
    float *Cx,          // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator/type combination disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op: a plain parallel memcpy suffices (not this operator)
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply sinf to each entry.  The actual
// loops live in the shared template, which expands the GB_* macros defined at
// the top of this file.
GrB_Info GB_unop_tran__sin_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // per-thread workspaces
    const int64_t *GB_RESTRICT A_slice, // partition of A across the threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
test0.c | int main() {
l: int k;
#pragma omp parallel
{
int i;
}
}
|
8685.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
/* Initialize float_n and the m x n data matrix with a deterministic pattern.
 *
 * Consistency fix: the loops previously ran over the compile-time macros M/N
 * instead of the runtime parameters m/n that every other routine in this file
 * uses (main always passes m == M, n == N, so results are unchanged for the
 * benchmark itself).  The value pattern i*j/M is kept byte-identical. */
static
void init_array (int m, int n,
                 DATA_TYPE *float_n,
                 DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  int i, j;
  *float_n = 1.2;
  for (i = 0; i < m; i++)
    for (j = 0; j < n; j++)
      data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the m x m covariance matrix to stderr; scanning all live-out data
 * prevents dead-code elimination and allows output verification. */
static
void print_array(int m,
                 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  int i, j;
  for (i = 0; i < m; i++) {
    for (j = 0; j < m; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
      if ((i * m + j) % 20 == 0)
        fprintf (stderr, "\n");
    }
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Covariance kernel: column means, mean-centering, then the symmetric
 * m x m covariance matrix.
 *
 * BUG FIX (data race): i, j, j1, j2 are function-scope, so inside a
 * "#pragma omp parallel for" region every variable except the directive's own
 * loop variable is SHARED.  The original code ran the sequential i-loops with
 * a shared i (and used nested "parallel for" regions), racing between
 * threads.  Explicit private() clauses fix the races; the redundant nested
 * parallel directives are dropped (nested parallelism added no correctness
 * and relied on the same shared variables). */
static
void kernel_covariance(int m, int n,
                       DATA_TYPE float_n,
                       DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
                       DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
                       DATA_TYPE POLYBENCH_1D(mean,M,m))
{
  int i, j, j1, j2;
#pragma scop
  /* Determine mean of column vectors of input data matrix */
  {
#pragma omp parallel for private(i)
    for (j = 0; j < _PB_M; j++)
      {
        mean[j] = 0.0;
        for (i = 0; i < _PB_N; i++)
          mean[j] += data[i][j];
        mean[j] /= float_n;
      }
    /* Center the column vectors. */
#pragma omp parallel for private(j)
    for (i = 0; i < _PB_N; i++)
      for (j = 0; j < _PB_M; j++)
        data[i][j] -= mean[j];
    /* Calculate the m * m covariance matrix.  Each thread owns a distinct j1,
       so the mirrored write symmat[j2][j1] (column j1) never collides with
       another thread's writes. */
#pragma omp parallel for private(j2, i)
    for (j1 = 0; j1 < _PB_M; j1++)
      for (j2 = j1; j2 < _PB_M; j2++)
        {
          symmat[j1][j2] = 0.0;
          for (i = 0; i < _PB_N; i++)
            symmat[j1][j2] += data[i][j1] * data[i][j2];
          symmat[j2][j1] = symmat[j1][j2];
        }
  }
#pragma endscop
}
int main(int argc, char** argv)
{
  /* Retrieve problem size from the benchmark's compile-time defaults. */
  int n = N;
  int m = M;
  /* Variable declaration/allocation (heap-backed via the POLYBENCH macros). */
  DATA_TYPE float_n;
  POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
  POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
  POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
  /* Initialize input array(s); symmat and mean are produced by the kernel. */
  init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel (the whole call is timed, including call and return). */
  kernel_covariance (m, n, float_n,
                     POLYBENCH_ARRAY(data),
                     POLYBENCH_ARRAY(symmat),
                     POLYBENCH_ARRAY(mean));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(data);
  POLYBENCH_FREE_ARRAY(symmat);
  POLYBENCH_FREE_ARRAY(mean);
  return 0;
}
|
graph.h | #ifndef _ODPS_GRAPH_
#define _ODPS_GRAPH_
#include <memory>
#include <vector>
#include <map>
#include <iostream>
#include <fstream>
#include <omp.h>
#include <cstddef>
#include "graph_tool.h"
namespace apsara {
namespace odps {
namespace graph {
namespace query {
// Abstract read-only graph interface used by the query engine.  Concrete
// implementations in this header: GraphAdj (adjacency map) and GraphCSR
// (compressed sparse row arrays).
template<typename VType>
class Graph
{
public:
    Graph() {}
    virtual ~Graph() {}
    // Load the graph data found under `folder`.
    virtual void Init(const std::string &folder) = 0;
    // Returns a shared array containing the out-neighbours of `vertex`;
    // the valid range inside that array is reported through [from, to).
    virtual std::shared_ptr<VType>& GetEdges(const VType &vertex,
                                             size_t &from,
                                             size_t &to) = 0;
    // Same as GetEdges but for incoming (reverse) edges.
    virtual std::shared_ptr<VType>& GetRevEdges(const VType &vertex,
                                                size_t &from,
                                                size_t &to) = 0;
    //virtual void GetCommonEdges(const std::vector<VType> &vertices,
    //              const std::vector<VType> &common) = 0;
    //virtual void GetCommonRevEdges(const std::vector<VType> &vertices,
    //              const std::vector<VType> &common) = 0;
    virtual std::vector<VType> GetAllVertices() = 0;
    virtual size_t GetDegree(const VType &vertex) = 0;
    virtual size_t GetRevDegree(const VType &vertex) = 0;
    virtual size_t GetNumV() = 0;
    virtual size_t GetNumE() = 0;
protected:
    // NOTE(review): both derived classes re-declare mNumE/mNumV and use their
    // own (shadowing) copies; these base members appear unused -- confirm.
    VType mNumE;
    VType mNumV;
};
// Adjacency-list graph: one length-prefixed neighbour array per vertex, held
// in a std::map keyed by vertex id (forward and reverse directions).
template<typename VType>
class GraphAdj : public Graph<VType>
{
public:
    GraphAdj() : mIsInitialized(false) {}
    ~GraphAdj() {}
    // Loads graph.bin/graphLen.bin and revGraph.bin/revGraphLen.bin in parallel.
    virtual void Init(const std::string &folder);
    // Neighbour arrays are length-prefixed: element 0 holds the degree, so
    // valid neighbours live at indices [from, to) = [1, degree + 1).
    virtual std::shared_ptr<VType>& GetEdges(const VType &vertex, size_t &from, size_t &to);
    virtual std::shared_ptr<VType>& GetRevEdges(const VType &vertex, size_t &from, size_t &to);
    virtual std::vector<VType> GetAllVertices();
    virtual size_t GetDegree(const VType &vertex);
    virtual size_t GetRevDegree(const VType &vertex);
    virtual size_t GetNumV() { return mNumV; };
    virtual size_t GetNumE() { return mNumE; };
private:
    std::map<VType, std::shared_ptr<VType>> mGraph;     // vertex -> [degree, neighbours...]
    std::map<VType, std::shared_ptr<VType>> mRevGraph;  // incoming edges, same layout
    // NOTE(review): these shadow Graph<VType>::mNumE/mNumV; the shadowing
    // copies are the ones read by the accessors above.
    VType mNumE;
    VType mNumV;
    bool mIsInitialized;            // guards against double Init()
    std::shared_ptr<VType> mNull;   // empty handle returned for unknown vertices
};
// Compressed-sparse-row graph: mVIdx holds per-vertex prefix offsets into the
// flat edge array mEIdx (plus mirrored arrays for reverse edges).
template<typename VType>
class GraphCSR : public Graph<VType>
{
public:
    GraphCSR() : mIsInitialized(false) {}
    ~GraphCSR() {}
    // Loads vIdx/eIdx and revVIdx/revEIdx binary files in parallel.
    virtual void Init(const std::string &folder);
    // Neighbours of `vertex` are mEIdx[from, to), where the offsets come from
    // the prefix array (with an implicit 0 for vertex 0).
    virtual std::shared_ptr<VType>& GetEdges(const VType &vertex, size_t &from, size_t &to);
    virtual std::shared_ptr<VType>& GetRevEdges(const VType &vertex, size_t &from, size_t &to);
    virtual std::vector<VType> GetAllVertices();
    virtual size_t GetDegree(const VType &vertex);
    virtual size_t GetRevDegree(const VType &vertex);
    virtual size_t GetNumV() { return mNumV; };
    virtual size_t GetNumE() { return mNumE; };
private:
    // Reads one binary file into `data`; returns the element count.
    VType ReadOneFile(const std::string &file, std::shared_ptr<VType> &data);
private:
    std::shared_ptr<VType> mVIdx;       // per-vertex offsets (exclusive prefix ends)
    std::shared_ptr<VType> mEIdx;       // flat out-edge targets
    std::shared_ptr<VType> mRevVIdx;    // offsets for incoming edges
    std::shared_ptr<VType> mRevEIdx;    // flat in-edge sources
    // NOTE(review): shadow Graph<VType>::mNumE/mNumV (see base class).
    VType mNumE;
    VType mNumV;
    bool mIsInitialized;                // guards against double Init()
};
// Load forward and reverse adjacency maps (two files read concurrently), then
// derive vertex/edge totals from the forward map.
//
// BUG FIX: mNumE was accumulated with += without ever being initialized, so
// the edge total started from an indeterminate value.  It is now zeroed
// alongside mNumV before the accumulation loop.
template<typename VType>
void GraphAdj<VType>::Init(const std::string &folder)
{
    if(mIsInitialized) return;      // idempotent: second call is a no-op
    double start = omp_get_wtime();
    // Two threads, one file pair each (forward / reverse graph).
    #pragma omp parallel num_threads(2)
    {
        int tid = omp_get_thread_num();
        if(tid == 0)
        {
            GraphTool<VType>::ReadOneFile(folder+"/graph.bin", folder+"/graphLen.bin", mGraph);
        }
        if(tid == 1)
        {
            GraphTool<VType>::ReadOneFile(folder+"/revGraph.bin", folder+"/revGraphLen.bin", mRevGraph);
        }
    }
    double end = omp_get_wtime();
    mNumV = 0;
    mNumE = 0;  // was uninitialized before the += accumulation below
    for(auto iter = mGraph.begin(); iter != mGraph.end(); ++iter)
    {
        // Vertex ids are dense-ish: the count is max(id)+1.  Element 0 of each
        // neighbour array is that vertex's degree.
        mNumV = std::max(mNumV, iter->first+1);
        mNumE += iter->second.get()[0];
    }
    std::cout<<"Graph load time:"<<end-start<<" numV: "<<mNumV<<" numE: "<<mNumE<<std::endl;
    mIsInitialized = true;
}
// Out-neighbours of `vertex`: the stored array is length-prefixed, so the
// valid range is [1, degree + 1).  Unknown vertices yield mNull with an
// empty [0, 0) range.
template<typename VType>
std::shared_ptr<VType>& GraphAdj<VType>::GetEdges(const VType &vertex, size_t &from, size_t &to)
{
    from = 0;
    to = 0;
    auto entry = mGraph.find(vertex);
    if(entry == mGraph.end())
    {
        return mNull;
    }
    const VType degree = entry->second.get()[0];
    from = 1;
    to = (size_t)degree + 1;
    return entry->second;
}
// In-neighbours of `vertex`; mirrors GetEdges but reads the reverse map.
template<typename VType>
std::shared_ptr<VType>& GraphAdj<VType>::GetRevEdges(const VType &vertex, size_t &from, size_t &to)
{
    from = 0;
    to = 0;
    auto entry = mRevGraph.find(vertex);
    if(entry == mRevGraph.end())
    {
        return mNull;
    }
    const VType degree = entry->second.get()[0];
    from = 1;
    to = (size_t)degree + 1;
    return entry->second;
}
// Collect every vertex id present in the forward adjacency map, in key order.
template<typename VType>
std::vector<VType> GraphAdj<VType>::GetAllVertices()
{
    std::vector<VType> vertices;
    vertices.reserve(mGraph.size());
    for(const auto &entry : mGraph)
    {
        vertices.push_back(entry.first);
    }
    return vertices;
}
// Out-degree of `vertex` (0 when the vertex is unknown).  Element 0 of the
// stored neighbour array is the degree.
//
// Perf fix: the original looked the vertex up twice (find, then find again to
// dereference); the iterator from the first lookup is reused instead.
template<typename VType>
size_t GraphAdj<VType>::GetDegree(const VType &vertex)
{
    auto iter = mGraph.find(vertex);
    if(iter == mGraph.end()) return 0;
    return (size_t)iter->second.get()[0];
}
// In-degree of `vertex` (0 when the vertex is unknown).
//
// Perf fix: reuse the iterator from the first lookup instead of performing a
// second, redundant mRevGraph.find(vertex).
template<typename VType>
size_t GraphAdj<VType>::GetRevDegree(const VType &vertex)
{
    auto iter = mRevGraph.find(vertex);
    if(iter == mRevGraph.end()) return 0;
    return (size_t)iter->second.get()[0];
}
// Load the four CSR arrays (vertex offsets + edge targets, forward and
// reverse), one file per thread.
template<typename VType>
void GraphCSR<VType>::Init(const std::string &folder)
{
    if(mIsInitialized) return;      // idempotent: second call is a no-op
    double start = omp_get_wtime();
    #pragma omp parallel num_threads(4)
    {
        int tid = omp_get_thread_num();
        // NOTE(review): threads 0 and 2 both write mNumV, and threads 1 and 3
        // both write mNumE, without synchronization.  This is only benign if
        // the forward and reverse files always hold identical counts --
        // confirm, or store the four results in separate locals.
        if(tid == 0)
        {
            mNumV = GraphTool<VType>::ReadOneFile(folder+"/vIdx.bin", mVIdx);
        }
        if(tid == 1)
        {
            mNumE = GraphTool<VType>::ReadOneFile(folder+"/eIdx.bin", mEIdx);
        }
        if(tid == 2)
        {
            mNumV = GraphTool<VType>::ReadOneFile(folder+"/revVIdx.bin", mRevVIdx);
        }
        if(tid == 3)
        {
            mNumE = GraphTool<VType>::ReadOneFile(folder+"/revEIdx.bin", mRevEIdx);
        }
    }
    double end = omp_get_wtime();
    std::cout<<"Graph load time:"<<end-start<<" numV: "<<mNumV<<" numE: "<<mNumE<<std::endl;
    mIsInitialized = true;
}
// Out-neighbours of `vertex` live in mEIdx[from, to); offsets come from the
// prefix array, with an implicit 0 for the very first vertex.
template<typename VType>
std::shared_ptr<VType>& GraphCSR<VType>::GetEdges(const VType &vertex, size_t &from, size_t &to)
{
    const VType* offsets = mVIdx.get();
    from = (vertex == 0) ? 0 : offsets[vertex-1];
    to = offsets[vertex];
    return mEIdx;
}
// In-neighbours of `vertex`; mirrors GetEdges on the reverse CSR arrays.
template<typename VType>
std::shared_ptr<VType>& GraphCSR<VType>::GetRevEdges(const VType &vertex, size_t &from, size_t &to)
{
    const VType* offsets = mRevVIdx.get();
    from = (vertex == 0) ? 0 : offsets[vertex-1];
    to = offsets[vertex];
    return mRevEIdx;
}
template<typename VType>
std::vector<VType> GraphCSR<VType>::GetAllVertices()
{
    std::vector<VType> ret;
    // NOTE(review): this returns the CSR *offset values* mVIdx[0..mNumV), not
    // the vertex ids 0..mNumV-1, whereas GraphAdj::GetAllVertices returns ids.
    // This looks inconsistent -- confirm the intended semantics with callers
    // before relying on it.
    for(size_t i=0; i<mNumV; i++)
        ret.push_back(mVIdx.get()[i]);
    return ret;
}
// Out-degree of `vertex`: the difference of adjacent prefix offsets.
template<typename VType>
size_t GraphCSR<VType>::GetDegree(const VType &vertex)
{
    const VType* offsets = mVIdx.get();
    const VType begin = (vertex == 0) ? (VType)0 : offsets[vertex-1];
    const VType end = offsets[vertex];
    return (size_t)(end - begin);
}
// In-degree of `vertex`: offset difference on the reverse prefix array.
template<typename VType>
size_t GraphCSR<VType>::GetRevDegree(const VType &vertex)
{
    const VType* offsets = mRevVIdx.get();
    const VType begin = (vertex == 0) ? (VType)0 : offsets[vertex-1];
    const VType end = offsets[vertex];
    return (size_t)(end - begin);
}
} // namespace query
} // namespace graph
} // namespace odps
} // namespace apsara
#endif
|
info.c | // RUN: %libomptarget-compile-nvptx64-nvidia-cuda -gline-tables-only && env LIBOMPTARGET_INFO=31 %libomptarget-run-nvptx64-nvidia-cuda 2>&1 | %fcheck-nvptx64-nvidia-cuda -allow-empty -check-prefix=INFO
// REQUIRES: nvptx64-nvidia-cuda
#include <stdio.h>
#include <omp.h>
#define N 64
extern void __tgt_set_info_flag(unsigned);
/* LLVM lit test for LIBOMPTARGET_INFO output: the "// INFO:" comments below
 * are FileCheck directives and must stay byte-identical to the expected
 * runtime messages -- do not edit them. */
int main() {
  int A[N];
  int B[N];
  int C[N];
  int val = 1;

// INFO: CUDA device 0 info: Device supports up to {{.*}} CUDA blocks and {{.*}} threads with a warp size of {{.*}}
// INFO: Libomptarget device 0 info: Entering OpenMP data region at info.c:{{[0-9]+}}:1 with 3 arguments:
// INFO: Libomptarget device 0 info: alloc(A[0:64])[256]
// INFO: Libomptarget device 0 info: tofrom(B[0:64])[256]
// INFO: Libomptarget device 0 info: to(C[0:64])[256]
// INFO: Libomptarget device 0 info: Creating new map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=A[0:64]
// INFO: Libomptarget device 0 info: Creating new map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=B[0:64]
// INFO: Libomptarget device 0 info: Creating new map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=C[0:64]
// INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:{{[0-9]+}}:1:
// INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) RefCount Declaration
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 C[0:64] at info.c:{{[0-9]+}}:7
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 B[0:64] at info.c:{{[0-9]+}}:7
// INFO: Libomptarget device 0 info: {{.*}} {{.*}} 256 1 A[0:64] at info.c:{{[0-9]+}}:7
// INFO: Libomptarget device 0 info: Entering OpenMP kernel at info.c:{{[0-9]+}}:1 with 1 arguments:
// INFO: Libomptarget device 0 info: firstprivate(val)[4]
// INFO: CUDA device 0 info: Launching kernel {{.*}} with {{.*}} and {{.*}} threads in {{.*}} mode
// INFO: Libomptarget device 0 info: OpenMP Host-Device pointer mappings after block at info.c:{{[0-9]+}}:1:
// INFO: Libomptarget device 0 info: Host Ptr Target Ptr Size (B) RefCount Declaration
// INFO: Libomptarget device 0 info: 0x{{.*}} 0x{{.*}} 256 1 C[0:64] at info.c:{{[0-9]+}}:7
// INFO: Libomptarget device 0 info: 0x{{.*}} 0x{{.*}} 256 1 B[0:64] at info.c:{{[0-9]+}}:7
// INFO: Libomptarget device 0 info: 0x{{.*}} 0x{{.*}} 256 1 A[0:64] at info.c:{{[0-9]+}}:7
// INFO: Libomptarget device 0 info: Exiting OpenMP data region at info.c:{{[0-9]+}}:1
// INFO: Libomptarget device 0 info: Removing map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=C[0:64]
// INFO: Libomptarget device 0 info: Removing map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=B[0:64]
// INFO: Libomptarget device 0 info: Removing map entry with HstPtrBegin={{.*}}, TgtPtrBegin={{.*}}, Size=256, Name=A[0:64]
  /* Data region + kernel: triggers the mapping-table messages above. */
#pragma omp target data map(alloc:A[0:N]) map(tofrom:B[0:N]) map(to:C[0:N])
#pragma omp target firstprivate(val)
  { val = 1; }

  /* Disable all info output; anything printed after this would fail INFO-NOT. */
  __tgt_set_info_flag(0x0);
// INFO-NOT: Libomptarget device 0 info: {{.*}}
#pragma omp target
  { }
  return 0;
}
|
3loops.c | /* Test if the loop index variables are treated as private variables
* */
#include <stdio.h>
#if defined (_OPENMP)
#include <omp.h>
#endif
/* Initialize a 3-D array inside a parallel outer loop.
 *
 * BUG FIX (data race): in C, only the loop variable of the associated loop
 * (i) is predetermined private in a "parallel for"; jj and kkk are declared
 * at function scope and were therefore SHARED across threads, racing on the
 * inner loop counters.  An explicit private clause makes them per-thread.
 * (Writes to a[i][..][..] are disjoint per i, so the array itself is safe.) */
int main(void)
{
  int i,jj,kkk;
  double a[10][9][8];
#pragma omp parallel for private(jj, kkk)
  for(i=0;i<10;i++){
    for(jj=0;jj<9;jj++){
      for (kkk=0;kkk<8;kkk++){
        a[i][jj][kkk]=9.9;
      }
    }
  }
  return 0;
}
|
main.c | #include <math.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
/* #define DEBUG 1 */
#ifdef DEBUG
#define debug(M, ...) printf(M, ##__VA_ARGS__)
#else
#define debug(M, ...)
#endif
/* const int spp = 64; */
/* const int spp = 4; */
const int spp = 1024;
const int maxDepth = 8;
const int width = 512;
const int height = 512;
const double invertedPi = 1.0 / 3.1415926535;
const double twoPi = 2.0 * 3.1415926535;
const double epsilon = 1e-14;
typedef struct SimpleHit SimpleHit;
typedef struct Object Object;
typedef struct Material Material;
typedef struct Ray Ray;
typedef struct Triple Triple;
typedef struct Camera Camera;
typedef struct Plane Plane;
typedef struct Triangle Triangle;
struct Triple {
double v[3];
};
Triple BLACK;
Triple MAGENTA;
struct SimpleHit {
double t;
Object* object;
Triple normal;
};
struct Camera {
Triple eye;
Triple focal;
double viewDist;
Triple up;
Triple w;
Triple u;
Triple v;
Triple wMultViewDist;
};
Triple orientedHemiDir(double u1, double u2, Triple *normal, double exp);
SimpleHit intersectObjects(int numObjects, Object* objects, Ray *ray);
Camera newCamera(Triple eye, Triple focal, double viewDist, Triple up);
void cameraCalcOrthonormalBasis(Camera* camera);
Ray cameraSpawnRay(Camera* camera, double x, double y);
double clamp(double x);
unsigned char gammaTransform(double x);
void render(int numObjects, Object* objects, Camera* camera, char* buffer);
void writePPM(char* path, char* buffer);
void normalize(Triple* v);
Triple radiance(int numObjects, Object* objects, Ray* ray, int depth);
Triple diRadiance(int numObjects, Object* objects, Ray* ray, Triple* normal, Material* mat, int depth);
/* Uniform pseudo-random double in [0, 1], driven by the C library PRNG.
 * TODO Implement our own less random but faster rand(). */
double doubleRand() {
    double numerator = (double)rand();
    return numerator / (double)RAND_MAX;
}
/* Return a copy of *inp with every component multiplied by x. */
Triple scale(Triple* inp, double x) {
    Triple out;
    for (int k = 0; k < 3; ++k)
        out.v[k] = inp->v[k] * x;
    return out;
}
/* Multiply every component of *inp by x, in place. */
void scalePointer(Triple* inp, double x) {
    for (int k = 0; k < 3; ++k)
        inp->v[k] *= x;
}
/* Component-wise (Hadamard) product of two triples. */
Triple multiplyParts(Triple *x1, Triple *x2) {
    Triple out;
    for (int k = 0; k < 3; ++k)
        out.v[k] = x1->v[k] * x2->v[k];
    return out;
}
/* Component-wise sum of two triples (returns a new value). */
Triple add(Triple* x1, Triple* x2) {
    Triple out;
    for (int k = 0; k < 3; ++k)
        out.v[k] = x1->v[k] + x2->v[k];
    return out;
}
/* Accumulate *x2 into *x1 component-wise. */
void addPointer(Triple* x1, Triple* x2) {
    for (int k = 0; k < 3; ++k)
        x1->v[k] += x2->v[k];
}
/* Component-wise difference *x1 - *x2 (returns a new value). */
Triple subtract(Triple* x1, Triple* x2) {
    Triple out;
    for (int k = 0; k < 3; ++k)
        out.v[k] = x1->v[k] - x2->v[k];
    return out;
}
/* Subtract *x2 from *x1 component-wise, in place. */
void subtractPointer(Triple* x1, Triple* x2) {
    for (int k = 0; k < 3; ++k)
        x1->v[k] -= x2->v[k];
}
/* Dot product of two triples (summed left to right, matching the original
 * association order). */
double innerProduct(Triple* x1, Triple* x2) {
    double sum = 0.0;
    for (int k = 0; k < 3; ++k)
        sum += x1->v[k] * x2->v[k];
    return sum;
}
/* Right-handed cross product x1 x x2. */
Triple crossProduct(Triple* x1, Triple* x2) {
    const double* a = x1->v;
    const double* b = x2->v;
    Triple out;
    out.v[0] = (a[1] * b[2]) - (a[2] * b[1]);
    out.v[1] = (a[2] * b[0]) - (a[0] * b[2]);
    out.v[2] = (a[0] * b[1]) - (a[1] * b[0]);
    return out;
}
typedef struct {
Triple dir;
double pdf;
} SampleHit;
struct Ray {
Triple org;
Triple dir;
};
/* Point along the ray at parameter t: org + t * dir. */
Triple rayHit(Ray* ray, double t) {
    Triple point = ray->org;
    Triple step = scale(&ray->dir, t);
    addPointer(&point, &step);
    return point;
}
struct Material {
void* data;
Triple (*f)(Material* material, Triple *wi, Triple *wo, Triple *normal);
SampleHit (*sampleF)(Material* material, Triple *normal, Triple *wo);
Triple (*emiss)(Material *material);
bool (*dielectric)(Material* material);
};
struct Object {
void* data;
Material* material;
SimpleHit (*intersect)(Object *object, Ray *ray);
Triple (*normal)(Object *object, Ray *ray, double t);
void (*print)(Object *object);
};
typedef struct {
Triple col;
Triple emiss;
bool dielectric;
} Emitter;
Triple emitF(Material* material, Triple *wi, Triple *wo, Triple *normal);
SampleHit emitSampleF(Material* material, Triple *normal, Triple *wo);
Triple emitEmiss(Material *materal);
/* BRDF of an emitter: Lambertian, colour / pi (wi/wo/normal unused).
 *
 * BUG FIX: the original cast `material` itself to Emitter*, but the payload
 * lives in material->data (see emitEmiss/emitDielectric and every other
 * material in this file).  The old code reinterpreted the Material's
 * function-pointer bytes as a colour. */
Triple emitF(Material* material, Triple *wi, Triple *wo, Triple *normal) {
    return scale(&((Emitter *)material->data)->col, invertedPi);
}
/* Cosine-weighted hemisphere sample around the surface normal; the pdf is
 * cos(theta) / pi. */
SampleHit emitSampleF(Material* material, Triple *normal, Triple *wo) {
    SampleHit hit;
    hit.dir = orientedHemiDir(doubleRand(), doubleRand(), normal, 0.0);
    hit.pdf = innerProduct(normal, &hit.dir) * invertedPi;
    return hit;
}
/* Emitted radiance stored in the emitter payload. */
Triple emitEmiss(Material *material) {
    return ((Emitter*)(material->data))->emiss;
}
/* Dielectric flag stored in the emitter payload. */
bool emitDielectric(Material *material) {
    return ((Emitter*)(material->data))->dielectric;
}
Material emitterBase;
void emitterInit() {
emitterBase = (Material){
.f = emitF,
.sampleF = emitSampleF,
.emiss = emitEmiss,
.dielectric = emitDielectric
};
}
/* Build an emitter material: colour and emission share the same value.
 * The payload is heap-allocated and owned by the returned Material. */
Material newEmitter(Triple emiss, bool dielectric) {
    Emitter* payload = malloc(sizeof(Emitter));
    payload->col = emiss;
    payload->emiss = emiss;
    payload->dielectric = dielectric;
    Material material = emitterBase;
    material.data = payload;
    return material;
}
typedef struct {
Triple col;
Triple emiss;
bool dielectric;
} Diffuse;
Triple diffuseF(Material* material, Triple *wi, Triple *wo, Triple *normal);
SampleHit diffuseSampleF(Material* material, Triple *normal, Triple *wo);
Triple diffuseEmiss(Material* materal);
/* Lambertian BRDF: albedo / pi, independent of wi/wo. */
Triple diffuseF(Material* material, Triple *wi, Triple *wo, Triple *normal) {
    Diffuse* payload = (Diffuse*)material->data;
    return scale(&payload->col, invertedPi);
}
/* Cosine-weighted hemisphere sample around the normal; pdf = cos(theta) / pi. */
SampleHit diffuseSampleF(Material* material, Triple *normal, Triple *wo) {
    SampleHit hit;
    hit.dir = orientedHemiDir(doubleRand(), doubleRand(), normal, 0.0);
    hit.pdf = innerProduct(normal, &hit.dir) * invertedPi;
    return hit;
}
/* Emission stored in the diffuse payload (usually black). */
Triple diffuseEmiss(Material *material) {
    return ((Diffuse*)(material->data))->emiss;
}
/* Dielectric flag stored in the diffuse payload. */
bool diffuseDielectric(Material *material) {
    return ((Diffuse*)(material->data))->dielectric;
}
Material diffuseBase;
void diffuseInit() {
diffuseBase = (Material){
.f = diffuseF,
.sampleF = diffuseSampleF,
.emiss = diffuseEmiss,
.dielectric = diffuseDielectric
};
}
/* Build a diffuse material with the given albedo / emission / dielectric
 * flag; the payload is heap-allocated and owned by the returned Material. */
Material newDiffuse(Triple color, Triple emiss, bool dielectric) {
    Diffuse* payload = malloc(sizeof(Diffuse));
    payload->col = color;
    payload->emiss = emiss;
    payload->dielectric = dielectric;
    Material material = diffuseBase;
    material.data = payload;
    return material;
}
typedef struct {
Triple col;
Triple emiss;
bool dielectric;
} Specular;
Triple specularF(Material* material, Triple *wi, Triple *wo, Triple *normal);
SampleHit specularSampleF(Material* material, Triple *normal, Triple *wo);
Triple specularEmiss(Material* materal);
/* Specular "BRDF": the full colour, with no 1/pi factor (mirror response). */
Triple specularF(Material* material, Triple *wi, Triple *wo, Triple *normal) {
    Specular* payload = (Specular*)material->data;
    return payload->col;
}
/* Mirror reflection: wi = 2 (n . wo) n - wo, then normalised; the pdf is the
 * cosine between the normal and the reflected direction. */
SampleHit specularSampleF(Material* material, Triple *normal, Triple *wo) {
    Triple reflected = scale(wo, -1);
    Triple scaledNormal = scale(normal, 2);
    scalePointer(&scaledNormal, innerProduct(normal, wo));
    addPointer(&reflected, &scaledNormal);
    normalize(&reflected);
    SampleHit hit;
    hit.dir = reflected;
    hit.pdf = innerProduct(normal, &reflected);
    return hit;
}
/* Emission stored in the specular payload. */
Triple specularEmiss(Material *material) {
    Specular* payload = (Specular*)(material->data);
    return payload->emiss;
}
/* Dielectric flag stored in the specular payload. */
bool specularDielectric(Material *material) {
    Specular* payload = (Specular*)(material->data);
    return payload->dielectric;
}
Material specularBase;
void specularInit() {
specularBase = (Material){
.f = specularF,
.sampleF = specularSampleF,
.emiss = specularEmiss,
.dielectric = specularDielectric
};
}
/* Build a specular (mirror) material; the payload is heap-allocated and
 * owned by the returned Material. */
Material newSpecular(Triple color, Triple emiss, bool dielectric) {
    Specular* payload = malloc(sizeof(Specular));
    payload->col = color;
    payload->emiss = emiss;
    payload->dielectric = dielectric;
    Material material = specularBase;
    material.data = payload;
    return material;
}
typedef struct {
Triple col;
} Refractive;
Triple refractiveF(Material* material, Triple *wi, Triple *wo, Triple *normal);
SampleHit refractiveSampleF(Material* material, Triple *normal, Triple *wo);
Triple refractiveEmiss(Material* materal);
/* Refractive "BRDF": the full colour, no 1/pi factor. */
Triple refractiveF(Material* material, Triple *wi, Triple *wo, Triple *normal) {
    Refractive* payload = (Refractive*)material->data;
    return payload->col;
}
/* Sample direction wi = 2 (n . wo) n - wo, normalised.
 * NOTE(review): despite the name this is the same mirror-reflection formula
 * as specularSampleF -- no actual refraction is computed; confirm intent. */
SampleHit refractiveSampleF(Material* material, Triple *normal, Triple *wo) {
    Triple sampled = scale(wo, -1);
    Triple scaledNormal = scale(normal, 2);
    scalePointer(&scaledNormal, innerProduct(normal, wo));
    addPointer(&sampled, &scaledNormal);
    normalize(&sampled);
    SampleHit hit;
    hit.dir = sampled;
    hit.pdf = innerProduct(normal, &sampled);
    return hit;
}
/* Refractive materials never emit here.
 * TODO Worth supporting this? */
Triple refractiveEmiss(Material *material) {
    Triple none = {{0, 0, 0}};
    return none;
}
/* Refractive materials are always treated as dielectric. */
bool refractiveDielectric(Material *material) {
    return true;
}
Material refractiveBase;
void refractiveInit() {
refractiveBase = (Material){
.f = refractiveF,
.sampleF = refractiveSampleF,
.emiss = refractiveEmiss,
.dielectric = refractiveDielectric
};
}
/* Build a refractive material; the payload is heap-allocated and owned by
 * the returned Material. */
Material newRefractive(Triple color) {
    Refractive* payload = malloc(sizeof(Refractive));
    payload->col = color;
    Material material = refractiveBase;
    material.data = payload;
    return material;
}
typedef struct {
Triple pos;
double rad;
double invRad;
} Sphere;
/* Ray/sphere intersection via the quadratic in t (ray.dir assumed unit
 * length elsewhere -- TODO confirm).  Returns the nearest root greater than
 * epsilon, or t = INFINITY on a miss. */
SimpleHit sphereIntersect(Object *object, Ray *ray) {
    Sphere* sphere = (Sphere*)(object->data);
    /* op = center - origin; b is the projection of op onto the ray. */
    Triple op = subtract(&sphere->pos, &ray->org);
    double b = innerProduct(&op, &ray->dir);
    /* Discriminant of the quadratic: b^2 - |op|^2 + r^2. */
    double deter = b * b - innerProduct(&op, &op) + sphere->rad * sphere->rad;
    if (deter < 0.0) {
        return (SimpleHit){.t = INFINITY};
    }
    deter = sqrt(deter);
    /* Near root first; epsilon guards against self-intersection at t ~ 0. */
    double t = b - deter;
    if (t > epsilon) {
        return (SimpleHit){.t = t, .object = object, .normal = object->normal(object, ray, t)};
    }
    /* Far root: the ray origin is inside the sphere. */
    t = b + deter;
    if (t > epsilon) {
        return (SimpleHit){.t = t, .object = object, .normal = object->normal(object, ray, t)};
    }
    return (SimpleHit){.t = INFINITY};
}
Triple sphereNormal(Object *object, Ray *ray, double t) {
Sphere* sphere = (Sphere*)(object->data);
Triple hitPoint = rayHit(ray, t);
Triple unnormalized = subtract(&hitPoint, &sphere->pos);
return scale(&unnormalized, sphere->invRad);
}
void spherePrint(Object *object) {
Sphere* sphere = (Sphere*)(object->data);
debug(
"Sphere - center: (%f, %f, %f), radius: %f",
sphere->pos.v[0], sphere->pos.v[1], sphere->pos.v[2],
sphere->rad);
}
Object sphereBase;
void sphereInit() {
sphereBase = (Object){
.intersect = sphereIntersect,
.normal = sphereNormal,
.print = spherePrint
};
}
/* Create a sphere Object centered at `position` with the given radius.
 * Caches 1/radius so sphereNormal can avoid a divide per hit.
 * The Sphere payload is heap-allocated and owned by the Object. */
Object newSphere(Triple position, double radius, Material* material) {
    Sphere* payload = malloc(sizeof *payload);
    payload->pos = position;
    payload->rad = radius;
    payload->invRad = 1.0 / radius;
    Object result = sphereBase;
    result.data = payload;
    result.material = material;
    return result;
}
/* Infinite plane through `pos` with unit normal `normal`
 * (normalized by newPlane). */
struct Plane {
Triple pos;
Triple normal;
};
/* Ray/plane intersection: t = n.(pos - org) / n.dir. */
SimpleHit planeIntersect(Object *object, Ray *ray) {
// Based on equation at https://www.cl.cam.ac.uk/teaching/1999/AGraphHCI/SMAG/node2.html
Plane* plane = (Plane*)(object->data);
double bottom = innerProduct(&plane->normal, &ray->dir);
if (bottom > -epsilon && bottom < epsilon) {
// Ray (nearly) parallel to the plane: no usable intersection.
return (SimpleHit){.t = INFINITY};
}
Triple posSubOrg = subtract(&plane->pos, &ray->org);
double top = innerProduct(&plane->normal, &posSubOrg);
top /= bottom;
if (top < epsilon) {
// Intersection behind (or too close to) the ray origin.
return (SimpleHit){.t = INFINITY};
}
return (SimpleHit){.t = top, .object = object, .normal = plane->normal};
}
/* Planes have a single normal everywhere. */
Triple planeNormal(Object *object, Ray *ray, double t) {
return ((Plane*)(object->data))->normal;
}
/* Debug dump of the plane's parameters. */
void planePrint(Object *object) {
Plane* plane = (Plane*)(object->data);
debug("Plane - point: (%f, %f, %f), normal: (%f, %f, %f)",
plane->pos.v[0], plane->pos.v[1], plane->pos.v[2],
plane->normal.v[0], plane->normal.v[1], plane->normal.v[2]);
}
/* Shared function table copied into every plane Object. */
Object planeBase;
void planeInit() {
planeBase = (Object){
.intersect = planeIntersect,
.normal = planeNormal,
.print = planePrint
};
}
/* Create an infinite plane Object through `position` with the given
 * normal. The normal is normalized in place before being stored, so
 * callers may pass an un-normalized direction. */
Object newPlane(Triple position, Triple normal, Material* material) {
    Plane* payload = malloc(sizeof *payload);
    payload->pos = position;
    normalize(&normal);
    payload->normal = normal;
    Object result = planeBase;
    result.data = payload;
    result.material = material;
    return result;
}
/* Triangle primitive: three vertices plus precomputed edge vectors
 * a = v1 - v0 and b = v2 - v0, and the unit face normal (a x b) —
 * all filled in by newTriangle and used by the Möller–Trumbore test. */
struct Triangle {
Triple v[3];
Triple a;
Triple b;
Triple normal;
};
/* Möller–Trumbore ray/triangle intersection.
 * Based on the implementation at:
 * http://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-rendering-a-triangle/moller-trumbore-ray-triangle-intersection
 * Returns t = INFINITY when the ray is parallel, the barycentric
 * coordinates fall outside the triangle, or the hit is behind the origin.
 *
 * Fixes: the parallel test previously called the integer abs() on the
 * double determinant, truncating it (abs(0.3) == 0), which falsely
 * rejected rays with sub-unit determinants — use fabs(). The final test
 * only rejected |t| < epsilon, so intersections behind the ray origin
 * (t < -epsilon) were reported as hits; reject all t < epsilon, matching
 * sphereIntersect/planeIntersect. */
SimpleHit triangleIntersect(Object *object, Ray *ray) {
    Triangle* triangle = (Triangle*)(object->data);
    Triple pvec = crossProduct(&ray->dir, &triangle->b);
    double det = innerProduct(&triangle->a, &pvec);
    // Ray parallel to the triangle's plane.
    if (fabs(det) < epsilon) {
        return (SimpleHit){.t = INFINITY};
    }
    double invDet = 1.0 / det;
    Triple tvec = subtract(&ray->org, &triangle->v[0]);
    double u = innerProduct(&tvec, &pvec) * invDet;
    if (u < 0 || u > 1) {
        return (SimpleHit){.t = INFINITY};
    }
    Triple qvec = crossProduct(&tvec, &triangle->a);
    double v = innerProduct(&ray->dir, &qvec) * invDet;
    if (v < 0 || u + v > 1) {
        return (SimpleHit){.t = INFINITY};
    }
    double t = innerProduct(&triangle->b, &qvec) * invDet;
    // Only accept hits in front of the ray origin.
    if (t < epsilon) {
        return (SimpleHit){.t = INFINITY};
    }
    return (SimpleHit){.t = t, .object = object, .normal = triangle->normal};
}
/* Flat shading: the face normal is constant over the whole triangle. */
Triple triangleNormal(Object *object, Ray *ray, double t) {
return ((Triangle*)(object->data))->normal;
}
/* Debug dump of the triangle's three vertices. */
void trianglePrint(Object *object) {
Triangle* triangle = (Triangle*)(object->data);
debug("Triangle - (%f, %f, %f), (%f, %f, %f), (%f, %f, %f)",
triangle->v[0].v[0], triangle->v[0].v[1], triangle->v[0].v[2],
triangle->v[1].v[0], triangle->v[1].v[1], triangle->v[1].v[2],
triangle->v[2].v[0], triangle->v[2].v[1], triangle->v[2].v[2]);
}
/* Shared function table copied into every triangle Object. */
Object triangleBase;
void triangleInit() {
triangleBase = (Object){
.intersect = triangleIntersect,
.normal = triangleNormal,
.print = trianglePrint
};
}
/* Create a triangle Object from vertices v0, v1, v2. Precomputes the
 * two edge vectors (a = v1 - v0, b = v2 - v0) and the unit face normal
 * a x b that the Möller–Trumbore intersection test relies on. */
Object newTriangle(Triple v0, Triple v1, Triple v2, Material* material) {
    Triangle* payload = malloc(sizeof *payload);
    payload->v[0] = v0;
    payload->v[1] = v1;
    payload->v[2] = v2;
    payload->a = subtract(&v1, &v0);
    payload->b = subtract(&v2, &v0);
    payload->normal = crossProduct(&payload->a, &payload->b);
    normalize(&payload->normal);
    Object result = triangleBase;
    result.data = payload;
    result.material = material;
    return result;
}
/* Map uniform randoms (u1, u2) to a direction on the unit hemisphere
 * around +z: z = 1 - u1, slice radius sqrt(1 - z^2), azimuth 2*pi*u2.
 * NOTE(review): `exp` is unused — presumably intended for a cosine-power
 * (Phong) lobe; confirm before relying on it. */
Triple sampleHemi(double u1, double u2, double exp) {
double z = 1.0 - u1;
double phi = twoPi * u2;
// `theta` here is actually sin(theta): the radius of the z-slice.
double theta = 1.0 - (z * z);
if (theta < 0.0) {
// Clamp tiny negative values from rounding before sqrt.
theta = 0.0;
}
theta = sqrt(theta);
return (Triple){{theta * cos(phi), theta * sin(phi), z}};
}
/* Scale *v in place to unit (Euclidean) length.
 * Assumes a non-zero vector; a zero vector would divide by zero. */
void normalize(Triple* v) {
    double len = sqrt((v->v[0] * v->v[0]) + (v->v[1] * v->v[1]) + (v->v[2] * v->v[2]));
    for (int i = 0; i < 3; i++) {
        v->v[i] /= len;
    }
}
/* Rotate a hemisphere sample into the frame whose +z axis is *normal.
 * Builds an orthonormal (u, v, normal) basis from a random helper
 * vector, then returns normalize(p.x*u + p.y*v + p.z*normal).
 * NOTE(review): if randVector happens to be (near-)parallel to the
 * normal, the cross product degenerates — confirm this is acceptably
 * rare for doubleRand()'s range. */
Triple orientedHemiDir(double u1, double u2, Triple *normal, double exp) {
Triple p = sampleHemi(u1, u2, exp);
Triple randVector = (Triple){{doubleRand(), doubleRand(), doubleRand()}};
Triple v = crossProduct(&randVector, normal);
normalize(&v);
Triple u = crossProduct(&v, normal);
normalize(&u);
// Change of basis: p expressed in world coordinates.
Triple f1 = scale(&u, p.v[0]);
Triple f2 = scale(&v, p.v[1]);
Triple f3 = scale(normal, p.v[2]);
Triple result = add(&f1, &f2);
// HACK Is it safe to have the receiver also be an arg here?
result = add(&result, &f3);
normalize(&result);
return result;
}
/* Return a copy of *normal, flipped if necessary so it lies in the
 * same hemisphere as *wo (i.e. dot(normal, wo) >= 0). */
Triple orientNormal(Triple *normal, Triple *wo) {
    // TODO Should this be using epsilon.
    if (innerProduct(normal, wo) >= 0) {
        return *normal;
    }
    return scale(normal, -1);
}
/* Entry point: initializes the material/object function tables, builds
 * the Cornell-box style scene, renders it, and writes output.ppm.
 * Returns 0 on success, 1 if the frame buffer cannot be allocated. */
int main() {
    BLACK = (Triple){{0, 0, 0}};
    MAGENTA = (Triple){{1.0, 0, 1.0}};
    // Populate the shared function-table bases before any material or
    // object is created.
    emitterInit();
    diffuseInit();
    specularInit();
    refractiveInit();
    sphereInit();
    planeInit();
    triangleInit();
    Material whiteLight = newEmitter((Triple){1.25, 1.125, 0.875}, false);
    // Wall, ceiling, and floor materials.
    Material whiteDiffuse = newDiffuse((Triple){1, 1, 1}, (Triple){0, 0, 0}, false);
    Material greenDiffuse = newDiffuse((Triple){0, 1, 0}, (Triple){0, 0, 0}, false);
    Material redDiffuse = newDiffuse((Triple){1, 0, 0}, (Triple){0, 0, 0}, false);
    Material blueDiffuse = newDiffuse((Triple){0, 0, 1}, (Triple){0, 0, 0}, false);
    // Unique materials.
    Material whiteMirror = newSpecular((Triple){1, 1, 1}, (Triple){0, 0, 0}, false);
    Material cyanSpecular = newSpecular((Triple){0.2, 0.6, 0.6}, (Triple){0, 0, 0}, false);
    Material glass = newRefractive((Triple){0.8, 0.999, 0.1});
    Material pinkDiff = newDiffuse((Triple){0.9, 0.25, 0.9}, (Triple){0, 0, 0}, false);
    const int numObjects = 12;
    Object objects[12] = {
        // Light source.
        newSphere((Triple){0, -40, 0}, 24.0, (Material*)&whiteLight),
        // Ceiling.
        newPlane((Triple){0, -20, 0}, (Triple){0, 1, 0}, (Material*)&whiteDiffuse),
        // Back wall.
        newPlane((Triple){0, 0, 20}, (Triple){0, 0, -1}, (Material*)&whiteDiffuse),
        // Front wall.
        newPlane((Triple){0, 0, -80}, (Triple){0, 0, 1}, (Material*)&whiteDiffuse),
        // Floor.
        newPlane((Triple){0, 20, 0}, (Triple){0, -1, 0}, (Material*)&whiteDiffuse),
        // Left wall.
        newPlane((Triple){-20, 0, 0}, (Triple){1, 0, 0}, (Material*)&greenDiffuse),
        // Right wall.
        newPlane((Triple){20, 0, 0}, (Triple){-1, 0, 0}, (Material*)&redDiffuse),
        // Blue ball back-right.
        newSphere((Triple){-6, 0, 14}, 6, (Material*)&blueDiffuse),
        // White specular back-left.
        newSphere((Triple){8, 0, 12}, 8, (Material*)&whiteMirror),
        // Plane cutting back-left corner.
        newPlane((Triple){16, -16, 0}, (Triple){1, -1, 1}, (Material*)&cyanSpecular),
        // First refractive.
        newSphere((Triple){-8, 10, 0}, 8, (Material*)&glass),
        // Vertical triangle, front-right.
        newTriangle((Triple){-19, 4, -10}, (Triple){-10, 19, -10}, (Triple){-19, 19, -20}, &pinkDiff),
    };
    Camera camera = newCamera(
        (Triple){{0, 0, -60}},
        (Triple){{0, 0, 0}},
        400,
        (Triple){{0, 1, 0}});
    // 3 bytes (RGB) per pixel.
    char* buffer = malloc(sizeof(char) * width * height * 3);
    if (buffer == NULL) {
        // Fix: a failed allocation previously let render() write through NULL.
        return 1;
    }
    render(numObjects, objects, &camera, buffer);
    writePPM("output.ppm", buffer);
    free(buffer);
    // Fix: removed the commented-out debugging block and made the
    // success return explicit.
    return 0;
}
/* Write `buffer` (width*height RGB bytes, row-major) to `path` as a
 * binary PPM (P6) image.
 * Fix: fopen() was previously unchecked, so a failure led straight to
 * fprintf(NULL, ...); now the error is reported and the write skipped. */
void writePPM(char* path, char* buffer) {
    FILE* out = fopen(path, "wb");
    if (out == NULL) {
        fprintf(stderr, "writePPM: cannot open %s for writing\n", path);
        return;
    }
    fprintf(out, "P6\n %d\n %d\n 255\n", width, height);
    fwrite(buffer, sizeof(char) * width * height * 3, 1, out);
    fclose(out);
}
/* Return the nearest intersection of `ray` with any of the `numObjects`
 * objects, or a SimpleHit with t = INFINITY if nothing is hit.
 * Fix: removed the unused `sp` local (a leftover from the commented-out
 * debugging code in main). */
SimpleHit intersectObjects(int numObjects, Object* objects, Ray *ray) {
    SimpleHit firstHit = (SimpleHit){.t = INFINITY};
    for (int i = 0; i < numObjects; i++) {
        SimpleHit hit = objects[i].intersect(&objects[i], ray);
        if (hit.t < firstHit.t) {
            firstHit = hit;
        }
    }
    return firstHit;
}
/* Recursively estimate incoming radiance along `ray` by path tracing.
 * Returns BLACK past maxDepth or when the ray escapes the scene.
 * Non-dielectric materials use the standard estimator
 *   emitted + f * Li * cos(theta) / pdf
 * driven by the material's sampleF/f/emiss callbacks; dielectrics are
 * delegated to diRadiance() with the ray re-anchored at the hit point. */
Triple radiance(int numObjects, Object* objects, Ray* ray, int depth) {
if (depth > maxDepth) {
return BLACK;
}
SimpleHit hit = intersectObjects(numObjects, objects, ray);
if (isinf(hit.t)) {
// Ray escaped: no contribution.
return BLACK;
}
Material* mat = hit.object->material;
if (!mat->dielectric(mat)) {
Triple wo = scale(&ray->dir, -1);
// Flip the normal toward the viewer so sampling stays in the
// correct hemisphere.
Triple normal = orientNormal(&hit.normal, &wo);
SampleHit sample = mat->sampleF(mat, &normal, &wo);
Triple f = mat->f(mat, &sample.dir, &wo, &normal);
Ray newRay = (Ray){.org = rayHit(ray, hit.t), .dir = sample.dir};
Triple nextDepth = radiance(numObjects, objects, &newRay, depth + 1);
Triple nextColor = multiplyParts(&nextDepth, &f);
// cos(theta)/pdf weighting of the sampled direction.
Triple result = scale(&nextColor, innerProduct(&sample.dir, &normal) / sample.pdf);
Triple emiss = mat->emiss(mat);
return add(&result, &emiss);
}
// Dielectric: restart the ray at the hit point, keeping its direction;
// diRadiance decides between reflection and refraction.
Ray newRay = (Ray){.org = rayHit(ray, hit.t), .dir = ray->dir};
return diRadiance(numObjects, objects, &newRay, &hit.normal, mat, depth);
}
/* Radiance for dielectric (glass-like) surfaces. Splits the ray into a
 * mirror reflection and a refraction using a Schlick-style Fresnel
 * approximation with fixed indices nc = 1.0 (outside) and nt = 1.5
 * (medium). `ray` has already been re-anchored at the hit point by
 * radiance(); `normal` is the raw surface normal from the hit. */
Triple diRadiance(int numObjects, Object* objects, Ray* ray, Triple* normal, Material* mat, int depth) {
// Mirror reflection of the incoming direction about the normal:
// reflDir = dir - 2*(n.dir)*n.
Triple doubleNormal = scale(normal, 2);
scalePointer(&doubleNormal, innerProduct(normal, &ray->dir));
Triple reflDir = subtract(&ray->dir, &doubleNormal);
// Normal facing against the incoming ray; `into` is true when the ray
// enters the medium (oriented normal agrees with the surface normal).
Triple orientedNormal = orientNormal(normal, &ray->dir);
scalePointer(&orientedNormal, -1);
// TODO BUGBUG Epsilon check here?
bool into = innerProduct(normal, &orientedNormal) > 0.0;
double nc = 1.0;
double nt = 1.5;
double ddn = innerProduct(&ray->dir, &orientedNormal);
double nnt = into ? nc / nt : nt / nc;
// cos^2 of the transmitted angle; negative means total internal reflection.
double cos2t = 1 - nnt * nnt * (1 - ddn * ddn);
// TODO BUGBUG Another epsilon check?
if (cos2t < 0.0) {
// Total internal reflection: only the reflected branch exists.
Ray newRay = (Ray){.org = ray->org, .dir = reflDir};
Triple result = mat->emiss(mat);
Triple nextResult = radiance(numObjects, objects, &newRay, depth + 1);
addPointer(&result, &nextResult);
return result;
}
// Refracted direction via Snell's law.
double intoTerm = into ? 1.0 : -1.0;
Triple tdir = scale(&ray->dir, nnt);
Triple normTerm = scale(normal, intoTerm);
scalePointer(&normTerm, (ddn * nnt + sqrt(cos2t)));
subtractPointer(&tdir, &normTerm);
normalize(&tdir);
// Schlick approximation for the Fresnel reflectance `re`.
// NOTE(review): the classic Schlick form uses pow(c, 5); confirm the
// exponent 4 here is intentional.
double a = nt - nc;
double b = nt + nc;
double r0 = a * a / (b * b);
double c = into ? 1 + ddn : 1 - innerProduct(&tdir, normal);
double re = r0 + (1 - r0) * pow(c, 4);
double tr = 1 - re;
// Russian-roulette probability of taking the reflection branch, with
// rp/tp reweighting each branch to keep the estimator unbiased.
double p = 0.25 + 0.5 * re;
double rp = re / p;
double tp = tr / (1 - p);
Triple result;
if (depth <= 2) {
// Shallow recursion: trace both branches and blend by re/tr.
Ray reflectRay = (Ray){.org = ray->org, .dir = reflDir};
result = radiance(numObjects, objects, &reflectRay, depth + 1);
scalePointer(&result, re);
Ray refractRay = (Ray){.org = ray->org, .dir = tdir};
Triple refractRad = radiance(numObjects, objects, &refractRay, depth + 1);
scalePointer(&refractRad, tr);
addPointer(&result, &refractRad);
} else {
// Deeper: trace only one branch, chosen by roulette.
if (doubleRand() < p) {
Ray reflectRay = (Ray){.org = ray->org, .dir = reflDir};
result = radiance(numObjects, objects, &reflectRay, depth + 1);
scalePointer(&result, rp);
} else {
Ray refractRay = (Ray){.org = ray->org, .dir = tdir};
result = radiance(numObjects, objects, &refractRay, depth + 1);
scalePointer(&result, tp);
}
}
// TODO Is this the right thing to do? It looks alright :/
SampleHit sample = mat->sampleF(mat, normal, &ray->dir);
scalePointer(&result, innerProduct(&sample.dir, normal) / sample.pdf);
Triple emiss = mat->emiss(mat);
addPointer(&result, &emiss);
return result;
}
/* Render the scene into `buffer` (width*height pixels, 3 bytes each,
 * row-major). Rows are distributed across OpenMP threads; each pixel
 * averages `spp` jittered samples and is then gamma-encoded per channel.
 * NOTE(review): doubleRand() is called from inside the parallel region —
 * confirm it is thread-safe (plain rand() is not). */
void render(int numObjects, Object* objects, Camera* camera, char* buffer) {
cameraCalcOrthonormalBasis(camera);
double sppInv = 1.0 / (double)spp;
double halfWidth = (double)width / 2;
double halfHeight = (double)height / 2;
#pragma omp parallel for schedule(dynamic, 1)
for (int y = 0; y < height; y++) {
double dY = (double)y;
for (int x = 0; x < width; x++) {
Triple px = (Triple){{0, 0, 0}};
for (int s = 0; s < spp; s++) {
// Jittered sample position, centered on the image midpoint.
double sx = ((double)x + doubleRand()) - halfWidth;
double sy = (dY + doubleRand()) - halfHeight;
Ray ray = cameraSpawnRay(camera, sx, sy);
Triple rad = radiance(numObjects, objects, &ray, 0);
// Accumulate the 1/spp-weighted sample.
Triple scaled = scale(&rad, sppInv);
addPointer(&px, &scaled);
}
int i = ((y * width) + x) * 3;
buffer[i] = gammaTransform(px.v[0]);
buffer[i + 1] = gammaTransform(px.v[1]);
buffer[i + 2] = gammaTransform(px.v[2]);
}
}
}
/* Package the camera parameters. The orthonormal basis members are
 * zero-initialized here and filled in later by
 * cameraCalcOrthonormalBasis(). */
Camera newCamera(Triple eye, Triple focal, double viewDist, Triple up) {
    Camera cam = {
        .eye = eye,
        .focal = focal,
        .viewDist = viewDist,
        .up = up
    };
    return cam;
}
/* Build the camera's orthonormal basis (u, v, w) from eye, focal point,
 * and up vector. w points from the focal point toward the eye, so view
 * rays travel along -w; wMultViewDist caches w * viewDist for use in
 * cameraSpawnRay(). Assumes `up` is not parallel to the view axis. */
void cameraCalcOrthonormalBasis(Camera* camera) {
Triple w1 = subtract(&camera->eye, &camera->focal);
normalize(&w1);
camera->w = w1;
camera->wMultViewDist = scale(&camera->w, camera->viewDist);
Triple u1 = crossProduct(&camera->up, &camera->w);
normalize(&u1);
camera->u = u1;
// w and u are already unit and orthogonal, so v needs no normalize.
camera->v = crossProduct(&camera->w, &camera->u);
}
/* Spawn the primary ray through viewport coordinates (x, y), expressed
 * in the camera's orthonormal basis: dir = x*u + y*v - viewDist*w,
 * normalized. Requires cameraCalcOrthonormalBasis() to have run. */
Ray cameraSpawnRay(Camera* camera, double x, double y) {
    Triple dir = scale(&camera->u, x);
    Triple dy = scale(&camera->v, y);
    addPointer(&dir, &dy);
    subtractPointer(&dir, &camera->wMultViewDist);
    normalize(&dir);
    return (Ray){ .org = camera->eye, .dir = dir };
}
/* Clamp x to the inclusive range [0, 1]. */
double clamp(double x) {
    return x < 0 ? 0 : (x > 1 ? 1 : x);
}
/* Map a linear intensity to an 8-bit channel value with gamma-2.2
 * encoding, rounding to nearest; out-of-range inputs are clamped first. */
unsigned char gammaTransform(double x) {
return (unsigned char)floor(pow(clamp(x), 1 / 2.2) * 255 + 0.5);
}
|
house_cmap.h | #pragma omp parallel for schedule(dynamic,1) reduction(+:counter)
// Count 5-vertex "house" patterns anchored at v0, using a per-thread
// connectivity map: cmap[u] == 1 marks u as a neighbor of v0.
// NOTE(review): the `v1 >= v0` break assumes g.N() returns neighbors in
// ascending order — confirm that invariant holds for this graph type.
for(vidType v0 = 0; v0 < g.V(); v0++) {
auto tid = omp_get_thread_num();
auto &cmap = cmaps[tid];
auto y0 = g.N(v0);
// Mark v0's neighborhood in the connectivity map.
for (auto u : y0) cmap[u] = 1;
for (auto v1 : y0) {
if (v1 >= v0) break;
auto y1 = g.N(v1);
for (auto v2 : y1) {
// v2 must be adjacent to both v0 and v1 (forms the triangle base).
if (cmap[v2] != 1) continue;
for (auto v3 : y1) {
if (v3 == v0 || v3 == v2) continue;
for (auto v4 : g.N(v3)) {
if (v4 == v1 || v4 == v2) continue;
// v4 adjacent to v0 closes the house; count it.
if (cmap[v4] == 1) counter ++;
}
}
}
}
// Clear the map so the next v0 starts fresh.
for (auto u : y0) cmap[u] = 0;
}
|
sync.c | #include "pyactpol.h"
#include <gsl/gsl_linalg.h>
/* sync.c
*
* scan-synchronous mode fitting and removal.
*/
static int fit_poly(float *x, double *y, int n, int order, int samp,
double *fit);
static int fit_sine(double *t, double *y, int n, double f,
double *fit, double *coeff);
/* Find the amplitude and phase of the synchronous signal in a set of
* detectors together with the phase of the azimuth scan.
*
* az should already have the mean removed. ctime should start at 0
* to minimize rounding errors. nwin should be the number of samples
* to use for the amplitude fit; i.e. an round number of azimuth scans.
*/
/* Find the amplitude and phase of the scan-synchronous signal in a set
 * of detectors, together with the phase of the azimuth scan.
 *
 * az should already have the mean removed; ctime should start near 0 to
 * minimize rounding errors. The analysis window covers nT scan periods
 * at frequency f (capped at ndata samples). Results go to sync_amp,
 * sync_phase, sync_rms (one entry per selected detector) and *phaseAz.
 * Returns 0 on success, 1 if the azimuth sine fit fails.
 *
 * Fixes: fit_az/coeff_az were leaked on the fit_sine error path, and
 * tcopy was never freed. */
static int mbSyncFit(double* az, double* ctime,
                     float* data, int ndata,
                     int *dets, int ndets,
                     float *sync_amp, float* sync_phase,
                     float *sync_rms,
                     float *phaseAz,
                     float f,
                     int order, int samp, int nT)
{
    int n, nwin;
    double sampRate, thetaAz;
    // Calculate size of data window to do analysis
    sampRate = (double)ndata / (ctime[ndata-1] - ctime[0]);
    nwin = floor( (double)nT / f * sampRate );
    if (nwin > ndata) {
        nwin = ndata;
    }
    double *fit_az = malloc(nwin * sizeof(double));
    double *coeff_az = malloc(2 * sizeof(double));
    // Obtain phase of azimuth scan
    if (fit_sine(ctime, az, nwin, f, fit_az, coeff_az) != 0) {
        free(fit_az);
        free(coeff_az);
        return 1;
    }
    // atan covers only (-pi/2, pi/2); shift by pi when the cos term is negative.
    if (coeff_az[0] > 0) thetaAz = atan(coeff_az[1]/coeff_az[0]);
    else thetaAz = atan(coeff_az[1]/coeff_az[0]) + M_PI;
    *phaseAz = thetaAz;
    free(fit_az);
    free(coeff_az);
    // Time axis re-zeroed and narrowed to float for fit_poly.
    float *tcopy = malloc(nwin * sizeof(float));
    for (int j=0; j<nwin; j++)
        tcopy[j] = ctime[j] - ctime[0];
    // Process all selected detectors
    #pragma omp parallel private(n)
    {
        // Per-thread scratch buffers.
        double *y = malloc(nwin * sizeof(double));
        double *fit_1f = malloc(nwin * sizeof(double));
        double *fit_syn = malloc(nwin * sizeof(double));
        double *coeff_syn = malloc(2 * sizeof(double));
        #pragma omp for
        for (n = 0; n < ndets; n++) {
            int i;
            double A, theta, rms;
            float *det_data = data + ndata * dets[n];
            // Obtain copy of data
            for (i = 0; i < nwin; i++)
                y[i] = det_data[i];
            // Fit and subtract polynomial (removes the 1/f common mode)
            fit_poly(tcopy, y, nwin, order, samp, fit_1f);
            for (i = 0; i < nwin; i++)
                y[i] -= fit_1f[i];
            // Fit sinusoidal to obtain amplitude and phase
            fit_sine(ctime, y, nwin, f, fit_syn, coeff_syn);
            A = sqrt(coeff_syn[0]*coeff_syn[0] + coeff_syn[1]*coeff_syn[1]);
            if (A == 0) theta = 0.0;
            else if (coeff_syn[0] > 0) theta = atan(coeff_syn[1]/coeff_syn[0]);
            else theta = atan(coeff_syn[1]/coeff_syn[0]) + M_PI;
            sync_amp[n] = A;
            sync_phase[n] = theta;
            // Find fit RMS error
            rms = 0.0;
            for (i = 0; i < nwin; i++)
                rms += (y[i] - fit_syn[i])*(y[i] - fit_syn[i]);
            sync_rms[n] = sqrt(rms/nwin);
        } /* pragma omp for */
        free(y);
        free(fit_1f);
        free(fit_syn);
        free(coeff_syn);
    } /* pragma omp parallel */
    free(tcopy);
    return 0;
}
/// Least-squares fit of a fixed-frequency sinusoid to data, solving for
/// coeff in y ~ coeff[0]*cos(2*pi*f*t) + coeff[1]*sin(2*pi*f*t).
/// \param t Independent variable (time).
/// \param y Dependent variable (data).
/// \param n Number of elements in data.
/// \param f Frequency of the sinusoid.
/// \param fit Output: the fitted sinusoid evaluated at each t.
/// \param coeff Output: coefficients (A*cos(th) + B*sin(th)).
/// \return 0 on success, 1 if the normal equations are singular
///         (e.g. n == 0), in which case outputs are untouched.
static int fit_sine(double *t, double *y, int n, double f,
                    double *fit, double *coeff) {
    int k;
    double temp_c, temp_s;
    double cos2, sin2, sincos, ycos, ysin;
    cos2 = sin2 = sincos = ycos = ysin = 0.0;
    for (k = 0; k < n; k++) {
        temp_c = cos(2*M_PI*f*t[k]);
        temp_s = sin(2*M_PI*f*t[k]);
        cos2 += temp_c*temp_c;
        sin2 += temp_s*temp_s;
        sincos += temp_s*temp_c;
        ycos += y[k]*temp_c;
        ysin += y[k]*temp_s;
    }
    // 2x2 normal-equation determinant, hoisted so it is computed once
    // (it was previously evaluated twice) and checked before dividing.
    const double det = cos2*sin2 - sincos*sincos;
    if (det == 0.0) {
        return 1;
    }
    coeff[0] = (sin2*ycos - sincos*ysin) / det;
    coeff[1] = (cos2*ysin - sincos*ycos) / det;
    // Evaluate result
    for (k = 0; k < n; k++) {
        fit[k] = cos(2*M_PI*f*t[k])*coeff[0] + sin(2*M_PI*f*t[k])*coeff[1];
    }
    return 0;
}
/// Fit polynomial to data to remove common mode.
/// Least-squares polynomial fit on binned samples of (x, y); the normal
/// equations are solved with a GSL LU decomposition and the fitted
/// polynomial is evaluated at every original sample.
/// \param x Independent variable.
/// \param y Dependent variable (data).
/// \param n Number of elements in data.
/// \param order Order of polynomial to fit (must be > 0).
/// \param samp Number of samples per bin used for the fit (must be > 0).
/// \param fit Output vector with the evaluated polynomial (length n).
/// \return 0 on success, non-zero on error.
///
/// Fix: validation now happens before any allocation — the early
/// returns previously leaked the GSL vectors/matrix/permutation and
/// `coeff`; samp is also validated to avoid a division by zero.
static int fit_poly(float *x, double *y, int n, int order, int samp,
                    double *fit) {
    int i, j, k;
    double temp;
    if (order <= 0 || samp <= 0) {
        print_error("Invalid 'order=%i' in fit_poly\n", order);
        return 1;
    }
    const int N = n / samp;
    if (N < order) {
        print_error("Fitting 'order=%i' with 'N=%i' data points in fit_poly\n", order, N);
        return 1;
    }
    // Initialize matrices and vectors (only after validation succeeds).
    gsl_vector *g_Ab = gsl_vector_calloc(order+1);
    gsl_matrix *g_AA = gsl_matrix_alloc(order+1,order+1);
    gsl_vector *g_x = gsl_vector_alloc(order+1);
    gsl_permutation *g_p = gsl_permutation_alloc(order+1);
    int signum = 0;
    int status = 1; // failure.
    double *coeff = malloc((order+1) * sizeof(double));
    double **A = (double **)malloc((order+1) * sizeof(double *));
    A[0] = (double *)malloc(N * (order+1) * sizeof(double));
    for (i = 0; i < order+1; i++){
        A[i] = A[0] + N * i;
        for( j = 0; j < N; j++ ) A[i][j] = 0.0;
    }
    double *pows = (double *)malloc((2*order+1) * sizeof(double));
    for (i = 0; i < 2*order+1; i++)
        pows[i] = 0.0;
    // Design matrix: A[i][k] = x_k^i with x_k taken at each bin's center.
    for (i = 0; i < order+1; i++)
        for (k = 0; k < N; k++) {
            A[i][k] = 1.0;
            for (j = 0; j < i; j++)
                A[i][k] *= x[k*samp + samp/2];
        }
    // AA = transpose(A)*A; entry (i,j) depends only on i+j, so only the
    // 2*order+1 distinct power sums are accumulated.
    for (i = 2*order; i > 0; i -= 2) {
        for (k = 0; k < N; k++) {
            pows[i] += A[i/2][k]*A[i/2][k];
            pows[i-1] += A[i/2][k]*A[i/2-1][k];
        }
    }
    for (k = 0; k < N; k++)
        pows[0]++;
    for (i = 0; i < order+1; i++)
        for (j = 0; j < order+1; j++)
            g_AA->data[i*g_AA->tda+j] = pows[i+j];
    // Ab = transpose(A)*y, with y averaged over each bin of `samp` samples.
    for (i = 0; i < order+1; i++) {
        for (k = 0; k < N; k++) {
            temp = 0.0;
            for (j = 0; j < samp; j++)
                temp += y[k*samp + j];
            g_Ab->data[i] += A[i][k] * temp / (float)samp;
        }
    }
    // Solve the normal equations via LU decomposition.
    if ((status = gsl_linalg_LU_decomp(g_AA, g_p, &signum))!=0)
        goto exit_now;
    if ((status = gsl_linalg_LU_solve(g_AA, g_p, g_Ab, g_x))!=0)
        goto exit_now;
    // Copy out...
    for (i = 0; i < order+1; i++)
        coeff[i] = g_x->data[i];
    // Evaluate the fitted polynomial at every original sample.
    for (k = 0; k < n; k++) {
        fit[k] = 0.0;
        for (i = 0; i < order+1; i++) {
            temp = 1.0;
            for (j = 0; j < i; j++)
                temp *= x[k];
            fit[k] += temp * coeff[i];
        }
    }
    status = 0;
exit_now:
    free(A[0]);
    free(A);
    free(pows);
    free(coeff);
    gsl_vector_free(g_Ab);
    gsl_vector_free(g_x);
    gsl_matrix_free(g_AA);
    gsl_permutation_free(g_p);
    return status;
}
/* Python-visible docstring for get_sync_amps.
 * Fix: the signature line omitted the scan_freq/order/samp/nT arguments
 * that PyArg_ParseTuple("O!O!O!O!diii") requires, and the documented
 * return value contradicted the actual Py_BuildValue("NNNf", amps,
 * phases, rms, az_phase). */
PyDoc_STRVAR(get_sync_amps__doc__,
"get_sync_amps(data, dets, az, ctime, scan_freq, order, samp, nT)\n"
"\n"
"The arrays must be C-ordered with dimensions like:\n"
" data [ *,n_data] (float)\n"
" dets [n_det] (int)\n"
" az [n_data] (double)\n"
" ctime[n_data] (double)\n"
"\n"
"Returns (amps, phases, rms, az_phase)."
);
/* Python entry point: fit the scan-synchronous amplitude, phase, and
 * fit RMS for each selected detector. See get_sync_amps__doc__ for the
 * expected array layouts. Returns (amps, phases, rms, az_phase). */
static PyObject *get_sync_amps(PyObject *self, PyObject *args)
{
PyArrayObject *data_array;
PyArrayObject *dets_array;
PyArrayObject *az_array;
PyArrayObject *ctime_array;
double scan_freq;
int order, samp, nT;
if (!PyArg_ParseTuple(args, "O!O!O!O!diii",
&PyArray_Type, &data_array,
&PyArray_Type, &dets_array,
&PyArray_Type, &az_array,
&PyArray_Type, &ctime_array,
&scan_freq,
&order,
&samp,
&nT
))
po_raise("invalid arguments.");
// Types and ordering
ASSERT_CARRAY_TYPE_NDIM(data_array, NPY_FLOAT32, 2);
ASSERT_CARRAY_TYPE_NDIM(dets_array, NPY_INT32, 1);
ASSERT_CARRAY_TYPE_NDIM(az_array, NPY_FLOAT64, 1);
ASSERT_CARRAY_TYPE_NDIM(ctime_array, NPY_FLOAT64, 1);
int ndata = PyArray_DIMS(data_array)[1];
int ndet = PyArray_DIMS(dets_array)[0];
po_assert(PyArray_DIMS(az_array)[0] == ndata);
po_assert(PyArray_DIMS(ctime_array)[0] == ndata);
float *data = PyArray_DATA(data_array);
double *az = PyArray_DATA(az_array);
double *ctime = PyArray_DATA(ctime_array);
int *dets = PyArray_DATA(dets_array);
// We've been so thorough, it would be a shame to segfault now.
// Bounds-check every detector index against the data's first dimension.
for (int i=0; i<ndet; i++)
po_assert(dets[i] >= 0 &&
dets[i] < PyArray_DIMS(data_array)[0]);
// And, places for the results.
npy_intp ndet_ = ndet;
PyArrayObject *amp_array = (PyArrayObject*)
PyArray_SimpleNew(1, &ndet_, NPY_FLOAT32);
PyArrayObject *phase_array = (PyArrayObject*)
PyArray_SimpleNew(1, &ndet_, NPY_FLOAT32);
PyArrayObject *rms_array = (PyArrayObject*)
PyArray_SimpleNew(1, &ndet_, NPY_FLOAT32);
float phaseAz = -1.;
// NOTE(review): mbSyncFit's return value is ignored, so a failed
// azimuth fit still yields arrays of undefined values — consider
// checking it and raising.
mbSyncFit(az, ctime,
data, ndata,
dets, ndet,
PyArray_DATA(amp_array),
PyArray_DATA(phase_array),
PyArray_DATA(rms_array),
&phaseAz,
scan_freq, order, samp, nT);
// "N" steals each array reference, so no DECREFs are needed here.
return Py_BuildValue("NNNf",
amp_array,
phase_array,
rms_array,
phaseAz);
}
/* Method table exported by this sync module. */
PyMethodDef pyactpol_sync_methods[] = {
{"get_sync_amps", get_sync_amps, METH_VARARGS,
get_sync_amps__doc__},
{NULL, NULL, 0, NULL} /* Sentinel */
};
|
CT_OMP_IMPL.c | /*
* _CT_OMP_IMPL_C_
*
* Copyright (C) 2017-2021 Tactical Computing Laboratories, LLC
* All Rights Reserved
* contact@tactcomplabs.com
*
* See LICENSE in the top level directory for licensing details
*/
#include <omp.h>
#include <stdint.h>
/* OpenMP Benchmark Implementations
*
* Benchmark implementations are in the form:
*
* void BENCHTYPE_ATOMTYPE( uint64_t *ARRAY, uint64_t *IDX,
* unsigned long long iters,
* unsigned long long pes )
*
*/
/* Random-access atomic ADD benchmark: each OpenMP thread performs
 * `iters` relaxed fetch-adds of 1 at indices drawn from its own slice
 * of IDX (slice base = thread id * iters). */
void RAND_ADD( uint64_t *restrict ARRAY,
               uint64_t *restrict IDX,
               uint64_t iters,
               uint64_t pes ){
  #pragma omp parallel
  {
    const uint64_t base = (uint64_t)(omp_get_thread_num()) * iters;
    for( uint64_t k = base; k < (base + iters); k++ ){
      __atomic_fetch_add( &ARRAY[IDX[k]], (uint64_t)(0x1), __ATOMIC_RELAXED );
    }
  }
}
/* Random-access CAS benchmark: each thread issues `iters` relaxed
 * compare-and-swaps on ARRAY[IDX[i]]. Destination, expected, and
 * desired all alias the same element, so memory is unchanged — only
 * the cost of the atomic operation itself is being measured. */
void RAND_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
// Each thread works on its own contiguous slice of IDX.
start = (uint64_t)(omp_get_thread_num()) * iters;
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &ARRAY[IDX[i]], &ARRAY[IDX[i]], ARRAY[IDX[i]],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* Unit-stride atomic ADD benchmark: each thread walks its own
 * contiguous slice of ARRAY, adding 0xF to each element with a relaxed
 * atomic fetch-add. IDX is unused for this access pattern. */
void STRIDE1_ADD( uint64_t *restrict ARRAY,
                  uint64_t *restrict IDX,
                  uint64_t iters,
                  uint64_t pes ){
  #pragma omp parallel
  {
    const uint64_t base = (uint64_t)(omp_get_thread_num()) * iters;
    for( uint64_t k = base; k < (base + iters); k++ ){
      __atomic_fetch_add( &ARRAY[k], (uint64_t)(0xF), __ATOMIC_RELAXED );
    }
  }
}
/* Unit-stride CAS benchmark: like STRIDE1_ADD but each access is a
 * relaxed compare-and-swap whose destination, expected, and desired
 * all alias ARRAY[i], leaving memory unchanged. */
void STRIDE1_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* Strided atomic ADD benchmark: each thread covers its slice
 * [start, start+iters) stepping by `stride`, so it touches roughly
 * iters/stride elements per pass. */
void STRIDEN_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes,
uint64_t stride ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
for( i=start; i<(start+iters); i+=stride ){
__atomic_fetch_add( &ARRAY[i], (uint64_t)(0xF), __ATOMIC_RELAXED );
}
}
}
/* Strided CAS benchmark: STRIDEN_ADD's access pattern with the
 * memory-neutral self-aliasing compare-and-swap. */
void STRIDEN_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes,
uint64_t stride ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
for( i=start; i<(start+iters); i+=stride ){
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* Pointer-chase benchmark: each thread starts at index tid*iters and
 * repeatedly follows the chain start = IDX[start]. The fetch-add of 0
 * is used purely as an atomic load, so every hop is serialized on the
 * previous one (latency-bound by construction). */
void PTRCHASE_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
for( i=0; i<iters; i++ ){
start = __atomic_fetch_add( &IDX[start],
(uint64_t)(0x00ull),
__ATOMIC_RELAXED );
}
}
}
/* Pointer-chase via CAS: `start` is passed as the expected value, so
 * when IDX[start] != start the CAS fails and writes the observed value
 * back into `start`, which is what advances the chase.
 * NOTE(review): when IDX[start] == start the CAS succeeds and the chase
 * stalls on a self-loop — presumably the index buffer never contains
 * fixed points; confirm with the generator. */
void PTRCHASE_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
#pragma omp parallel private(start,i)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
for( i=0; i<iters; i++ ){
__atomic_compare_exchange_n( &IDX[start], &start, IDX[start],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* Scatter/gather ADD benchmark: per element, read src = IDX[i] and
 * dest = IDX[i+1] (fetch-add of 0 acts as an atomic load), bump
 * ARRAY[src] and capture its old value, then add that value into
 * ARRAY[dest] — four atomic operations per iteration. */
void SG_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t src = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,src,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
for( i=start; i<(start+iters); i++ ){
src = __atomic_fetch_add( &IDX[i], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[src], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[dest], val, __ATOMIC_RELAXED );
}
}
}
/* Scatter/gather CAS benchmark: the same four-access pattern expressed
 * entirely with compare-and-swaps. src/dest/val are threaded through
 * as the `expected` pointers, so failed CASes update them with the
 * observed memory values, mimicking the loads in SG_ADD. */
void SG_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t src = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,src,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
val = 0x00ull;
src = 0x00ull;
dest = 0x00ull;
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &IDX[i], &src, IDX[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[src], &val, ARRAY[src],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &ARRAY[dest], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* Contended hot-spot benchmark: every thread performs `iters` relaxed
 * atomic increments of the single element ARRAY[0]. IDX is unused. */
void CENTRAL_ADD( uint64_t *restrict ARRAY,
                  uint64_t *restrict IDX,
                  uint64_t iters,
                  uint64_t pes ){
  #pragma omp parallel
  {
    for( uint64_t k = 0; k < iters; k++ ){
      __atomic_fetch_add( &ARRAY[0], (uint64_t)(0x1), __ATOMIC_RELAXED );
    }
  }
}
/* Contended hot-spot CAS benchmark: every thread hammers ARRAY[0] with
 * the memory-neutral self-aliasing compare-and-swap, `iters` times. */
void CENTRAL_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
#pragma omp parallel private(i)
{
for( i=0; i<iters; i++ ){
__atomic_compare_exchange_n( &ARRAY[0], &ARRAY[0], ARRAY[0],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* Scatter ADD benchmark: sequential read of ARRAY[i] (plus increment),
 * indexed write: the old value is added into ARRAY[dest] where
 * dest = IDX[i+1] (fetch-add of 0 serves as an atomic load). */
void SCATTER_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
for( i=start; i<(start+iters); i++ ){
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[i], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[dest], val, __ATOMIC_RELAXED );
}
}
}
/* Scatter CAS benchmark: the same pattern built from compare-and-swaps;
 * dest/val are the `expected` pointers, so failed CASes refresh them
 * with the observed memory values (standing in for the loads above). */
void SCATTER_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
dest = 0x00ull;
val = 0x00ull;
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[i], &val, ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &ARRAY[dest], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* Gather ADD benchmark: indexed read of ARRAY[dest] (plus increment)
 * with dest = IDX[i+1], sequential write: the old value is added into
 * ARRAY[i] — the mirror image of SCATTER_ADD. */
void GATHER_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
for( i=start; i<(start+iters); i++ ){
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[dest], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[i], val, __ATOMIC_RELAXED );
}
}
}
/* Gather CAS benchmark: the same pattern built from compare-and-swaps;
 * dest/val as `expected` pointers pick up the observed memory values on
 * CAS failure, standing in for the loads in GATHER_ADD. */
void GATHER_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
uint64_t i = 0;
uint64_t start = 0;
uint64_t dest = 0;
uint64_t val = 0;
#pragma omp parallel private(start,i,dest,val)
{
start = (uint64_t)(omp_get_thread_num()) * iters;
dest = 0x00ull;
val = 0x00ull;
for( i=start; i<(start+iters); i++ ){
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &val, ARRAY[dest],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
/* EOF */
|
zvjsvd.c | #include "zvjsvd.h"
#include "znormx.h"
#include "zscale.h"
#include "znorm2.h"
#include "dznrm2.h"
#include "zdpscl.h"
#include "zbjac2.h"
#include "zjrotf.h"
#include "zjrot.h"
#include "dswp.h"
#include "vecdef.h"
#include "defops.h"
#ifdef JTRACE
#include "timer.h"
#endif /* JTRACE */
#ifdef DBL_MAX_ROT_EXP
#error DBL_MAX_ROT_EXP already defined
#else /* !DBL_MAX_ROT_EXP */
#define DBL_MAX_ROT_EXP 1021
#endif /* ?DBL_MAX_ROT_EXP */
/* zvjsvd_: vectorized one-sided Jacobi SVD of an m x n complex matrix G,
 * stored split as real part Gr and imaginary part Gi (column leading
 * dimensions ldGr/ldGi).  The right singular vectors are accumulated into
 * the split-complex (Vr,Vi), and the column norms (singular values on
 * convergence) are returned as exponent/fraction pairs eS[j]/fS[j]
 * (the code manipulates them via dbl2ef/scalbn/scalef throughout).
 *
 * js encodes the Jacobi pivot strategy: stp pivot-pair steps per sweep,
 * each step a permutation of the n column indices; swp is the maximum
 * number of sweeps.  work must hold at least 10*(n/2) doubles and iwork
 * at least 2*(n/2/VDL) unsigneds (see the partitioning below).
 *
 * Returns the number of sweeps performed (>= 0) on success, or a negative
 * code identifying the offending argument / failure site.
 */
fint zvjsvd_(const fnat m[static restrict 1], const fnat n[static restrict 1], double Gr[static restrict VDL], const fnat ldGr[static restrict 1], double Gi[static restrict VDL], const fnat ldGi[static restrict 1], double Vr[static restrict VDL], const fnat ldVr[static restrict 1], double Vi[static restrict VDL], const fnat ldVi[static restrict 1], double eS[static restrict 1], double fS[static restrict 1], const unsigned js[static restrict 1], const unsigned stp[static restrict 1], const unsigned swp[static restrict 1], double work[static restrict VDL], unsigned iwork[static restrict 1])
{
const fnat n_2 = (*n >> 1u);
/* argument validation: dimensions must be SIMD-lane (VDL) aligned and the
 * arrays properly aligned; each failure maps to a distinct negative code */
if (IS_NOT_VFPENV)
return -18;
if (!*n)
return 0;
if (*m < *n)
return -1;
if (*m & VDL_1)
return -1;
if (*n & 1u)
return -2;
if (n_2 & VDL_1)
return -2;
if (IS_NOT_ALIGNED(Gr))
return -3;
if (*ldGr < *m)
return -4;
if (*ldGr & VDL_1)
return -4;
if (IS_NOT_ALIGNED(Gi))
return -5;
if (*ldGi < *m)
return -6;
if (*ldGi & VDL_1)
return -6;
if (IS_NOT_ALIGNED(Vr))
return -7;
if (*ldVr < *n)
return -8;
if (*ldVr & VDL_1)
return -8;
if (IS_NOT_ALIGNED(Vi))
return -9;
if (*ldVi < *n)
return -10;
if (*ldVi & VDL_1)
return -10;
if (IS_NOT_ALIGNED(work))
return -16;
#ifdef JTRACE
/* tracing mode reuses `work` as the trace file name on entry */
FILE *const jtr = fopen((const char*)work, "w");
if (!jtr)
return -13;
(void)fprintf(jtr, "M=");
(void)fflush(jtr);
#endif /* JTRACE */
/* M = max |entry| of G; must be finite and non-negative */
double M = znormx_(m, n, Gr, ldGr, Gi, ldGi);
if (!(M <= DBL_MAX))
return -19;
if (copysign(1.0, M) == -1.0)
return -20;
#ifdef JTRACE
(void)fprintf(jtr, "%#.17e\n", M);
(void)fflush(jtr);
#endif /* JTRACE */
/* initialize V to the identity and (eS,fS) to the "no norm yet" state
 * (-inf exponent, unit fraction) */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,Vr,ldVr,Vi,ldVi,eS,fS)
#endif /* _OPENMP */
for (fnat j = 0u; j < *n; ++j) {
register const VD z = _mm512_setzero_pd();
double *const Vrj = Vr + j * (size_t)(*ldVr);
double *const Vij = Vi + j * (size_t)(*ldVi);
for (fnat i = 0u; i < *n; i += VDL) {
_mm512_store_pd((Vrj + i), z);
_mm512_store_pd((Vij + i), z);
}
fS[j] = Vrj[j] = 1.0;
eS[j] = -HUGE_VAL;
}
if (M == 0.0)
return 0;
/* overflow-avoiding scaling: sN shifts G so column norms stay below the
 * norm threshold, sR below the rotation threshold; sT accumulates the
 * total shift applied so it can be undone at the end */
const double M_m = (DBL_MAX / ((*m << 2u) * M_SQRT2));
double es = 0.0, fs = 0.0;
dbl2ef(M_m, &es, &fs);
const int DBL_MAX_NRM_EXP = (int)es;
dbl2ef(M, &es, &fs);
int eM = (int)es;
int sR = DBL_MAX_ROT_EXP - eM - 1;
int sN = DBL_MAX_NRM_EXP - eM - 1;
#ifdef JTRACE
(void)fprintf(jtr, "eM=%d, sR=%d, sN=%d, M=", eM, sR, sN);
(void)fflush(jtr);
#endif /* JTRACE */
if (sN) {
/* reuse es's storage to pass an fint scale by pointer (type punning) */
*(fint*)&es = sN;
if (zscale_(m, n, Gr, ldGr, Gi, ldGi, (const fint*)&es) < 0)
return -21;
M = scalbn(M, sN);
}
int sT = sN;
#ifdef JTRACE
(void)fprintf(jtr, "%#.17e\n", M);
(void)fflush(jtr);
#endif /* JTRACE */
/* partition the caller-supplied workspace into 10 length-n/2 double
 * arrays and 2 length-(n/2)/VDL unsigned arrays */
const fnat n_16 = (n_2 >> VDLlg);
double *const a11 = work;
double *const a22 = a11 + n_2;
double *const a21r = a22 + n_2;
double *const a21i = a21r + n_2;
double *const c = a21i + n_2;
double *const cat = c + n_2;
double *const sat = cat + n_2;
double *const l1 = sat + n_2;
double *const l2 = l1 + n_2;
double *const w = l2 + n_2;
unsigned *const p = iwork;
unsigned *const pc = p + n_16;
if (*swp) {
/* l1[j] == 1.0 marks column j as changed since its last norm update */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(l1,n)
#endif /* _OPENMP */
for (fnat i = 0u; i < *n; ++i)
l1[i] = 1.0;
}
// see LAPACK's ZGESVJ
const double tol = sqrt((double)(*m)) * scalbn(DBL_EPSILON, -1);
unsigned sw = 0u;
#ifdef JTRACE
unsigned rd[2u] = { 0u, 0u };
const uint64_t hz = tsc_get_freq_hz_(rd);
long double Tn = 0.0L, Tp = 0.0L, Ta = 0.0L, Te = 0.0L, Tr = 0.0L;
uint64_t T = UINT64_C(0);
#endif /* JTRACE */
/* main sweep loop; swt counts transformations in the current sweep */
while (sw < *swp) {
size_t swt = 0u;
for (unsigned st = 0u; st < *stp; ++st) {
// rescale according to M if necessary and update M
dbl2ef(M, &es, &fs);
eM = (int)es;
sR = DBL_MAX_ROT_EXP - eM - 1;
sN = DBL_MAX_NRM_EXP - eM - 1;
if (sR < 0) {
#ifdef JTRACE
(void)fprintf(jtr, "sweep=%u, step=%u, eM=%d, sR=%d, sN=%d, M=", sw, st, eM, sR, sN);
(void)fflush(jtr);
#endif /* JTRACE */
*(fint*)&es = sN;
if (zscale_(m, n, Gr, ldGr, Gi, ldGi, (const fint*)&es) < 0)
return -22;
M = scalbn(M, sN);
sT += sN;
/* all columns rescaled => all norms stale */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(l1,n)
#endif /* _OPENMP */
for (fnat i = 0u; i < *n; ++i)
l1[i] = 1.0;
#ifdef JTRACE
(void)fprintf(jtr, "%#.17e\n", M);
(void)fflush(jtr);
#endif /* JTRACE */
}
// compute the norms, overflow-aware
const unsigned *const r = js + st * (size_t)(*n);
double nM = -0.0;
bool overflow = false;
do {
#ifdef JTRACE
T = rdtsc_beg(rd);
#endif /* JTRACE */
nM = 0.0;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r,m,Gr,ldGr,Gi,ldGi,eS,fS,a11,a22,cat,sat,l1) reduction(max:nM)
#endif /* _OPENMP */
for (fnat pq = 0u; pq < *n; pq += 2u) {
const fnat _pq = (pq >> 1u);
if (!(nM <= DBL_MAX)) {
/* overflow already observed by this thread: poison and skip */
a11[_pq] = NAN;
a22[_pq] = NAN;
continue;
}
const fnat pq_ = pq + 1u;
const size_t _p = r[pq];
const size_t _q = r[pq_];
if (l1[_p] == 1.0) {
double *const Grp = Gr + _p * (*ldGr);
double *const Gip = Gi + _p * (*ldGi);
nM = fmax(nM, fmin((a11[_pq] = znorm2_(m, Grp, Gip, (eS + _p), (fS + _p), (cat + _pq), (sat + _pq))), HUGE_VAL));
if (!(nM <= DBL_MAX)) {
a22[_pq] = NAN;
continue;
}
}
if (l1[_q] == 1.0) {
double *const Grq = Gr + _q * (*ldGr);
double *const Giq = Gi + _q * (*ldGi);
nM = fmax(nM, fmin((a22[_pq] = znorm2_(m, Grq, Giq, (eS + _q), (fS + _q), (cat + _pq), (sat + _pq))), HUGE_VAL));
}
}
#ifdef JTRACE
Tn += tsc_lap(hz, T, rdtsc_end(rd));
#endif /* JTRACE */
/* NOTE: assignment inside the condition is intentional — rescale and
 * retry the norm computation while any norm overflowed */
if (overflow = !(nM <= DBL_MAX)) {
#ifdef JTRACE
(void)fprintf(jtr, "sweep=%u, step=%u, M=", sw, st);
(void)fflush(jtr);
#endif /* JTRACE */
*(fint*)&es = sN;
if (zscale_(m, n, Gr, ldGr, Gi, ldGi, (const fint*)&es) < 0)
return -23;
M = scalbn(M, sN);
sT += sN;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(l1,n)
#endif /* _OPENMP */
for (fnat i = 0u; i < *n; ++i)
l1[i] = 1.0;
#ifdef JTRACE
(void)fprintf(jtr, "%#.17e\n", M);
(void)fflush(jtr);
#endif /* JTRACE */
}
} while (overflow);
// scaled dot-products
#ifdef JTRACE
T = rdtsc_beg(rd);
#endif /* JTRACE */
/* nM doubles as an error flag here: driven negative on non-finite
 * dot products, and returned as the error code below */
nM = 0.0;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r,m,Gr,ldGr,Gi,ldGi,eS,fS,l1,l2) reduction(min:nM)
#endif /* _OPENMP */
for (fnat pq = 0u; pq < *n; pq += 2u) {
const fnat _pq = (pq >> 1u);
if (!(nM >= 0.0)) {
l1[_pq] = NAN;
l2[_pq] = NAN;
continue;
}
const fnat pq_ = pq + 1u;
const size_t _p = r[pq];
const size_t _q = r[pq_];
// pack the norms
const double e2[2u] = { eS[_q], eS[_p] };
const double f2[2u] = { fS[_q], fS[_p] };
double *const Grp = Gr + _p * (*ldGr);
double *const Gip = Gi + _p * (*ldGi);
double *const Grq = Gr + _q * (*ldGr);
double *const Giq = Gi + _q * (*ldGi);
const double complex z = zdpscl_(m, Grq, Giq, Grp, Gip, e2, f2);
l1[_pq] = creal(z);
l2[_pq] = cimag(z);
if (!(isfinite(l2[_pq])))
nM = fmin(nM, -25.0);
if (!(isfinite(l1[_pq])))
nM = fmin(nM, -24.0);
}
#ifdef JTRACE
Tp += tsc_lap(hz, T, rdtsc_end(rd));
#endif /* JTRACE */
if (!(nM >= 0.0)) {
#ifdef JTRACE
(void)fprintf(jtr, "sweep=%u, step=%u\n", sw, st);
(void)fflush(jtr);
#endif /* JTRACE */
return (fint)nM;
}
// repack data
#ifdef JTRACE
T = rdtsc_beg(rd);
#endif /* JTRACE */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n,r,eS,fS,c,cat,sat,w)
#endif /* _OPENMP */
for (fnat pq = 0u; pq < *n; pq += 2u) {
const fnat pq_ = pq + 1u;
const fnat _pq = (pq >> 1u);
const size_t _p = r[pq];
const size_t _q = r[pq_];
c[_pq] = eS[_p];
w[_pq] = eS[_q];
cat[_pq] = fS[_p];
sat[_pq] = fS[_q];
}
/* stt counts pairs above the convergence tolerance in this step */
fnat stt = 0u;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(n_2,a11,a22,a21r,a21i,c,cat,sat,l1,l2,w,p,pc,tol) reduction(+:stt)
#endif /* _OPENMP */
for (fnat i = 0u; i < n_2; i += VDL) {
const fnat j = (i >> VDLlg);
// convergence check
register VD _a21r = _mm512_load_pd(l1 + i);
register VD _a21i = _mm512_load_pd(l2 + i);
register const VD _zero = _mm512_set1_pd(-0.0);
register const VD zero = _mm512_setzero_pd();
register const VD one = _mm512_set1_pd(1.0);
register const VD _tol = _mm512_set1_pd(tol);
register VD _a21_ /* = _mm512_hypot_pd(_a21r, _a21i) */;
VDHYPOT(_a21_, _a21r, _a21i);
/* pc[j]: lane mask of pairs needing rotation; p[j]: their count */
pc[j] = MD2U(_mm512_cmple_pd_mask(_tol, _a21_));
if (p[j] = _mm_popcnt_u32(pc[j])) {
stt += p[j];
// Grammian pre-scaling into the double precision range
register const VD f1 = _mm512_load_pd(cat + i);
register const VD f2 = _mm512_load_pd(sat + i);
register const VD e1 = _mm512_load_pd(c + i);
register const VD e2 = _mm512_load_pd(w + i);
register VD f12 = _mm512_div_pd(f1, f2);
register VD e12 = _mm512_sub_pd(e1, e2);
register VD f21 = _mm512_div_pd(f2, f1);
register VD e21 = _mm512_sub_pd(e2, e1);
e12 = _mm512_add_pd(e12, _mm512_getexp_pd(f12));
f12 = VDMANT(f12);
e21 = _mm512_add_pd(e21, _mm512_getexp_pd(f21));
f21 = VDMANT(f21);
register const MD c12 = VDEFLE(e12,e21,f12,f21);
register const VD mxe = _mm512_set1_pd(DBL_MAX_FIN_EXP);
register const VD E = _mm512_mask_blend_pd(c12, e12, e21);
register const VD d = _mm512_min_pd(_mm512_sub_pd(mxe, E), zero);
e12 = _mm512_add_pd(e12, d);
e21 = _mm512_add_pd(e21, d);
register const VD _a11 = _mm512_scalef_pd(f12, e12);
register const VD _a22 = _mm512_scalef_pd(f21, e21);
_a21r = _mm512_scalef_pd(_a21r, d);
_a21i = _mm512_scalef_pd(_a21i, d);
_mm512_store_pd((a11 + i), _a11);
_mm512_store_pd((a22 + i), _a22);
_mm512_store_pd((a21r + i), _a21r);
_mm512_store_pd((a21i + i), _a21i);
}
}
swt += stt;
#ifdef JTRACE
Ta += tsc_lap(hz, T, rdtsc_end(rd));
T = rdtsc_beg(rd);
#endif /* JTRACE */
/* 2x2 eigendecompositions of the pre-scaled Grammians; a negative
 * count requests the secant formulation */
const fint _n_2 =
#ifdef USE_SECANTS
-(fint)n_2
#else /* !USE_SECANTS */
(fint)n_2
#endif /* ?USE_SECANTS */
;
if (zbjac2i(&_n_2, a11, a22, a21r, a21i, c, cat, sat, l1, l2, p) < 0)
return -26;
#ifdef JTRACE
Te += tsc_lap(hz, T, rdtsc_end(rd));
T = rdtsc_beg(rd);
#endif /* JTRACE */
/* encode per-pair actions into a21r: +/-2 => rotate (sign = swap),
 * +/-1 => no rotation (sign = column swap only); column indices are
 * stashed bitwise into a11/a22 for the rotation loop below */
fnat np = 0u; // number of swaps
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(a11,a22,a21r,eS,fS,p,pc,r,n_2) reduction(+:np)
#endif /* _OPENMP */
for (fnat i = 0u; i < n_2; i += VDL) {
const fnat j = (i >> VDLlg);
unsigned trans = (pc[j] & 0xFFu);
unsigned perm = (p[j] & 0xFFu);
for (fnat k = 0u; k < VDL; ++k) {
const fnat l = (i + k);
const fnat pq = (l << 1u);
const uint64_t _p = r[pq];
const uint64_t _q = r[pq + 1u];
*(uint64_t*)(a11 + l) = _p;
*(uint64_t*)(a22 + l) = _q;
if (trans & 1u) {
if (perm & 1u) {
a21r[l] = -2.0;
++np;
}
else // no swap
a21r[l] = 2.0;
}
else if (efcmp((eS + _p), (fS + _p), (eS + _q), (fS + _q)) < 0) {
/* no rotation but the norms are out of order: swap (eS,fS) here,
 * the columns themselves in the next loop */
a21r[l] = eS[_p];
eS[_p] = eS[_q];
eS[_q] = a21r[l];
a21r[l] = fS[_p];
fS[_p] = fS[_q];
fS[_q] = a21r[l];
a21r[l] = -1.0;
++np;
}
else // no swap
a21r[l] = 1.0;
trans >>= 1u;
perm >>= 1u;
}
}
/* apply the rotations/swaps to G and V; a21i collects per-pair status */
nM = 0.0;
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(m,n,Gr,ldGr,Gi,ldGi,Vr,ldVr,Vi,ldVi,a11,a22,a21r,a21i,c,cat,sat,l1,n_2) reduction(max:nM)
#endif /* _OPENMP */
for (fnat i = 0u; i < n_2; ++i) {
const size_t _p = *(const uint64_t*)(a11 + i);
const size_t _q = *(const uint64_t*)(a22 + i);
l1[_q] = l1[_p] = 0.0;
if (!(nM <= DBL_MAX)) {
a21i[i] = NAN;
continue;
}
double _c, _cat, _sat;
fint _m, _n;
if (a21r[i] == -2.0) {
/* rotation with built-in swap: negative dimensions signal it */
_m = -(fint)*m;
_n = -(fint)*n;
_c = c[i];
_cat = cat[i];
_sat = sat[i];
}
else if (a21r[i] == -1.0) {
/* explicit column swap of G and V, no rotation */
double *const Gr_p = Gr + _p * (*ldGr);
double *const Gr_q = Gr + _q * (*ldGr);
if (_m = dswp_(m, Gr_p, Gr_q)) {
a21i[i] = (double)_m;
nM = HUGE_VAL;
continue;
}
double *const Gi_p = Gi + _p * (*ldGi);
double *const Gi_q = Gi + _q * (*ldGi);
if (_m = dswp_(m, Gi_p, Gi_q)) {
a21i[i] = (double)_m;
nM = HUGE_VAL;
continue;
}
double *const Vr_p = Vr + _p * (*ldVr);
double *const Vr_q = Vr + _q * (*ldVr);
if (_n = dswp_(n, Vr_p, Vr_q)) {
a21i[i] = (double)_n;
nM = HUGE_VAL;
continue;
}
double *const Vi_p = Vi + _p * (*ldVi);
double *const Vi_q = Vi + _q * (*ldVi);
if (_n = dswp_(n, Vi_p, Vi_q)) {
a21i[i] = (double)_n;
nM = HUGE_VAL;
continue;
}
nM = fmax(nM, (a21i[i] = 0.0));
continue;
}
else if (a21r[i] == 1.0) {
nM = fmax(nM, (a21i[i] = 0.0));
continue;
}
else if (a21r[i] == 2.0) {
_m = (fint)*m;
_n = (fint)*n;
_c = c[i];
_cat = cat[i];
_sat = sat[i];
}
else { // should never happen
a21i[i] = NAN;
nM = HUGE_VAL;
continue;
}
a21i[i] = zjrot_(&_m, (Gr + _p * (*ldGr)), (Gi + _p * (*ldGi)), (Gr + _q * (*ldGr)), (Gi + _q * (*ldGi)), &_c, &_cat, &_sat);
if (!(a21i[i] >= 0.0) || !(a21i[i] <= DBL_MAX)) {
nM = a21i[i] = HUGE_VAL;
continue;
}
else // no overflow
nM = fmax(nM, a21i[i]);
if (_m = zjrotf_(&_n, (Vr + _p * (*ldVr)), (Vi + _p * (*ldVi)), (Vr + _q * (*ldVr)), (Vi + _q * (*ldVi)), &_c, &_cat, &_sat)) {
a21i[i] = (double)_m;
nM = HUGE_VAL;
continue;
}
/* rotated columns get their norms recomputed next step */
l1[_q] = l1[_p] = 1.0;
}
M = fmax(M, nM);
#ifdef JTRACE
Tr += tsc_lap(hz, T, rdtsc_end(rd));
#endif /* JTRACE */
if (!(M <= DBL_MAX)) {
#ifdef JTRACE
(void)fprintf(jtr, "sweep=%u, step=%u\n", sw, st);
(void)fflush(jtr);
#endif /* JTRACE */
return -27;
}
}
/* converged: no pair exceeded the tolerance in this sweep */
if (!swt)
break;
++sw;
}
/* on convergence, normalize the columns of G (left singular vectors)
 * and undo the accumulated scaling sT on the singular-value exponents */
if (sw < *swp) {
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(m,n,Gr,ldGr,Gi,ldGi,eS,fS,sT)
#endif /* _OPENMP */
for (fnat j = 0u; j < *n; ++j) {
double *const Gr_j = Gr + j * (size_t)(*ldGr);
double *const Gi_j = Gi + j * (size_t)(*ldGi);
register const VD _f = _mm512_set1_pd(fS[j]);
register const VD _s = _mm512_set1_pd(-(eS[j]));
for (fnat i = 0u; i < *m; i += VDL) {
double *const Gr_ij = Gr_j + i;
double *const Gi_ij = Gi_j + i;
_mm512_store_pd(Gr_ij, _mm512_scalef_pd(_mm512_div_pd(_mm512_load_pd(Gr_ij), _f), _s));
_mm512_store_pd(Gi_ij, _mm512_scalef_pd(_mm512_div_pd(_mm512_load_pd(Gi_ij), _f), _s));
}
eS[j] -= sT;
}
}
#ifdef JTRACE
(void)fprintf(jtr, "sT=%d, M=%#.17e\n", sT, M);
(void)fprintf(jtr, "Tn=%15.9Lf, Tp=%15.9Lf, Ta=%15.9Lf, Te=%15.9Lf, Tr=%15.9Lf\n", Tn, Tp, Ta, Te, Tr);
(void)fclose(jtr);
#endif /* JTRACE */
return (fint)sw;
}
|
SplineC2ROMP.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2019 QMCPACK developers.
//
// File developed by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//////////////////////////////////////////////////////////////////////////////////////
/** @file SplineC2ROMP.h
*
* Adoptor classes to handle complex-to-(real,complex) with arbitrary precision
*/
#ifndef QMCPLUSPLUS_EINSPLINE_C2R_OMP_H
#define QMCPLUSPLUS_EINSPLINE_C2R_OMP_H
#include <memory>
#include <OhmmsSoA/Container.h>
#include <spline2/MultiBspline.hpp>
#include <spline2/MultiBsplineEval.hpp>
#include <spline2/MultiBsplineEval_OMPoffload.hpp>
#include "QMCWaveFunctions/BsplineFactory/SplineAdoptorBase.h"
#include "OpenMP/OMPallocator.hpp"
#include "Platforms/PinnedAllocator.h"
#include "QMCWaveFunctions/BsplineFactory/contraction_helper.hpp"
#include "Utilities/FairDivide.h"
namespace qmcplusplus
{
namespace C2R
{
/** assign_v: apply the e^{-ik.r} phase to spline values and write real SPOs
 *
 * Reads 2*orb_size interleaved (re,im) spline values from
 * offload_scratch_ptr and writes phased real orbitals into
 * results_scratch_ptr for orbitals [first, last).  The first
 * nComplexBands orbitals emit two real components each (real and imag),
 * the rest one, matching the packed C2R layout via psiIndex.
 *
 * Shared between host (omp simd) and device (omp for inside an enclosing
 * offload parallel region) builds — keep the loop shape as-is.
 *
 * NOTE(review): `j < last` and `j < nComplexBands` compare size_t against
 * int — relies on both being non-negative; confirm callers never pass a
 * negative `first`/`last`.
 */
template<typename ST, typename TT>
inline void assign_v(ST x,
ST y,
ST z,
TT* restrict results_scratch_ptr,
size_t orb_size,
const ST* restrict offload_scratch_ptr,
const ST* restrict myKcart_ptr,
size_t myKcart_padded_size,
size_t first_spo,
int nComplexBands,
int first,
int last)
{
// protect last
if (last > orb_size)
last = orb_size;
// k-point components stored SoA with padded stride
const ST* restrict kx = myKcart_ptr;
const ST* restrict ky = myKcart_ptr + myKcart_padded_size;
const ST* restrict kz = myKcart_ptr + myKcart_padded_size * 2;
const ST* restrict val = offload_scratch_ptr;
TT* restrict psi_s = results_scratch_ptr;
#ifdef ENABLE_OFFLOAD
#pragma omp for
#else
#pragma omp simd
#endif
for (size_t j = first; j < last; j++)
{
// complex orbital j stored as (val[2j], val[2j+1])
const size_t jr = j << 1;
const size_t ji = jr + 1;
//phase
ST s, c, p = -(x * kx[j] + y * ky[j] + z * kz[j]);
sincos(p, &s, &c);
const ST val_r = val[jr];
const ST val_i = val[ji];
// complex bands occupy two output slots, real bands one
const size_t psiIndex = first_spo + j + (j < nComplexBands ? j : nComplexBands);
psi_s[psiIndex] = val_r * c - val_i * s;
if (j < nComplexBands)
psi_s[psiIndex + 1] = val_i * c + val_r * s;
}
}
/** assign_vgl: phase spline value/gradient/hessian data into real SPOs
 *
 * For orbitals [first, last): converts spline gradients from crystal to
 * Cartesian frame with the cell matrix G, forms the Laplacian from the
 * symmetrized Hessian trace plus the -k^2 and cross terms, applies the
 * e^{-ik.r} phase, and writes psi / dpsi (3 components per orbital) /
 * d2psi into results_scratch_ptr.  Layout mirrors assign_v: the first
 * nComplexBands orbitals write both real and imaginary components.
 *
 * offload_scratch_ptr holds 10 planes of stride spline_padded_size:
 * value, 3 gradients, 6 Hessian components (see the pointer setup below).
 */
template<typename ST, typename TT>
inline void assign_vgl(ST x,
ST y,
ST z,
TT* restrict results_scratch_ptr,
const ST* mKK_ptr,
size_t orb_size,
const ST* restrict offload_scratch_ptr,
size_t spline_padded_size,
const ST symGGt[6],
const ST G[9],
const ST* myKcart_ptr,
size_t myKcart_padded_size,
size_t first_spo,
int nComplexBands,
int first,
int last)
{
// protect last
if (last > orb_size)
last = orb_size;
constexpr ST two(2);
// cell matrix G unpacked row-wise for the crystal->Cartesian transform
const ST &g00 = G[0], &g01 = G[1], &g02 = G[2],
&g10 = G[3], &g11 = G[4], &g12 = G[5],
&g20 = G[6], &g21 = G[7], &g22 = G[8];
const ST* restrict k0 = myKcart_ptr;
const ST* restrict k1 = myKcart_ptr + myKcart_padded_size;
const ST* restrict k2 = myKcart_ptr + myKcart_padded_size * 2;
// scratch planes: value, gradient (3), upper-triangular Hessian (6)
const ST* restrict val = offload_scratch_ptr;
const ST* restrict g0 = offload_scratch_ptr + spline_padded_size;
const ST* restrict g1 = offload_scratch_ptr + spline_padded_size * 2;
const ST* restrict g2 = offload_scratch_ptr + spline_padded_size * 3;
const ST* restrict h00 = offload_scratch_ptr + spline_padded_size * 4;
const ST* restrict h01 = offload_scratch_ptr + spline_padded_size * 5;
const ST* restrict h02 = offload_scratch_ptr + spline_padded_size * 6;
const ST* restrict h11 = offload_scratch_ptr + spline_padded_size * 7;
const ST* restrict h12 = offload_scratch_ptr + spline_padded_size * 8;
const ST* restrict h22 = offload_scratch_ptr + spline_padded_size * 9;
// output layout: psi | dpsi (3 per orbital) | d2psi
TT* restrict psi = results_scratch_ptr;
TT* restrict dpsi = results_scratch_ptr + orb_size;
TT* restrict d2psi = results_scratch_ptr + orb_size * 4;
#ifdef ENABLE_OFFLOAD
#pragma omp for
#else
#pragma omp simd
#endif
for (size_t j = first; j < last; j++)
{
const size_t jr = j << 1;
const size_t ji = jr + 1;
const ST kX = k0[j];
const ST kY = k1[j];
const ST kZ = k2[j];
const ST val_r = val[jr];
const ST val_i = val[ji];
//phase
ST s, c, p = -(x * kX + y * kY + z * kZ);
sincos(p, &s, &c);
//dot(PrimLattice.G,myG[j])
const ST dX_r = g00 * g0[jr] + g01 * g1[jr] + g02 * g2[jr];
const ST dY_r = g10 * g0[jr] + g11 * g1[jr] + g12 * g2[jr];
const ST dZ_r = g20 * g0[jr] + g21 * g1[jr] + g22 * g2[jr];
const ST dX_i = g00 * g0[ji] + g01 * g1[ji] + g02 * g2[ji];
const ST dY_i = g10 * g0[ji] + g11 * g1[ji] + g12 * g2[ji];
const ST dZ_i = g20 * g0[ji] + g21 * g1[ji] + g22 * g2[ji];
// \f$\nabla \psi_r + {\bf k}\psi_i\f$
const ST gX_r = dX_r + val_i * kX;
const ST gY_r = dY_r + val_i * kY;
const ST gZ_r = dZ_r + val_i * kZ;
const ST gX_i = dX_i - val_r * kX;
const ST gY_i = dY_i - val_r * kY;
const ST gZ_i = dZ_i - val_r * kZ;
// Cartesian Laplacian of the periodic part via the symmetrized trace
const ST lcart_r = SymTrace(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], symGGt);
const ST lcart_i = SymTrace(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], symGGt);
// add -k^2 (mKK_ptr) and the 2 k.grad cross terms
const ST lap_r = lcart_r + mKK_ptr[j] * val_r + two * (kX * dX_i + kY * dY_i + kZ * dZ_i);
const ST lap_i = lcart_i + mKK_ptr[j] * val_i - two * (kX * dX_r + kY * dY_r + kZ * dZ_r);
const size_t psiIndex = first_spo + j + (j < nComplexBands ? j : nComplexBands);
//this will be fixed later
psi[psiIndex] = c * val_r - s * val_i;
d2psi[psiIndex] = c * lap_r - s * lap_i;
//this will go way with Determinant
dpsi[psiIndex * 3] = c * gX_r - s * gX_i;
dpsi[psiIndex * 3 + 1] = c * gY_r - s * gY_i;
dpsi[psiIndex * 3 + 2] = c * gZ_r - s * gZ_i;
if (j < nComplexBands)
{
// imaginary companion of a complex band
psi[psiIndex + 1] = c * val_i + s * val_r;
d2psi[psiIndex + 1] = c * lap_i + s * lap_r;
dpsi[psiIndex * 3 + 3] = c * gX_i + s * gX_r;
dpsi[psiIndex * 3 + 4] = c * gY_i + s * gY_r;
dpsi[psiIndex * 3 + 5] = c * gZ_i + s * gZ_r;
}
}
}
} // namespace C2R
/** adoptor class to match std::complex<ST> spline with TT real SPOs
* @tparam ST precision of spline
* @tparam TT precision of SPOs
* @tparam D dimension
*
* Requires temporage storage and multiplication of phase vectors
* Internal storage use double sized arrays of ST type, aligned and padded.
*/
template<typename ST, typename TT>
struct SplineC2ROMP : public SplineAdoptorBase<ST, 3>
{
static const int ALIGN = QMC_CLINE;
template<typename DT>
using OffloadAllocator = OMPallocator<DT, aligned_allocator<DT>>;
template<typename DT>
using OffloadPinnedAllocator = OMPallocator<DT, PinnedAlignedAllocator<DT>>;
static const int D = 3;
using Base = SplineAdoptorBase<ST, 3>;
using SplineType = typename bspline_traits<ST, 3>::SplineType;
using BCType = typename bspline_traits<ST, 3>::BCType;
using DataType = ST;
using PointType = typename Base::PointType;
using SingleSplineType = typename Base::SingleSplineType;
using vContainer_type = Vector<ST, aligned_allocator<ST>>;
using gContainer_type = VectorSoaContainer<ST, 3>;
using hContainer_type = VectorSoaContainer<ST, 6>;
using ghContainer_type = VectorSoaContainer<ST, 10>;
using Base::first_spo;
using Base::GGt;
using Base::kPoints;
using Base::last_spo;
using Base::MakeTwoCopies;
using Base::offset;
using Base::PrimLattice;
///number of complex bands
int nComplexBands;
///multi bspline set
std::shared_ptr<MultiBspline<ST, ALIGN, OffloadAllocator<ST>>> SplineInst;
vContainer_type mKK;
VectorSoaContainer<ST, 3> myKcart;
vContainer_type myV;
vContainer_type myL;
gContainer_type myG;
hContainer_type myH;
ghContainer_type mygH;
///thread private ratios for reduction when using nested threading, numVP x numThread
Matrix<TT, OffloadPinnedAllocator<TT>> ratios_private;
///offload scratch space, dynamically resized to the maximal need
Vector<ST, OffloadPinnedAllocator<ST>> offload_scratch;
///result scratch space, dynamically resized to the maximal need
Vector<TT, OffloadPinnedAllocator<TT>> results_scratch;
///psiinv and position scratch space, used to avoid allocation on the fly and faster transfer
Vector<TT, OffloadPinnedAllocator<TT>> psiinv_pos_copy;
///position scratch space, used to avoid allocation on the fly and faster transfer
Vector<ST, OffloadPinnedAllocator<ST>> mw_pos_copy;
///the following pointers are used for keep and access the data on device
///cloned objects copy the pointer by value without the need of mapping to the device
///Thus master_PrimLattice_G_ptr is different from PrimLattice.G.data() in cloned objects
///mKK data pointer
const ST* master_mKK_ptr;
///myKcart data pointer
const ST* master_myKcart_ptr;
///PrimLattice.G data pointer
const ST* master_PrimLattice_G_ptr;
///GGt data pointer
const ST* master_GGt_ptr;
/** default constructor
 *
 * Flags this adoptor as complex-valued and SoA-ready and registers its
 * name/keyword strings; nComplexBands stays 0 until resize_kpoints().
 */
SplineC2ROMP() : Base(), nComplexBands(0)
{
this->AdoptorName = "SplineC2ROMPAdoptor";
this->KeyWord = "SplineC2ROMP";
this->is_soa_ready = true;
this->is_complex = true;
}
/** destructor
 *
 * Device mappings (spline table, mKK, myKcart, PrimLattice.G, GGt) are
 * shared among clones via copied raw pointers, so only the last owner of
 * SplineInst releases them from the device.
 */
~SplineC2ROMP()
{
if (SplineInst.use_count() == 1)
{
// clean up mapping by the last owner
const auto* MultiSpline = SplineInst->getSplinePtr();
PRAGMA_OFFLOAD("omp target exit data map(delete:MultiSpline[0:1])")
PRAGMA_OFFLOAD("omp target exit data map(delete:master_mKK_ptr[0:mKK.size()])")
PRAGMA_OFFLOAD("omp target exit data map(delete:master_myKcart_ptr[0:myKcart.capacity()*3])")
PRAGMA_OFFLOAD("omp target exit data map(delete:master_PrimLattice_G_ptr[0:9])")
PRAGMA_OFFLOAD("omp target exit data map(delete:master_GGt_ptr[0:9])")
}
}
/** resize per-walker evaluation storage for n orbitals
 * @param n number of complex orbitals
 * @param nvals unused here — presumably kept for interface parity with
 *        other adoptors; confirm against SplineAdoptorBase users
 *
 * Each complex orbital needs a (re,im) pair, hence the aligned 2*n size.
 */
inline void resizeStorage(size_t n, size_t nvals)
{
Base::init_base(n);
size_t npad = getAlignedSize<ST>(2 * n);
myV.resize(npad);
myG.resize(npad);
myL.resize(npad);
myH.resize(npad);
mygH.resize(npad);
}
/// broadcast the spline coefficient table to all ranks in chunks
void bcast_tables(Communicate* comm) { chunked_bcast(comm, SplineInst->getSplinePtr()); }
/** gather spline table pieces from all ranks after parallel reading
 *
 * Bands are fair-divided over ranks; offsets are doubled because every
 * complex band occupies two real splines (see set_spline).
 */
void gather_tables(Communicate* comm)
{
if (comm->size() == 1)
return;
const int Nbands = kPoints.size();
const int Nbandgroups = comm->size();
offset.resize(Nbandgroups + 1, 0);
FairDivideLow(Nbands, Nbandgroups, offset);
// each complex band is stored as two real splines
for (size_t ib = 0; ib < offset.size(); ib++)
offset[ib] = offset[ib] * 2;
gatherv(comm, SplineInst->getSplinePtr(), SplineInst->getSplinePtr()->z_stride, offset);
}
/** allocate the multi-bspline coefficient table
 * @param xyz_g grid specification per dimension
 * @param xyz_bc boundary conditions per dimension
 *
 * resize_kpoints() runs first so myV.size() reflects the packed band count.
 */
template<typename GT, typename BCT>
void create_spline(GT& xyz_g, BCT& xyz_bc)
{
resize_kpoints();
SplineInst = std::make_shared<MultiBspline<ST, ALIGN, OffloadAllocator<ST>>>();
SplineInst->create(xyz_g, xyz_bc, myV.size());
app_log() << "MEMORY " << SplineInst->sizeInByte() / (1 << 20) << " MB allocated "
<< "for the coefficients in 3D spline orbital representation" << std::endl;
}
/// this routine can not be called from threaded region
/** map spline coefficients and static per-twist data to the device
 *
 * Performs a deep copy of the spline struct: the struct itself is mapped,
 * then its device-side coefs pointer is re-attached to the mapped
 * coefficient array.  The master_* pointers record the host addresses of
 * the mapped arrays so clones can reuse the mappings by value copy.
 */
void finalizeConstruction()
{
// map the SplineInst->getSplinePtr() structure to GPU
auto* MultiSpline = SplineInst->getSplinePtr();
PRAGMA_OFFLOAD("omp target enter data map(alloc:MultiSpline[0:1])")
auto* restrict coefs = MultiSpline->coefs;
// attach pointers on the device to achieve deep copy
PRAGMA_OFFLOAD("omp target map(always, to: MultiSpline[0:1], coefs[0:MultiSpline->coefs_size])")
{
MultiSpline->coefs = coefs;
}
// transfer static data to GPU
master_mKK_ptr = mKK.data();
PRAGMA_OFFLOAD("omp target enter data map(alloc:master_mKK_ptr[0:mKK.size()])")
PRAGMA_OFFLOAD("omp target update to(master_mKK_ptr[0:mKK.size()])")
master_myKcart_ptr = myKcart.data();
PRAGMA_OFFLOAD("omp target enter data map(alloc:master_myKcart_ptr[0:myKcart.capacity()*3])")
PRAGMA_OFFLOAD("omp target update to(master_myKcart_ptr[0:myKcart.capacity()*3])")
master_PrimLattice_G_ptr = PrimLattice.G.data();
PRAGMA_OFFLOAD("omp target enter data map(alloc:master_PrimLattice_G_ptr[0:9])")
PRAGMA_OFFLOAD("omp target update to(master_PrimLattice_G_ptr[0:9])")
master_GGt_ptr = GGt.data();
PRAGMA_OFFLOAD("omp target enter data map(alloc:master_GGt_ptr[0:9])")
PRAGMA_OFFLOAD("omp target update to(master_GGt_ptr[0:9])")
/* debug pointers
std::cout << "Ye debug mapping" << std::endl;
std::cout << "SplineInst = " << SplineInst << std::endl;
std::cout << "MultiSpline = " << MultiSpline << std::endl;
std::cout << "master_mKK_ptr = " << master_mKK_ptr << std::endl;
std::cout << "master_myKcart_ptr = " << master_myKcart_ptr << std::endl;
std::cout << "master_PrimLattice_G_ptr = " << master_PrimLattice_G_ptr << std::endl;
std::cout << "master_GGt_ptr = " << master_GGt_ptr << std::endl;
std::cout << "this = " << this << std::endl;
*/
}
/// zero out the spline coefficient table
inline void flush_zero() { SplineInst->flush_zero(); }
/** remap kPoints to pack the double copy */
inline void resize_kpoints()
{
#ifndef QMC_CUDA
// GPU CUDA code doesn't allow a change of the ordering
nComplexBands = this->remap_kpoints();
#endif
int nk = kPoints.size();
mKK.resize(nk);
myKcart.resize(nk);
for (size_t i = 0; i < nk; ++i)
{
mKK[i] = -dot(kPoints[i], kPoints[i]);
myKcart(i) = kPoints[i];
}
}
/** copy one band's real/imaginary single splines into the packed table
 *
 * Complex band `ispline` occupies table slots 2*ispline (real part) and
 * 2*ispline+1 (imaginary part).
 * NOTE(review): `twist` and `level` are unused here — presumably kept for
 * interface parity with other adoptors; confirm against the factory.
 */
inline void set_spline(SingleSplineType* spline_r, SingleSplineType* spline_i, int twist, int ispline, int level)
{
SplineInst->copy_spline(spline_r, 2 * ispline);
SplineInst->copy_spline(spline_i, 2 * ispline + 1);
}
/** read the coefficient table from an HDF5 archive
 * @return true on a successful read
 *
 * The dataset name follows the "spline_<MyIndex>" convention.
 */
bool read_splines(hdf_archive& h5f)
{
std::ostringstream tag;
tag << "spline_" << SplineAdoptorBase<ST, D>::MyIndex;
einspline_engine<SplineType> table_view(SplineInst->getSplinePtr());
return h5f.readEntry(table_view, tag.str().c_str());
}
/** write the coefficient table to an HDF5 archive
 * @return true on a successful write
 *
 * The dataset name follows the "spline_<MyIndex>" convention.
 */
bool write_splines(hdf_archive& h5f)
{
std::ostringstream tag;
tag << "spline_" << SplineAdoptorBase<ST, D>::MyIndex;
einspline_engine<SplineType> table_view(SplineInst->getSplinePtr());
return h5f.writeEntry(table_view, tag.str().c_str());
}
/** host-side phase application for a sub-range of orbitals
 * @param r particle position (Cartesian)
 * @param myV interleaved (re,im) spline values
 * @param psi output real SPO vector
 * @param first,last orbital (complex-pair) range handled by this thread
 *
 * Complex bands [0, nComplexBands) write two outputs each; the remaining
 * bands write one.  The two loops cover the complex and real portions of
 * [first, last) respectively.
 */
template<typename VV>
inline void assign_v(const PointType& r, const vContainer_type& myV, VV& psi, int first, int last) const
{
// protect last
last = last > kPoints.size() ? kPoints.size() : last;
const ST x = r[0], y = r[1], z = r[2];
const ST* restrict kx = myKcart.data(0);
const ST* restrict ky = myKcart.data(1);
const ST* restrict kz = myKcart.data(2);
TT* restrict psi_s = psi.data() + first_spo;
// complex bands: two real outputs per band
#pragma omp simd
for (size_t j = first; j < std::min(nComplexBands, last); j++)
{
ST s, c;
const size_t jr = j << 1;
const size_t ji = jr + 1;
const ST val_r = myV[jr];
const ST val_i = myV[ji];
sincos(-(x * kx[j] + y * ky[j] + z * kz[j]), &s, &c);
psi_s[jr] = val_r * c - val_i * s;
psi_s[ji] = val_i * c + val_r * s;
}
// shift past the extra slots used by complex bands
psi_s += nComplexBands;
// real bands: one output per band
#pragma omp simd
for (size_t j = std::max(nComplexBands, first); j < last; j++)
{
ST s, c;
const ST val_r = myV[2 * j];
const ST val_i = myV[2 * j + 1];
sincos(-(x * kx[j] + y * ky[j] + z * kz[j]), &s, &c);
psi_s[j] = val_r * c - val_i * s;
}
}
/** evaluate orbital values at the active position of particle iat
 *
 * NOTE(review): `if (true)` makes the else-branch (offload path) dead
 * code — the host threaded path is currently forced; presumably the
 * offload path is kept for future enabling.  Confirm before removing.
 */
template<typename VV>
inline void evaluate_v(const ParticleSet& P, const int iat, VV& psi)
{
const PointType& r = P.activeR(iat);
// position in the unit cell, wrapped into [0,1)
PointType ru(PrimLattice.toUnit_floor(r));
if (true)
{
// host path: threads split the spline range, then phase their slice
#pragma omp parallel
{
int first, last;
FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last);
spline2::evaluate3d(SplineInst->getSplinePtr(), ru, myV, first, last);
// first/last are (re,im) element counts; halve for orbital pairs
assign_v(r, myV, psi, first / 2, last / 2);
}
}
else
{
// offload path (currently disabled by the if (true) above)
const int ChunkSizePerTeam = 128;
const int NumTeams = (myV.size() + ChunkSizePerTeam - 1) / ChunkSizePerTeam;
const auto padded_size = myV.size();
if (offload_scratch.size() < padded_size)
offload_scratch.resize(padded_size);
// Ye: need to extract sizes and pointers before entering target region
const auto orb_size = psi.size();
const auto* spline_ptr = SplineInst->getSplinePtr();
auto* offload_scratch_ptr = offload_scratch.data();
auto* psi_ptr = psi.data();
const auto x = r[0], y = r[1], z = r[2];
const auto rux = ru[0], ruy = ru[1], ruz = ru[2];
const auto myKcart_padded_size = myKcart.capacity();
auto* myKcart_ptr = master_myKcart_ptr;
const size_t first_spo_local = first_spo;
const int nComplexBands_local = nComplexBands;
PRAGMA_OFFLOAD("omp target teams distribute num_teams(NumTeams) thread_limit(ChunkSizePerTeam) \
map(always, from: psi_ptr[0:orb_size])")
for (int team_id = 0; team_id < NumTeams; team_id++)
{
const int first = ChunkSizePerTeam * team_id;
const int last = (first + ChunkSizePerTeam) > padded_size ? padded_size : first + ChunkSizePerTeam;
int ix, iy, iz;
ST a[4], b[4], c[4];
spline2::computeLocationAndFractional(spline_ptr, rux, ruy, ruz, ix, iy, iz, a, b, c);
PRAGMA_OFFLOAD("omp parallel")
{
spline2offload::evaluate_v_impl_v2(spline_ptr, ix, iy, iz, a, b, c, offload_scratch_ptr + first, first, last);
C2R::assign_v(x, y, z, psi_ptr, orb_size, offload_scratch_ptr, myKcart_ptr, myKcart_padded_size,
first_spo_local, nComplexBands_local, first / 2, last / 2);
}
}
}
}
/** compute determinant ratios for all virtual particle positions on device
 *
 * Stages psiinv and the packed (r, ru) positions into one pinned buffer
 * (one host->device transfer), evaluates and phases the orbitals per
 * virtual particle on the device, reduces psi.psiinv per (particle, team)
 * into ratios_private, then finishes the per-particle reduction on the
 * host.
 */
template<typename VV, typename RT>
inline void evaluateDetRatios(const VirtualParticleSet& VP, VV& psi, const VV& psiinv, std::vector<RT>& ratios)
{
const int nVP = VP.getTotalNum();
// buffer layout: [ psiinv | 6 doubles per VP: r(3), ru(3) ]
if(psiinv_pos_copy.size() < psiinv.size() + nVP * 6)
psiinv_pos_copy.resize(psiinv.size() + nVP * 6);
// stage psiinv to psiinv_pos_copy
std::copy_n(psiinv.data(), psiinv.size(), psiinv_pos_copy.data());
// pack particle positions
auto* restrict pos_scratch = psiinv_pos_copy.data() + psiinv.size();
for (int iat = 0; iat < nVP; ++iat)
{
const PointType& r = VP.activeR(iat);
PointType ru(PrimLattice.toUnit_floor(r));
pos_scratch[iat * 6] = r[0];
pos_scratch[iat * 6 + 1] = r[1];
pos_scratch[iat * 6 + 2] = r[2];
pos_scratch[iat * 6 + 3] = ru[0];
pos_scratch[iat * 6 + 4] = ru[1];
pos_scratch[iat * 6 + 5] = ru[2];
}
const int ChunkSizePerTeam = 128;
const int NumTeams = (myV.size() + ChunkSizePerTeam - 1) / ChunkSizePerTeam;
if (ratios_private.size() < NumTeams * nVP)
ratios_private.resize(nVP, NumTeams);
const auto padded_size = myV.size();
if (offload_scratch.size() < padded_size * nVP)
offload_scratch.resize(padded_size * nVP);
const auto orb_size = psiinv.size();
if (results_scratch.size() < orb_size * nVP)
results_scratch.resize(orb_size * nVP);
// Ye: need to extract sizes and pointers before entering target region
const auto* spline_ptr = SplineInst->getSplinePtr();
auto* offload_scratch_ptr = offload_scratch.data();
auto* results_scratch_ptr = results_scratch.data();
const auto myKcart_padded_size = myKcart.capacity();
auto* myKcart_ptr = master_myKcart_ptr;
auto* psiinv_ptr = psiinv_pos_copy.data();
auto* ratios_private_ptr = ratios_private.data();
const size_t first_spo_local = first_spo;
const int nComplexBands_local = nComplexBands;
PRAGMA_OFFLOAD("omp target teams distribute collapse(2) num_teams(NumTeams*nVP) thread_limit(ChunkSizePerTeam) \
map(always, to: psiinv_ptr[0:psiinv_pos_copy.size()]) \
map(always, from: ratios_private_ptr[0:NumTeams*nVP])")
for (int iat = 0; iat < nVP; iat++)
for (int team_id = 0; team_id < NumTeams; team_id++)
{
// this team's slice of the (re,im) spline range
const int first = ChunkSizePerTeam * team_id;
const int last = (first + ChunkSizePerTeam) > padded_size ? padded_size : first + ChunkSizePerTeam;
// translate the spline slice into output (real SPO) index bounds,
// accounting for complex bands taking two slots
const int first_cplx = first / 2;
const int last_cplx = orb_size < last / 2 ? orb_size : last / 2;
const int first_real = first_cplx + std::min(nComplexBands_local, first_cplx);
const int last_real = last_cplx + std::min(nComplexBands_local, last_cplx);
auto* restrict offload_scratch_iat_ptr = offload_scratch_ptr + padded_size * iat;
auto* restrict psi_iat_ptr = results_scratch_ptr + orb_size * iat;
auto* restrict pos_scratch = psiinv_ptr + orb_size;
int ix, iy, iz;
ST a[4], b[4], c[4];
spline2::computeLocationAndFractional(spline_ptr,
ST(pos_scratch[iat * 6 + 3]),
ST(pos_scratch[iat * 6 + 4]),
ST(pos_scratch[iat * 6 + 5]),
ix, iy, iz, a, b, c);
TT sum(0);
PRAGMA_OFFLOAD("omp parallel")
{
spline2offload::evaluate_v_impl_v2(spline_ptr, ix, iy, iz, a, b, c,
offload_scratch_iat_ptr + first, first,
last);
C2R::assign_v(ST(pos_scratch[iat * 6]), ST(pos_scratch[iat * 6 + 1]), ST(pos_scratch[iat * 6 + 2]),
psi_iat_ptr, orb_size, offload_scratch_iat_ptr, myKcart_ptr, myKcart_padded_size,
first_spo_local, nComplexBands_local, first / 2, last / 2);
// partial dot product over this team's output range
PRAGMA_OFFLOAD("omp for reduction(+:sum)")
for (int i = first_real; i < last_real; i++)
sum += psi_iat_ptr[i] * psiinv_ptr[i];
}
ratios_private_ptr[iat * NumTeams + team_id] = sum;
}
// do the reduction manually
for (int iat = 0; iat < nVP; ++iat)
{
ratios[iat] = TT(0);
for (int tid = 0; tid < NumTeams; tid++)
ratios[iat] += ratios_private[iat][tid];
}
}
  /** assign_vgl_from_l can be used when myL is precomputed and myV,myG,myL in cartesian
   *
   * Applies the e^{-i k.r} phase at position r to the complex-storage spline
   * outputs (myV values, myG gradients, myL laplacians — all assumed already in
   * Cartesian coordinates, so no PrimLattice transform is applied here) and
   * writes real orbitals.  Bands j in [0, nComplexBands) contribute two real
   * orbitals each (real and imaginary parts at psiIndex and psiIndex+1); the
   * remaining bands contribute one real orbital each.
   * \param r     particle position in Cartesian coordinates
   * \param psi   [out] orbital values
   * \param dpsi  [out] orbital gradients
   * \param d2psi [out] orbital laplacians
   */
  template<typename VV, typename GV>
  inline void assign_vgl_from_l(const PointType& r, VV& psi, GV& dpsi, VV& d2psi)
  {
    constexpr ST two(2);
    const ST x = r[0], y = r[1], z = r[2];
    // SoA views of the k-vector components and Cartesian gradient components.
    const ST* restrict k0 = myKcart.data(0);
    ASSUME_ALIGNED(k0);
    const ST* restrict k1 = myKcart.data(1);
    ASSUME_ALIGNED(k1);
    const ST* restrict k2 = myKcart.data(2);
    ASSUME_ALIGNED(k2);
    const ST* restrict g0 = myG.data(0);
    ASSUME_ALIGNED(g0);
    const ST* restrict g1 = myG.data(1);
    ASSUME_ALIGNED(g1);
    const ST* restrict g2 = myG.data(2);
    ASSUME_ALIGNED(g2);
    const size_t N = kPoints.size();
    // Bands producing a (real, imaginary) orbital pair.
#pragma omp simd
    for (size_t j = 0; j < nComplexBands; j++)
    {
      // jr/ji index the real/imaginary components of band j in complex storage.
      const size_t jr = j << 1;
      const size_t ji = jr + 1;
      const ST kX = k0[j];
      const ST kY = k1[j];
      const ST kZ = k2[j];
      const ST val_r = myV[jr];
      const ST val_i = myV[ji];
      //phase e^{-i k.r}: s = sin, c = cos of the phase angle
      ST s, c;
      sincos(-(x * kX + y * kY + z * kZ), &s, &c);
      // gradients already in Cartesian coordinates (no lattice transform needed)
      const ST dX_r = g0[jr];
      const ST dY_r = g1[jr];
      const ST dZ_r = g2[jr];
      const ST dX_i = g0[ji];
      const ST dY_i = g1[ji];
      const ST dZ_i = g2[ji];
      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r = dX_r + val_i * kX;
      const ST gY_r = dY_r + val_i * kY;
      const ST gZ_r = dZ_r + val_i * kZ;
      const ST gX_i = dX_i - val_r * kX;
      const ST gY_i = dY_i - val_r * kY;
      const ST gZ_i = dZ_i - val_r * kZ;
      // Laplacian of the phased orbital: spline laplacian + mKK[j]*value
      // (mKK presumably holds -|k|^2 — TODO confirm) + gradient/phase cross terms.
      const ST lap_r = myL[jr] + mKK[j] * val_r + two * (kX * dX_i + kY * dY_i + kZ * dZ_i);
      const ST lap_i = myL[ji] + mKK[j] * val_i - two * (kX * dX_r + kY * dY_r + kZ * dZ_r);
      //this will be fixed later
      const size_t psiIndex = first_spo + jr;
      psi[psiIndex] = c * val_r - s * val_i;
      psi[psiIndex + 1] = c * val_i + s * val_r;
      d2psi[psiIndex] = c * lap_r - s * lap_i;
      d2psi[psiIndex + 1] = c * lap_i + s * lap_r;
      //this will go away with Determinant
      dpsi[psiIndex][0] = c * gX_r - s * gX_i;
      dpsi[psiIndex][1] = c * gY_r - s * gY_i;
      dpsi[psiIndex][2] = c * gZ_r - s * gZ_i;
      dpsi[psiIndex + 1][0] = c * gX_i + s * gX_r;
      dpsi[psiIndex + 1][1] = c * gY_i + s * gY_r;
      dpsi[psiIndex + 1][2] = c * gZ_i + s * gZ_r;
    }
    // Bands producing a single real orbital each.
#pragma omp simd
    for (size_t j = nComplexBands; j < N; j++)
    {
      const size_t jr = j << 1;
      const size_t ji = jr + 1;
      const ST kX = k0[j];
      const ST kY = k1[j];
      const ST kZ = k2[j];
      const ST val_r = myV[jr];
      const ST val_i = myV[ji];
      //phase e^{-i k.r}: s = sin, c = cos of the phase angle
      ST s, c;
      sincos(-(x * kX + y * kY + z * kZ), &s, &c);
      // gradients already in Cartesian coordinates (no lattice transform needed)
      const ST dX_r = g0[jr];
      const ST dY_r = g1[jr];
      const ST dZ_r = g2[jr];
      const ST dX_i = g0[ji];
      const ST dY_i = g1[ji];
      const ST dZ_i = g2[ji];
      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r = dX_r + val_i * kX;
      const ST gY_r = dY_r + val_i * kY;
      const ST gZ_r = dZ_r + val_i * kZ;
      const ST gX_i = dX_i - val_r * kX;
      const ST gY_i = dY_i - val_r * kY;
      const ST gZ_i = dZ_i - val_r * kZ;
      // Only the real combination of the pair is stored for these bands.
      const size_t psiIndex = first_spo + nComplexBands + j;
      psi[psiIndex] = c * val_r - s * val_i;
      //this will be fixed later
      dpsi[psiIndex][0] = c * gX_r - s * gX_i;
      dpsi[psiIndex][1] = c * gY_r - s * gY_i;
      dpsi[psiIndex][2] = c * gZ_r - s * gZ_i;
      const ST lap_r = myL[jr] + mKK[j] * val_r + two * (kX * dX_i + kY * dY_i + kZ * dZ_i);
      const ST lap_i = myL[ji] + mKK[j] * val_i - two * (kX * dX_r + kY * dY_r + kZ * dZ_r);
      d2psi[psiIndex] = c * lap_r - s * lap_i;
    }
  }
  /** evaluate value, gradient and laplacian of all orbitals at the active
   * position of particle iat, using OpenMP target offload.
   *
   * Scratch layout used by the kernel:
   *  - offload_scratch (padded_size * 10): values at offset 0, gradients
   *    starting at padded_size, hessian block starting at padded_size * 4
   *    (offsets handed to evaluate_vgh_impl_v2 below).
   *  - results_scratch (orb_size * 5): psi in [0, orb_size), dpsi packed
   *    3-per-orbital in [orb_size, orb_size*4), d2psi in [orb_size*4, orb_size*5).
   * \param P     particle set providing the active position
   * \param iat   active particle index
   * \param psi   [out] orbital values
   * \param dpsi  [out] orbital gradients
   * \param d2psi [out] orbital laplacians
   */
  template<typename VV, typename GV>
  inline void evaluate_vgl(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, VV& d2psi)
  {
    const PointType& r = P.activeR(iat);
    PointType ru(PrimLattice.toUnit_floor(r));
    // Work is split into fixed-size chunks; one offload team per chunk.
    const int ChunkSizePerTeam = 128;
    const int NumTeams = (myV.size() + ChunkSizePerTeam - 1) / ChunkSizePerTeam;
    const auto padded_size = myV.size();
    if (offload_scratch.size() < padded_size * 10)
      offload_scratch.resize(padded_size * 10);
    const auto orb_size = psi.size();
    if (results_scratch.size() < orb_size * 5)
      results_scratch.resize(orb_size * 5);
    // Ye: need to extract sizes and pointers before entering target region
    const auto* spline_ptr = SplineInst->getSplinePtr();
    auto* offload_scratch_ptr = offload_scratch.data();
    auto* results_scratch_ptr = results_scratch.data();
    const auto x = r[0], y = r[1], z = r[2];
    const auto rux = ru[0], ruy = ru[1], ruz = ru[2];
    const auto myKcart_padded_size = myKcart.capacity();
    auto* mKK_ptr = master_mKK_ptr;
    auto* GGt_ptr = master_GGt_ptr;
    auto* PrimLattice_G_ptr = master_PrimLattice_G_ptr;
    auto* myKcart_ptr = master_myKcart_ptr;
    const size_t first_spo_local = first_spo;
    const int nComplexBands_local = nComplexBands;
    // Only results_scratch is mapped back; the spline table, offload_scratch
    // and master_* arrays are presumably already resident on the device
    // (mapped elsewhere) — TODO confirm against the class setup code.
    PRAGMA_OFFLOAD("omp target teams distribute num_teams(NumTeams) thread_limit(ChunkSizePerTeam) \
                    map(always, from: results_scratch_ptr[0:orb_size*5])")
    for (int team_id = 0; team_id < NumTeams; team_id++)
    {
      // [first, last) is this team's slice of the padded complex-storage range.
      const int first = ChunkSizePerTeam * team_id;
      const int last  = (first + ChunkSizePerTeam) > padded_size ? padded_size : first + ChunkSizePerTeam;
      int ix, iy, iz;
      ST a[4], b[4], c[4], da[4], db[4], dc[4], d2a[4], d2b[4], d2c[4];
      spline2::computeLocationAndFractional(spline_ptr, rux, ruy, ruz, ix, iy, iz, a, b, c, da, db, dc, d2a, d2b, d2c);
      // Local copies of the lattice matrix and the symmetrized G*G^T
      // (off-diagonal pairs folded together) for the laplacian contraction.
      const ST G[9]      = {PrimLattice_G_ptr[0], PrimLattice_G_ptr[1], PrimLattice_G_ptr[2],
                            PrimLattice_G_ptr[3], PrimLattice_G_ptr[4], PrimLattice_G_ptr[5],
                            PrimLattice_G_ptr[6], PrimLattice_G_ptr[7], PrimLattice_G_ptr[8]};
      const ST symGGt[6] = {GGt_ptr[0], GGt_ptr[1] + GGt_ptr[3], GGt_ptr[2] + GGt_ptr[6],
                            GGt_ptr[4], GGt_ptr[5] + GGt_ptr[7], GGt_ptr[8]};
      PRAGMA_OFFLOAD("omp parallel")
      {
        // Evaluate spline value/gradient/hessian into offload_scratch, then
        // convert this chunk from complex to real storage into results_scratch.
        spline2offload::evaluate_vgh_impl_v2(spline_ptr,
                                             ix, iy, iz,
                                             a, b, c,
                                             da, db, dc,
                                             d2a, d2b, d2c,
                                             offload_scratch_ptr + first,
                                             offload_scratch_ptr + padded_size + first,
                                             offload_scratch_ptr + padded_size * 4 + first, padded_size, first, last);
        C2R::assign_vgl(x, y, z, results_scratch_ptr, mKK_ptr, orb_size, offload_scratch_ptr, padded_size, symGGt,
                        G, myKcart_ptr, myKcart_padded_size, first_spo_local, nComplexBands_local,
                        first / 2, last / 2);
      }
    }
    // Unpack results_scratch (layout described above) into the caller's arrays.
    for (size_t i = 0; i < orb_size; i++)
    {
      psi[i]     = results_scratch[i];
      dpsi[i][0] = results_scratch[orb_size + i * 3];
      dpsi[i][1] = results_scratch[orb_size + i * 3 + 1];
      dpsi[i][2] = results_scratch[orb_size + i * 3 + 2];
      d2psi[i]   = results_scratch[orb_size * 4 + i];
    }
  }
template<typename VV, typename GV>
inline void mw_evaluate_vgl(const std::vector<SplineC2ROMP*>& sa_list,
const std::vector<ParticleSet*>& P_list,
int iat,
const std::vector<VV*>& psi_v_list,
const std::vector<GV*>& dpsi_v_list,
const std::vector<VV*>& d2psi_v_list)
{
const int nwalkers = sa_list.size();
if (mw_pos_copy.size() < nwalkers * 6)
mw_pos_copy.resize(nwalkers * 6);
// pack particle positions
for (int iw = 0; iw < nwalkers; ++iw)
{
const PointType& r = P_list[iw]->activeR(iat);
PointType ru(PrimLattice.toUnit_floor(r));
mw_pos_copy[iw * 6] = r[0];
mw_pos_copy[iw * 6 + 1] = r[1];
mw_pos_copy[iw * 6 + 2] = r[2];
mw_pos_copy[iw * 6 + 3] = ru[0];
mw_pos_copy[iw * 6 + 4] = ru[1];
mw_pos_copy[iw * 6 + 5] = ru[2];
}
const int ChunkSizePerTeam = 128;
const int NumTeams = (myV.size() + ChunkSizePerTeam - 1) / ChunkSizePerTeam;
const auto padded_size = myV.size();
if (offload_scratch.size() < padded_size * nwalkers * 10)
offload_scratch.resize(padded_size * nwalkers * 10);
const auto orb_size = psi_v_list[0]->size();
if (results_scratch.size() < orb_size * nwalkers * 5)
results_scratch.resize(orb_size * nwalkers * 5);
// Ye: need to extract sizes and pointers before entering target region
const auto* spline_ptr = SplineInst->getSplinePtr();
auto* pos_copy_ptr = mw_pos_copy.data();
auto* offload_scratch_ptr = offload_scratch.data();
auto* results_scratch_ptr = results_scratch.data();
const auto myKcart_padded_size = myKcart.capacity();
auto* mKK_ptr = master_mKK_ptr;
auto* GGt_ptr = master_GGt_ptr;
auto* PrimLattice_G_ptr = master_PrimLattice_G_ptr;
auto* myKcart_ptr = master_myKcart_ptr;
const size_t first_spo_local = first_spo;
const int nComplexBands_local = nComplexBands;
PRAGMA_OFFLOAD("omp target teams distribute collapse(2) num_teams(NumTeams*nwalkers) thread_limit(ChunkSizePerTeam) \
map(always, to: pos_copy_ptr[0:nwalkers*6]) \
map(always, from: results_scratch_ptr[0:orb_size*nwalkers*5])")
for (int iw = 0; iw < nwalkers; iw++)
for (int team_id = 0; team_id < NumTeams; team_id++)
{
const int first = ChunkSizePerTeam * team_id;
const int last = (first + ChunkSizePerTeam) > padded_size ? padded_size : first + ChunkSizePerTeam;
const int first_cplx = first / 2;
const int last_cplx = orb_size < last / 2 ? orb_size : last / 2;
const int first_real = first_cplx + std::min(nComplexBands_local, first_cplx);
const int last_real = last_cplx + std::min(nComplexBands_local, last_cplx);
auto* restrict offload_scratch_iw_ptr = offload_scratch_ptr + padded_size * iw * 10;
auto* restrict psi_iw_ptr = results_scratch_ptr + orb_size * iw * 5;
int ix, iy, iz;
ST a[4], b[4], c[4], da[4], db[4], dc[4], d2a[4], d2b[4], d2c[4];
spline2::computeLocationAndFractional(spline_ptr, pos_copy_ptr[iw * 6 + 3], pos_copy_ptr[iw * 6 + 4], pos_copy_ptr[iw * 6 + 5], ix, iy, iz, a, b, c, da, db, dc, d2a, d2b, d2c);
const ST G[9] = {PrimLattice_G_ptr[0], PrimLattice_G_ptr[1], PrimLattice_G_ptr[2],
PrimLattice_G_ptr[3], PrimLattice_G_ptr[4], PrimLattice_G_ptr[5],
PrimLattice_G_ptr[6], PrimLattice_G_ptr[7], PrimLattice_G_ptr[8]};
const ST symGGt[6] = {GGt_ptr[0], GGt_ptr[1] + GGt_ptr[3], GGt_ptr[2] + GGt_ptr[6],
GGt_ptr[4], GGt_ptr[5] + GGt_ptr[7], GGt_ptr[8]};
PRAGMA_OFFLOAD("omp parallel")
{
spline2offload::evaluate_vgh_impl_v2(spline_ptr,
ix, iy, iz,
a, b, c,
da, db, dc,
d2a, d2b, d2c,
offload_scratch_iw_ptr + first,
offload_scratch_iw_ptr + padded_size + first,
offload_scratch_iw_ptr + padded_size * 4 + first, padded_size, first, last);
C2R::assign_vgl(pos_copy_ptr[iw * 6], pos_copy_ptr[iw * 6 + 1], pos_copy_ptr[iw * 6 + 2], psi_iw_ptr, mKK_ptr, orb_size, offload_scratch_iw_ptr, padded_size, symGGt,
G, myKcart_ptr, myKcart_padded_size, first_spo_local, nComplexBands_local,
first / 2, last / 2);
}
}
// do the reduction manually
for (int iw = 0; iw < nwalkers; ++iw)
{
auto* restrict results_iw_ptr = results_scratch_ptr + orb_size * iw * 5;
auto& psi_v(*psi_v_list[iw]);
auto& dpsi_v(*dpsi_v_list[iw]);
auto& d2psi_v(*d2psi_v_list[iw]);
for (size_t i = 0; i < orb_size; i++)
{
psi_v[i] = results_iw_ptr[i];
dpsi_v[i][0] = results_iw_ptr[orb_size + i * 3];
dpsi_v[i][1] = results_iw_ptr[orb_size + i * 3 + 1];
dpsi_v[i][2] = results_iw_ptr[orb_size + i * 3 + 2];
d2psi_v[i] = results_iw_ptr[orb_size * 4 + i];
}
}
}
  /** assign real-storage value/gradient/hessian for bands in [first, last)
   * from the complex-storage spline outputs myV/myG/myH at position r.
   *
   * Gradients and hessians are first transformed from lattice (unit-cell) to
   * Cartesian coordinates via PrimLattice.G, then the e^{-i k.r} phase is
   * applied.  Bands j in [0, nComplexBands) yield a (real, imaginary) orbital
   * pair; the remaining bands yield one real orbital each.
   * \param r              particle position in Cartesian coordinates
   * \param psi            [out] orbital values
   * \param dpsi           [out] orbital gradients
   * \param grad_grad_psi  [out] orbital hessians (row-major 3x3 per orbital)
   * \param first,last     half-open band range handled by this call
   */
  template<typename VV, typename GV, typename GGV>
  void assign_vgh(const PointType& r, VV& psi, GV& dpsi, GGV& grad_grad_psi, int first, int last) const
  {
    // protect last: clamp to the number of bands
    last = last > kPoints.size() ? kPoints.size() : last;
    // lattice transformation matrix, unpacked into scalars
    const ST g00 = PrimLattice.G(0), g01 = PrimLattice.G(1), g02 = PrimLattice.G(2), g10 = PrimLattice.G(3),
             g11 = PrimLattice.G(4), g12 = PrimLattice.G(5), g20 = PrimLattice.G(6), g21 = PrimLattice.G(7),
             g22 = PrimLattice.G(8);
    const ST x = r[0], y = r[1], z = r[2];
    // SoA views: k-vectors, lattice-frame gradients, lattice-frame hessian components
    const ST* restrict k0 = myKcart.data(0);
    const ST* restrict k1 = myKcart.data(1);
    const ST* restrict k2 = myKcart.data(2);
    const ST* restrict g0 = myG.data(0);
    const ST* restrict g1 = myG.data(1);
    const ST* restrict g2 = myG.data(2);
    const ST* restrict h00 = myH.data(0);
    const ST* restrict h01 = myH.data(1);
    const ST* restrict h02 = myH.data(2);
    const ST* restrict h11 = myH.data(3);
    const ST* restrict h12 = myH.data(4);
    const ST* restrict h22 = myH.data(5);
    // Bands producing a (real, imaginary) orbital pair.
#pragma omp simd
    for (size_t j = first; j < std::min(nComplexBands, last); j++)
    {
      // jr/ji index the real/imaginary components of band j in complex storage.
      int jr = j << 1;
      int ji = jr + 1;
      const ST kX = k0[j];
      const ST kY = k1[j];
      const ST kZ = k2[j];
      const ST val_r = myV[jr];
      const ST val_i = myV[ji];
      //phase e^{-i k.r}: s = sin, c = cos of the phase angle
      ST s, c;
      sincos(-(x * kX + y * kY + z * kZ), &s, &c);
      //dot(PrimLattice.G,myG[j]): gradient transformed to Cartesian
      const ST dX_r = g00 * g0[jr] + g01 * g1[jr] + g02 * g2[jr];
      const ST dY_r = g10 * g0[jr] + g11 * g1[jr] + g12 * g2[jr];
      const ST dZ_r = g20 * g0[jr] + g21 * g1[jr] + g22 * g2[jr];
      const ST dX_i = g00 * g0[ji] + g01 * g1[ji] + g02 * g2[ji];
      const ST dY_i = g10 * g0[ji] + g11 * g1[ji] + g12 * g2[ji];
      const ST dZ_i = g20 * g0[ji] + g21 * g1[ji] + g22 * g2[ji];
      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r = dX_r + val_i * kX;
      const ST gY_r = dY_r + val_i * kY;
      const ST gZ_r = dZ_r + val_i * kZ;
      const ST gX_i = dX_i - val_r * kX;
      const ST gY_i = dY_i - val_r * kY;
      const ST gZ_i = dZ_i - val_r * kZ;
      const size_t psiIndex = first_spo + jr;
      psi[psiIndex] = c * val_r - s * val_i;
      dpsi[psiIndex][0] = c * gX_r - s * gX_i;
      dpsi[psiIndex][1] = c * gY_r - s * gY_i;
      dpsi[psiIndex][2] = c * gZ_r - s * gZ_i;
      psi[psiIndex + 1] = c * val_i + s * val_r;
      dpsi[psiIndex + 1][0] = c * gX_i + s * gX_r;
      dpsi[psiIndex + 1][1] = c * gY_i + s * gY_r;
      dpsi[psiIndex + 1][2] = c * gZ_i + s * gZ_r;
      // Hessian h_ab: lattice-transformed spline hessian (v_m_v contraction
      // with rows of G) plus phase cross terms k_a * (g_b + d_b).
      const ST h_xx_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g00, g01, g02) +
          kX * (gX_i + dX_i);
      const ST h_xy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g10, g11, g12) +
          kX * (gY_i + dY_i);
      const ST h_xz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g20, g21, g22) +
          kX * (gZ_i + dZ_i);
      const ST h_yx_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g00, g01, g02) +
          kY * (gX_i + dX_i);
      const ST h_yy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g10, g11, g12) +
          kY * (gY_i + dY_i);
      const ST h_yz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g20, g21, g22) +
          kY * (gZ_i + dZ_i);
      const ST h_zx_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g20, g21, g22, g00, g01, g02) +
          kZ * (gX_i + dX_i);
      const ST h_zy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g20, g21, g22, g10, g11, g12) +
          kZ * (gY_i + dY_i);
      const ST h_zz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g20, g21, g22, g20, g21, g22) +
          kZ * (gZ_i + dZ_i);
      const ST h_xx_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g00, g01, g02) -
          kX * (gX_r + dX_r);
      const ST h_xy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g10, g11, g12) -
          kX * (gY_r + dY_r);
      const ST h_xz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g20, g21, g22) -
          kX * (gZ_r + dZ_r);
      const ST h_yx_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g00, g01, g02) -
          kY * (gX_r + dX_r);
      const ST h_yy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g10, g11, g12) -
          kY * (gY_r + dY_r);
      const ST h_yz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g20, g21, g22) -
          kY * (gZ_r + dZ_r);
      const ST h_zx_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g20, g21, g22, g00, g01, g02) -
          kZ * (gX_r + dX_r);
      const ST h_zy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g20, g21, g22, g10, g11, g12) -
          kZ * (gY_r + dY_r);
      const ST h_zz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g20, g21, g22, g20, g21, g22) -
          kZ * (gZ_r + dZ_r);
      grad_grad_psi[psiIndex][0] = c * h_xx_r - s * h_xx_i;
      grad_grad_psi[psiIndex][1] = c * h_xy_r - s * h_xy_i;
      grad_grad_psi[psiIndex][2] = c * h_xz_r - s * h_xz_i;
      grad_grad_psi[psiIndex][3] = c * h_yx_r - s * h_yx_i;
      grad_grad_psi[psiIndex][4] = c * h_yy_r - s * h_yy_i;
      grad_grad_psi[psiIndex][5] = c * h_yz_r - s * h_yz_i;
      grad_grad_psi[psiIndex][6] = c * h_zx_r - s * h_zx_i;
      grad_grad_psi[psiIndex][7] = c * h_zy_r - s * h_zy_i;
      grad_grad_psi[psiIndex][8] = c * h_zz_r - s * h_zz_i;
      grad_grad_psi[psiIndex + 1][0] = c * h_xx_i + s * h_xx_r;
      grad_grad_psi[psiIndex + 1][1] = c * h_xy_i + s * h_xy_r;
      grad_grad_psi[psiIndex + 1][2] = c * h_xz_i + s * h_xz_r;
      grad_grad_psi[psiIndex + 1][3] = c * h_yx_i + s * h_yx_r;
      grad_grad_psi[psiIndex + 1][4] = c * h_yy_i + s * h_yy_r;
      grad_grad_psi[psiIndex + 1][5] = c * h_yz_i + s * h_yz_r;
      grad_grad_psi[psiIndex + 1][6] = c * h_zx_i + s * h_zx_r;
      grad_grad_psi[psiIndex + 1][7] = c * h_zy_i + s * h_zy_r;
      grad_grad_psi[psiIndex + 1][8] = c * h_zz_i + s * h_zz_r;
    }
    // Bands producing a single real orbital each.
#pragma omp simd
    for (size_t j = std::max(nComplexBands, first); j < last; j++)
    {
      int jr = j << 1;
      int ji = jr + 1;
      const ST kX = k0[j];
      const ST kY = k1[j];
      const ST kZ = k2[j];
      const ST val_r = myV[jr];
      const ST val_i = myV[ji];
      //phase e^{-i k.r}: s = sin, c = cos of the phase angle
      ST s, c;
      sincos(-(x * kX + y * kY + z * kZ), &s, &c);
      //dot(PrimLattice.G,myG[j]): gradient transformed to Cartesian
      const ST dX_r = g00 * g0[jr] + g01 * g1[jr] + g02 * g2[jr];
      const ST dY_r = g10 * g0[jr] + g11 * g1[jr] + g12 * g2[jr];
      const ST dZ_r = g20 * g0[jr] + g21 * g1[jr] + g22 * g2[jr];
      const ST dX_i = g00 * g0[ji] + g01 * g1[ji] + g02 * g2[ji];
      const ST dY_i = g10 * g0[ji] + g11 * g1[ji] + g12 * g2[ji];
      const ST dZ_i = g20 * g0[ji] + g21 * g1[ji] + g22 * g2[ji];
      // \f$\nabla \psi_r + {\bf k}\psi_i\f$
      const ST gX_r = dX_r + val_i * kX;
      const ST gY_r = dY_r + val_i * kY;
      const ST gZ_r = dZ_r + val_i * kZ;
      const ST gX_i = dX_i - val_r * kX;
      const ST gY_i = dY_i - val_r * kY;
      const ST gZ_i = dZ_i - val_r * kZ;
      // Only the real combination of the pair is stored for these bands.
      const size_t psiIndex = first_spo + nComplexBands + j;
      psi[psiIndex] = c * val_r - s * val_i;
      dpsi[psiIndex][0] = c * gX_r - s * gX_i;
      dpsi[psiIndex][1] = c * gY_r - s * gY_i;
      dpsi[psiIndex][2] = c * gZ_r - s * gZ_i;
      // Hessian h_ab: lattice-transformed spline hessian plus phase cross terms.
      const ST h_xx_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g00, g01, g02) +
          kX * (gX_i + dX_i);
      const ST h_xy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g10, g11, g12) +
          kX * (gY_i + dY_i);
      const ST h_xz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g20, g21, g22) +
          kX * (gZ_i + dZ_i);
      const ST h_yx_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g00, g01, g02) +
          kY * (gX_i + dX_i);
      const ST h_yy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g10, g11, g12) +
          kY * (gY_i + dY_i);
      const ST h_yz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g20, g21, g22) +
          kY * (gZ_i + dZ_i);
      const ST h_zx_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g20, g21, g22, g00, g01, g02) +
          kZ * (gX_i + dX_i);
      const ST h_zy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g20, g21, g22, g10, g11, g12) +
          kZ * (gY_i + dY_i);
      const ST h_zz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g20, g21, g22, g20, g21, g22) +
          kZ * (gZ_i + dZ_i);
      const ST h_xx_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g00, g01, g02) -
          kX * (gX_r + dX_r);
      const ST h_xy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g10, g11, g12) -
          kX * (gY_r + dY_r);
      const ST h_xz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g20, g21, g22) -
          kX * (gZ_r + dZ_r);
      const ST h_yx_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g00, g01, g02) -
          kY * (gX_r + dX_r);
      const ST h_yy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g10, g11, g12) -
          kY * (gY_r + dY_r);
      const ST h_yz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g20, g21, g22) -
          kY * (gZ_r + dZ_r);
      const ST h_zx_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g20, g21, g22, g00, g01, g02) -
          kZ * (gX_r + dX_r);
      const ST h_zy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g20, g21, g22, g10, g11, g12) -
          kZ * (gY_r + dY_r);
      const ST h_zz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g20, g21, g22, g20, g21, g22) -
          kZ * (gZ_r + dZ_r);
      grad_grad_psi[psiIndex][0] = c * h_xx_r - s * h_xx_i;
      grad_grad_psi[psiIndex][1] = c * h_xy_r - s * h_xy_i;
      grad_grad_psi[psiIndex][2] = c * h_xz_r - s * h_xz_i;
      grad_grad_psi[psiIndex][3] = c * h_yx_r - s * h_yx_i;
      grad_grad_psi[psiIndex][4] = c * h_yy_r - s * h_yy_i;
      grad_grad_psi[psiIndex][5] = c * h_yz_r - s * h_yz_i;
      grad_grad_psi[psiIndex][6] = c * h_zx_r - s * h_zx_i;
      grad_grad_psi[psiIndex][7] = c * h_zy_r - s * h_zy_i;
      grad_grad_psi[psiIndex][8] = c * h_zz_r - s * h_zz_i;
    }
  }
template<typename VV, typename GV, typename GGV>
void evaluate_vgh(const ParticleSet& P, const int iat, VV& psi, GV& dpsi, GGV& grad_grad_psi)
{
const PointType& r = P.activeR(iat);
PointType ru(PrimLattice.toUnit_floor(r));
#pragma omp parallel
{
int first, last;
FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last);
spline2::evaluate3d_vgh(SplineInst->getSplinePtr(), ru, myV, myG, myH, first, last);
assign_vgh(r, psi, dpsi, grad_grad_psi, first / 2, last / 2);
}
}
template<typename VV, typename GV, typename GGV, typename GGGV>
void assign_vghgh(const PointType& r,
VV& psi,
GV& dpsi,
GGV& grad_grad_psi,
GGGV& grad_grad_grad_psi,
int first = 0,
int last = -1) const
{
// protect last
last = last < 0 ? kPoints.size() : (last > kPoints.size() ? kPoints.size() : last);
const ST g00 = PrimLattice.G(0), g01 = PrimLattice.G(1), g02 = PrimLattice.G(2), g10 = PrimLattice.G(3),
g11 = PrimLattice.G(4), g12 = PrimLattice.G(5), g20 = PrimLattice.G(6), g21 = PrimLattice.G(7),
g22 = PrimLattice.G(8);
const ST x = r[0], y = r[1], z = r[2];
const ST* restrict k0 = myKcart.data(0);
const ST* restrict k1 = myKcart.data(1);
const ST* restrict k2 = myKcart.data(2);
const ST* restrict g0 = myG.data(0);
const ST* restrict g1 = myG.data(1);
const ST* restrict g2 = myG.data(2);
const ST* restrict h00 = myH.data(0);
const ST* restrict h01 = myH.data(1);
const ST* restrict h02 = myH.data(2);
const ST* restrict h11 = myH.data(3);
const ST* restrict h12 = myH.data(4);
const ST* restrict h22 = myH.data(5);
const ST* restrict gh000 = mygH.data(0);
const ST* restrict gh001 = mygH.data(1);
const ST* restrict gh002 = mygH.data(2);
const ST* restrict gh011 = mygH.data(3);
const ST* restrict gh012 = mygH.data(4);
const ST* restrict gh022 = mygH.data(5);
const ST* restrict gh111 = mygH.data(6);
const ST* restrict gh112 = mygH.data(7);
const ST* restrict gh122 = mygH.data(8);
const ST* restrict gh222 = mygH.data(9);
//SIMD doesn't work quite right yet. Comment out until further debugging.
#pragma omp simd
for (size_t j = first; j < std::min(nComplexBands, last); j++)
{
int jr = j << 1;
int ji = jr + 1;
const ST kX = k0[j];
const ST kY = k1[j];
const ST kZ = k2[j];
const ST val_r = myV[jr];
const ST val_i = myV[ji];
//phase
ST s, c;
sincos(-(x * kX + y * kY + z * kZ), &s, &c);
//dot(PrimLattice.G,myG[j])
const ST dX_r = g00 * g0[jr] + g01 * g1[jr] + g02 * g2[jr];
const ST dY_r = g10 * g0[jr] + g11 * g1[jr] + g12 * g2[jr];
const ST dZ_r = g20 * g0[jr] + g21 * g1[jr] + g22 * g2[jr];
const ST dX_i = g00 * g0[ji] + g01 * g1[ji] + g02 * g2[ji];
const ST dY_i = g10 * g0[ji] + g11 * g1[ji] + g12 * g2[ji];
const ST dZ_i = g20 * g0[ji] + g21 * g1[ji] + g22 * g2[ji];
// \f$\nabla \psi_r + {\bf k}\psi_i\f$
const ST gX_r = dX_r + val_i * kX;
const ST gY_r = dY_r + val_i * kY;
const ST gZ_r = dZ_r + val_i * kZ;
const ST gX_i = dX_i - val_r * kX;
const ST gY_i = dY_i - val_r * kY;
const ST gZ_i = dZ_i - val_r * kZ;
const size_t psiIndex = first_spo + jr;
psi[psiIndex] = c * val_r - s * val_i;
dpsi[psiIndex][0] = c * gX_r - s * gX_i;
dpsi[psiIndex][1] = c * gY_r - s * gY_i;
dpsi[psiIndex][2] = c * gZ_r - s * gZ_i;
psi[psiIndex + 1] = c * val_i + s * val_r;
dpsi[psiIndex + 1][0] = c * gX_i + s * gX_r;
dpsi[psiIndex + 1][1] = c * gY_i + s * gY_r;
dpsi[psiIndex + 1][2] = c * gZ_i + s * gZ_r;
//intermediates for computation of hessian. \partial_i \partial_j phi in cartesian coordinates.
const ST f_xx_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g00, g01, g02);
const ST f_xy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g10, g11, g12);
const ST f_xz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g20, g21, g22);
const ST f_yy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g10, g11, g12);
const ST f_yz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g20, g21, g22);
const ST f_zz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g20, g21, g22, g20, g21, g22);
const ST f_xx_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g00, g01, g02);
const ST f_xy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g10, g11, g12);
const ST f_xz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g20, g21, g22);
const ST f_yy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g10, g11, g12);
const ST f_yz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g20, g21, g22);
const ST f_zz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g20, g21, g22, g20, g21, g22);
const ST h_xx_r = f_xx_r + 2 * kX * dX_i - kX * kX * val_r;
const ST h_xy_r = f_xy_r + (kX * dY_i + kY * dX_i) - kX * kY * val_r;
const ST h_xz_r = f_xz_r + (kX * dZ_i + kZ * dX_i) - kX * kZ * val_r;
const ST h_yy_r = f_yy_r + 2 * kY * dY_i - kY * kY * val_r;
const ST h_yz_r = f_yz_r + (kY * dZ_i + kZ * dY_i) - kY * kZ * val_r;
const ST h_zz_r = f_zz_r + 2 * kZ * dZ_i - kZ * kZ * val_r;
const ST h_xx_i = f_xx_i - 2 * kX * dX_r - kX * kX * val_i;
const ST h_xy_i = f_xy_i - (kX * dY_r + kY * dX_r) - kX * kY * val_i;
const ST h_xz_i = f_xz_i - (kX * dZ_r + kZ * dX_r) - kX * kZ * val_i;
const ST h_yy_i = f_yy_i - 2 * kY * dY_r - kY * kY * val_i;
const ST h_yz_i = f_yz_i - (kZ * dY_r + kY * dZ_r) - kZ * kY * val_i;
const ST h_zz_i = f_zz_i - 2 * kZ * dZ_r - kZ * kZ * val_i;
grad_grad_psi[psiIndex][0] = c * h_xx_r - s * h_xx_i;
grad_grad_psi[psiIndex][1] = c * h_xy_r - s * h_xy_i;
grad_grad_psi[psiIndex][2] = c * h_xz_r - s * h_xz_i;
grad_grad_psi[psiIndex][3] = c * h_xy_r - s * h_xy_i;
grad_grad_psi[psiIndex][4] = c * h_yy_r - s * h_yy_i;
grad_grad_psi[psiIndex][5] = c * h_yz_r - s * h_yz_i;
grad_grad_psi[psiIndex][6] = c * h_xz_r - s * h_xz_i;
grad_grad_psi[psiIndex][7] = c * h_yz_r - s * h_yz_i;
grad_grad_psi[psiIndex][8] = c * h_zz_r - s * h_zz_i;
grad_grad_psi[psiIndex + 1][0] = c * h_xx_i + s * h_xx_r;
grad_grad_psi[psiIndex + 1][1] = c * h_xy_i + s * h_xy_r;
grad_grad_psi[psiIndex + 1][2] = c * h_xz_i + s * h_xz_r;
grad_grad_psi[psiIndex + 1][3] = c * h_xy_i + s * h_xy_r;
grad_grad_psi[psiIndex + 1][4] = c * h_yy_i + s * h_yy_r;
grad_grad_psi[psiIndex + 1][5] = c * h_yz_i + s * h_yz_r;
grad_grad_psi[psiIndex + 1][6] = c * h_xz_i + s * h_xz_r;
grad_grad_psi[psiIndex + 1][7] = c * h_yz_i + s * h_yz_r;
grad_grad_psi[psiIndex + 1][8] = c * h_zz_i + s * h_zz_r;
//These are the real and imaginary components of the third SPO derivative. _xxx denotes
// third derivative w.r.t. x, _xyz, a derivative with resepect to x,y, and z, and so on.
const ST f3_xxx_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g00, g01, g02, g00, g01, g02);
const ST f3_xxy_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g00, g01, g02, g10, g11, g12);
const ST f3_xxz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g00, g01, g02, g20, g21, g22);
const ST f3_xyy_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g10, g11, g12, g10, g11, g12);
const ST f3_xyz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g10, g11, g12, g20, g21, g22);
const ST f3_xzz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g20, g21, g22, g20, g21, g22);
const ST f3_yyy_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g10, g11, g12, g10, g11, g12, g10, g11, g12);
const ST f3_yyz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g10, g11, g12, g10, g11, g12, g20, g21, g22);
const ST f3_yzz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g10, g11, g12, g20, g21, g22, g20, g21, g22);
const ST f3_zzz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g20, g21, g22, g20, g21, g22, g20, g21, g22);
const ST f3_xxx_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g00, g01, g02, g00, g01, g02);
const ST f3_xxy_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g00, g01, g02, g10, g11, g12);
const ST f3_xxz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g00, g01, g02, g20, g21, g22);
const ST f3_xyy_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g10, g11, g12, g10, g11, g12);
const ST f3_xyz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g10, g11, g12, g20, g21, g22);
const ST f3_xzz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g20, g21, g22, g20, g21, g22);
const ST f3_yyy_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g10, g11, g12, g10, g11, g12, g10, g11, g12);
const ST f3_yyz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g10, g11, g12, g10, g11, g12, g20, g21, g22);
const ST f3_yzz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g10, g11, g12, g20, g21, g22, g20, g21, g22);
const ST f3_zzz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g20, g21, g22, g20, g21, g22, g20, g21, g22);
//Here is where we build up the components of the physical hessian gradient, namely, d^3/dx^3(e^{-ik*r}\phi(r)
const ST gh_xxx_r = f3_xxx_r + 3 * kX * f_xx_i - 3 * kX * kX * dX_r - kX * kX * kX * val_i;
const ST gh_xxx_i = f3_xxx_i - 3 * kX * f_xx_r - 3 * kX * kX * dX_i + kX * kX * kX * val_r;
const ST gh_xxy_r =
f3_xxy_r + (kY * f_xx_i + 2 * kX * f_xy_i) - (kX * kX * dY_r + 2 * kX * kY * dX_r) - kX * kX * kY * val_i;
const ST gh_xxy_i =
f3_xxy_i - (kY * f_xx_r + 2 * kX * f_xy_r) - (kX * kX * dY_i + 2 * kX * kY * dX_i) + kX * kX * kY * val_r;
const ST gh_xxz_r =
f3_xxz_r + (kZ * f_xx_i + 2 * kX * f_xz_i) - (kX * kX * dZ_r + 2 * kX * kZ * dX_r) - kX * kX * kZ * val_i;
const ST gh_xxz_i =
f3_xxz_i - (kZ * f_xx_r + 2 * kX * f_xz_r) - (kX * kX * dZ_i + 2 * kX * kZ * dX_i) + kX * kX * kZ * val_r;
const ST gh_xyy_r =
f3_xyy_r + (2 * kY * f_xy_i + kX * f_yy_i) - (2 * kX * kY * dY_r + kY * kY * dX_r) - kX * kY * kY * val_i;
const ST gh_xyy_i =
f3_xyy_i - (2 * kY * f_xy_r + kX * f_yy_r) - (2 * kX * kY * dY_i + kY * kY * dX_i) + kX * kY * kY * val_r;
const ST gh_xyz_r = f3_xyz_r + (kX * f_yz_i + kY * f_xz_i + kZ * f_xy_i) -
(kX * kY * dZ_r + kY * kZ * dX_r + kZ * kX * dY_r) - kX * kY * kZ * val_i;
const ST gh_xyz_i = f3_xyz_i - (kX * f_yz_r + kY * f_xz_r + kZ * f_xy_r) -
(kX * kY * dZ_i + kY * kZ * dX_i + kZ * kX * dY_i) + kX * kY * kZ * val_r;
const ST gh_xzz_r =
f3_xzz_r + (2 * kZ * f_xz_i + kX * f_zz_i) - (2 * kX * kZ * dZ_r + kZ * kZ * dX_r) - kX * kZ * kZ * val_i;
const ST gh_xzz_i =
f3_xzz_i - (2 * kZ * f_xz_r + kX * f_zz_r) - (2 * kX * kZ * dZ_i + kZ * kZ * dX_i) + kX * kZ * kZ * val_r;
const ST gh_yyy_r = f3_yyy_r + 3 * kY * f_yy_i - 3 * kY * kY * dY_r - kY * kY * kY * val_i;
const ST gh_yyy_i = f3_yyy_i - 3 * kY * f_yy_r - 3 * kY * kY * dY_i + kY * kY * kY * val_r;
const ST gh_yyz_r =
f3_yyz_r + (kZ * f_yy_i + 2 * kY * f_yz_i) - (kY * kY * dZ_r + 2 * kY * kZ * dY_r) - kY * kY * kZ * val_i;
const ST gh_yyz_i =
f3_yyz_i - (kZ * f_yy_r + 2 * kY * f_yz_r) - (kY * kY * dZ_i + 2 * kY * kZ * dY_i) + kY * kY * kZ * val_r;
const ST gh_yzz_r =
f3_yzz_r + (2 * kZ * f_yz_i + kY * f_zz_i) - (2 * kY * kZ * dZ_r + kZ * kZ * dY_r) - kY * kZ * kZ * val_i;
const ST gh_yzz_i =
f3_yzz_i - (2 * kZ * f_yz_r + kY * f_zz_r) - (2 * kY * kZ * dZ_i + kZ * kZ * dY_i) + kY * kZ * kZ * val_r;
const ST gh_zzz_r = f3_zzz_r + 3 * kZ * f_zz_i - 3 * kZ * kZ * dZ_r - kZ * kZ * kZ * val_i;
const ST gh_zzz_i = f3_zzz_i - 3 * kZ * f_zz_r - 3 * kZ * kZ * dZ_i + kZ * kZ * kZ * val_r;
grad_grad_grad_psi[psiIndex][0][0] = c * gh_xxx_r - s * gh_xxx_i;
grad_grad_grad_psi[psiIndex][0][1] = c * gh_xxy_r - s * gh_xxy_i;
grad_grad_grad_psi[psiIndex][0][2] = c * gh_xxz_r - s * gh_xxz_i;
grad_grad_grad_psi[psiIndex][0][3] = c * gh_xxy_r - s * gh_xxy_i;
grad_grad_grad_psi[psiIndex][0][4] = c * gh_xyy_r - s * gh_xyy_i;
grad_grad_grad_psi[psiIndex][0][5] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][0][6] = c * gh_xxz_r - s * gh_xxz_i;
grad_grad_grad_psi[psiIndex][0][7] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][0][8] = c * gh_xzz_r - s * gh_xzz_i;
grad_grad_grad_psi[psiIndex][1][0] = c * gh_xxy_r - s * gh_xxy_i;
grad_grad_grad_psi[psiIndex][1][1] = c * gh_xyy_r - s * gh_xyy_i;
grad_grad_grad_psi[psiIndex][1][2] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][1][3] = c * gh_xyy_r - s * gh_xyy_i;
grad_grad_grad_psi[psiIndex][1][4] = c * gh_yyy_r - s * gh_yyy_i;
grad_grad_grad_psi[psiIndex][1][5] = c * gh_yyz_r - s * gh_yyz_i;
grad_grad_grad_psi[psiIndex][1][6] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][1][7] = c * gh_yyz_r - s * gh_yyz_i;
grad_grad_grad_psi[psiIndex][1][8] = c * gh_yzz_r - s * gh_yzz_i;
grad_grad_grad_psi[psiIndex][2][0] = c * gh_xxz_r - s * gh_xxz_i;
grad_grad_grad_psi[psiIndex][2][1] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][2][2] = c * gh_xzz_r - s * gh_xzz_i;
grad_grad_grad_psi[psiIndex][2][3] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][2][4] = c * gh_yyz_r - s * gh_yyz_i;
grad_grad_grad_psi[psiIndex][2][5] = c * gh_yzz_r - s * gh_yzz_i;
grad_grad_grad_psi[psiIndex][2][6] = c * gh_xzz_r - s * gh_xzz_i;
grad_grad_grad_psi[psiIndex][2][7] = c * gh_yzz_r - s * gh_yzz_i;
grad_grad_grad_psi[psiIndex][2][8] = c * gh_zzz_r - s * gh_zzz_i;
grad_grad_grad_psi[psiIndex + 1][0][0] = c * gh_xxx_i + s * gh_xxx_r;
grad_grad_grad_psi[psiIndex + 1][0][1] = c * gh_xxy_i + s * gh_xxy_r;
grad_grad_grad_psi[psiIndex + 1][0][2] = c * gh_xxz_i + s * gh_xxz_r;
grad_grad_grad_psi[psiIndex + 1][0][3] = c * gh_xxy_i + s * gh_xxy_r;
grad_grad_grad_psi[psiIndex + 1][0][4] = c * gh_xyy_i + s * gh_xyy_r;
grad_grad_grad_psi[psiIndex + 1][0][5] = c * gh_xyz_i + s * gh_xyz_r;
grad_grad_grad_psi[psiIndex + 1][0][6] = c * gh_xxz_i + s * gh_xxz_r;
grad_grad_grad_psi[psiIndex + 1][0][7] = c * gh_xyz_i + s * gh_xyz_r;
grad_grad_grad_psi[psiIndex + 1][0][8] = c * gh_xzz_i + s * gh_xzz_r;
grad_grad_grad_psi[psiIndex + 1][1][0] = c * gh_xxy_i + s * gh_xxy_r;
grad_grad_grad_psi[psiIndex + 1][1][1] = c * gh_xyy_i + s * gh_xyy_r;
grad_grad_grad_psi[psiIndex + 1][1][2] = c * gh_xyz_i + s * gh_xyz_r;
grad_grad_grad_psi[psiIndex + 1][1][3] = c * gh_xyy_i + s * gh_xyy_r;
grad_grad_grad_psi[psiIndex + 1][1][4] = c * gh_yyy_i + s * gh_yyy_r;
grad_grad_grad_psi[psiIndex + 1][1][5] = c * gh_yyz_i + s * gh_yyz_r;
grad_grad_grad_psi[psiIndex + 1][1][6] = c * gh_xyz_i + s * gh_xyz_r;
grad_grad_grad_psi[psiIndex + 1][1][7] = c * gh_yyz_i + s * gh_yyz_r;
grad_grad_grad_psi[psiIndex + 1][1][8] = c * gh_yzz_i + s * gh_yzz_r;
grad_grad_grad_psi[psiIndex + 1][2][0] = c * gh_xxz_i + s * gh_xxz_r;
grad_grad_grad_psi[psiIndex + 1][2][1] = c * gh_xyz_i + s * gh_xyz_r;
grad_grad_grad_psi[psiIndex + 1][2][2] = c * gh_xzz_i + s * gh_xzz_r;
grad_grad_grad_psi[psiIndex + 1][2][3] = c * gh_xyz_i + s * gh_xyz_r;
grad_grad_grad_psi[psiIndex + 1][2][4] = c * gh_yyz_i + s * gh_yyz_r;
grad_grad_grad_psi[psiIndex + 1][2][5] = c * gh_yzz_i + s * gh_yzz_r;
grad_grad_grad_psi[psiIndex + 1][2][6] = c * gh_xzz_i + s * gh_xzz_r;
grad_grad_grad_psi[psiIndex + 1][2][7] = c * gh_yzz_i + s * gh_yzz_r;
grad_grad_grad_psi[psiIndex + 1][2][8] = c * gh_zzz_i + s * gh_zzz_r;
}
#pragma omp simd
for (size_t j = std::max(nComplexBands, first); j < last; j++)
{
int jr = j << 1;
int ji = jr + 1;
const ST kX = k0[j];
const ST kY = k1[j];
const ST kZ = k2[j];
const ST val_r = myV[jr];
const ST val_i = myV[ji];
//phase
ST s, c;
sincos(-(x * kX + y * kY + z * kZ), &s, &c);
//dot(PrimLattice.G,myG[j])
const ST dX_r = g00 * g0[jr] + g01 * g1[jr] + g02 * g2[jr];
const ST dY_r = g10 * g0[jr] + g11 * g1[jr] + g12 * g2[jr];
const ST dZ_r = g20 * g0[jr] + g21 * g1[jr] + g22 * g2[jr];
const ST dX_i = g00 * g0[ji] + g01 * g1[ji] + g02 * g2[ji];
const ST dY_i = g10 * g0[ji] + g11 * g1[ji] + g12 * g2[ji];
const ST dZ_i = g20 * g0[ji] + g21 * g1[ji] + g22 * g2[ji];
// \f$\nabla \psi_r + {\bf k}\psi_i\f$
const ST gX_r = dX_r + val_i * kX;
const ST gY_r = dY_r + val_i * kY;
const ST gZ_r = dZ_r + val_i * kZ;
const ST gX_i = dX_i - val_r * kX;
const ST gY_i = dY_i - val_r * kY;
const ST gZ_i = dZ_i - val_r * kZ;
const size_t psiIndex = first_spo + nComplexBands + j;
psi[psiIndex] = c * val_r - s * val_i;
dpsi[psiIndex][0] = c * gX_r - s * gX_i;
dpsi[psiIndex][1] = c * gY_r - s * gY_i;
dpsi[psiIndex][2] = c * gZ_r - s * gZ_i;
//intermediates for computation of hessian. \partial_i \partial_j phi in cartesian coordinates.
const ST f_xx_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g00, g01, g02);
const ST f_xy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g10, g11, g12);
const ST f_xz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g00, g01, g02, g20, g21, g22);
const ST f_yy_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g10, g11, g12);
const ST f_yz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g10, g11, g12, g20, g21, g22);
const ST f_zz_r = v_m_v(h00[jr], h01[jr], h02[jr], h11[jr], h12[jr], h22[jr], g20, g21, g22, g20, g21, g22);
const ST f_xx_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g00, g01, g02);
const ST f_xy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g10, g11, g12);
const ST f_xz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g00, g01, g02, g20, g21, g22);
const ST f_yy_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g10, g11, g12);
const ST f_yz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g10, g11, g12, g20, g21, g22);
const ST f_zz_i = v_m_v(h00[ji], h01[ji], h02[ji], h11[ji], h12[ji], h22[ji], g20, g21, g22, g20, g21, g22);
const ST h_xx_r = f_xx_r + 2 * kX * dX_i - kX * kX * val_r;
const ST h_xy_r = f_xy_r + (kX * dY_i + kY * dX_i) - kX * kY * val_r;
const ST h_xz_r = f_xz_r + (kX * dZ_i + kZ * dX_i) - kX * kZ * val_r;
const ST h_yy_r = f_yy_r + 2 * kY * dY_i - kY * kY * val_r;
const ST h_yz_r = f_yz_r + (kY * dZ_i + kZ * dY_i) - kY * kZ * val_r;
const ST h_zz_r = f_zz_r + 2 * kZ * dZ_i - kZ * kZ * val_r;
const ST h_xx_i = f_xx_i - 2 * kX * dX_r - kX * kX * val_i;
const ST h_xy_i = f_xy_i - (kX * dY_r + kY * dX_r) - kX * kY * val_i;
const ST h_xz_i = f_xz_i - (kX * dZ_r + kZ * dX_r) - kX * kZ * val_i;
const ST h_yy_i = f_yy_i - 2 * kY * dY_r - kY * kY * val_i;
const ST h_yz_i = f_yz_i - (kZ * dY_r + kY * dZ_r) - kZ * kY * val_i;
const ST h_zz_i = f_zz_i - 2 * kZ * dZ_r - kZ * kZ * val_i;
grad_grad_psi[psiIndex][0] = c * h_xx_r - s * h_xx_i;
grad_grad_psi[psiIndex][1] = c * h_xy_r - s * h_xy_i;
grad_grad_psi[psiIndex][2] = c * h_xz_r - s * h_xz_i;
grad_grad_psi[psiIndex][3] = c * h_xy_r - s * h_xy_i;
grad_grad_psi[psiIndex][4] = c * h_yy_r - s * h_yy_i;
grad_grad_psi[psiIndex][5] = c * h_yz_r - s * h_yz_i;
grad_grad_psi[psiIndex][6] = c * h_xz_r - s * h_xz_i;
grad_grad_psi[psiIndex][7] = c * h_yz_r - s * h_yz_i;
grad_grad_psi[psiIndex][8] = c * h_zz_r - s * h_zz_i;
//These are the real and imaginary components of the third SPO derivative. _xxx denotes
// third derivative w.r.t. x; _xyz, a derivative with respect to x, y, and z; and so on.
const ST f3_xxx_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g00, g01, g02, g00, g01, g02);
const ST f3_xxy_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g00, g01, g02, g10, g11, g12);
const ST f3_xxz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g00, g01, g02, g20, g21, g22);
const ST f3_xyy_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g10, g11, g12, g10, g11, g12);
const ST f3_xyz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g10, g11, g12, g20, g21, g22);
const ST f3_xzz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g00, g01, g02, g20, g21, g22, g20, g21, g22);
const ST f3_yyy_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g10, g11, g12, g10, g11, g12, g10, g11, g12);
const ST f3_yyz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g10, g11, g12, g10, g11, g12, g20, g21, g22);
const ST f3_yzz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g10, g11, g12, g20, g21, g22, g20, g21, g22);
const ST f3_zzz_r = t3_contract(gh000[jr], gh001[jr], gh002[jr], gh011[jr], gh012[jr], gh022[jr], gh111[jr],
gh112[jr], gh122[jr], gh222[jr], g20, g21, g22, g20, g21, g22, g20, g21, g22);
const ST f3_xxx_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g00, g01, g02, g00, g01, g02);
const ST f3_xxy_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g00, g01, g02, g10, g11, g12);
const ST f3_xxz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g00, g01, g02, g20, g21, g22);
const ST f3_xyy_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g10, g11, g12, g10, g11, g12);
const ST f3_xyz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g10, g11, g12, g20, g21, g22);
const ST f3_xzz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g00, g01, g02, g20, g21, g22, g20, g21, g22);
const ST f3_yyy_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g10, g11, g12, g10, g11, g12, g10, g11, g12);
const ST f3_yyz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g10, g11, g12, g10, g11, g12, g20, g21, g22);
const ST f3_yzz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g10, g11, g12, g20, g21, g22, g20, g21, g22);
const ST f3_zzz_i = t3_contract(gh000[ji], gh001[ji], gh002[ji], gh011[ji], gh012[ji], gh022[ji], gh111[ji],
gh112[ji], gh122[ji], gh222[ji], g20, g21, g22, g20, g21, g22, g20, g21, g22);
//Here is where we build up the components of the physical hessian gradient, namely, d^3/dx^3 (e^{-ik*r}\phi(r))
const ST gh_xxx_r = f3_xxx_r + 3 * kX * f_xx_i - 3 * kX * kX * dX_r - kX * kX * kX * val_i;
const ST gh_xxx_i = f3_xxx_i - 3 * kX * f_xx_r - 3 * kX * kX * dX_i + kX * kX * kX * val_r;
const ST gh_xxy_r =
f3_xxy_r + (kY * f_xx_i + 2 * kX * f_xy_i) - (kX * kX * dY_r + 2 * kX * kY * dX_r) - kX * kX * kY * val_i;
const ST gh_xxy_i =
f3_xxy_i - (kY * f_xx_r + 2 * kX * f_xy_r) - (kX * kX * dY_i + 2 * kX * kY * dX_i) + kX * kX * kY * val_r;
const ST gh_xxz_r =
f3_xxz_r + (kZ * f_xx_i + 2 * kX * f_xz_i) - (kX * kX * dZ_r + 2 * kX * kZ * dX_r) - kX * kX * kZ * val_i;
const ST gh_xxz_i =
f3_xxz_i - (kZ * f_xx_r + 2 * kX * f_xz_r) - (kX * kX * dZ_i + 2 * kX * kZ * dX_i) + kX * kX * kZ * val_r;
const ST gh_xyy_r =
f3_xyy_r + (2 * kY * f_xy_i + kX * f_yy_i) - (2 * kX * kY * dY_r + kY * kY * dX_r) - kX * kY * kY * val_i;
const ST gh_xyy_i =
f3_xyy_i - (2 * kY * f_xy_r + kX * f_yy_r) - (2 * kX * kY * dY_i + kY * kY * dX_i) + kX * kY * kY * val_r;
const ST gh_xyz_r = f3_xyz_r + (kX * f_yz_i + kY * f_xz_i + kZ * f_xy_i) -
(kX * kY * dZ_r + kY * kZ * dX_r + kZ * kX * dY_r) - kX * kY * kZ * val_i;
const ST gh_xyz_i = f3_xyz_i - (kX * f_yz_r + kY * f_xz_r + kZ * f_xy_r) -
(kX * kY * dZ_i + kY * kZ * dX_i + kZ * kX * dY_i) + kX * kY * kZ * val_r;
const ST gh_xzz_r =
f3_xzz_r + (2 * kZ * f_xz_i + kX * f_zz_i) - (2 * kX * kZ * dZ_r + kZ * kZ * dX_r) - kX * kZ * kZ * val_i;
const ST gh_xzz_i =
f3_xzz_i - (2 * kZ * f_xz_r + kX * f_zz_r) - (2 * kX * kZ * dZ_i + kZ * kZ * dX_i) + kX * kZ * kZ * val_r;
const ST gh_yyy_r = f3_yyy_r + 3 * kY * f_yy_i - 3 * kY * kY * dY_r - kY * kY * kY * val_i;
const ST gh_yyy_i = f3_yyy_i - 3 * kY * f_yy_r - 3 * kY * kY * dY_i + kY * kY * kY * val_r;
const ST gh_yyz_r =
f3_yyz_r + (kZ * f_yy_i + 2 * kY * f_yz_i) - (kY * kY * dZ_r + 2 * kY * kZ * dY_r) - kY * kY * kZ * val_i;
const ST gh_yyz_i =
f3_yyz_i - (kZ * f_yy_r + 2 * kY * f_yz_r) - (kY * kY * dZ_i + 2 * kY * kZ * dY_i) + kY * kY * kZ * val_r;
const ST gh_yzz_r =
f3_yzz_r + (2 * kZ * f_yz_i + kY * f_zz_i) - (2 * kY * kZ * dZ_r + kZ * kZ * dY_r) - kY * kZ * kZ * val_i;
const ST gh_yzz_i =
f3_yzz_i - (2 * kZ * f_yz_r + kY * f_zz_r) - (2 * kY * kZ * dZ_i + kZ * kZ * dY_i) + kY * kZ * kZ * val_r;
const ST gh_zzz_r = f3_zzz_r + 3 * kZ * f_zz_i - 3 * kZ * kZ * dZ_r - kZ * kZ * kZ * val_i;
const ST gh_zzz_i = f3_zzz_i - 3 * kZ * f_zz_r - 3 * kZ * kZ * dZ_i + kZ * kZ * kZ * val_r;
//[x][xx] //These are the unique entries
grad_grad_grad_psi[psiIndex][0][0] = c * gh_xxx_r - s * gh_xxx_i;
grad_grad_grad_psi[psiIndex][0][1] = c * gh_xxy_r - s * gh_xxy_i;
grad_grad_grad_psi[psiIndex][0][2] = c * gh_xxz_r - s * gh_xxz_i;
grad_grad_grad_psi[psiIndex][0][3] = c * gh_xxy_r - s * gh_xxy_i;
grad_grad_grad_psi[psiIndex][0][4] = c * gh_xyy_r - s * gh_xyy_i;
grad_grad_grad_psi[psiIndex][0][5] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][0][6] = c * gh_xxz_r - s * gh_xxz_i;
grad_grad_grad_psi[psiIndex][0][7] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][0][8] = c * gh_xzz_r - s * gh_xzz_i;
grad_grad_grad_psi[psiIndex][1][0] = c * gh_xxy_r - s * gh_xxy_i;
grad_grad_grad_psi[psiIndex][1][1] = c * gh_xyy_r - s * gh_xyy_i;
grad_grad_grad_psi[psiIndex][1][2] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][1][3] = c * gh_xyy_r - s * gh_xyy_i;
grad_grad_grad_psi[psiIndex][1][4] = c * gh_yyy_r - s * gh_yyy_i;
grad_grad_grad_psi[psiIndex][1][5] = c * gh_yyz_r - s * gh_yyz_i;
grad_grad_grad_psi[psiIndex][1][6] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][1][7] = c * gh_yyz_r - s * gh_yyz_i;
grad_grad_grad_psi[psiIndex][1][8] = c * gh_yzz_r - s * gh_yzz_i;
grad_grad_grad_psi[psiIndex][2][0] = c * gh_xxz_r - s * gh_xxz_i;
grad_grad_grad_psi[psiIndex][2][1] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][2][2] = c * gh_xzz_r - s * gh_xzz_i;
grad_grad_grad_psi[psiIndex][2][3] = c * gh_xyz_r - s * gh_xyz_i;
grad_grad_grad_psi[psiIndex][2][4] = c * gh_yyz_r - s * gh_yyz_i;
grad_grad_grad_psi[psiIndex][2][5] = c * gh_yzz_r - s * gh_yzz_i;
grad_grad_grad_psi[psiIndex][2][6] = c * gh_xzz_r - s * gh_xzz_i;
grad_grad_grad_psi[psiIndex][2][7] = c * gh_yzz_r - s * gh_yzz_i;
grad_grad_grad_psi[psiIndex][2][8] = c * gh_zzz_r - s * gh_zzz_i;
}
}
/** Evaluate SPO value (psi), gradient (dpsi), hessian (grad_grad_psi) and
 *  third derivatives (grad_grad_grad_psi) for all orbitals at the active
 *  particle's proposed position.
 *  @param P    particle set; the evaluation point is P.activeR(iat)
 *  @param iat  index of the active particle
 *  @param psi / dpsi / grad_grad_psi / grad_grad_grad_psi  output containers,
 *         indexed by SPO; filled by assign_vghgh
 */
template<typename VV, typename GV, typename GGV, typename GGGV>
void evaluate_vghgh(const ParticleSet& P,
const int iat,
VV& psi,
GV& dpsi,
GGV& grad_grad_psi,
GGGV& grad_grad_grad_psi)
{
// Cartesian position of the active particle and its image folded into the
// unit cell (the spline table is defined on unit-cell coordinates).
const PointType& r = P.activeR(iat);
PointType ru(PrimLattice.toUnit_floor(r));
#pragma omp parallel
{
int first, last;
// Split the spline coefficient range [0, myV.size()) across the OpenMP
// threads, keeping each sub-range aligned for SIMD evaluation.
FairDivideAligned(myV.size(), getAlignment<ST>(), omp_get_num_threads(), omp_get_thread_num(), first, last);
// Evaluate the 3D B-spline value/gradient/hessian/grad-hessian into the
// shared work arrays myV, myG, myH, mygH for this thread's sub-range.
spline2::evaluate3d_vghgh(SplineInst->getSplinePtr(), ru, myV, myG, myH, mygH, first, last);
// The spline storage interleaves real/imaginary parts (entries 2j and 2j+1
// per orbital j, see assign_vghgh), so the orbital index range handled by
// this thread is half the coefficient range.
assign_vghgh(r, psi, dpsi, grad_grad_psi, grad_grad_grad_psi, first / 2, last / 2);
}
}
};
} // namespace qmcplusplus
#endif
|
ft_ao.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Fourier transformed AO pair
* \int e^{-i Gv \cdot r} i(r) * j(r) dr^3
*
* eval_gz, b, gxyz, gs:
* - when eval_gz is GTO_Gv_uniform_orth
* > b (reciprocal vectors) is diagonal 3x3 matrix
* > Gv k-space grids = dot(b.T,gxyz)
* > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv])
* > gs[3]: The number of G-vectors along each direction (nGv=gs[0]*gs[1]*gs[2]).
* - when eval_gz is GTO_Gv_uniform_nonorth
* > b is 3x3 matrix = 2\pi * scipy.linalg.inv(cell.lattice_vectors).T
* > Gv k-space grids = dot(b.T,gxyz)
* > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv])
* > gs[3]: The number of *positive* G-vectors along each direction.
* - when eval_gz is GTO_Gv_general
* only Gv is needed
* - when eval_gz is GTO_Gv_nonuniform_orth
* > b is the basic G value for each cartesian component
* Gx = b[:gs[0]]
* Gy = b[gs[0]:gs[0]+gs[1]]
* Gz = b[gs[0]+gs[1]:]
* > gs[3]: Number of basic G values along each direction.
* > gxyz[3,nGv] are used to index the basic G value
* > Gv is not used
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "gto/gto.h"
#include "gto/ft_ao.h"
#include "np_helper/np_helper.h"
#define SQRTPI 1.7724538509055160272981674833411451
#define EXP_CUTOFF 100
double CINTsquare_dist(const double *r1, const double *r2);
double CINTcommon_fac_sp(int l);
/*
* Pyscf-1.5 (and older) use libcint function CINTinit_int1e_EnvVars and
* CINTg1e_index_xyz. It's unsafe since the CINTEnvVars type was redefined
* in ft_ao.h. Copy the contents of CINTinit_int1e_EnvVars and
* CINTg1e_index_xyz here.
*/
#define IINC 0
#define JINC 1
#define GSHIFT 4
#define POS_E1 5
#define RYS_ROOTS 6
#define TENSOR 7
/*
 * Initialize a CINTEnvVars structure for a one-electron (two-shell) Fourier
 * transformed AO-pair integral over shells shls = {i_sh, j_sh}.
 * This is a local copy of libcint's CINTinit_int1e_EnvVars (see the comment
 * above): the CINTEnvVars type is redefined in ft_ao.h, so the library
 * version must not be used.
 *
 * envs : output, filled in place
 * ng   : integral descriptor; slots IINC/JINC (angular momentum increments),
 *        GSHIFT, POS_E1, RYS_ROOTS, TENSOR are read here
 * atm/natm, bas/nbas, env : libcint atom/basis/parameter tables
 */
void GTO_ft_init1e_envs(CINTEnvVars *envs, int *ng, int *shls,
int *atm, int natm, int *bas, int nbas, double *env)
{
envs->natm = natm;
envs->nbas = nbas;
envs->atm = atm;
envs->bas = bas;
envs->env = env;
envs->shls = shls;
const int i_sh = shls[0];
const int j_sh = shls[1];
/* angular momenta and contraction counts from the basis table
 * (bas()/atm() are libcint accessor macros into the flat tables) */
envs->i_l = bas(ANG_OF, i_sh);
envs->j_l = bas(ANG_OF, j_sh);
envs->x_ctr[0] = bas(NCTR_OF, i_sh);
envs->x_ctr[1] = bas(NCTR_OF, j_sh);
/* number of Cartesian components per shell: (l+1)(l+2)/2 */
envs->nfi = (envs->i_l+1)*(envs->i_l+2)/2;
envs->nfj = (envs->j_l+1)*(envs->j_l+2)/2;
envs->nf = envs->nfi * envs->nfj;
envs->common_factor = 1;
envs->gbits = ng[GSHIFT];
envs->ncomp_e1 = ng[POS_E1];
envs->ncomp_tensor = ng[TENSOR];
/* "ceiling" angular momenta: shell l plus any derivative increment from ng */
envs->li_ceil = envs->i_l + ng[IINC];
envs->lj_ceil = envs->j_l + ng[JINC];
if (ng[RYS_ROOTS] > 0) {
envs->nrys_roots = ng[RYS_ROOTS];
} else {
envs->nrys_roots = (envs->li_ceil + envs->lj_ceil)/2 + 1;
}
/* pointers to the coordinates of the atoms hosting each shell */
envs->ri = env + atm(PTR_COORD, bas(ATOM_OF, i_sh));
envs->rj = env + atm(PTR_COORD, bas(ATOM_OF, j_sh));
/* Layout of the recursion buffer g: the smaller of the two angular momenta
 * only needs li+1 (resp. lj+1) slots; the larger carries the combined range
 * li+lj+1 so horizontal recursion can transfer between the two. */
int dli, dlj;
if (envs->li_ceil < envs->lj_ceil) {
dli = envs->li_ceil + 1;
dlj = envs->li_ceil + envs->lj_ceil + 1;
} else {
dli = envs->li_ceil + envs->lj_ceil + 1;
dlj = envs->lj_ceil + 1;
}
envs->g_stride_i = 1;
envs->g_stride_j = dli;
envs->g_size = dli * dlj;
/* no k/l shells in a one-electron pair; keep the 2e fields inert */
envs->lk_ceil = 1;
envs->ll_ceil = 1;
envs->g_stride_k = 0;
envs->g_stride_l = 0;
}
void CINTcart_comp(int *nx, int *ny, int *nz, const int lmax);
/*
 * Build the (x,y,z) address table for assembling Cartesian pair functions
 * from the 1D recursion buffer g.  For every (j,i) Cartesian component pair
 * three consecutive slots of idx receive the offsets of the x, y and z
 * factors inside g; the y and z planes of g start at g_size and 2*g_size.
 */
static void _g2c_index_xyz(int *idx, const CINTEnvVars *envs)
{
const int di = envs->g_stride_i;
const int dj = envs->g_stride_j;
int ix[CART_MAX], iy[CART_MAX], iz[CART_MAX];
int jx[CART_MAX], jy[CART_MAX], jz[CART_MAX];
/* Cartesian exponent triplets (nx,ny,nz) for each component of shell i/j */
CINTcart_comp(ix, iy, iz, envs->i_l);
CINTcart_comp(jx, jy, jz, envs->j_l);
const int ofy = envs->g_size;
const int ofz = ofy * 2;
int *out = idx;
int i, j;
for (j = 0; j < envs->nfj; j++) {
/* per-j base offsets into the x, y and z planes of g */
const int bx = dj * jx[j];
const int by = ofy + dj * jy[j];
const int bz = ofz + dj * jz[j];
for (i = 0; i < envs->nfi; i++) {
out[0] = bx + di * ix[i];
out[1] = by + di * iy[i];
out[2] = bz + di * iz[i];
out += 3;
}
}
}
/* Number of Cartesian Gaussian components for angular momentum l:
 * _LEN_CART[l] = (l+1)*(l+2)/2, for l = 0..15. */
static const int _LEN_CART[] = {
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105, 120, 136
};
/* Cumulative Cartesian component counts: _CUM_LEN_CART[l] is the total
 * number of Cartesian components for all shells with angular momentum <= l,
 * i.e. the running sum of _LEN_CART (1, 1+3, 1+3+6, ...). */
static const int _CUM_LEN_CART[] = {
1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364, 455, 560, 680, 816,
};
/*
* WHEREX_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if x > 0]
* WHEREY_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if y > 0]
* WHEREZ_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if z > 0]
*/
static const int _UPIDY[] = {
1,
3, 4,
6, 7, 8,
10, 11, 12, 13,
15, 16, 17, 18, 19,
21, 22, 23, 24, 25, 26,
28, 29, 30, 31, 32, 33, 34,
36, 37, 38, 39, 40, 41, 42, 43,
45, 46, 47, 48, 49, 50, 51, 52, 53,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76,
78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,
105,106,107,108,109,110,111,112,113,114,115,116,117,118,
120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,
};
static const int _UPIDZ[] = {
2,
4, 5,
7, 8, 9,
11, 12, 13, 14,
16, 17, 18, 19, 20,
22, 23, 24, 25, 26, 27,
29, 30, 31, 32, 33, 34, 35,
37, 38, 39, 40, 41, 42, 43, 44,
46, 47, 48, 49, 50, 51, 52, 53, 54,
56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,
};
/*
* _DOWN_XYZ, _DOWN_XYZ_ORDER, _DOWN1, _DOWN2 labels the index in the 1D
* recursive relation f_{i+1} = i/2a * f_{i-1} + X * f_{i}
* _DOWN_XYZ_ORDER i in i/2a
* _DOWN2 index of f_{i-1}
* _DOWN_XYZ index of X
* _DOWN1 index of f_{i}
*/
static const int _DOWN1[] = {
-1,
0, 0, 0,
0, 1, 2, 1, 2, 2,
0, 0, 0, 3, 4, 5, 3, 3, 5, 5,
0, 0, 0, 3, 2, 5, 6, 7, 8, 9, 6, 6, 8, 9, 9,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 11, 12, 13, 14, 10, 10, 12, 13, 14, 14,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 16, 17, 18, 19, 20, 15, 15, 17, 18, 19, 20, 20,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 22, 23, 24, 25, 26, 27, 21, 21, 23, 24, 25, 26, 27, 27,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 29, 30, 31, 32, 33, 34, 35, 28, 28, 30, 31, 32, 33, 34, 35, 35,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 36, 36, 38, 39, 40, 41, 42, 43, 44, 44,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 45, 45, 47, 48, 49, 50, 51, 52, 53, 54, 54,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 55, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 65,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 66, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 77,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 78, 78, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 90,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 91, 91, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 104,
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 78, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 90, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 105, 105, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 119,
};
static const int _DOWN2[] = {
-1,
-1, -1, -1,
0, -1, -1, 0, -1, 0,
0, -1, -1, -1, -1, -1, 1, -1, -1, 2,
0, -1, -1, 3, -1, 5, -1, -1, -1, -1, 3, -1, 5, -1, 5,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, -1, -1, -1, -1, -1, 6, -1, 8, 9, -1, 9,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, -1, -1, -1, -1, -1, -1, 10, -1, 12, 13, 14, -1, 14,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, -1, -1, -1, -1, -1, -1, -1, 15, -1, 17, 18, 19, 20, -1, 20,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, 21, -1, 23, 24, 25, 26, 27, -1, 27,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, 30, 31, 32, 33, 34, 35, -1, 35,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, 38, 39, 40, 41, 42, 43, 44, -1, 44,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, 47, 48, 49, 50, 51, 52, 53, 54, -1, 54,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 65,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, -1, 77,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -1, 90,
0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, -1, 104, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, -1, 104,
};
static const int _DOWN_XYZ[] = {
2,
0, 1, 2,
0, 0, 0, 1, 1, 2,
0, 1, 2, 0, 0, 0, 1, 2, 1, 2,
0, 1, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
};
/* Integer prefactor table for the angular-momentum recurrence, consumed
 * through DEC1_XYZ_ORDER(l,m) in vrr1d_withGv: the value is multiplied
 * into .5/aij for the two-steps-down (|l-2>) term of component m of
 * shell l.  Rows for l = 0..14 are concatenated; row l is indexed via
 * _CUM_LEN_CART[l-1]. */
static const int _DOWN_XYZ_ORDER[] = {
0,
0, 0, 0,
1, 0, 0, 1, 0, 1,
2, 0, 0, 0, 0, 0, 2, 0, 0, 2,
3, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 3,
4, 0, 0, 2, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 4, 0, 2, 1, 0, 4,
5, 0, 0, 3, 0, 3, 2, 0, 0, 2, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 5, 0, 3, 2, 1, 0, 5,
6, 0, 0, 4, 0, 4, 3, 0, 0, 3, 2, 0, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 4, 3, 2, 1, 0, 6,
7, 0, 0, 5, 0, 5, 4, 0, 0, 4, 3, 0, 3, 0, 3, 2, 0, 2, 2, 0, 2, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 5, 4, 3, 2, 1, 0, 7,
8, 0, 0, 6, 0, 6, 5, 0, 0, 5, 4, 0, 4, 0, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 6, 5, 4, 3, 2, 1, 0, 8,
9, 0, 0, 7, 0, 7, 6, 0, 0, 6, 5, 0, 5, 0, 5, 4, 0, 4, 4, 0, 4, 3, 0, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 7, 6, 5, 4, 3, 2, 1, 0, 9,
10, 0, 0, 8, 0, 8, 7, 0, 0, 7, 6, 0, 6, 0, 6, 5, 0, 5, 5, 0, 5, 4, 0, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10,
11, 0, 0, 9, 0, 9, 8, 0, 0, 8, 7, 0, 7, 0, 7, 6, 0, 6, 6, 0, 6, 5, 0, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 11,
12, 0, 0, 10, 0, 10, 9, 0, 0, 9, 8, 0, 8, 0, 8, 7, 0, 7, 7, 0, 7, 6, 0, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 12,
13, 0, 0, 11, 0, 11, 10, 0, 0, 10, 9, 0, 9, 0, 9, 8, 0, 8, 8, 0, 8, 7, 0, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 13,
14, 0, 0, 12, 0, 12, 11, 0, 0, 11, 10, 0, 10, 0, 10, 9, 0, 9, 9, 0, 9, 8, 0, 8, 8, 8, 0, 8, 7, 0, 7, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 14,
};
/* Bookkeeping macros for walking the Cartesian component tables.
 * WHERE{X,Y,Z}_IF_L_INC1(i): index of component i after one quantum is
 *   added in x/y/z (x keeps the index; y/z go through lookup tables).
 * START{X,Y,Z}_IF_L_DEC1(l): first ket component index handled by the
 *   x/y/z pass when lowering lj by one (see vrr2d_ket_inc1_withGv).
 * ADDR_IF_L_DEC1/2(l,m): offsets of the one- and two-steps-down terms
 *   for component m of shell l; DEC2 may be negative, meaning absent.
 * DEC1_XYZ(l,m): Cartesian direction (0/1/2) used by the recurrence.
 * DEC1_XYZ_ORDER(l,m): integer prefactor of the two-steps-down term. */
#define WHEREX_IF_L_INC1(i) i
#define WHEREY_IF_L_INC1(i) _UPIDY[i]
#define WHEREZ_IF_L_INC1(i) _UPIDZ[i]
#define STARTX_IF_L_DEC1(i) 0
#define STARTY_IF_L_DEC1(i) ((i<2)?0:_LEN_CART[i-2])
#define STARTZ_IF_L_DEC1(i) (_LEN_CART[i-1]-1)
#define ADDR_IF_L_DEC1(l,m) _DOWN1[_CUM_LEN_CART[l-1]+m]
#define ADDR_IF_L_DEC2(l,m) _DOWN2[_CUM_LEN_CART[l-1]+m]
#define DEC1_XYZ(l,m) _DOWN_XYZ[_CUM_LEN_CART[l-1]+m]
#define DEC1_XYZ_ORDER(l,m) _DOWN_XYZ_ORDER[_CUM_LEN_CART[l-1]+m]
/*
 * 1D vertical recurrence over all G vectors: starting from the s-type
 * Fourier factor in g[0..NGv), build every Cartesian component up to
 * angular momentum topl, one NGv-long vector per component:
 *   |l+1,m> = (rijri[m] - i*G_m/(2 aij)) |l> + order/(2 aij) |l-1>
 * with the index bookkeeping driven by DEC1_XYZ / ADDR_IF_L_DEC1/2 /
 * DEC1_XYZ_ORDER.  rijri = rij - ri (or rij - rj; caller's choice).
 * Returns the number of component vectors written
 * (1 + sum of _LEN_CART[1..topl]), or 0 if scratch allocation failed.
 */
static int vrr1d_withGv(double complex *g, double *rijri, double aij,
                        double *Gv, int topl, size_t NGv)
{
    int cumxyz = 1;
    if (topl == 0) {
        return cumxyz;
    }
    double *kx = Gv;
    double *ky = kx + NGv;
    double *kz = ky + NGv;
    int i, n, m, l;
    double a2;
    double complex *p0, *p1, *p2, *dec1, *dec2;
    double *ka2 = malloc(sizeof(double) * NGv*3);
    if (ka2 == NULL) {
        /* Previously a failed allocation was dereferenced below. */
        fprintf(stderr, "ka2 = malloc(%zu) failed in vrr1d_withGv\n",
                sizeof(double) * NGv * 3);
        return 0;
    }
    double *kxa2 = ka2;
    double *kya2 = kxa2 + NGv;
    double *kza2 = kya2 + NGv;
    /* Precompute G/(2 aij) for each grid point and direction. */
    a2 = .5 / aij;
    for (n = 0; n < NGv; n++) {
        kxa2[n] = kx[n] * a2;
        kya2[n] = ky[n] * a2;
        kza2[n] = kz[n] * a2;
    }
    /* l = 0 -> 1: the three p components from the s component. */
    p0 = g + NGv;
    for (n = 0; n < NGv; n++) {
        p0[ n] = (rijri[0] - kxa2[n]*_Complex_I) * g[n];
        p0[NGv +n] = (rijri[1] - kya2[n]*_Complex_I) * g[n];
        p0[NGv*2+n] = (rijri[2] - kza2[n]*_Complex_I) * g[n];
    }
    cumxyz += 3;
    for (l = 1; l < topl; l++) {
        p0 = g + cumxyz * NGv;                /* output block for l+1 */
        dec1 = p0 - _LEN_CART[l ] * NGv;      /* block holding |l>   */
        dec2 = dec1 - _LEN_CART[l-1] * NGv;   /* block holding |l-1> */
        for (i = 0; i < _LEN_CART[l+1]; i++) {
            m = DEC1_XYZ(l+1,i);              /* recurrence direction */
            kxa2 = ka2 + m * NGv;
            p1 = dec1 + ADDR_IF_L_DEC1(l+1,i) * NGv;
            p2 = dec2 + ADDR_IF_L_DEC2(l+1,i) * NGv;
            if (ADDR_IF_L_DEC2(l+1,i) < 0) {
                /* No |l-1> contribution for this component. */
                for (n = 0; n < NGv; n++) {
                    p0[n] = (rijri[m]-kxa2[n]*_Complex_I)*p1[n];
                }
            } else {
                a2 = .5/aij * DEC1_XYZ_ORDER(l+1,i);
                for (n = 0; n < NGv; n++) {
                    p0[n] = a2*p2[n] + (rijri[m]-kxa2[n]*_Complex_I)*p1[n];
                }
            }
            p0 += NGv;
        }
        cumxyz += _LEN_CART[l+1];
    }
    free(ka2);
    return cumxyz;
}
/*
* if li = 3, lj = 1
* (10 + X*00 -> 01):
* gs + X*fs -> fp
*/
/*
 * Horizontal recurrence raising the ket by one quantum:
 *     out(i, j) = g10(i', j-1) + (ri-rj)_d * g00(i, j-1)
 * where g holds the (li, lj-1) block (g00) followed by the
 * (li+1, lj-1) block (g10), each entry being NGv grid points, and the
 * three passes below handle the ket components raised in x, y and z
 * (i' given by WHERE*_IF_L_INC1).  rirj = ri - rj.
 */
static void vrr2d_ket_inc1_withGv(double complex *out, const double complex *g,
double *rirj, int li, int lj, size_t NGv)
{
if (lj == 0) {
/* Nothing to raise: pass the (li, 0) block through. */
NPzcopy(out, g, _LEN_CART[li]*NGv);
return;
}
const int row_10 = _LEN_CART[li+1];
const int row_00 = _LEN_CART[li ];
const int col_00 = _LEN_CART[lj-1];
const double complex *g00 = g;
const double complex *g10 = g + row_00*col_00*NGv;
int i, j, n;
const double complex *p00, *p10;
double complex *p01 = out;
/* x-raised ket components */
for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
for (i = 0; i < row_00; i++) {
p00 = g00 + (j*row_00+i) * NGv;
p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv;
for (n = 0; n < NGv; n++) {
p01[n] = p10[n] + rirj[0] * p00[n];
}
p01 += NGv;
} }
/* y-raised ket components */
for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
for (i = 0; i < row_00; i++) {
p00 = g00 + (j*row_00+i) * NGv;
p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv;
for (n = 0; n < NGv; n++) {
p01[n] = p10[n] + rirj[1] * p00[n];
}
p01 += NGv;
} }
/* z-raised ket component: only the last source column contributes */
j = STARTZ_IF_L_DEC1(lj);
if (j < _LEN_CART[lj-1]) {
for (i = 0; i < row_00; i++) {
p00 = g00 + (j*row_00+i) * NGv;
p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv;
for (n = 0; n < NGv; n++) {
p01[n] = p10[n] + rirj[2] * p00[n];
}
p01 += NGv;
} }
}
/*
* transpose i, j when storing into out
*/
/*
 * Same ket-raising recurrence as vrr2d_ket_inc1_withGv, but the result
 * is stored with the bra and ket indices transposed: component (i, j)
 * of the raised block lands at out[(i*row_01 + j) * NGv] instead of
 * being written sequentially.  Used by hrr2d_withGv to undo the i/j
 * role swap of its build-up phase.
 */
static void vrr2d_inc1_swapij(double complex *out, const double complex *g,
double *rirj, int li, int lj, size_t NGv)
{
if (lj == 0) {
NPzcopy(out, g, _LEN_CART[li]*NGv);
return;
}
const int row_01 = _LEN_CART[lj];
const int row_10 = _LEN_CART[li+1];
const int row_00 = _LEN_CART[li ];
const int col_00 = _LEN_CART[lj-1];
const double complex *g00 = g;
const double complex *g10 = g + row_00*col_00*NGv;
int i, j, n;
const double complex *p00, *p10;
double complex *p01 = out;
/* x-raised ket components; out advances by one column per j */
for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
for (i = 0; i < row_00; i++) {
p00 = g00 + (j*row_00+i) * NGv;
p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv;
p01 = out + i*row_01 * NGv;
for (n = 0; n < NGv; n++) {
p01[n] = p10[n] + rirj[0] * p00[n];
}
}
out += NGv;
}
/* y-raised ket components */
for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
for (i = 0; i < row_00; i++) {
p00 = g00 + (j*row_00+i) * NGv;
p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv;
p01 = out + i*row_01 * NGv;
for (n = 0; n < NGv; n++) {
p01[n] = p10[n] + rirj[1] * p00[n];
}
}
out += NGv;
}
/* z-raised ket component */
j = STARTZ_IF_L_DEC1(lj);
if (j < _LEN_CART[lj-1]) {
for (i = 0; i < row_00; i++) {
p00 = g00 + (j*row_00+i) * NGv;
p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv;
p01 = out + i*row_01 * NGv;
for (n = 0; n < NGv; n++) {
p01[n] = p10[n] + rirj[2] * p00[n];
}
}
}
}
/*
 * Assemble the (li, lj) shell-pair block from the contiguous list g of
 * 1D-recurrence components (angular momenta li..li+lj at lj = 0) by
 * repeatedly raising the ket with vrr2d_ket_inc1_withGv.  g and gbuf2
 * alternate as source/destination scratch; the final raise writes out.
 */
static void vrr2d_withGv(double complex *out, double complex *g,
double complex *gbuf2, const int li, const int lj,
const double *ri, const double *rj, size_t NGv)
{
const int nmax = li + lj;
double complex *g00, *g01, *gswap, *pg00, *pg01;
int row_01, col_01, row_00, col_00;
int i, j;
double rirj[3];
rirj[0] = ri[0] - rj[0];
rirj[1] = ri[1] - rj[1];
rirj[2] = ri[2] - rj[2];
g00 = gbuf2;
g01 = g;
for (j = 1; j < lj; j++) {
/* ping-pong the buffers for this ket-raising pass */
gswap = g00;
g00 = g01;
g01 = gswap;
pg00 = g00;
pg01 = g01;
for (i = li; i <= nmax-j; i++) {
vrr2d_ket_inc1_withGv(pg01, pg00, rirj, i, j, NGv);
row_01 = _LEN_CART[i];
col_01 = _LEN_CART[j];
row_00 = _LEN_CART[i ];
col_00 = _LEN_CART[j-1];
pg00 += row_00*col_00 * NGv;
pg01 += row_01*col_01 * NGv;
}
}
vrr2d_ket_inc1_withGv(out, g01, rirj, li, lj, NGv);
}
/* (0,li+lj) => (li,lj) */
/* (0,li+lj) => (li,lj) */
/*
 * Same construction as vrr2d_withGv but with the bra/ket roles swapped
 * during the build-up (quanta are moved with rj - ri, i and j loops
 * exchanged); the last step uses vrr2d_inc1_swapij so the output is
 * stored in the normal (li, lj) layout.
 */
static void hrr2d_withGv(double complex *out, double complex *g,
double complex *gbuf2, const int li, const int lj,
const double *ri, const double *rj, size_t NGv)
{
const int nmax = li + lj;
double complex *g00, *g01, *gswap, *pg00, *pg01;
int row_01, col_01, row_00, col_00;
int i, j;
double rjri[3];
rjri[0] = rj[0] - ri[0];
rjri[1] = rj[1] - ri[1];
rjri[2] = rj[2] - ri[2];
g00 = gbuf2;
g01 = g;
for (i = 1; i < li; i++) {
/* ping-pong the buffers for this raising pass */
gswap = g00;
g00 = g01;
g01 = gswap;
pg00 = g00;
pg01 = g01;
for (j = lj; j <= nmax-i; j++) {
vrr2d_ket_inc1_withGv(pg01, pg00, rjri, j, i, NGv);
row_01 = _LEN_CART[j];
col_01 = _LEN_CART[i];
row_00 = _LEN_CART[j ];
col_00 = _LEN_CART[i-1];
pg00 += row_00*col_00 * NGv;
pg01 += row_01*col_01 * NGv;
}
}
vrr2d_inc1_swapij(out, g01, rjri, lj, li, NGv);
}
/*
* Recursive relation
*/
/*
 * Recursion driver for the early-contraction path when i_l >= j_l:
 * evaluate the Fourier factor at the Gaussian product center, then run
 * the 1D vertical recurrence (relative to ri) up to li+lj quanta.
 */
static void aopair_rr_igtj_early(double complex *g, double ai, double aj,
                                 CINTEnvVars *envs, FPtr_eval_gz eval_gz,
                                 double complex fac, double *Gv, double *b,
                                 int *gxyz, int *gs, size_t NGv, double *cache)
{
    const double zeta = ai + aj;
    const int ltot = envs->li_ceil + envs->lj_ceil;
    const double *ri = envs->ri;
    const double *rj = envs->rj;
    double rP[3], rPri[3];
    int x;
    for (x = 0; x < 3; x++) {
        /* Gaussian product center and its offset from center i */
        rP[x] = (ai * ri[x] + aj * rj[x]) / zeta;
        rPri[x] = rP[x] - ri[x];
    }
    (*eval_gz)(g, zeta, rP, fac, Gv, b, gxyz, gs, NGv, cache);
    vrr1d_withGv(g, rPri, zeta, Gv, ltot, NGv);
}
/*
 * Counterpart of aopair_rr_igtj_early for i_l < j_l: identical except
 * the 1D recurrence is run relative to center j (rij - rj).
 */
static void aopair_rr_iltj_early(double complex *g, double ai, double aj,
                                 CINTEnvVars *envs, FPtr_eval_gz eval_gz,
                                 double complex fac, double *Gv, double *b,
                                 int *gxyz, int *gs, size_t NGv, double *cache)
{
    const double zeta = ai + aj;
    const int ltot = envs->li_ceil + envs->lj_ceil;
    const double *ri = envs->ri;
    const double *rj = envs->rj;
    double rP[3], rPrj[3];
    int x;
    for (x = 0; x < 3; x++) {
        /* Gaussian product center and its offset from center j */
        rP[x] = (ai * ri[x] + aj * rj[x]) / zeta;
        rPrj[x] = rP[x] - rj[x];
    }
    (*eval_gz)(g, zeta, rP, fac, Gv, b, gxyz, gs, NGv, cache);
    vrr1d_withGv(g, rPrj, zeta, Gv, ltot, NGv);
}
/*
 * Lazy-contraction recursion for i_l >= j_l: fills the separable
 * gx/gy/gz tables (each g_size*NGv entries).  First the bra index is
 * raised up to nmax = li+lj relative to the Gaussian product center,
 * then quanta are transferred to the ket (stride dj) using ri - rj.
 * Grid points where eval_gz produced gz[n] == 0 are skipped throughout.
 */
static void aopair_rr_igtj_lazy(double complex *g, double ai, double aj,
CINTEnvVars *envs, FPtr_eval_gz eval_gz,
double complex fac, double *Gv, double *b,
int *gxyz, int *gs, size_t NGv, double *cache)
{
const int nmax = envs->li_ceil + envs->lj_ceil;
const int lj = envs->lj_ceil;
const int dj = envs->g_stride_j;
const double aij = ai + aj;
const double a2 = .5 / aij;
const double *ri = envs->ri;
const double *rj = envs->rj;
double rij[3], rirj[3], rijri[3];
double complex *gx = g;
double complex *gy = gx + envs->g_size * NGv;
double complex *gz = gy + envs->g_size * NGv;
double *kx = Gv;
double *ky = kx + NGv;
double *kz = ky + NGv;
size_t off0, off1, off2;
int i, j, n, ptr;
double ia2;
rirj[0] = ri[0] - rj[0];
rirj[1] = ri[1] - rj[1];
rirj[2] = ri[2] - rj[2];
rij[0] = (ai * ri[0] + aj * rj[0]) / aij;
rij[1] = (ai * ri[1] + aj * rj[1]) / aij;
rij[2] = (ai * ri[2] + aj * rj[2]) / aij;
rijri[0] = rij[0] - ri[0];
rijri[1] = rij[1] - ri[1];
rijri[2] = rij[2] - ri[2];
/* s-type seeds: gx = gy = 1, gz carries the Fourier factor */
for (n = 0; n < NGv; n++) {
gx[n] = 1;
gy[n] = 1;
}
(*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache);
/* vertical recurrence: raise the bra index 0 -> nmax */
if (nmax > 0) {
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[NGv+n] = (rijri[0] - kx[n]*a2*_Complex_I) * gx[n];
gy[NGv+n] = (rijri[1] - ky[n]*a2*_Complex_I) * gy[n];
gz[NGv+n] = (rijri[2] - kz[n]*a2*_Complex_I) * gz[n];
}
}
}
for (i = 1; i < nmax; i++) {
off0 = (i-1) * NGv;
off1 = i * NGv;
off2 = (i+1) * NGv;
ia2 = i * a2;
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off2+n] = ia2 * gx[off0+n] + (rijri[0] - kx[n]*a2*_Complex_I) * gx[off1+n];
gy[off2+n] = ia2 * gy[off0+n] + (rijri[1] - ky[n]*a2*_Complex_I) * gy[off1+n];
gz[off2+n] = ia2 * gz[off0+n] + (rijri[2] - kz[n]*a2*_Complex_I) * gz[off1+n];
}
}
}
/* horizontal recurrence: move quanta from bra to ket */
for (j = 1; j <= lj; j++) {
ptr = dj * j;
for (i = ptr; i <= ptr + nmax - j; i++) {
off0 = i * NGv - dj * NGv; // [i, j-1]
off1 = (i+1) * NGv - dj * NGv; // [i+1,j-1]
off2 = i * NGv; // [i, j ]
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n];
gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n];
gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n];
}
}
}
}
}
/*
 * Lazy-contraction recursion for i_l < j_l: mirror image of
 * aopair_rr_igtj_lazy.  The vertical recurrence raises the ket index
 * (stride dj) relative to center j, then quanta are moved to the bra.
 * NOTE: the local rirj actually holds rj - ri here (sign flipped
 * relative to the igtj variant), matching the swapped transfer
 * direction.  Grid points with gz[n] == 0 are skipped throughout.
 */
static void aopair_rr_iltj_lazy(double complex *g, double ai, double aj,
CINTEnvVars *envs, FPtr_eval_gz eval_gz,
double complex fac, double *Gv, double *b,
int *gxyz, int *gs, size_t NGv, double *cache)
{
const int nmax = envs->li_ceil + envs->lj_ceil;
const int li = envs->li_ceil;
const int dj = envs->g_stride_j;
const double aij = ai + aj;
const double a2 = .5 / aij;
const double *ri = envs->ri;
const double *rj = envs->rj;
double rij[3], rirj[3], rijrj[3];
double complex *gx = g;
double complex *gy = gx + envs->g_size * NGv;
double complex *gz = gy + envs->g_size * NGv;
double *kx = Gv;
double *ky = kx + NGv;
double *kz = ky + NGv;
size_t off0, off1, off2;
int i, j, n;
double ia2;
rirj[0] = rj[0] - ri[0];
rirj[1] = rj[1] - ri[1];
rirj[2] = rj[2] - ri[2];
rij[0] = (ai * ri[0] + aj * rj[0]) / aij;
rij[1] = (ai * ri[1] + aj * rj[1]) / aij;
rij[2] = (ai * ri[2] + aj * rj[2]) / aij;
rijrj[0] = rij[0] - rj[0];
rijrj[1] = rij[1] - rj[1];
rijrj[2] = rij[2] - rj[2];
/* s-type seeds: gx = gy = 1, gz carries the Fourier factor */
for (n = 0; n < NGv; n++) {
gx[n] = 1;
gy[n] = 1;
}
(*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv, cache);
/* vertical recurrence on the ket index (stride dj) */
if (nmax > 0) {
off0 = dj * NGv;
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off0+n] = (rijrj[0] - kx[n]*a2*_Complex_I) * gx[n];
gy[off0+n] = (rijrj[1] - ky[n]*a2*_Complex_I) * gy[n];
gz[off0+n] = (rijrj[2] - kz[n]*a2*_Complex_I) * gz[n];
}
}
}
for (i = 1; i < nmax; i++) {
off0 = (i-1) * dj * NGv;
off1 = i * dj * NGv;
off2 = (i+1) * dj * NGv;
ia2 = i * a2;
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off2+n] = ia2 * gx[off0+n] + (rijrj[0] - kx[n]*a2*_Complex_I) * gx[off1+n];
gy[off2+n] = ia2 * gy[off0+n] + (rijrj[1] - ky[n]*a2*_Complex_I) * gy[off1+n];
gz[off2+n] = ia2 * gz[off0+n] + (rijrj[2] - kz[n]*a2*_Complex_I) * gz[off1+n];
}
}
}
/* horizontal recurrence: move quanta from ket to bra */
for (i = 1; i <= li; i++) {
for (j = 0; j <= nmax - i; j++) {
off0 = (i-1) * NGv + j * dj * NGv; // [i-1,j ]
off1 = (i-1) * NGv + (j+1) * dj * NGv; // [i-1,j+1]
off2 = i * NGv + j * dj * NGv; // [i ,j ]
for (n = 0; n < NGv; n++) {
if (gz[n] != 0) {
gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n];
gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n];
gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n];
}
}
}
}
}
/*
 * Combine the separable gx/gy/gz tables into per-function products:
 *   gout[n,k] (=|+=) gx[idx_x]*gy[idx_y]*gz[idx_z]   at grid point k.
 * empty != 0 overwrites gout (zero-filling points where gz vanished);
 * otherwise accumulates, leaving vanished points untouched.
 */
static void inner_prod(double complex *g, double complex *gout,
                       int *idx, const CINTEnvVars *envs,
                       double *Gv, size_t NGv, int empty)
{
    const double complex *gz = g + envs->g_size * NGv * 2;
    int n, k;
    for (n = 0; n < envs->nf; n++) {
        const double complex *px = g + (size_t)idx[n*3+0] * NGv;
        const double complex *py = g + (size_t)idx[n*3+1] * NGv;
        const double complex *pz = g + (size_t)idx[n*3+2] * NGv;
        double complex *po = gout + n * NGv;
        for (k = 0; k < NGv; k++) {
            if (gz[k] != 0) {
                if (empty) {
                    po[k] = px[k] * py[k] * pz[k];
                } else {
                    po[k] += px[k] * py[k] * pz[k];
                }
            } else if (empty) {
                po[k] = 0;
            }
        }
    }
}
/*
 * Contract one primitive block gp (nf complex values) into the nctr
 * contracted blocks of gc, using coefficients coeff[nprim*n] (stride
 * nprim between contractions).  empty != 0 overwrites gc; otherwise
 * the products are accumulated, and zero coefficients are skipped.
 */
static void prim_to_ctr(double complex *gc, const size_t nf, double complex *gp,
                        const int nprim, const int nctr, const double *coeff,
                        int empty)
{
    size_t k;
    int ic;
    for (ic = 0; ic < nctr; ic++, gc += nf) {
        const double c = coeff[(size_t)nprim * ic];
        if (empty) {
            for (k = 0; k < nf; k++) {
                gc[k] = gp[k] * c;
            }
        } else if (c != 0) {
            for (k = 0; k < nf; k++) {
                gc[k] += gp[k] * c;
            }
        }
    }
}
/*
 * Reorder in[n][ic][0..NGv) (tensor component fastest after the grid)
 * into out[ic][n][0..NGv) (component slowest), i.e. gather each tensor
 * component into a contiguous nf*NGv slab.
 */
static void transpose(double complex *out, double complex *in,
                      int nf, int comp, size_t NGv)
{
    size_t k;
    int n, ic;
    for (ic = 0; ic < comp; ic++) {
        for (n = 0; n < nf; n++) {
            const double complex *src = in + ((size_t)n * comp + ic) * NGv;
            double complex *dst = out + ((size_t)ic * nf + n) * NGv;
            for (k = 0; k < NGv; k++) {
                dst[k] = src[k];
            }
        }
    }
}
/* Scratch-buffer sizes (in units of NGv complex values) used by
 * GTO_aopair_early_contract, tabulated per (li, lj) shell pair in
 * triangular order.  NOTE(review): the table has a fixed extent --
 * confirm it covers the maximum supported angular momentum before
 * raising that limit. */
static const int _GBUFSIZE[] = {
1, 4, 10, 10, 20, 48, 20, 35, 75, 150, 35, 56, 108, 216, 384,
56, 84, 147, 294, 510, 850, 84, 120, 192, 384, 654, 1090, 1640,
120, 165, 243, 486, 816, 1360, 2040, 3030
};
/* Symmetric triangular lookup: bufsize(i,j) == bufsize(j,i). */
#define bufsize(i,j) _GBUFSIZE[((i>=j) ? (i*(i+1)/2+j) : (j*(j+1)/2+i))]
/*
 * Fourier-transformed AO pair via "early contraction": the 1D vertical
 * recurrence is evaluated per primitive pair and contracted
 * immediately; the 2D (bra,ket) assembly then runs once per contracted
 * function.  Screens primitive pairs with exp cutoff eij > EXP_CUTOFF.
 * Returns 1 if any integrals were produced, 0 otherwise (including
 * allocation failure, in which case out is left untouched).
 */
int GTO_aopair_early_contract(double complex *out, CINTEnvVars *envs,
                              FPtr_eval_gz eval_gz, double complex fac,
                              double *Gv, double *b, int *gxyz, int *gs,
                              size_t NGv, double *cache)
{
    const int *shls = envs->shls;
    const int *bas = envs->bas;
    const double *env = envs->env;
    const int i_sh = shls[0];
    const int j_sh = shls[1];
    const int i_l = envs->i_l;
    const int j_l = envs->j_l;
    const int i_ctr = envs->x_ctr[0];
    const int j_ctr = envs->x_ctr[1];
    const int i_prim = bas(NPRIM_OF, i_sh);
    const int j_prim = bas(NPRIM_OF, j_sh);
    const int nf = envs->nf;
    const double *ri = envs->ri;
    const double *rj = envs->rj;
    const double *ai = env + bas(PTR_EXP, i_sh);
    const double *aj = env + bas(PTR_EXP, j_sh);
    const double *ci = env + bas(PTR_COEFF, i_sh);
    const double *cj = env + bas(PTR_COEFF, j_sh);
    double fac1i, fac1j;
    double aij, dij, eij;
    int ip, jp, n;
    int empty[2] = {1, 1};
    int *jempty = empty + 0;
    int *iempty = empty + 1;
    const size_t len1 = bufsize(i_l,j_l) * NGv;
    const size_t leni = len1 * i_ctr;
    const size_t lenj = len1 * i_ctr * j_ctr;
    double complex *gctrj = malloc(sizeof(double complex)*(lenj+leni+len1));
    if (gctrj == NULL) {
        /* Previously execution continued and dereferenced NULL. */
        fprintf(stderr, "gctrj = malloc(%zu) failed in GTO_aopair_early_contract\n",
                sizeof(double complex) * (lenj + leni + len1));
        return 0;
    }
    double complex *g = gctrj + lenj;
    double complex *gctri, *g1d;
    if (j_ctr == 1) {
        /* single j-contraction: skip the intermediate buffer */
        gctri = gctrj;
        iempty = jempty;
    } else {
        gctri = g;
        g += leni;
    }
    g1d = g;
    void (*aopair_rr)();
    int offset_g1d;
    /* the 1D recurrence is run relative to the higher-l center */
    if (i_l >= j_l) {
        aopair_rr = aopair_rr_igtj_early;
        offset_g1d = _CUM_LEN_CART[i_l] - _LEN_CART[i_l];
    } else {
        aopair_rr = aopair_rr_iltj_early;
        offset_g1d = _CUM_LEN_CART[j_l] - _LEN_CART[j_l];
    }
    int len_g1d = _CUM_LEN_CART[i_l+j_l] - offset_g1d;
    double rrij = CINTsquare_dist(ri, rj);
    double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l);
    *jempty = 1;
    for (jp = 0; jp < j_prim; jp++) {
        if (j_ctr == 1) {
            fac1j = fac1 * cj[jp];
        } else {
            fac1j = fac1;
            *iempty = 1;
        }
        for (ip = 0; ip < i_prim; ip++) {
            aij = ai[ip] + aj[jp];
            eij = (ai[ip] * aj[jp] / aij) * rrij;
            if (eij > EXP_CUTOFF) {
                continue;       /* negligible primitive overlap */
            }
            dij = exp(-eij) / (aij * sqrt(aij));
            fac1i = fac1j * dij;
            (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz,
                         fac*fac1i, Gv, b, gxyz, gs, NGv, cache);
            prim_to_ctr(gctri, len_g1d*NGv, g1d+offset_g1d*NGv,
                        i_prim, i_ctr, ci+ip, *iempty);
            *iempty = 0;
        }
        if (!*iempty) {
            if (j_ctr > 1) {
                prim_to_ctr(gctrj, i_ctr*len_g1d*NGv, gctri,
                            j_prim,j_ctr, cj+jp, *jempty);
            }
            *jempty = 0;
        }
    }
    if (!*jempty) {
        /* 2D assembly per contracted pair */
        g1d = gctrj;
        for (n = 0; n < i_ctr*j_ctr; n++) {
            if (i_l >= j_l) {
                vrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj,
                             envs->li_ceil, envs->lj_ceil, ri, rj, NGv);
            } else {
                hrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj,
                             envs->li_ceil, envs->lj_ceil, ri, rj, NGv);
            }
            g1d += len_g1d * NGv;
        }
    }
    free(gctrj);
    return !*jempty;
}
/*
 * Fourier-transformed AO pair via "lazy contraction": the full 2D
 * recurrence tables are rebuilt for every primitive pair (f_gout forms
 * the products), then contracted over i and j.  Preferable when
 * i_prim*j_prim is small relative to i_ctr*j_ctr.
 * Returns 1 if any integrals were produced, 0 otherwise (including
 * allocation failure, in which case gctr is left untouched).
 */
int GTO_aopair_lazy_contract(double complex *gctr, CINTEnvVars *envs,
                             FPtr_eval_gz eval_gz, double complex fac,
                             double *Gv, double *b, int *gxyz, int *gs,
                             size_t NGv, double *cache)
{
    const int *shls = envs->shls;
    const int *bas = envs->bas;
    const double *env = envs->env;
    const int i_sh = shls[0];
    const int j_sh = shls[1];
    const int i_l = envs->i_l;
    const int j_l = envs->j_l;
    const int i_ctr = envs->x_ctr[0];
    const int j_ctr = envs->x_ctr[1];
    const int i_prim = bas(NPRIM_OF, i_sh);
    const int j_prim = bas(NPRIM_OF, j_sh);
    const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor;
    const int nf = envs->nf;
    const double *ri = envs->ri;
    const double *rj = envs->rj;
    const double *ai = env + bas(PTR_EXP, i_sh);
    const double *aj = env + bas(PTR_EXP, j_sh);
    const double *ci = env + bas(PTR_COEFF, i_sh);
    const double *cj = env + bas(PTR_COEFF, j_sh);
    double fac1i, fac1j;
    double aij, dij, eij;
    int ip, jp;
    int empty[3] = {1, 1, 1};
    int *jempty = empty + 0;
    int *iempty = empty + 1;
    int *gempty = empty + 2;
    const size_t len1 = envs->g_size * 3 * (1<<envs->gbits) * NGv;
    const size_t leng = nf * n_comp * NGv;
    const size_t leni = nf * i_ctr * n_comp * NGv;
    size_t lenj = 0;
    if (n_comp > 1) {
        lenj = nf * i_ctr * j_ctr * n_comp * NGv;
    }
    double complex *g = malloc(sizeof(double complex) * (len1+leng+leni+lenj));
    if (g == NULL) {
        /* Previously execution continued and dereferenced NULL. */
        fprintf(stderr, "g = malloc(%zu) failed in GTO_aopair_lazy_contract\n",
                sizeof(double complex) * (len1 + leng + leni + lenj));
        return 0;
    }
    double complex *g1 = g + len1;
    double complex *gout, *gctri, *gctrj;
    /* Collapse intermediate buffers where a contraction level is trivial. */
    if (n_comp == 1) {
        gctrj = gctr;
    } else {
        gctrj = g1;
        g1 += lenj;
    }
    if (j_ctr == 1) {
        gctri = gctrj;
        iempty = jempty;
    } else {
        gctri = g1;
        g1 += leni;
    }
    if (i_ctr == 1) {
        gout = gctri;
        gempty = iempty;
    } else {
        gout = g1;
    }
    void (*aopair_rr)();
    if (i_l >= j_l) {
        aopair_rr = aopair_rr_igtj_lazy;
    } else {
        aopair_rr = aopair_rr_iltj_lazy;
    }
    int *idx = malloc(sizeof(int) * nf * 3);
    if (idx == NULL) {
        /* Previously this allocation was not checked at all. */
        fprintf(stderr, "idx = malloc(%zu) failed in GTO_aopair_lazy_contract\n",
                sizeof(int) * nf * 3);
        free(g);
        return 0;
    }
    _g2c_index_xyz(idx, envs);
    double rrij = CINTsquare_dist(ri, rj);
    double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l);
    *jempty = 1;
    for (jp = 0; jp < j_prim; jp++) {
        envs->aj = aj[jp];
        if (j_ctr == 1) {
            fac1j = fac1 * cj[jp];
        } else {
            fac1j = fac1;
            *iempty = 1;
        }
        for (ip = 0; ip < i_prim; ip++) {
            envs->ai = ai[ip];
            aij = ai[ip] + aj[jp];
            eij = (ai[ip] * aj[jp] / aij) * rrij;
            if (eij > EXP_CUTOFF) {
                continue;       /* negligible primitive overlap */
            }
            dij = exp(-eij) / (aij * sqrt(aij));
            if (i_ctr == 1) {
                fac1i = fac1j * dij * ci[ip];
            } else {
                fac1i = fac1j * dij;
            }
            (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz,
                         fac*fac1i, Gv, b, gxyz, gs, NGv, cache);
            (*envs->f_gout)(g, gout, idx, envs, Gv, NGv, *gempty);
            if (i_ctr > 1) {
                prim_to_ctr(gctri, nf*n_comp*NGv, gout,
                            i_prim, i_ctr, ci+ip, *iempty);
            }
            *iempty = 0;
        }
        if (!*iempty) {
            if (j_ctr > 1) {
                prim_to_ctr(gctrj, i_ctr*nf*n_comp*NGv, gctri,
                            j_prim, j_ctr, cj+jp, *jempty);
            }
            *jempty = 0;
        }
    }
    if (n_comp > 1 && !*jempty) {
        /* reorder so each tensor component is contiguous */
        transpose(gctr, gctrj, nf*i_ctr*j_ctr, n_comp, NGv);
    }
    free(g);
    free(idx);
    return !*jempty;
}
/*
 * Generic eval_gz: for every G vector,
 *   out[n] = fac * exp(-|G|^2 / (4 aij)) * exp(-i G.rij)
 * and 0 beyond the |G|^2 screening threshold.  b, gxyz, gs and cache
 * are unused on this path.
 */
void GTO_Gv_general(double complex *out, double aij, double *rij,
                    double complex fac, double *Gv, double *b,
                    int *gxyz, int *gs, size_t NGv, double *cache)
{
    const double *gvx = Gv;
    const double *gvy = Gv + NGv;
    const double *gvz = Gv + NGv * 2;
    const double gmax2 = EXP_CUTOFF * aij * 4;
    size_t n;
    for (n = 0; n < NGv; n++) {
        const double g2 = gvx[n]*gvx[n] + gvy[n]*gvy[n] + gvz[n]*gvz[n];
        if (g2 < gmax2) {
            const double phase = gvx[n]*rij[0] + gvy[n]*rij[1] + gvz[n]*rij[2];
            out[n] = exp(-.25*g2/aij) * fac * (cos(phase) - sin(phase)*_Complex_I);
        } else {
            out[n] = 0;
        }
    }
}
/*
 * Gv = dot(b.T,gxyz) + kpt
 * kk = dot(Gv, Gv)
 * kr = dot(rij, Gv) = dot(rij,b.T, gxyz) + dot(rij,kpt) = dot(br, gxyz) + dot(rij,kpt)
 * out = fac * exp(-.25 * kk / aij) * (cos(kr) - sin(kr) * _Complex_I);
 *
 * b: the first 9 elements are 2\pi*inv(a^T), then 3 elements for k_{ij},
 * followed by nx+ny+nz doubles for the 1D grids Gxbase, Gybase, Gzbase
 */
/*
 * eval_gz specialized for an orthogonal cell with G vectors living on a
 * separable 1D-grid product: both the phase and the Gaussian damping
 * factorize per axis, so exp/cos/sin are computed once per 1D grid
 * index (gxyz holds the (ix, iy, iz) index triplet of each G vector)
 * and cached.  kkpool entries are initialized to -1 as a "not yet
 * computed" marker; fac is folded into the z factor only, so the
 * triple product carries it exactly once.  cache must provide
 * nx+ny+nz doubles followed by room for nx+ny+nz complex doubles.
 */
void GTO_Gv_orth(double complex *out, double aij, double *rij,
double complex fac, double *Gv, double *b,
int *gxyz, int *gs, size_t NGv, double *cache)
{
const int nx = gs[0];
const int ny = gs[1];
const int nz = gs[2];
double br[3]; // dot(rij, b)
br[0] = rij[0] * b[0];
br[1] = rij[1] * b[4];
br[2] = rij[2] * b[8];
double *kpt = b + 9;
double kr[3];
kr[0] = rij[0] * kpt[0];
kr[1] = rij[1] * kpt[1];
kr[2] = rij[2] * kpt[2];
double *Gxbase = b + 12;
double *Gybase = Gxbase + nx;
double *Gzbase = Gybase + ny;
double *kx = Gv;
double *ky = kx + NGv;
double *kz = ky + NGv;
/* per-axis caches: |k|^2/(4 aij) and the damped phase factors */
double *kkpool = cache;
double *kkx = kkpool;
double *kky = kkx + nx;
double *kkz = kky + ny;
double complex *zbuf = (double complex *)(kkz + nz);
double complex *csx = zbuf;
double complex *csy = csx + nx;
double complex *csz = csy + ny;
int *gx = gxyz;
int *gy = gx + NGv;
int *gz = gy + NGv;
const double cutoff = EXP_CUTOFF * aij * 4;
int n, ix, iy, iz;
double Gr;
for (n = 0; n < nx+ny+nz; n++) {
kkpool[n] = -1;
}
for (n = 0; n < NGv; n++) {
ix = gx[n];
iy = gy[n];
iz = gz[n];
if (kkx[ix] < 0) {
Gr = Gxbase[ix] * br[0] + kr[0];
kkx[ix] = .25 * kx[n]*kx[n] / aij;
csx[ix] = exp(-kkx[ix]) * (cos(Gr)-sin(Gr)*_Complex_I);
}
if (kky[iy] < 0) {
Gr = Gybase[iy] * br[1] + kr[1];
kky[iy] = .25 * ky[n]*ky[n] / aij;
csy[iy] = exp(-kky[iy]) * (cos(Gr)-sin(Gr)*_Complex_I);
}
if (kkz[iz] < 0) {
Gr = Gzbase[iz] * br[2] + kr[2];
kkz[iz] = .25 * kz[n]*kz[n] / aij;
csz[iz] = fac * exp(-kkz[iz]) * (cos(Gr)-sin(Gr)*_Complex_I);
}
/* screen on the total exponent accumulated from the three axes */
if (kkx[ix] + kky[iy] + kkz[iz] < cutoff) {
out[n] = csx[ix] * csy[iy] * csz[iz];
} else {
out[n] = 0;
}
}
}
/*
 * eval_gz for a non-orthogonal cell: the phase factor still separates
 * over the 1D grid indices in gxyz (cached per index in cache), but
 * |G|^2 does not, so the Gaussian damping exp(-kk/(4 aij)) is computed
 * per G vector.  fac is folded into the z factor only.  cache must
 * hold nx+ny+nz complex doubles followed by nx+ny+nz flag bytes.
 */
void GTO_Gv_nonorth(double complex *out, double aij, double *rij,
                    double complex fac, double *Gv, double *b,
                    int *gxyz, int *gs, size_t NGv, double *cache)
{
    const int nx = gs[0];
    const int ny = gs[1];
    const int nz = gs[2];
    double br[3];  // dot(rij, b)
    br[0] = rij[0] * b[0];
    br[0] += rij[1] * b[1];
    br[0] += rij[2] * b[2];
    br[1] = rij[0] * b[3];
    br[1] += rij[1] * b[4];
    br[1] += rij[2] * b[5];
    br[2] = rij[0] * b[6];
    br[2] += rij[1] * b[7];
    br[2] += rij[2] * b[8];
    double *kpt = b + 9;
    double kr[3];
    kr[0] = rij[0] * kpt[0];
    kr[1] = rij[1] * kpt[1];
    kr[2] = rij[2] * kpt[2];
    double *Gxbase = b + 12;
    double *Gybase = Gxbase + nx;
    double *Gzbase = Gybase + ny;
    double *kx = Gv;
    double *ky = kx + NGv;
    double *kz = ky + NGv;
    double complex *zbuf = (double complex *)cache;
    double complex *csx = zbuf;
    double complex *csy = csx + nx;
    double complex *csz = csy + ny;
    int n;
    char *empty = (char *)(csz + nz);
    char *xempty = empty;
    char *yempty = xempty + nx;
    char *zempty = yempty + ny;
    for (n = 0; n < nx+ny+nz; n++) {
        empty[n] = 1;  /* mark all per-axis phase factors uncomputed */
    }
    int *gx = gxyz;
    int *gy = gx + NGv;
    int *gz = gy + NGv;
    const double cutoff = EXP_CUTOFF * aij * 4;
    int ix, iy, iz;
    double Gr, kk;
    for (n = 0; n < NGv; n++) {
        kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n];
        if (kk < cutoff) {
            /* Grid indices only needed for surviving G vectors
             * (the loads were previously duplicated outside this test). */
            ix = gx[n];
            iy = gy[n];
            iz = gz[n];
            if (xempty[ix]) {
                Gr = Gxbase[ix] * br[0] + kr[0];
                csx[ix] = cos(Gr)-sin(Gr)*_Complex_I;
                xempty[ix] = 0;
            }
            if (yempty[iy]) {
                Gr = Gybase[iy] * br[1] + kr[1];
                csy[iy] = cos(Gr)-sin(Gr)*_Complex_I;
                yempty[iy] = 0;
            }
            if (zempty[iz]) {
                Gr = Gzbase[iz] * br[2] + kr[2];
                csz[iz] = fac * (cos(Gr)-sin(Gr)*_Complex_I);
                zempty[iz] = 0;
            }
            out[n] = exp(-.25*kk/aij) * csx[ix]*csy[iy]*csz[iz];
        } else {
            out[n] = 0;
        }
    }
}
/*
 * Copy an mi x mj tile of NGv-point vectors from gctr (packed with
 * leading dimension mi) into out, whose leading dimension is ni.
 */
static void zcopy_ij(double complex *out, const double complex *gctr,
                     const int mi, const int mj, const int ni, const size_t NGv)
{
    size_t k;
    int i, j;
    for (j = 0; j < mj; j++) {
        const double complex *src = gctr + (size_t)j * mi * NGv;
        double complex *dst = out + (size_t)j * ni * NGv;
        for (i = 0; i < mi; i++) {
            for (k = 0; k < NGv; k++) {
                dst[i*NGv+k] = src[i*NGv+k];
            }
        }
    }
}
/*
 * Scatter the contracted Cartesian integrals gctr into the output
 * matrix: one nfi x nfj tile per (ic, jc) contraction pair, placed at
 * row ic, column jc with output leading dimension dims[0] (in units of
 * NGv complex values).  No spherical transformation is applied.
 */
void GTO_ft_c2s_cart(double complex *out, double complex *gctr,
int *dims, CINTEnvVars *envs, size_t NGv)
{
const int i_ctr = envs->x_ctr[0];
const int j_ctr = envs->x_ctr[1];
const int nfi = envs->nfi;
const int nfj = envs->nfj;
const int ni = nfi*i_ctr;
const int nj = nfj*j_ctr;
const int nf = envs->nf;
int ic, jc;
double complex *pout;
for (jc = 0; jc < nj; jc += nfj) {
for (ic = 0; ic < ni; ic += nfi) {
pout = out + (dims[0] * jc + ic) * NGv;
zcopy_ij(pout, gctr, nfi, nfj, dims[0], NGv);
gctr += nf * NGv;
} }
}
/* Apply the real-valued spherical transform CINTc2s_ket_sph to complex
 * data by viewing each complex value as OF_CMPLX (= 2) consecutive
 * reals; the ket count is scaled accordingly at the call sites. */
#define C2S(sph, nket, cart, l) \
(double complex *)CINTc2s_ket_sph((double *)(sph), nket, (double *)(cart), l)
#define OF_CMPLX 2
/*
 * Spherical-harmonics counterpart of GTO_ft_c2s_cart: each nfi x nfj
 * Cartesian tile is transformed on the ket side (j_l) and then on the
 * bra side (i_l), column by column, with CINTc2s_ket_sph via the C2S
 * wrapper, before being copied into out (leading dimension dims[0]).
 * Returns early, leaving out untouched, if scratch allocation fails.
 */
void GTO_ft_c2s_sph(double complex *out, double complex *gctr,
                    int *dims, CINTEnvVars *envs, size_t NGv)
{
    const int i_l = envs->i_l;
    const int j_l = envs->j_l;
    const int i_ctr = envs->x_ctr[0];
    const int j_ctr = envs->x_ctr[1];
    const int di = i_l * 2 + 1;
    const int dj = j_l * 2 + 1;
    const int ni = di*i_ctr;
    const int nj = dj*j_ctr;
    const int nfi = envs->nfi;
    const int nf = envs->nf;
    int ic, jc, k;
    const int buflen = nfi*dj;
    double complex *buf1 = malloc(sizeof(double complex) * buflen*2 * NGv);
    if (buf1 == NULL) {
        /* Previously execution continued and dereferenced NULL. */
        fprintf(stderr, "buf1 = malloc(%zu) failed in GTO_ft_c2s_sph\n",
                sizeof(double complex) * buflen*2 * NGv);
        return;
    }
    double complex *buf2 = buf1 + buflen * NGv;
    double complex *pout, *pij, *buf;
    for (jc = 0; jc < nj; jc += dj) {
    for (ic = 0; ic < ni; ic += di) {
        /* ket transform, then bra transform of each of the dj columns */
        buf = C2S(buf1, nfi*NGv*OF_CMPLX, gctr, j_l);
        pij = C2S(buf2, NGv*OF_CMPLX, buf, i_l);
        for (k = NGv; k < dj*NGv; k+=NGv) {
            pout = C2S(buf2+k*di, NGv*OF_CMPLX, buf+k*nfi, i_l);
        }
        pout = out + (dims[0] * jc + ic) * NGv;
        zcopy_ij(pout, pij, di, dj, dims[0], NGv);
        gctr += nf * NGv;
    } }
    free(buf1);
}
/*
 * Zero-fill a counts[0] x counts[1] sub-block (of NGv-point vectors)
 * in each of the comp components of out; each component is a
 * dims[0] x dims[1] matrix.
 */
static void _ft_zset0(double complex *out, int *dims, int *counts,
                      int comp, size_t NGv)
{
    const size_t plane = (size_t)dims[0] * dims[1] * NGv;
    size_t k;
    int i, j, ic;
    for (ic = 0; ic < comp; ic++) {
        for (j = 0; j < counts[1]; j++) {
            double complex *row = out + (size_t)ic * plane
                                + (size_t)j * dims[0] * NGv;
            for (i = 0; i < counts[0]; i++) {
                for (k = 0; k < NGv; k++) {
                    row[i*NGv+k] = 0;
                }
            }
        }
    }
}
/*************************************************
 *
 * eval_aopair is one of GTO_aopair_early_contract,
 * GTO_aopair_lazy_contract
 *
 * eval_gz is one of GTO_Gv_general, GTO_Gv_orth,
 * GTO_Gv_nonorth
 *
 *************************************************/
/*
 * Driver for one Fourier-transformed AO pair: selects the contraction
 * scheme and eval_gz defaults, computes the contracted Cartesian block,
 * then applies f_c2s (Cartesian copy or spherical transform) per tensor
 * component.  Returns 1 if integrals were produced; 0 otherwise,
 * including allocation failure (in which case out is left untouched).
 */
int GTO_ft_aopair_drv(double complex *out, int *dims,
                      int (*eval_aopair)(), FPtr_eval_gz eval_gz, void (*f_c2s)(),
                      double complex fac, double *Gv, double *b, int *gxyz,
                      int *gs, size_t NGv, CINTEnvVars *envs)
{
    const int i_ctr = envs->x_ctr[0];
    const int j_ctr = envs->x_ctr[1];
    const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor;
    const size_t nc = envs->nf * i_ctr * j_ctr * NGv;
    double complex *gctr = malloc(sizeof(double complex) * nc * n_comp);
    if (gctr == NULL) {
        /* Previously execution continued and dereferenced NULL. */
        fprintf(stderr, "gctr = malloc(%zu) failed in GTO_ft_aopair_drv\n",
                sizeof(double complex) * nc * n_comp);
        return 0;
    }
    /* per-grid-index caches used by GTO_Gv_orth / GTO_Gv_nonorth */
    double *cache = malloc(sizeof(double) * (gs[0] + gs[1] + gs[2]) * 3);
    if (cache == NULL) {
        /* NOTE: the reported size previously used sizeof(double complex),
         * disagreeing with the allocation itself. */
        fprintf(stderr, "cache = malloc(%zu) failed in GTO_ft_aopair_drv\n",
                sizeof(double) * (gs[0] + gs[1] + gs[2]) * 3);
        free(gctr);
        return 0;
    }
    if (eval_gz == NULL) {
        eval_gz = GTO_Gv_general;
    }
    if (eval_gz != GTO_Gv_general) {
        assert(gxyz != NULL);
    }
    if (eval_aopair == NULL) {
        /* choose contraction scheme from primitive/contraction counts */
        const int *shls = envs->shls;
        const int *bas = envs->bas;
        const int i_sh = shls[0];
        const int j_sh = shls[1];
        const int i_prim = bas(NPRIM_OF, i_sh);
        const int j_prim = bas(NPRIM_OF, j_sh);
        if (i_prim*j_prim < i_ctr*j_ctr*3) {
            eval_aopair = GTO_aopair_lazy_contract;
        } else {
            eval_aopair = GTO_aopair_early_contract;
        }
    }
    int has_value = (*eval_aopair)(gctr, envs, eval_gz,
                                   fac, Gv, b, gxyz, gs, NGv, cache);
    int counts[4];
    if (f_c2s == &GTO_ft_c2s_sph) {
        counts[0] = (envs->i_l*2+1) * i_ctr;
        counts[1] = (envs->j_l*2+1) * j_ctr;
    } else {  // f_c2s == &GTO_ft_c2s_cart
        counts[0] = envs->nfi * i_ctr;
        counts[1] = envs->nfj * j_ctr;
    }
    if (dims == NULL) {
        dims = counts;
    }
    size_t nout = dims[0] * dims[1] * NGv;
    int n;
    if (has_value) {
        for (n = 0; n < n_comp; n++) {
            (*f_c2s)(out+nout*n, gctr+nc*n, dims, envs, NGv);
        }
    } else {
        /* no surviving primitive pair: zero the output block */
        _ft_zset0(out, dims, counts, n_comp, NGv);
    }
    free(gctr);
    free(cache);
    return has_value;
}
/*
 * Fourier transform of an AO overlap pair, Cartesian output.
 * Thin wrapper: sets up the 1e environment and dispatches to
 * GTO_ft_aopair_drv with the Cartesian c2s routine.
 */
int GTO_ft_ovlp_cart(double complex *out, int *shls, int *dims,
                     int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac,
                     double *Gv, double *b, int *gxyz, int *gs, int nGv,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
    CINTEnvVars envs;
    int ng[] = {0, 0, 0, 0, 0, 1, 0, 1};
    GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env);
    envs.f_gout = &inner_prod;
    /* The function-pointer token was corrupted to ">O_ft_c2s_cart". */
    return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, &GTO_ft_c2s_cart,
                             fac, Gv, b, gxyz, gs, nGv, &envs);
}
/*
 * Fourier transform of an AO overlap pair, spherical-harmonics output.
 * Thin wrapper: sets up the 1e environment and dispatches to
 * GTO_ft_aopair_drv with the spherical c2s routine.
 */
int GTO_ft_ovlp_sph(double complex *out, int *shls, int *dims,
                    int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac,
                    double *Gv, double *b, int *gxyz, int *gs, int nGv,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
    CINTEnvVars envs;
    int ng[] = {0, 0, 0, 0, 0, 1, 0, 1};
    GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env);
    envs.f_gout = &inner_prod;
    /* The function-pointer token was corrupted to ">O_ft_c2s_sph". */
    return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, &GTO_ft_c2s_sph,
                             fac, Gv, b, gxyz, gs, nGv, &envs);
}
/*************************************************
*
*************************************************/
/*
 * Scatter a di x dj shell tile (ket-major in `in`) into the packed
 * lower-triangular (s2) layout for ish > jsh: row i of the tile starts
 * at triangular offset ic*nij + i*(ip+1) + i*(i-1)/2 vectors of NGv
 * points, one slab per tensor component.
 */
static void zcopy_s2_igtj(double complex *out, double complex *in, size_t NGv,
                          int comp, int nij, int ip, int di, int dj)
{
    const size_t ip1 = ip + 1;
    size_t n, row_off;
    int i, j, ic;
    for (ic = 0; ic < comp; ic++) {
        for (i = 0; i < di; i++) {
            row_off = (size_t)ic * nij + (size_t)i * ip1
                    + (size_t)(i*(i-1)/2);
            for (j = 0; j < dj; j++) {
                const double complex *pin = in + (size_t)(j*di + i) * NGv;
                double complex *prow = out + (row_off + j) * NGv;
                for (n = 0; n < NGv; n++) {
                    prow[n] = pin[n];
                }
            }
        }
    }
}
/*
 * Same packed lower-triangular scatter as zcopy_s2_igtj, for the
 * diagonal shell pair (ish == jsh): only columns j <= i of each tile
 * are stored.
 */
static void zcopy_s2_ieqj(double complex *out, double complex *in, size_t NGv,
                          int comp, int nij, int ip, int di, int dj)
{
    const size_t ip1 = ip + 1;
    size_t n, row_off;
    int i, j, ic;
    for (ic = 0; ic < comp; ic++) {
        for (i = 0; i < di; i++) {
            row_off = (size_t)ic * nij + (size_t)i * ip1
                    + (size_t)(i*(i-1)/2);
            for (j = 0; j <= i; j++) {
                const double complex *pin = in + (size_t)(j*di + i) * NGv;
                double complex *prow = out + (row_off + j) * NGv;
                for (n = 0; n < NGv; n++) {
                    prow[n] = pin[n];
                }
            }
        }
    }
}
/*
 * Evaluate the FT of one shell pair (ish, jsh) and write it directly into
 * the full (s1, no symmetry) output matrix `mat`.  ish/jsh are relative to
 * the shell slice; `buf` is unused here because the integral is written
 * in place at the block offset inside mat.
 */
void GTO_ft_fill_s1(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz,
                    double complex *mat, int comp, int ish, int jsh,
                    double complex *buf,
                    int *shls_slice, int *ao_loc, double complex fac,
                    double *Gv, double *b, int *gxyz, int *gs, int nGv,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ib0 = shls_slice[0];
        const int ib1 = shls_slice[1];
        const int jb0 = shls_slice[2];
        const int jb1 = shls_slice[3];
        /* convert slice-relative shell ids to absolute shell ids */
        ish += ib0;
        jsh += jb0;
        /* dimensions of the whole output block spanned by the slice */
        const int nrow = ao_loc[ib1] - ao_loc[ib0];
        const int ncol = ao_loc[jb1] - ao_loc[jb0];
        /* column-major offset of this shell block inside mat */
        const size_t off = ao_loc[ish] - ao_loc[ib0]
                         + (ao_loc[jsh] - ao_loc[jb0]) * nrow;
        int shls[2] = {ish, jsh};
        int dims[2] = {nrow, ncol};
        (*intor)(mat+off*nGv, shls, dims, eval_aopair, eval_gz,
                 fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env);
}
/*
 * Evaluate the FT of shell pair (ish, jsh) into the full matrix, exploiting
 * the i<->j symmetry of the AO pair: pairs with ip < jp are skipped, and
 * after computing the (ish, jsh) block its transpose is copied into the
 * mirror (jsh, ish) block when the bra and ket slices are identical.
 * NOTE(review): the mirror copy transposes without complex conjugation --
 * presumably valid because rho_ij(G) = rho_ji(G) for the product of real
 * AOs; confirm against the intor used.
 */
void GTO_ft_fill_s1hermi(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz,
                         double complex *mat, int comp, int ish, int jsh,
                         double complex *buf,
                         int *shls_slice, int *ao_loc, double complex fac,
                         double *Gv, double *b, int *gxyz, int *gs, int nGv,
                         int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        ish += ish0;
        jsh += jsh0;
        /* AO offsets of this pair relative to the slice origin */
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        if (ip < jp) {
                /* upper triangle: produced later as the mirror of (jsh, ish) */
                return;
        }
        const int nrow = ao_loc[ish1] - ao_loc[ish0];
        const int ncol = ao_loc[jsh1] - ao_loc[jsh0];
        /* column-major offset of the (ish, jsh) block inside mat */
        const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow;
        const size_t NGv = nGv;
        int shls[2] = {ish, jsh};
        int dims[2] = {nrow, ncol};
        (*intor)(mat+off*NGv, shls, dims, eval_aopair, eval_gz,
                 fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env);
        /* mirror the off-diagonal block only when bra and ket ranges coincide,
         * otherwise the transposed location does not exist in mat */
        if (ip != jp && ish0 == jsh0 && ish1 == jsh1) {
                const int di = ao_loc[ish+1] - ao_loc[ish];
                const int dj = ao_loc[jsh+1] - ao_loc[jsh];
                double complex *in = mat + off * NGv;
                double complex *out = mat + (ao_loc[jsh] - ao_loc[jsh0] +
                                             (ao_loc[ish] - ao_loc[ish0]) * nrow) * NGv;
                int i, j, n, ic;
                double complex *pout, *pin;
                for (ic = 0; ic < comp; ic++) {
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        /* element (i,j) of the computed block ->
                                         * element (j,i) of the mirror block */
                                        pin = in + NGv * (j*nrow+i);
                                        pout = out + NGv * (i*nrow+j);
                                        for (n = 0; n < nGv; n++) {
                                                pout[n] = pin[n];
                                        }
                                }
                        }
                        /* next component: advance the mirror base by one full matrix;
                         * NOTE(review): `in` is not advanced here -- verify for comp > 1 */
                        out += nrow * ncol * NGv;
                }
        }
}
/*
 * Evaluate the FT of shell pair (ish, jsh) into triangular-packed (s2)
 * output.  The integral is first computed into the scratch buffer `buf`
 * with its natural (di, dj) dimensions, then scattered into the packed
 * layout by the zcopy_s2_* helpers.
 */
void GTO_ft_fill_s2(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz,
                    double complex *mat, int comp, int ish, int jsh,
                    double complex *buf,
                    int *shls_slice, int *ao_loc, double complex fac,
                    double *Gv, double *b, int *gxyz, int *gs, int nGv,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        ish += ish0;
        jsh += jsh0;
        /* NOTE(review): ip is the absolute AO row index while jp is relative
         * to the ket slice -- presumably the caller guarantees the bra slice
         * starts at AO 0 or that both slices align; confirm before reuse. */
        const int ip = ao_loc[ish];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        if (ip < jp) {
                /* strictly upper-triangular pair: not stored in s2 layout */
                return;
        }
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int i0 = ao_loc[ish0];
        /* number of packed pairs preceding the slice start */
        const size_t off0 = i0 * (i0 + 1) / 2;
        /* packed offset of element (ip, jp): row ip starts at ip*(ip+1)/2 */
        const size_t off = ip * (ip + 1) / 2 - off0 + jp;
        /* total packed pairs covered by the slice (per component) */
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t NGv = nGv;
        int shls[2] = {ish, jsh};
        int dims[2] = {di, dj};
        (*intor)(buf, shls, dims, eval_aopair, eval_gz,
                 fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env);
        if (ip != jp) {
                /* off-diagonal shell pair: store the full rectangle */
                zcopy_s2_igtj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj);
        } else {
                /* diagonal shell pair: store only the lower triangle */
                zcopy_s2_ieqj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj);
        }
}
/*
 * Fourier transform AO pairs and add to mat (inplace).
 *
 * Driver that iterates over every (ish, jsh) pair in shls_slice and hands
 * each one to `fill` (one of the GTO_ft_fill_s* routines above), OpenMP
 * parallel over shell pairs.  `phase` enters as the complex prefactor
 * exp(i*phase).
 *
 * Fix: the function-pointer comparisons/assignment read ">O_ft_ovlp_cart",
 * ">O_ft_ovlp_sph" and ">O_aopair_lazy_contract" in the corrupted source
 * ("&GT..." mangled by an HTML-entity pass); restored to "&GTO_...".
 * Also fixed the "falied" typo in the malloc-failure message.
 */
void GTO_ft_fill_drv(int (*intor)(), FPtr_eval_gz eval_gz, void (*fill)(),
                     double complex *mat, int comp,
                     int *shls_slice, int *ao_loc, double phase,
                     double *Gv, double *b, int *gxyz, int *gs, int nGv,
                     int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const double complex fac = cos(phase) + sin(phase)*_Complex_I;
        int (*eval_aopair)() = NULL;
        /* the plain overlap intors carry their own fast pair evaluator;
         * anything else uses the generic lazy-contraction evaluator */
        if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) {
                eval_aopair = &GTO_aopair_lazy_contract;
        }
        /* largest shell dimensions in each slice, to size the scratch buffer */
        size_t di = GTOmax_shell_dim(ao_loc, shls_slice , 1);
        size_t dj = GTOmax_shell_dim(ao_loc, shls_slice+2, 1);
#pragma omp parallel
{
        int i, j, ij;
        /* per-thread scratch for one shell-pair block */
        double complex *buf = malloc(sizeof(double complex)
                                     * di*dj*comp*(size_t)nGv);
        if (buf == NULL) {
                /* NOTE(review): execution continues with buf == NULL and the
                 * fill below would dereference it; kept to preserve existing
                 * behavior -- callers should treat this message as fatal. */
                fprintf(stderr, "buf = malloc(%zu) failed in GTO_ft_fill_drv\n",
                        di*dj*comp*(size_t)nGv);
        }
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                i = ij / njsh;
                j = ij % njsh;
                (*fill)(intor, eval_aopair, eval_gz, mat,
                        comp, i, j, buf, shls_slice, ao_loc, fac,
                        Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
/*
 * Given npair of shls in shls_lst, FT their AO pair value and add to
 * out (inplace).
 *
 * Each pair n writes a contiguous block of size di*dj*comp*NGv at offset
 * ijloc[n]*comp*NGv; ijloc is a prefix sum of the pair block sizes.
 *
 * Fixes: restored the mangled function-pointer names ">O_ft_ovlp_cart" /
 * ">O_ft_ovlp_sph" / ">O_aopair_lazy_contract" to their "&GTO_..." forms
 * (HTML-entity corruption of "&GT"), and guarded npair == 0, which
 * previously wrote ijloc[0] into a zero-size allocation.
 */
void GTO_ft_fill_shls_drv(int (*intor)(), FPtr_eval_gz eval_gz,
                          double complex *out, int comp,
                          int npair, int *shls_lst, int *ao_loc, double phase,
                          double *Gv, double *b, int *gxyz, int *gs, int nGv,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        if (npair <= 0) {
                return;
        }
        int n, di, dj, ish, jsh;
        int *ijloc = malloc(sizeof(int) * npair);
        /* prefix sum: ijloc[n] = number of (i,j) elements before pair n */
        ijloc[0] = 0;
        for (n = 1; n < npair; n++) {
                ish = shls_lst[n*2-2];
                jsh = shls_lst[n*2-1];
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                ijloc[n] = ijloc[n-1] + di*dj;
        }
        const double complex fac = cos(phase) + sin(phase)*_Complex_I;
        const size_t NGv = nGv;
        int (*eval_aopair)() = NULL;
        /* plain overlap intors carry their own fast pair evaluator */
        if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) {
                eval_aopair = &GTO_aopair_lazy_contract;
        }
#pragma omp parallel private(n)
{
        int ish, jsh;
        int dims[2];
#pragma omp for schedule(dynamic)
        for (n = 0; n < npair; n++) {
                ish = shls_lst[n*2  ];
                jsh = shls_lst[n*2+1];
                dims[0] = ao_loc[ish+1] - ao_loc[ish];
                dims[1] = ao_loc[jsh+1] - ao_loc[jsh];
                (*intor)(out+ijloc[n]*comp*NGv, shls_lst+n*2, dims,
                         eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv,
                         atm, natm, bas, nbas, env);
        }
}
        free(ijloc);
}
/*
 * Reversed vrr2d. They are used by numint_uniform_grid.c
 *
 * One step of the 2D vertical recurrence on the ket side:
 * builds (li, lj) Cartesian components in `out` from (li, lj-1) and
 * (li+1, lj-1) blocks stored consecutively in `g`, using
 *   out(i, j) = g10(i+1_x, j') + rirj[x] * g00(i, j')
 * for each of the x, y, z directions.  The START*/WHERE* macros encode the
 * Cartesian component bookkeeping; their exact index conventions are
 * defined elsewhere in this file.
 */
void GTOplain_vrr2d_ket_inc1(double *out, const double *g,
                             double *rirj, int li, int lj)
{
        if (lj == 0) {
                /* nothing to raise: copy the (li, 0) block through */
                NPdcopy(out, g, _LEN_CART[li]);
                return;
        }
        const int row_10 = _LEN_CART[li+1];
        const int row_00 = _LEN_CART[li  ];
        const int col_00 = _LEN_CART[lj-1];
        const double *g00 = g;                    /* (li,   lj-1) block */
        const double *g10 = g + row_00*col_00;    /* (li+1, lj-1) block */
        int i, j;
        const double *p00, *p10;
        double *p01 = out;
        /* raise lj via the x direction */
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i));
                        p01[i] = p10[0] + rirj[0] * p00[0];
                }
                p01 += row_00;
        }
        /* raise lj via the y direction */
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i));
                        p01[i] = p10[0] + rirj[1] * p00[0];
                }
                p01 += row_00;
        }
        /* raise lj via the z direction: at most one source component */
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i));
                        p01[i] = p10[0] + rirj[2] * p00[0];
                }
        }
}
/*
 * Adjoint (reverse-mode) of GTOplain_vrr2d_ket_inc1: accumulates the
 * contribution of the (li, lj) block `g01` back onto the (li, lj-1) and
 * (li+1, lj-1) source blocks inside `g00`, mirroring the forward
 * recurrence out(i,j) = g10(...) + rirj[d] * g00(...).  Note it ACCUMULATES
 * (+=) into g00/g10 rather than overwriting.
 */
void GTOreverse_vrr2d_ket_inc1(double *g01, double *g00,
                               double *rirj, int li, int lj)
{
        const int row_10 = _LEN_CART[li+1];
        const int row_00 = _LEN_CART[li  ];
        const int col_00 = _LEN_CART[lj-1];
        /* (li+1, lj-1) block sits right after the (li, lj-1) block */
        double *g10 = g00 + row_00*col_00;
        double *p00, *p10;
        int i, j;
        /* adjoint of the x-direction raise */
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i));
                        p10[0] += g01[i];
                        p00[0] += g01[i] * rirj[0];
                }
                g01 += row_00;
        }
        /* adjoint of the y-direction raise */
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i));
                        p10[0] += g01[i];
                        p00[0] += g01[i] * rirj[1];
                }
                g01 += row_00;
        }
        /* adjoint of the z-direction raise: at most one component */
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i));
                        p10[0] += g01[i];
                        p00[0] += g01[i] * rirj[2];
                }
        }
}
|
ShapeFunctionDiscretizer.h | /******************************************************************************
* SOFA, Simulation Open-Framework Architecture, development version *
* (c) 2006-2017 INRIA, USTL, UJF, CNRS, MGH *
* *
* This program is free software; you can redistribute it and/or modify it *
* under the terms of the GNU Lesser General Public License as published by *
* the Free Software Foundation; either version 2.1 of the License, or (at *
* your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, but WITHOUT *
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License *
* for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
*******************************************************************************
* Authors: The SOFA Team and external contributors (see Authors.txt) *
* *
* Contact information: contact@sofa-framework.org *
******************************************************************************/
#ifndef FLEXIBLE_ShapeFunctionDiscretizer_H
#define FLEXIBLE_ShapeFunctionDiscretizer_H
#include <Flexible/config.h>
#include "../shapeFunction/BaseShapeFunction.h"
#include <image/ImageTypes.h>
#include <sofa/core/DataEngine.h>
#include <sofa/core/objectmodel/BaseObject.h>
#include <sofa/defaulttype/Vec.h>
#include <sofa/helper/rmath.h>
namespace sofa
{
namespace component
{
namespace engine
{
/**
 * Discretizes shape functions into an image: for every non-empty voxel of
 * the input image, the shape function found in the scene graph is sampled
 * at the voxel's world position, and the influencing parent indices and
 * their weights are written into two output images (f_index, f_w).
 */
template <class ImageTypes_>
class ShapeFunctionDiscretizer : public core::DataEngine
{
public:
    typedef core::DataEngine Inherited;
    SOFA_CLASS(SOFA_TEMPLATE(ShapeFunctionDiscretizer,ImageTypes_),Inherited);
    typedef SReal Real;

    /** @name  Image data */
    //@{
    typedef ImageTypes_ ImageTypes;
    typedef typename ImageTypes::T T;
    typedef typename ImageTypes::imCoord imCoord;
    typedef helper::ReadAccessor<Data< ImageTypes > > raImage;
    Data< ImageTypes > f_image;       // input rasterized object; non-zero voxels are sampled

    typedef defaulttype::ImageLPTransform<Real> TransformType;
    typedef helper::ReadAccessor<Data< TransformType > > raTransform;
    Data< TransformType > f_transform;  // voxel <-> world transform of f_image

    typedef unsigned int IndT;
    typedef defaulttype::Image<IndT> IndTypes;
    typedef helper::WriteOnlyAccessor<Data< IndTypes > > waInd;
    Data< IndTypes > f_index;         // output: per-voxel parent indices, stored 1-based (0 = no parent)

    typedef double DistT;
    typedef defaulttype::Image<DistT> DistTypes;
    typedef helper::WriteOnlyAccessor<Data< DistTypes > > waDist;
    Data< DistTypes > f_w;            // output: per-voxel shape-function weights, aligned with f_index
    //@}

    /** @name  Shape Function types */
    //@{
    enum { spatial_dimensions = 3 };
    typedef core::behavior::ShapeFunctionTypes<spatial_dimensions,Real> ShapeFunctionType;
    typedef core::behavior::BaseShapeFunction<ShapeFunctionType> BaseShapeFunction;
    typedef typename BaseShapeFunction::VReal VReal;
    typedef typename BaseShapeFunction::VRef VRef;
    typedef typename BaseShapeFunction::Coord Coord;
    BaseShapeFunction* _shapeFunction;        ///< where the weights are computed
    //@}

    virtual std::string getTemplateName() const { return templateName(this); }
    static std::string templateName(const ShapeFunctionDiscretizer<ImageTypes>* = NULL) { return ImageTypes::Name(); }

    ShapeFunctionDiscretizer()    :   Inherited()
        , f_image(initData(&f_image,ImageTypes(),"image",""))
        , f_transform(initData(&f_transform,TransformType(),"transform",""))
        , f_index(initData(&f_index,IndTypes(),"indices",""))
        , f_w(initData(&f_w,DistTypes(),"weights",""))
        , _shapeFunction(NULL)
    {
        // inputs are never modified by this engine
        f_image.setReadOnly(true);
        f_transform.setReadOnly(true);
    }

    virtual ~ShapeFunctionDiscretizer() {}

    virtual void init()
    {
        // locate the shape function component upward in the scene graph
        if( !_shapeFunction ) this->getContext()->get(_shapeFunction,core::objectmodel::BaseContext::SearchUp);
        if ( !_shapeFunction ) serr << "ShapeFunction<"<<ShapeFunctionType::Name()<<"> component not found" << sendl;
        addInput(&f_image);
        addInput(&f_transform);
        addOutput(&f_w);
        addOutput(&f_index);
        setDirtyValue();
    }

    virtual void reinit() { update(); }

protected:

    virtual void update()
    {
        if( !_shapeFunction ) return;

        // read input image and transform
        raImage in(this->f_image);
        raTransform inT(this->f_transform);
        if(in->isEmpty()) { serr<<"Image not found"<<sendl; return; }
        const cimg_library::CImg<T>& inimg = in->getCImg(0);  // suppose time=0

        cleanDirty();

        // init indices and weights images: one channel per influencing parent
        const unsigned int nbref=_shapeFunction->f_nbRef.getValue();
        imCoord dim = in->getDimensions();
        dim[3]=nbref;
        dim[4]=1;
        waInd indData(this->f_index); indData->setDimensions(dim);
        cimg_library::CImg<IndT>& indices = indData->getCImg(); indices.fill(0);
        waDist weightData(this->f_w); weightData->setDimensions(dim);
        cimg_library::CImg<DistT>& weights = weightData->getCImg(); weights.fill(0);

        //        // fill indices and weights images
        //#ifdef _OPENMP
        //#pragma omp parallel for
        //#endif
        // sample the shape function at every non-empty voxel
        for(int z=0; z<inimg.depth(); z++)
            for(int y=0; y<inimg.height(); y++)
                for(int x=0; x<inimg.width(); x++)
                    if(inimg(x,y,z))
                    {
                        Coord p=inT->fromImage(Coord(x,y,z));
                        VReal w; VRef ref;
                        _shapeFunction->computeShapeFunction(p,ref,w);
                        for(unsigned int i=0;i<ref.size();i++)
                        {
                            // indices are stored 1-based so 0 can mean "unset"
                            indices(x,y,z,i)=ref[i]+1;
                            weights(x,y,z,i)=w[i];
                        }
                    }
    }

};
} // namespace engine
} // namespace component
} // namespace sofa
#endif // FLEXIBLE_ShapeFunctionDiscretizer_H
|
column_matrix.h | /*!
* Copyright 2017 by Contributors
* \file column_matrix.h
* \brief Utility for fast column-wise access
* \author Philip Cho
*/
#ifndef XGBOOST_COMMON_COLUMN_MATRIX_H_
#define XGBOOST_COMMON_COLUMN_MATRIX_H_
#include <limits>
#include <vector>
#include <memory>
#include "hist_util.h"
namespace xgboost {
namespace common {
class ColumnMatrix;
/*! \brief column type */
enum ColumnType {
kDenseColumn,
kSparseColumn
};
/*! \brief a column storage, to be used with ApplySplit. Note that each
bin id is stored as index[i] + index_base.
Different types of column index for each column allow
to reduce the memory usage. */
template <typename BinIdxType>
class Column {
 public:
  Column(ColumnType type, common::Span<const BinIdxType> index, const uint32_t index_base)
      : type_(type),
        index_(index),
        index_base_(index_base) {}

  /* ColumnMatrix::GetColumn hands out DenseColumn/SparseColumn objects
     through unique_ptr<const Column<BinIdxType>>, i.e. they are deleted
     through this base-class pointer.  The destructor must therefore be
     virtual; without it that deletion is undefined behaviour. */
  virtual ~Column() = default;

  /* global bin id: per-feature bin id shifted by this feature's first bin */
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return index_base_ + static_cast<uint32_t>(index_[idx]);
  }

  /* per-feature bin id of the idx-th stored element */
  BinIdxType GetFeatureBinIdx(size_t idx) const { return index_[idx]; }

  uint32_t GetBaseIdx() const { return index_base_; }

  common::Span<const BinIdxType> GetFeatureBinIdxPtr() const { return index_; }

  ColumnType GetType() const { return type_; }

  /* returns number of elements in column */
  size_t Size() const { return index_.size(); }

 private:
  /* type of column */
  ColumnType type_;
  /* bin indexes in range [0, max_bins - 1] */
  common::Span<const BinIdxType> index_;
  /* bin index offset for specific feature */
  const uint32_t index_base_;
};
template <typename BinIdxType>
class SparseColumn : public Column<BinIdxType> {
 public:
  /* A sparse column stores only the present entries; row_ind records, for
     every stored bin index, which row it belongs to. */
  SparseColumn(ColumnType type, common::Span<const BinIdxType> index,
               uint32_t index_base, common::Span<const size_t> row_ind)
      : Column<BinIdxType>(type, index, index_base), row_ind_(row_ind) {}

  /* raw pointer to the row-index array */
  const size_t* GetRowData() const { return row_ind_.data(); }

  /* row id owning the idx-th stored entry */
  size_t GetRowIdx(size_t idx) const { return row_ind_.data()[idx]; }

 private:
  /* indexes of rows */
  common::Span<const size_t> row_ind_;
};
template <typename BinIdxType>
class DenseColumn : public Column<BinIdxType> {
 public:
  /* A dense column has one slot per row.  `missing_flags` is the
     matrix-wide missingness bitmap shared by all columns, and
     `feature_offset` locates this feature's slice inside it.
     NOTE: only a reference to the bitmap is kept, so the owning
     ColumnMatrix must outlive this column view. */
  DenseColumn(ColumnType type, common::Span<const BinIdxType> index,
              uint32_t index_base, const std::vector<bool>& missing_flags,
              size_t feature_offset)
      : Column<BinIdxType>(type, index, index_base),
        missing_flags_(missing_flags),
        feature_offset_(feature_offset) {}

  /* true when row `idx` carries no value for this feature */
  bool IsMissing(size_t idx) const { return missing_flags_[feature_offset_ + idx]; }

 private:
  /* flags for missing values in dense columns */
  const std::vector<bool>& missing_flags_;
  size_t feature_offset_;
};
/*! \brief a collection of columns, with support for construction from
    GHistIndexMatrix.  Converts the row-major quantized matrix into
    per-feature (column-major) storage so ApplySplit can scan one feature
    at a time; columns are stored densely or sparsely depending on how
    many rows actually carry a value for the feature. */
class ColumnMatrix {
 public:
  // get number of features
  inline bst_uint GetNumFeature() const {
    return static_cast<bst_uint>(type_.size());
  }

  // construct column matrix from GHistIndexMatrix
  inline void Init(const GHistIndexMatrix& gmat,
                   double sparse_threshold) {
    const int32_t nfeature = static_cast<int32_t>(gmat.cut.Ptrs().size() - 1);
    const size_t nrow = gmat.row_ptr.size() - 1;
    // identify type of each column
    feature_counts_.resize(nfeature);
    type_.resize(nfeature);
    std::fill(feature_counts_.begin(), feature_counts_.end(), 0);
    uint32_t max_val = std::numeric_limits<uint32_t>::max();
    // every feature's bin range must fit in uint32_t
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      CHECK_LE(gmat.cut.Ptrs()[fid + 1] - gmat.cut.Ptrs()[fid], max_val);
    }
    bool all_dense = gmat.IsDense();
    gmat.GetFeatureCounts(&feature_counts_[0]);
    // classify features: features present in fewer than
    // sparse_threshold * nrow rows are stored sparsely
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (static_cast<double>(feature_counts_[fid])
          < sparse_threshold * nrow) {
        type_[fid] = kSparseColumn;
        all_dense = false;
      } else {
        type_[fid] = kDenseColumn;
      }
    }

    // want to compute storage boundary for each feature
    // using variants of prefix sum scan: dense columns reserve one slot
    // per row, sparse columns only as many slots as present values
    feature_offsets_.resize(nfeature + 1);
    size_t accum_index_ = 0;
    feature_offsets_[0] = accum_index_;
    for (int32_t fid = 1; fid < nfeature + 1; ++fid) {
      if (type_[fid - 1] == kDenseColumn) {
        accum_index_ += static_cast<size_t>(nrow);
      } else {
        accum_index_ += feature_counts_[fid - 1];
      }
      feature_offsets_[fid] = accum_index_;
    }

    SetTypeSize(gmat.max_num_bins);
    // index_ is a raw byte buffer; entries are 1/2/4 bytes wide
    index_.resize(feature_offsets_[nfeature] * bins_type_size_, 0);
    if (!all_dense) {
      row_ind_.resize(feature_offsets_[nfeature]);
    }

    // store least bin id for each feature
    index_base_ = const_cast<uint32_t*>(gmat.cut.Ptrs().data());

    const bool noMissingValues = NoMissingValues(gmat.row_ptr[nrow], nrow, nfeature);
    any_missing_ = !noMissingValues;

    // with no missing values every slot will be written, so flags can
    // start false; otherwise start true and clear per written slot
    if (noMissingValues) {
      missing_flags_.resize(feature_offsets_[nfeature], false);
    } else {
      missing_flags_.resize(feature_offsets_[nfeature], true);
    }

    // pre-fill index_ for dense columns
    if (all_dense) {
      BinTypeSize gmat_bin_size = gmat.index.GetBinTypeSize();
      if (gmat_bin_size == kUint8BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint8_t>(), gmat, nrow, nfeature, noMissingValues);
      } else if (gmat_bin_size == kUint16BinsTypeSize) {
        SetIndexAllDense(gmat.index.data<uint16_t>(), gmat, nrow, nfeature, noMissingValues);
      } else {
        CHECK_EQ(gmat_bin_size, kUint32BinsTypeSize);
        SetIndexAllDense(gmat.index.data<uint32_t>(), gmat, nrow, nfeature, noMissingValues);
      }
    /* For sparse DMatrix gmat.index.getBinTypeSize() returns always kUint32BinsTypeSize
       but for ColumnMatrix we still have a chance to reduce the memory consumption */
    } else {
      if (bins_type_size_ == kUint8BinsTypeSize) {
        SetIndex<uint8_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      } else if (bins_type_size_ == kUint16BinsTypeSize) {
        SetIndex<uint16_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      } else {
        CHECK_EQ(bins_type_size_, kUint32BinsTypeSize);
        SetIndex<uint32_t>(gmat.index.data<uint32_t>(), gmat, nrow, nfeature);
      }
    }
  }

  /* Set the number of bytes based on numeric limit of maximum number of bins provided by user.
     NOTE(review): max_num_bins == 0 would wrap (max_num_bins - 1) to SIZE_MAX and select
     the 4-byte width -- harmless but worth confirming callers never pass 0. */
  void SetTypeSize(size_t max_num_bins) {
    if ( (max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint8_t>::max()) ) {
      bins_type_size_ = kUint8BinsTypeSize;
    } else if ((max_num_bins - 1) <= static_cast<int>(std::numeric_limits<uint16_t>::max())) {
      bins_type_size_ = kUint16BinsTypeSize;
    } else {
      bins_type_size_ = kUint32BinsTypeSize;
    }
  }

  /* Fetch an individual column. This code should be used with type switch
     to determine type of bin id's */
  template <typename BinIdxType>
  std::unique_ptr<const Column<BinIdxType> > GetColumn(unsigned fid) const {
    CHECK_EQ(sizeof(BinIdxType), bins_type_size_);

    const size_t feature_offset = feature_offsets_[fid];    // to get right place for certain feature
    const size_t column_size = feature_offsets_[fid + 1] - feature_offset;
    // reinterpret the raw byte buffer as BinIdxType entries for this feature
    common::Span<const BinIdxType> bin_index = { reinterpret_cast<const BinIdxType*>(
                                                 &index_[feature_offset * bins_type_size_]),
                                                 column_size };
    std::unique_ptr<const Column<BinIdxType> > res;
    if (type_[fid] == ColumnType::kDenseColumn) {
      res.reset(new DenseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
                                            missing_flags_, feature_offset));
    } else {
      res.reset(new SparseColumn<BinIdxType>(type_[fid], bin_index, index_base_[fid],
                                             {&row_ind_[feature_offset], column_size}));
    }
    return res;
  }

  /* Transpose a fully dense gmat into column storage.  `index` already holds
     per-feature (not global) bin ids here, so no index_base_ subtraction. */
  template<typename T>
  inline void SetIndexAllDense(T* index, const GHistIndexMatrix& gmat,  const size_t nrow,
                               const size_t nfeature,  const bool noMissingValues) {
    T* local_index = reinterpret_cast<T*>(&index_[0]);

    /* missing values make sense only for column with type kDenseColumn,
       and if no missing values were observed it could be handled much faster. */
    if (noMissingValues) {
      // every row has exactly nfeature entries: trivially parallel transpose
      #pragma omp parallel for num_threads(omp_get_max_threads())
      for (omp_ulong rid = 0; rid < nrow; ++rid) {
        const size_t ibegin = rid*nfeature;
        const size_t iend = (rid+1)*nfeature;
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const size_t idx = feature_offsets_[j];
          local_index[idx + rid] = index[i];
        }
      }
    } else {
      /* to handle rows in all batches, sum of all batch sizes equal to gmat.row_ptr.size() - 1 */
      size_t rbegin = 0;
      for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
        const xgboost::Entry* data_ptr = batch.data.HostVector().data();
        const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
        const size_t batch_size = batch.Size();
        CHECK_LT(batch_size, offset_vec.size());
        for (size_t rid = 0; rid < batch_size; ++rid) {
          const size_t size = offset_vec[rid + 1] - offset_vec[rid];
          SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};
          const size_t ibegin = gmat.row_ptr[rbegin + rid];
          const size_t iend = gmat.row_ptr[rbegin + rid + 1];
          CHECK_EQ(ibegin + inst.size(), iend);
          size_t j = 0;
          size_t fid = 0;
          for (size_t i = ibegin; i < iend; ++i, ++j) {
            fid = inst[j].index;
            const size_t idx = feature_offsets_[fid];
            /* rbegin allows to store indexes from specific SparsePage batch */
            local_index[idx + rbegin + rid] = index[i];
            missing_flags_[idx + rbegin + rid] = false;
          }
        }
        rbegin += batch.Size();
      }
    }
  }

  /* Transpose a (partially) sparse gmat into column storage.  `index` holds
     GLOBAL bin ids (uint32_t); the per-feature base is subtracted before
     narrowing to T.  Sparse columns append entries in row order, tracked by
     num_nonzeros.  Single-threaded by design: num_nonzeros ordering matters. */
  template<typename T>
  inline void SetIndex(uint32_t* index, const GHistIndexMatrix& gmat,
                       const size_t nrow, const size_t nfeature) {
    std::vector<size_t> num_nonzeros;
    num_nonzeros.resize(nfeature);
    std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);

    T* local_index = reinterpret_cast<T*>(&index_[0]);
    size_t rbegin = 0;
    for (const auto &batch : gmat.p_fmat->GetBatches<SparsePage>()) {
      const xgboost::Entry* data_ptr = batch.data.HostVector().data();
      const std::vector<bst_row_t>& offset_vec = batch.offset.HostVector();
      const size_t batch_size = batch.Size();
      CHECK_LT(batch_size, offset_vec.size());
      for (size_t rid = 0; rid < batch_size; ++rid) {
        const size_t ibegin = gmat.row_ptr[rbegin + rid];
        const size_t iend = gmat.row_ptr[rbegin + rid + 1];
        size_t fid = 0;
        const size_t size = offset_vec[rid + 1] - offset_vec[rid];
        SparsePage::Inst inst = {data_ptr + offset_vec[rid], size};

        CHECK_EQ(ibegin + inst.size(), iend);
        size_t j = 0;
        for (size_t i = ibegin; i < iend; ++i, ++j) {
          const uint32_t bin_id = index[i];

          fid = inst[j].index;
          if (type_[fid] == kDenseColumn) {
            T* begin = &local_index[feature_offsets_[fid]];
            begin[rid + rbegin] = bin_id - index_base_[fid];
            missing_flags_[feature_offsets_[fid] + rid + rbegin] = false;
          } else {
            T* begin = &local_index[feature_offsets_[fid]];
            begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
            row_ind_[feature_offsets_[fid] + num_nonzeros[fid]] = rid + rbegin;
            ++num_nonzeros[fid];
          }
        }
      }
      rbegin += batch.Size();
    }
  }

  const BinTypeSize GetTypeSize() const {
    return bins_type_size_;
  }

  // This is just an utility function: true when the CSR holds
  // n_row * n_features entries, i.e. every cell is present
  const bool NoMissingValues(const size_t n_elements,
                             const size_t n_row, const size_t n_features) {
    return n_elements == n_features * n_row;
  }

  // And this returns part of state
  const bool AnyMissing() const {
    return any_missing_;
  }

 private:
  // raw storage for bin indices; entry width is bins_type_size_ bytes
  std::vector<uint8_t> index_;

  std::vector<size_t> feature_counts_;
  std::vector<ColumnType> type_;
  std::vector<size_t> row_ind_;
  /* indicate where each column's index and row_ind is stored. */
  std::vector<size_t> feature_offsets_;

  // index_base_[fid]: least bin id for feature fid
  // NOTE(review): this is a non-owning pointer into gmat.cut.Ptrs();
  // the GHistIndexMatrix passed to Init must outlive this object.
  uint32_t* index_base_;
  std::vector<bool> missing_flags_;
  BinTypeSize bins_type_size_;
  bool any_missing_;
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_COLUMN_MATRIX_H_
|
convolutiondepthwise_3x3_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
/* Depthwise 3x3 stride-1 convolution for the pack-8 (8 floats per pixel,
 * AVX register width) layout.  Each group/channel is convolved with its
 * own 3x3 kernel (nine 8-lane taps); the input is assumed padded so each
 * output row reads outw+2 input pixels.  The inner loop is unrolled 4x,
 * then 2x, then 1x over output columns.
 *
 * Fix: removed the unused locals `w` and `outptr1` (the latter also
 * computed out.row(1), which is out of bounds for a 1-row output blob).
 */
static void convdw3x3s1_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);

        const Mat img0 = bottom_blob.channel(g);

        // three consecutive input rows feeding one output row
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        // nine 8-lane kernel taps, row by row
        __m256 _k00 = _mm256_loadu_ps(k0);
        __m256 _k01 = _mm256_loadu_ps(k0 + 8);
        __m256 _k02 = _mm256_loadu_ps(k0 + 16);
        __m256 _k10 = _mm256_loadu_ps(k0 + 24);
        __m256 _k11 = _mm256_loadu_ps(k0 + 32);
        __m256 _k12 = _mm256_loadu_ps(k0 + 40);
        __m256 _k20 = _mm256_loadu_ps(k0 + 48);
        __m256 _k21 = _mm256_loadu_ps(k0 + 56);
        __m256 _k22 = _mm256_loadu_ps(k0 + 64);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // 4 output pixels per iteration; neighbouring outputs share
            // input loads (stride 1), so only 3 new pixels are loaded each
            for (; j + 3 < outw; j += 4)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);

                _mm256_storeu_ps(outptr0, _sum0);

                _sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1);

                __m256 _sum2 = _bias0;
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);

                _mm256_storeu_ps(outptr0 + 8, _sum1);

                _sum2 = _mm256_fmadd_ps(_k00, _r02, _sum2);
                _sum2 = _mm256_fmadd_ps(_k01, _r03, _sum2);
                _sum2 = _mm256_fmadd_ps(_k02, _r04, _sum2);
                _sum2 = _mm256_fmadd_ps(_k10, _r12, _sum2);
                _sum2 = _mm256_fmadd_ps(_k11, _r13, _sum2);
                _sum2 = _mm256_fmadd_ps(_k12, _r14, _sum2);
                _sum2 = _mm256_fmadd_ps(_k20, _r22, _sum2);
                _sum2 = _mm256_fmadd_ps(_k21, _r23, _sum2);
                _sum2 = _mm256_fmadd_ps(_k22, _r24, _sum2);

                __m256 _sum3 = _bias0;
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);

                _mm256_storeu_ps(outptr0 + 16, _sum2);

                _sum3 = _mm256_fmadd_ps(_k00, _r03, _sum3);
                _sum3 = _mm256_fmadd_ps(_k01, _r04, _sum3);
                _sum3 = _mm256_fmadd_ps(_k02, _r05, _sum3);
                _sum3 = _mm256_fmadd_ps(_k10, _r13, _sum3);
                _sum3 = _mm256_fmadd_ps(_k11, _r14, _sum3);
                _sum3 = _mm256_fmadd_ps(_k12, _r15, _sum3);
                _sum3 = _mm256_fmadd_ps(_k20, _r23, _sum3);
                _sum3 = _mm256_fmadd_ps(_k21, _r24, _sum3);
                _sum3 = _mm256_fmadd_ps(_k22, _r25, _sum3);

                _mm256_storeu_ps(outptr0 + 24, _sum3);

                r0 += 32;
                r1 += 32;
                r2 += 32;
                outptr0 += 32;
            }
            // 2 output pixels per iteration
            for (; j + 1 < outw; j += 2)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);

                _mm256_storeu_ps(outptr0, _sum0);

                _sum1 = _mm256_fmadd_ps(_k00, _r01, _sum1);
                _sum1 = _mm256_fmadd_ps(_k01, _r02, _sum1);
                _sum1 = _mm256_fmadd_ps(_k02, _r03, _sum1);
                _sum1 = _mm256_fmadd_ps(_k10, _r11, _sum1);
                _sum1 = _mm256_fmadd_ps(_k11, _r12, _sum1);
                _sum1 = _mm256_fmadd_ps(_k12, _r13, _sum1);
                _sum1 = _mm256_fmadd_ps(_k20, _r21, _sum1);
                _sum1 = _mm256_fmadd_ps(_k21, _r22, _sum1);
                _sum1 = _mm256_fmadd_ps(_k22, _r23, _sum1);

                _mm256_storeu_ps(outptr0 + 8, _sum1);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 16;
            }
            // remaining single output pixel
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);

                _mm256_storeu_ps(outptr0, _sum0);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr0 += 8;
            }

            // skip the 2 padding pixels (kernel width 3, stride 1) at the
            // end of each input row
            r0 += 2 * 8;
            r1 += 2 * 8;
            r2 += 2 * 8;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, for the AVX pack8 layout (8 floats per
// packed channel element).  Each group g is one packed channel: the nine 3x3
// kernel taps (8 lanes each) are loaded into registers once per group, then
// every output row is produced with inner loops unrolled by 4/2/1 output
// pixels.  Stride 2 means each output pixel consumes 2 input pixels
// (16 packed floats) horizontally.
// NOTE: the unused `outptr1` local of the original was removed; stride-2
// processing emits one output row per input-row triple, so a second output
// row pointer is never needed.
static void convdw3x3s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // After a full output row, skip the unconsumed tail of the current input
    // row plus one whole input row (vertical stride 2), in packed floats.
    const int tailstep = (w - 2 * outw + w) * 8;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);

        const Mat img0 = bottom_blob.channel(g);

        // Three consecutive input rows feed one output row.
        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);

        // Nine 8-lane kernel taps, kept in registers for the whole group.
        __m256 _k00 = _mm256_loadu_ps(k0);
        __m256 _k01 = _mm256_loadu_ps(k0 + 8);
        __m256 _k02 = _mm256_loadu_ps(k0 + 16);
        __m256 _k10 = _mm256_loadu_ps(k0 + 24);
        __m256 _k11 = _mm256_loadu_ps(k0 + 32);
        __m256 _k12 = _mm256_loadu_ps(k0 + 40);
        __m256 _k20 = _mm256_loadu_ps(k0 + 48);
        __m256 _k21 = _mm256_loadu_ps(k0 + 56);
        __m256 _k22 = _mm256_loadu_ps(k0 + 64);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // Unroll by 4 output pixels.  Because of the horizontal stride of
            // 2, adjacent outputs overlap by one input column, so the trailing
            // loads of one output (_r02/_r04/_r06) are reused as the leading
            // taps of the next.
            for (; j + 3 < outw; j += 4)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0, _sum0);
                _sum1 = _mm256_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm256_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm256_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm256_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm256_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm256_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm256_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm256_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm256_fmadd_ps(_k22, _r24, _sum1);

                __m256 _sum2 = _bias0;
                __m256 _r05 = _mm256_loadu_ps(r0 + 40);
                __m256 _r15 = _mm256_loadu_ps(r1 + 40);
                __m256 _r25 = _mm256_loadu_ps(r2 + 40);
                __m256 _r06 = _mm256_loadu_ps(r0 + 48);
                __m256 _r16 = _mm256_loadu_ps(r1 + 48);
                __m256 _r26 = _mm256_loadu_ps(r2 + 48);
                _mm256_storeu_ps(outptr0 + 8, _sum1);
                _sum2 = _mm256_fmadd_ps(_k00, _r04, _sum2);
                _sum2 = _mm256_fmadd_ps(_k01, _r05, _sum2);
                _sum2 = _mm256_fmadd_ps(_k02, _r06, _sum2);
                _sum2 = _mm256_fmadd_ps(_k10, _r14, _sum2);
                _sum2 = _mm256_fmadd_ps(_k11, _r15, _sum2);
                _sum2 = _mm256_fmadd_ps(_k12, _r16, _sum2);
                _sum2 = _mm256_fmadd_ps(_k20, _r24, _sum2);
                _sum2 = _mm256_fmadd_ps(_k21, _r25, _sum2);
                _sum2 = _mm256_fmadd_ps(_k22, _r26, _sum2);

                __m256 _sum3 = _bias0;
                __m256 _r07 = _mm256_loadu_ps(r0 + 56);
                __m256 _r17 = _mm256_loadu_ps(r1 + 56);
                __m256 _r27 = _mm256_loadu_ps(r2 + 56);
                __m256 _r08 = _mm256_loadu_ps(r0 + 64);
                __m256 _r18 = _mm256_loadu_ps(r1 + 64);
                __m256 _r28 = _mm256_loadu_ps(r2 + 64);
                _mm256_storeu_ps(outptr0 + 16, _sum2);
                _sum3 = _mm256_fmadd_ps(_k00, _r06, _sum3);
                _sum3 = _mm256_fmadd_ps(_k01, _r07, _sum3);
                _sum3 = _mm256_fmadd_ps(_k02, _r08, _sum3);
                _sum3 = _mm256_fmadd_ps(_k10, _r16, _sum3);
                _sum3 = _mm256_fmadd_ps(_k11, _r17, _sum3);
                _sum3 = _mm256_fmadd_ps(_k12, _r18, _sum3);
                _sum3 = _mm256_fmadd_ps(_k20, _r26, _sum3);
                _sum3 = _mm256_fmadd_ps(_k21, _r27, _sum3);
                _sum3 = _mm256_fmadd_ps(_k22, _r28, _sum3);
                _mm256_storeu_ps(outptr0 + 24, _sum3);

                // 4 outputs consumed 8 input pixels (stride 2) = 64 floats.
                r0 += 2 * 32;
                r1 += 2 * 32;
                r2 += 2 * 32;
                outptr0 += 32;
            }
            // Unroll by 2 output pixels.
            for (; j + 1 < outw; j += 2)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);

                __m256 _sum1 = _bias0;
                __m256 _r03 = _mm256_loadu_ps(r0 + 24);
                __m256 _r13 = _mm256_loadu_ps(r1 + 24);
                __m256 _r23 = _mm256_loadu_ps(r2 + 24);
                __m256 _r04 = _mm256_loadu_ps(r0 + 32);
                __m256 _r14 = _mm256_loadu_ps(r1 + 32);
                __m256 _r24 = _mm256_loadu_ps(r2 + 32);
                _mm256_storeu_ps(outptr0, _sum0);
                _sum1 = _mm256_fmadd_ps(_k00, _r02, _sum1);
                _sum1 = _mm256_fmadd_ps(_k01, _r03, _sum1);
                _sum1 = _mm256_fmadd_ps(_k02, _r04, _sum1);
                _sum1 = _mm256_fmadd_ps(_k10, _r12, _sum1);
                _sum1 = _mm256_fmadd_ps(_k11, _r13, _sum1);
                _sum1 = _mm256_fmadd_ps(_k12, _r14, _sum1);
                _sum1 = _mm256_fmadd_ps(_k20, _r22, _sum1);
                _sum1 = _mm256_fmadd_ps(_k21, _r23, _sum1);
                _sum1 = _mm256_fmadd_ps(_k22, _r24, _sum1);
                _mm256_storeu_ps(outptr0 + 8, _sum1);

                r0 += 2 * 16;
                r1 += 2 * 16;
                r2 += 2 * 16;
                outptr0 += 16;
            }
            // Remaining single output pixels.
            for (; j < outw; j++)
            {
                __m256 _sum0 = _bias0;

                __m256 _r00 = _mm256_loadu_ps(r0);
                __m256 _r01 = _mm256_loadu_ps(r0 + 8);
                __m256 _r02 = _mm256_loadu_ps(r0 + 16);
                __m256 _r10 = _mm256_loadu_ps(r1);
                __m256 _r11 = _mm256_loadu_ps(r1 + 8);
                __m256 _r12 = _mm256_loadu_ps(r1 + 16);
                __m256 _r20 = _mm256_loadu_ps(r2);
                __m256 _r21 = _mm256_loadu_ps(r2 + 8);
                __m256 _r22 = _mm256_loadu_ps(r2 + 16);

                _sum0 = _mm256_fmadd_ps(_k00, _r00, _sum0);
                _sum0 = _mm256_fmadd_ps(_k01, _r01, _sum0);
                _sum0 = _mm256_fmadd_ps(_k02, _r02, _sum0);
                _sum0 = _mm256_fmadd_ps(_k10, _r10, _sum0);
                _sum0 = _mm256_fmadd_ps(_k11, _r11, _sum0);
                _sum0 = _mm256_fmadd_ps(_k12, _r12, _sum0);
                _sum0 = _mm256_fmadd_ps(_k20, _r20, _sum0);
                _sum0 = _mm256_fmadd_ps(_k21, _r21, _sum0);
                _sum0 = _mm256_fmadd_ps(_k22, _r22, _sum0);
                _mm256_storeu_ps(outptr0, _sum0);

                r0 += 2 * 8;
                r1 += 2 * 8;
                r2 += 2 * 8;
                outptr0 += 8;
            }

            // Jump to the start of the next input-row pair (vertical stride 2).
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
convolution_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 2x2 convolution, stride 1, NEON-accelerated.
// For each output channel p the output is first filled with the bias, then the
// contributions of the input channels are accumulated on top of it: input
// channels are consumed two at a time (q, q+1) by an asm inner loop that
// produces 4 output pixels per iteration, with a scalar remainder loop; a
// final single-channel loop handles an odd input-channel count.
// Kernel layout: 4 weights (2x2, row-major) per (p, q) pair.
static void conv2x2s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        // Accumulation starts from the bias; each channel pass does "+=".
        out.fill(bias0);

        int q = 0;
        // Main pass: two input channels per iteration.
        for (; q + 1 < inch; q += 2)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);
            const float* img1 = bottom_blob.channel(q + 1);

            // 4 weights per input channel; kernel1 is channel q+1's 2x2 block.
            const float* kernel0 = kernel + p * inch * 4 + q * 4;
            const float* kernel1 = kernel0 + 4;

            const float* r00 = img0;        // channel q,   input row i
            const float* r01 = img0 + w;    // channel q,   input row i+1
            const float* r10 = img1;        // channel q+1, input row i
            const float* r11 = img1 + w;    // channel q+1, input row i+1

#if __ARM_NEON
            float32x4_t _k0 = vld1q_f32(kernel0);
            float32x4_t _k1 = vld1q_f32(kernel1);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;     // groups of 4 outputs for the asm loop
                int remain = outw & 3;  // leftover outputs done scalar
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    asm volatile(
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.4s}, [%1], #16 \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v2.4s}, [%2], #16 \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v12.4s}, [%3], #16 \n"
                        "prfm pldl1keep, [%4, #128] \n"
                        "ld1 {v14.4s}, [%4], #16 \n"
                        "0: \n"
                        "prfm pldl1keep, [%5, #128] \n"
                        "ld1 {v9.4s}, [%5] \n"
                        "fmul v8.4s, v0.4s, %12.s[0] \n"
                        "fmla v9.4s, v2.4s, %12.s[2] \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v1.4s}, [%1], #16 \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v3.4s}, [%2], #16 \n"
                        "ext v10.16b, v0.16b, v1.16b, #4 \n"
                        "ext v11.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v8.4s, v12.4s, %13.s[0] \n"
                        "fmla v9.4s, v14.4s, %13.s[2] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v13.4s}, [%3], #16 \n"
                        "prfm pldl1keep, [%4, #128] \n"
                        "ld1 {v15.4s}, [%4], #16 \n"
                        "fmla v8.4s, v10.4s, %12.s[1] \n"
                        "fmla v9.4s, v11.4s, %12.s[3] \n"
                        "ext v10.16b, v12.16b, v13.16b, #4 \n"
                        "ext v11.16b, v14.16b, v15.16b, #4 \n"
                        "fmla v8.4s, v10.4s, %13.s[1] \n"
                        "fmla v9.4s, v11.4s, %13.s[3] \n"
                        "orr v0.16b, v1.16b, v1.16b \n"
                        "orr v2.16b, v3.16b, v3.16b \n"
                        "fadd v8.4s, v8.4s, v9.4s \n"
                        "orr v12.16b, v13.16b, v13.16b \n"
                        "orr v14.16b, v15.16b, v15.16b \n"
                        "subs %w0, %w0, #1 \n"
                        "st1 {v8.4s}, [%5], #16 \n"
                        "bne 0b \n"
                        "sub %1, %1, #16 \n"
                        "sub %2, %2, #16 \n"
                        "sub %3, %3, #16 \n"
                        "sub %4, %4, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(r00), // %1
                        "=r"(r01), // %2
                        "=r"(r10), // %3
                        "=r"(r11), // %4
                        "=r"(outptr) // %5
                        : "0"(nn),
                        "1"(r00),
                        "2"(r01),
                        "3"(r10),
                        "4"(r11),
                        "5"(outptr),
                        "w"(_k0), // %12
                        "w"(_k1) // %13
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        "pld [%1, #128] \n"
                        "vld1.f32 {d0-d1}, [%1]! \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d4-d5}, [%2]! \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d24-d25}, [%3]! \n"
                        "pld [%4, #128] \n"
                        "vld1.f32 {d28-d29}, [%4]! \n"
                        "0: \n"
                        "pld [%5, #128] \n"
                        "vld1.f32 {d18-d19}, [%5] \n" // q9 = sum
                        "vmul.f32 q8, q0, %e12[0] \n"
                        "vmla.f32 q9, q2, %f12[0] \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d2-d3}, [%1]! \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d6-d7}, [%2]! \n"
                        "vext.f32 q10, q0, q1, #1 \n"
                        "vext.f32 q11, q2, q3, #1 \n"
                        "vmla.f32 q8, q12, %e13[0] \n"
                        "vmla.f32 q9, q14, %f13[0] \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d26-d27}, [%3]! \n"
                        "pld [%4, #128] \n"
                        "vld1.f32 {d30-d31}, [%4]! \n"
                        "vmla.f32 q8, q10, %e12[1] \n"
                        "vmla.f32 q9, q11, %f12[1] \n"
                        "vext.f32 q10, q12, q13, #1 \n"
                        "vext.f32 q11, q14, q15, #1 \n"
                        "vmla.f32 q8, q10, %e13[1] \n"
                        "vmla.f32 q9, q11, %f13[1] \n"
                        "vorr q0, q1, q1 \n"
                        "vorr q2, q3, q3 \n"
                        "vadd.f32 q8, q8, q9 \n"
                        "vorr q12, q13, q13 \n"
                        "vorr q14, q15, q15 \n"
                        "subs %0, #1 \n"
                        "vst1.f32 {d16-d17}, [%5]! \n"
                        "bne 0b \n"
                        "sub %1, #16 \n"
                        "sub %2, #16 \n"
                        "sub %3, #16 \n"
                        "sub %4, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(r00), // %1
                        "=r"(r01), // %2
                        "=r"(r10), // %3
                        "=r"(r11), // %4
                        "=r"(outptr) // %5
                        : "0"(nn),
                        "1"(r00),
                        "2"(r01),
                        "3"(r10),
                        "4"(r11),
                        "5"(outptr),
                        "w"(_k0), // %12
                        "w"(_k1) // %13
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Scalar remainder: one output pixel per iteration, both
                // channels' 2x2 windows accumulated into *outptr.
                for (; remain > 0; remain--)
                {
#if __ARM_NEON
                    float32x2_t _r00 = vld1_f32(r00);
                    float32x2_t _r01 = vld1_f32(r01);
                    float32x4_t _r00r1 = vcombine_f32(_r00, _r01);
                    float32x4_t _s0s1 = vmulq_f32(_r00r1, _k0);
                    float32x2_t _r10 = vld1_f32(r10);
                    float32x2_t _r11 = vld1_f32(r11);
                    float32x4_t _r10r1 = vcombine_f32(_r10, _r11);
                    _s0s1 = vmlaq_f32(_s0s1, _r10r1, _k1);
                    // Horizontal sum of the four products, added to the output.
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);
                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;
                    sum += r00[0] * kernel0[0];
                    sum += r00[1] * kernel0[1];
                    sum += r01[0] * kernel0[2];
                    sum += r01[1] * kernel0[3];
                    sum += r10[0] * kernel1[0];
                    sum += r10[1] * kernel1[1];
                    sum += r11[0] * kernel1[2];
                    sum += r11[1] * kernel1[3];

                    *outptr += sum;
#endif // __ARM_NEON
                    r00 += 1;
                    r01 += 1;
                    r10 += 1;
                    r11 += 1;
                    outptr++;
                }

                // Advance to the next row: pointers moved outw samples above;
                // one extra sample skips to the start of the next input row
                // (w = outw + 1 for a 2x2 kernel at stride 1).
                r00 += 1;
                r01 += 1;
                r10 += 1;
                r11 += 1;
            }
        }
        // Tail pass: remaining single input channel (odd inch).
        for (; q < inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p * inch * 4 + q * 4;

            const float* r0 = img0;
            const float* r1 = img0 + w;

#if __ARM_NEON
            // Each weight broadcast to all four lanes for the asm loop.
            float32x4_t _k0 = vdupq_n_f32(kernel0[0]);
            float32x4_t _k1 = vdupq_n_f32(kernel0[1]);
            float32x4_t _k2 = vdupq_n_f32(kernel0[2]);
            float32x4_t _k3 = vdupq_n_f32(kernel0[3]);
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    asm volatile(
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.4s}, [%1], #16 \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v2.4s}, [%2], #16 \n"
                        "0: \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld1 {v9.4s}, [%3] \n"
                        "fmul v8.4s, v0.4s, %8.4s \n"
                        "fmla v9.4s, v2.4s, %10.4s \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v1.4s}, [%1], #16 \n"
                        "ext v10.16b, v0.16b, v1.16b, #4 \n"
                        "fmla v8.4s, v10.4s, %9.4s \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v3.4s}, [%2], #16 \n"
                        "ext v11.16b, v2.16b, v3.16b, #4 \n"
                        "fmla v9.4s, v11.4s, %11.4s \n"
                        "orr v0.16b, v1.16b, v1.16b \n"
                        "fadd v8.4s, v8.4s, v9.4s \n"
                        "orr v2.16b, v3.16b, v3.16b \n"
                        "subs %w0, %w0, #1 \n"
                        "st1 {v8.4s}, [%3], #16 \n"
                        "bne 0b \n"
                        "sub %1, %1, #16 \n"
                        "sub %2, %2, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(r0), // %1
                        "=r"(r1), // %2
                        "=r"(outptr) // %3
                        : "0"(nn),
                        "1"(r0),
                        "2"(r1),
                        "3"(outptr),
                        "w"(_k0), // %8
                        "w"(_k1), // %9
                        "w"(_k2), // %10
                        "w"(_k3) // %11
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11");
                }
#else
                if (nn > 0)
                {
                    asm volatile(
                        "pld [%1, #128] \n"
                        "vld1.f32 {d0-d1}, [%1]! \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d4-d5}, [%2]! \n"
                        "0: \n"
                        "pld [%3, #128] \n"
                        "vld1.f32 {d18-d19}, [%3] \n" // q9 = sum
                        "vmul.f32 q8, q0, %q8 \n"
                        "vmla.f32 q9, q2, %q10 \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d2-d3}, [%1]! \n"
                        "vext.f32 q10, q0, q1, #1 \n"
                        "vmla.f32 q8, q10, %q9 \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d6-d7}, [%2]! \n"
                        "vext.f32 q11, q2, q3, #1 \n"
                        "vmla.f32 q9, q11, %q11 \n"
                        "vorr q0, q1, q1 \n"
                        "vadd.f32 q8, q8, q9 \n"
                        "vorr q2, q3, q3 \n"
                        "subs %0, #1 \n"
                        "vst1.f32 {d16-d17}, [%3]! \n"
                        "bne 0b \n"
                        "sub %1, #16 \n"
                        "sub %2, #16 \n"
                        : "=r"(nn), // %0
                        "=r"(r0), // %1
                        "=r"(r1), // %2
                        "=r"(outptr) // %3
                        : "0"(nn),
                        "1"(r0),
                        "2"(r1),
                        "3"(outptr),
                        "w"(_k0), // %8
                        "w"(_k1), // %9
                        "w"(_k2), // %10
                        "w"(_k3) // %11
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
                }
#endif // __aarch64__
#endif // __ARM_NEON
#if __ARM_NEON
                // Scalar tail uses the packed 2x2 weights directly.
                float32x4_t _k0123 = vld1q_f32(kernel0);
#endif
                for (; remain > 0; remain--)
                {
#if __ARM_NEON
                    float32x2_t _r0 = vld1_f32(r0);
                    float32x2_t _r1 = vld1_f32(r1);
                    float32x4_t _r0r1 = vcombine_f32(_r0, _r1);
                    float32x4_t _s0s1 = vmulq_f32(_r0r1, _k0123);
                    float32x2_t _s = vadd_f32(vget_low_f32(_s0s1), vget_high_f32(_s0s1));
                    _s = vpadd_f32(_s, _s);
                    *outptr += vget_lane_f32(_s, 0);
#else
                    float sum = 0.f;
                    sum += r0[0] * kernel0[0];
                    sum += r0[1] * kernel0[1];
                    sum += r1[0] * kernel0[2];
                    sum += r1[1] * kernel0[3];

                    *outptr += sum;
#endif
                    r0 += 1;
                    r1 += 1;
                    outptr++;
                }

                // Skip the last sample of the row (w = outw + 1).
                r0 += 1;
                r1 += 1;
            }
        }
    }
}
|
multisort-omp-task-rama-cutoff.c | #include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "omp.h"
#include <sys/time.h>
// Wall-clock timestamp in microseconds, based on gettimeofday().
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return 1e6 * (double)now.tv_sec + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6f\n",(_m), stamp);
// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;
int CUTOFF;
#define BLOCK_SIZE 1024L
#define T int
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);
// Merge step of multisort: merges the two sorted n-element arrays `left` and
// `right` into `result`, restricted to the output range
// [start, start + length).  Large ranges are split in two and handed to
// OpenMP tasks; a task becomes `final` once depth reaches CUTOFF, and inside
// a final task (omp_in_final()) the recursion continues sequentially without
// creating more tasks.  Sizes are powers of two (see file header), so the
// repeated length/2 split is exact.
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length, int depth) {
    if (length < MIN_MERGE_SIZE*2L) {
        // Base case
        basicmerge(n, left, right, result, start, length);
    } else {
        // Recursive decomposition
        if(!omp_in_final()){
            // The two halves of the output range are independent tasks.
            #pragma omp task final (depth >= CUTOFF)
            merge(n, left, right, result, start, length/2, depth+1 );
            #pragma omp task final (depth >= CUTOFF)
            merge(n, left, right, result, start + length/2, length/2, depth+1);
            // Both halves must finish before the caller can read `result`.
            #pragma omp taskwait
        }else{
            // Already inside a final task: plain sequential recursion.
            merge(n, left, right, result, start, length/2, depth+1);
            merge(n, left, right, result, start + length/2, length/2, depth+1);
        }
    }
}
// Parallel mergesort of data[0..n) using tmp[0..n) as scratch space.
// The array is cut into four quarters, each sorted recursively; the quarters
// are then merged pairwise into tmp and finally merged back into data.
// OpenMP tasks drive the recursion; the `final` clause stops task creation
// once `depth` reaches CUTOFF, and omp_in_final() selects the sequential
// path inside such tasks.  Note that the merge calls restart their own depth
// counter at 0.
void multisort(long n, T data[n], T tmp[n], int depth) {
    if (n >= MIN_SORT_SIZE*4L) {
        // Recursive decomposition
        if(!omp_in_final()){
            // Sort the four quarters as independent tasks.
            #pragma omp task final (depth >= CUTOFF)
            multisort(n/4L, &data[0], &tmp[0], depth+1);
            #pragma omp task final (depth >= CUTOFF)
            multisort(n/4L, &data[n/4L], &tmp[n/4L], depth+1);
            #pragma omp task final (depth >= CUTOFF)
            multisort(n/4L, &data[n/2L], &tmp[n/2L], depth+1);
            #pragma omp task final (depth >= CUTOFF)
            multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L], depth+1);
            // All quarters must be sorted before merging.
            #pragma omp taskwait
            // Merge quarter pairs into the scratch buffer.
            #pragma omp task final (depth >= CUTOFF)
            merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L,0);
            #pragma omp task final (depth >= CUTOFF)
            merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L,0);
            #pragma omp taskwait
            // Final merge of the two halves back into data.
            #pragma omp task final (depth >= CUTOFF)
            merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n,0);
            #pragma omp taskwait
        }else{
            // Inside a final task: same steps, executed sequentially.
            multisort(n/4L, &data[0], &tmp[0], depth+1);
            multisort(n/4L, &data[n/4L], &tmp[n/4L], depth+1);
            multisort(n/4L, &data[n/2L], &tmp[n/2L], depth+1);
            multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L], depth+1);
            merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L,0);
            merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L,0);
            merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n,0);
        }
    } else {
        // Base case
        basicsort(n, data);
    }
}
// Fill data[0..length) with a deterministic pseudo-random sequence: the first
// element comes from rand(), every following element is derived from its
// predecessor.  Produces exactly the same values as the original
// branch-in-the-loop formulation.
static void initialize(long length, T data[length]) {
    if (length <= 0)
        return;

    data[0] = rand();
    for (long i = 1; i < length; i++)
        data[i] = ((data[i-1]+1) * i * 104723L) % N;
}
// Reset data[0..length) to all zeroes.
static void clear(long length, T data[length]) {
    long idx = 0;
    while (idx < length) {
        data[idx] = 0;
        ++idx;
    }
}
// Verifies that data[0..n) is sorted in non-decreasing order, printing a
// diagnostic with the number of out-of-order positions when it is not.
// Fix: the loop index is now `long` to match n; the previous `int i` would
// truncate/overflow for vectors larger than INT_MAX elements.
void check_sorted(long n, T data[n])
{
   int unsorted=0;

   for (long i=1; i<n; i++)
      if (data[i-1] > data[i]) unsorted++;

   if (unsorted > 0)
      printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
   else {
      // printf ("data IS ordered; ");
   }
}
// Driver: parses command-line options, generates the input vector, runs the
// task-parallel multisort (the task tree is started by a single thread inside
// a parallel region) and verifies the result, timing each phase.
// Fixes over the original: stray double semicolon removed, malloc results
// checked, data/tmp freed before exit, and the Usage line now mentions -c.
int main(int argc, char **argv) {
   /* Defaults for command line arguments */
   N = 32768 * BLOCK_SIZE;
   MIN_SORT_SIZE = 32 * BLOCK_SIZE;
   MIN_MERGE_SIZE = 32 * BLOCK_SIZE;
   CUTOFF = 4;

   /* Process command-line arguments */
   for (int i=1; i<argc; i++) {
      if (strcmp(argv[i], "-n")==0) {
         N = atol(argv[++i]) * BLOCK_SIZE;
      }
      else if (strcmp(argv[i], "-s")==0) {
         MIN_SORT_SIZE = atol(argv[++i]) * BLOCK_SIZE;
      }
      else if (strcmp(argv[i], "-m")==0) {
         MIN_MERGE_SIZE = atol(argv[++i]) * BLOCK_SIZE;
      }
      else if (strcmp(argv[i], "-c")==0) {
         CUTOFF = atoi(argv[++i]);
      }
      else {
         fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE -c CUTOFF]\n", argv[0]);
         fprintf(stderr, " -n to specify the size of the vector (in Kelements) to sort (default 32768)\n");
         fprintf(stderr, " -s to specify the size of the vector (in Kelements) that breaks recursion in the sort phase (default 32)\n");
         fprintf(stderr, " -m to specify the size of the vector (in Kelements) that breaks recursion in the merge phase (default 32)\n");
         fprintf(stderr, " -c to specify the cut off recursion level to stop task generation in OpenMP (default 4)\n");
         return EXIT_FAILURE;
      }
   }

   fprintf(stdout, "Arguments (Kelements): N=%ld, MIN_SORT_SIZE=%ld, MIN_MERGE_SIZE=%ld\n", N/BLOCK_SIZE, MIN_SORT_SIZE/BLOCK_SIZE, MIN_MERGE_SIZE/BLOCK_SIZE);
   fprintf(stdout, " CUTOFF=%d\n", CUTOFF);

   T *data = malloc(N*sizeof(T));
   T *tmp = malloc(N*sizeof(T));
   if (data == NULL || tmp == NULL) {
      fprintf(stderr, "Error: could not allocate the two %ld-element work vectors\n", N);
      free(data);
      free(tmp);
      return EXIT_FAILURE;
   }

   double stamp;
   START_COUNT_TIME;

   initialize(N, data);
   clear(N, tmp);

   STOP_COUNT_TIME("Initialization time in seconds");

   START_COUNT_TIME;

   // One thread seeds the task tree; the team executes the tasks.
   #pragma omp parallel
   #pragma omp single
   multisort(N, data, tmp,0);

   STOP_COUNT_TIME("Multisort execution time");

   START_COUNT_TIME;

   check_sorted (N, data);

   STOP_COUNT_TIME("Check sorted data execution time");

   fprintf(stdout, "Multisort program finished\n");

   free(data);
   free(tmp);
   return 0;
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 * Returns 1 when the difference is negative, 0 otherwise.
 * NOTE: *y is normalized in place as a side effect (same contract as the
 * classic glibc elapsed-time example this follows). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Carry between the seconds and microseconds fields so that
   * x->tv_usec - y->tv_usec ends up in a printable range. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* With y normalized, tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the tiled order-4 (25-point) 3D stencil with
 * axis-symmetric variable coefficients.  Grid sizes come from argv (interior
 * size plus 8 ghost layers); Nt is the number of time steps.  The sweep is
 * run TESTS times and the best wall time is reported via PRINT_RESULTS.
 * Fixes over the original: Nx/Ny/Nz/Nt are zero-initialized so that too few
 * arguments no longer reads uninitialized values (the size guard then simply
 * skips the kernel); the top-level A/coef pointer blocks and the tile-size
 * list are freed; unused locals (t, lb, ub, lb2, ub2) removed. */
int main(int argc, char *argv[])
{
  int i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  Nx = Ny = Nz = 0;
  Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds two time levels; coef holds the 13 stencil
  // coefficient fields (center tap + 4 symmetric taps per axis).
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 8;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables with a fixed seed so every run/test is comparable
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // Time-skewed, tiled sweep generated by PLUTO/CLooG; t1..t8 are tile and
    // point iterators.  (A glibc preinclude header comment injected by the
    // generator was removed here; it documented nothing about this code.)
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lbp, ubp;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
        ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) {
            for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32)),ceild(8*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(8*t1+Nx+7,32)),floord(16*t2+Nx+3,32)),floord(8*t3+Nx-5,32)),floord(16*t1-16*t2+Nz+Nx+5,32));t4++) {
              for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),8*t4+6);t5++) {
                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                    lbv=max(32*t4,4*t5+4);
                    ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      // 25-point update, one tap per line; the terms add
                      // left-to-right, exactly matching the original
                      // fully-parenthesized single-expression form.
                      A[(t5 + 1) % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8] =
                          coef[0][-4*t5+t6][-4*t5+t7][-4*t5+t8] * A[t5 % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8]
                        + coef[1][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6 - 1][-4*t5+t7][-4*t5+t8] + A[t5 % 2][-4*t5+t6 + 1][-4*t5+t7][-4*t5+t8])
                        + coef[2][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6][-4*t5+t7 - 1][-4*t5+t8] + A[t5 % 2][-4*t5+t6][-4*t5+t7 + 1][-4*t5+t8])
                        + coef[3][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8 - 1] + A[t5 % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8 + 1])
                        + coef[4][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6 - 2][-4*t5+t7][-4*t5+t8] + A[t5 % 2][-4*t5+t6 + 2][-4*t5+t7][-4*t5+t8])
                        + coef[5][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6][-4*t5+t7 - 2][-4*t5+t8] + A[t5 % 2][-4*t5+t6][-4*t5+t7 + 2][-4*t5+t8])
                        + coef[6][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8 - 2] + A[t5 % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8 + 2])
                        + coef[7][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6 - 3][-4*t5+t7][-4*t5+t8] + A[t5 % 2][-4*t5+t6 + 3][-4*t5+t7][-4*t5+t8])
                        + coef[8][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6][-4*t5+t7 - 3][-4*t5+t8] + A[t5 % 2][-4*t5+t6][-4*t5+t7 + 3][-4*t5+t8])
                        + coef[9][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8 - 3] + A[t5 % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8 + 3])
                        + coef[10][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6 - 4][-4*t5+t7][-4*t5+t8] + A[t5 % 2][-4*t5+t6 + 4][-4*t5+t7][-4*t5+t8])
                        + coef[11][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6][-4*t5+t7 - 4][-4*t5+t8] + A[t5 % 2][-4*t5+t6][-4*t5+t7 + 4][-4*t5+t8])
                        + coef[12][-4*t5+t6][-4*t5+t7][-4*t5+t8] * (A[t5 % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8 - 4] + A[t5 % 2][-4*t5+t6][-4*t5+t7][-4*t5+t8 + 4]);
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (including the top-level pointer blocks and the
  // tile-size list, which the original leaked).
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.