Search is not available for this dataset
text
string
meta
dict
/**
 *
 * @file core_dgeqp3_norms.c
 *
 *  PLASMA core_blas kernel
 *  PLASMA is a software package provided by Univ. of Tennessee,
 *  Univ. of California Berkeley and Univ. of Colorado Denver
 *
 * @version 2.6.0
 * @author Mark Gates
 * @date 2010-11-15
 * @generated d Tue Jan 7 11:44:49 2014
 *
 **/
#include <lapacke.h>
#include <math.h>
#include <cblas.h>
#include "common.h"

#define A(m,n) BLKADDR( A, double, m, n )

/***************************************************************************//**
 *
 * @ingroup CORE_double
 *
 *  CORE_dgeqp3_norms computes the 2-norm of each column of A[ ioff:m, joff:n ]
 *  that is marked with norms2[j] == -1 on entry. Entries that are not marked
 *  are assumed to already contain the correct 2-norm, so that the same routine
 *  can be used for computing the initial norms and for updating bad norms.
 *  The result is stored duplicated in norms1 and norms2.
 *
 *******************************************************************************
 *
 * @param[in] A
 *         PLASMA descriptor of the matrix A.
 *         On entry, the M-by-N matrix described by the descriptor.
 *
 * @param[in] ioff
 *         Row offset.
 *
 * @param[in] joff
 *         Column offset.
 *
 * @param[in,out] norms1
 *         Vector of size A.n.
 *         On exit, norms1[j] is 2-norm of column j, for j >= joff.
 *
 * @param[in,out] norms2
 *         Vector of size A.n.
 *         On entry, if norms2[j] == -1, re-compute norm of column j.
 *         On exit, norms2[j] is 2-norm of column j, for j >= joff.
 **/
#if defined(PLASMA_HAVE_WEAK)
#pragma weak CORE_dgeqp3_norms = PCORE_dgeqp3_norms
#define CORE_dgeqp3_norms PCORE_dgeqp3_norms
#define CORE_dgessq PCORE_dgessq
int CORE_dgessq(int M, int N, const double *A, int LDA, double *scale, double *sumsq);
#endif
void CORE_dgeqp3_norms( PLASMA_desc A, int ioff, int joff, double *norms1, double *norms2 )
{
    const double *tile;
    int col, t, row_off, cnt, tile_m, ncols, ldt;
    double ssq, scl;

    /* The descriptor must cover a single column of tiles. */
    if ( A.nt != 1 ) {
        coreblas_error(1, "Illegal value of A.nt");
        return;
    }

    ncols = min( A.nb, A.n );
    for( col = joff; col < ncols; ++col ) {
        /* Only recompute columns flagged with -1; others already hold a valid norm. */
        if ( norms2[col] != -1. )
            continue;

        /* Accumulate the sum of squares in scaled (scl, ssq) form over all
           row tiles; the row offset applies to the first tile only. */
        scl = 0.;
        ssq = 1.;
        row_off = ioff;
        for( t = 0; t < A.mt; ++t ) {
            tile_m = min( A.mb, A.m - t*A.mb );
            tile   = A(t,0);
            ldt    = BLKLDD( A, t );
            cnt    = tile_m - row_off;
            CORE_dgessq( cnt, 1, tile + col*ldt + row_off, ldt, &scl, &ssq );
            row_off = 0;
        }
        norms2[col] = scl * sqrt( ssq );
        norms1[col] = norms2[col];
    }
}
{ "alphanum_fraction": 0.5284132841, "avg_line_length": 29.4565217391, "ext": "c", "hexsha": "e2525a5eef5ef134d3334e8de2c66cfbb3fbd060", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "zhuangsc/Plasma-ompss1", "max_forks_repo_path": "core_blas/core_dgeqp3_norms.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "zhuangsc/Plasma-ompss1", "max_issues_repo_path": "core_blas/core_dgeqp3_norms.c", "max_line_length": 91, "max_stars_count": null, "max_stars_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "zhuangsc/Plasma-ompss1", "max_stars_repo_path": "core_blas/core_dgeqp3_norms.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 814, "size": 2710 }
#pragma once

#include <imageview/ImageRowView.h>
#include <imageview/IsPixelFormat.h>
#include <imageview/internal/ImageViewIterator.h>
#include <imageview/internal/ImageViewStorage.h>
#include <imageview/internal/PixelRef.h>

#include <gsl/assert>
#include <gsl/span>

#include <cstddef>
#include <type_traits>

namespace imageview {

// Non-owning view into a bitmap image with the specified pixel format.
//
// \param PixelFormat - specifies how colors are stored in the bitmap.
//        ContinuousImageView only supports images with fixed color depth (bits
//        per pixel). PixelFormat should satisfy IsPixelFormat trait, i.e. be
//        like
//            class MyPixelFormat {
//             public:
//              using color_type = MyColor;
//              static constexpr int kBytesPerPixel = N;
//              color_type read(gsl::span<const std::byte, kBytesPerPixel> pixel_data) const;
//              void write(const color_type& color, gsl::span<std::byte, kBytesPerPixel> pixel_data) const;
//            };
// \param Mutable - if true, ContinuousImageView provides write access to the
//        bitmap (naturally, this requires that ContinuousImageView is
//        constructed from a non-const pointer to the data). Otherwise, only
//        read-only access is provided.
template <class PixelFormat, bool Mutable = false>
class ContinuousImageView {
 public:
  static_assert(IsPixelFormat<PixelFormat>::value, "Not a PixelFormat.");

  // const std::byte for read-only views, std::byte for mutable ones.
  using byte_type = std::conditional_t<Mutable, std::byte, const std::byte>;
  using value_type = typename PixelFormat::color_type;
  // If `Mutable == false`, then `reference` is an alias to `value_type`.
  // Otherwise, it is a proxy class, which mimics `value_type&`:
  // * is implicitly convertible to `value_type`.
  // * you can assign a value_type to it, changing the color of the referenced pixel.
  using reference = std::conditional_t<Mutable, detail::PixelRef<PixelFormat>, value_type>;
  // Constant LegacyInputIterator whose value_type is @value_type.
  // It is not, however, a LegacyForwardIterator, because
  //     std::iterator_traits<iterator>::reference
  // is not const value_type&.
  // Nevertheless, this iterator supports all arithmetic operations of LegacyRandomAccessIterator, e.g.,
  // `it += n`, ` --it`, `it[n]`.
  using const_iterator = detail::ImageViewIterator<PixelFormat, false>;
  // LegacyInputIterator whose value_type is @value_type. If Mutable == true, then it also
  // models LegacyOutputIterator. It is not, however, a LegacyForwardIterator, because
  //     std::iterator_traits<iterator>::reference
  // is neither value_type& nor const value_type&.
  // Nevertheless, this iterator supports all arithmetic operations of LegacyRandomAccessIterator, e.g.,
  // `it += n`, ` --it`, `it[n]`.
  using iterator = detail::ImageViewIterator<PixelFormat, Mutable>;

  // Construct an empty view.
  // This constructor is only available if PixelFormat is default-constructible.
  // NOTE(review): `noexcept(noexcept(trait_v))` always evaluates to noexcept(true);
  // presumably `noexcept(std::is_nothrow_default_constructible_v<PixelFormat>)` was
  // intended — confirm before changing.
  template <class Enable = std::enable_if_t<std::is_default_constructible_v<PixelFormat>>>
  constexpr ContinuousImageView() noexcept(noexcept(std::is_nothrow_default_constructible_v<PixelFormat>)) {}

  // Construct a view into an image.
  // This constructor is only available if PixelFormat is default-constructible
  // \param height - height of the image.
  // \param width - width of the image.
  // \param data - pointer to the uncompressed RGB24 bitmap data. The size of
  //        the array should be exactly
  //            height * width * PixelFormat::kBytesPerPixel.
  //        Pixels are assumed to be stored contiguously, with each pixel
  //        occupying exactly PixelFormat::kBytesPerPixel.
  template <class Enable = std::enable_if_t<std::is_default_constructible_v<PixelFormat>>>
  constexpr ContinuousImageView(unsigned int height, unsigned int width, gsl::span<byte_type> data) noexcept(
      std::is_nothrow_default_constructible_v<PixelFormat>);

  // Construct a view into an image.
  // \param height - height of the image.
  // \param width - width of the image.
  // \param data - pointer to the uncompressed RGB24 bitmap data. The size of
  //        the array should be exactly
  //            height * width * PixelFormat::kBytesPerPixel.
  //        Pixels are assumed to be stored contiguously, with each pixel
  //        occupying exactly PixelFormat::kBytesPerPixel.
  // \param pixel_format - PixelFormat instance to use.
  constexpr ContinuousImageView(unsigned int height, unsigned int width, gsl::span<byte_type> data,
                                const PixelFormat& pixel_format);

  // Construct a view into an image.
  // \param height - height of the image.
  // \param width - width of the image.
  // \param data - pointer to the uncompressed RGB24 bitmap data. The size of
  //        the array should be exactly
  //            height * width * PixelFormat::kBytesPerPixel.
  //        Pixels are assumed to be stored contiguously, with each pixel
  //        occupying exactly PixelFormat::kBytesPerPixel.
  // \param pixel_format - PixelFormat instance to use.
  constexpr ContinuousImageView(unsigned int height, unsigned int width, gsl::span<byte_type> data,
                                PixelFormat&& pixel_format);

  // Construct a read-only view from a mutable view.
  template <class Enable = std::enable_if_t<!Mutable>>
  constexpr ContinuousImageView(ContinuousImageView<PixelFormat, !Mutable> image);

  // Returns the height of the image.
  constexpr unsigned int height() const noexcept;

  // Returns the width of the image.
  constexpr unsigned int width() const noexcept;

  // Returns the total number of pixels.
  constexpr std::size_t area() const noexcept;

  // Returns true if the image has zero area, false otherwise.
  constexpr bool empty() const noexcept;

  // Returns the pixel format used by this image.
  constexpr const PixelFormat& pixelFormat() const noexcept;

  // Returns the pointer to the bitmap data.
  constexpr gsl::span<byte_type> data() const noexcept;

  // Returns the total size of the bitmap in bytes.
  constexpr std::size_t size_bytes() const noexcept;

  // Returns an iterator to the first pixel.
  constexpr iterator begin() const;

  // Returns a const iterator to the first pixel.
  constexpr const_iterator cbegin() const;

  // Returns an iterator past the last pixel.
  constexpr iterator end() const;

  // Returns a const iterator past the last pixel.
  constexpr const_iterator cend() const;

  // Access the specified pixel.
  // \param y - Y coordinate of the pixel. Should be within [0; height()).
  // \param x - X coordinate of the pixel. Should be within [0; width()).
  // \return the color of the specified pixel.
  constexpr reference operator()(unsigned int y, unsigned int x) const;

  // Returns a non-owning view into the specified row of the image.
  // \param y - 0-based index of the row. Should be within [0; height()).
  // \return a non-owning view into the row @y.
  constexpr ImageRowView<PixelFormat, Mutable> row(unsigned int y) const;

 private:
  // Returns the fixed-size span of bytes backing pixel (y, x).
  constexpr gsl::span<byte_type, PixelFormat::kBytesPerPixel> getPixelData(unsigned int y, unsigned int x) const;

  detail::ImageViewStorage<PixelFormat, Mutable> storage_;
  unsigned int height_ = 0;
  unsigned int width_ = 0;
};

// Convert a ContinuousImageView into an ImageRowView.
// \param image - input image.
// \return an ImageRowView referring to the same data as image; the number of
//         elements in the returned view equals image.area().
template <class PixelFormat, bool Mutable> constexpr ImageRowView<PixelFormat, Mutable> flatten(ContinuousImageView<PixelFormat, Mutable> image) { return ImageRowView<PixelFormat, Mutable>(image.data(), image.area(), image.pixelFormat()); } template <class PixelFormat, bool Mutable> template <class Enable> constexpr ContinuousImageView<PixelFormat, Mutable>::ContinuousImageView( unsigned int height, unsigned int width, gsl::span<byte_type> data) noexcept(std::is_nothrow_default_constructible_v<PixelFormat>) : storage_(data.data()), height_(height), width_(width) { Expects(data.size() == height * width * PixelFormat::kBytesPerPixel); } template <class PixelFormat, bool Mutable> constexpr ContinuousImageView<PixelFormat, Mutable>::ContinuousImageView(unsigned int height, unsigned int width, gsl::span<byte_type> data, const PixelFormat& pixel_format) : storage_(data.data(), pixel_format), height_(height), width_(width) { Expects(data.size() == height * width * PixelFormat::kBytesPerPixel); } template <class PixelFormat, bool Mutable> constexpr ContinuousImageView<PixelFormat, Mutable>::ContinuousImageView(unsigned int height, unsigned int width, gsl::span<byte_type> data, PixelFormat&& pixel_format) : storage_(data.data(), std::move(pixel_format)), height_(height), width_(width) { Expects(data.size() == height * width * PixelFormat::kBytesPerPixel); } template <class PixelFormat, bool Mutable> template <class Enable> constexpr ContinuousImageView<PixelFormat, Mutable>::ContinuousImageView( ContinuousImageView<PixelFormat, !Mutable> image) : ContinuousImageView(image.height(), image.width(), image.data(), image.pixelFormat()) {} template <class PixelFormat, bool Mutable> constexpr unsigned int ContinuousImageView<PixelFormat, Mutable>::height() const noexcept { return height_; } template <class PixelFormat, bool Mutable> constexpr unsigned int ContinuousImageView<PixelFormat, Mutable>::width() const noexcept { return width_; } template <class PixelFormat, bool Mutable> 
constexpr std::size_t ContinuousImageView<PixelFormat, Mutable>::area() const noexcept { return static_cast<std::size_t>(height_) * width_; } template <class PixelFormat, bool Mutable> constexpr bool ContinuousImageView<PixelFormat, Mutable>::empty() const noexcept { return height_ == 0 || width_ == 0; } template <class PixelFormat, bool Mutable> constexpr const PixelFormat& ContinuousImageView<PixelFormat, Mutable>::pixelFormat() const noexcept { return storage_.pixelFormat(); } template <class PixelFormat, bool Mutable> constexpr auto ContinuousImageView<PixelFormat, Mutable>::data() const noexcept -> gsl::span<byte_type> { return gsl::span<byte_type>(storage_.data(), size_bytes()); } template <class PixelFormat, bool Mutable> constexpr std::size_t ContinuousImageView<PixelFormat, Mutable>::size_bytes() const noexcept { return area() * PixelFormat::kBytesPerPixel; } template <class PixelFormat, bool Mutable> constexpr auto ContinuousImageView<PixelFormat, Mutable>::begin() const -> iterator { return iterator(storage_.data(), pixelFormat()); } template <class PixelFormat, bool Mutable> constexpr auto ContinuousImageView<PixelFormat, Mutable>::cbegin() const -> const_iterator { return const_iterator(storage_.data(), pixelFormat()); } template <class PixelFormat, bool Mutable> constexpr auto ContinuousImageView<PixelFormat, Mutable>::end() const -> iterator { return iterator(storage_.data() + size_bytes(), pixelFormat()); } template <class PixelFormat, bool Mutable> constexpr auto ContinuousImageView<PixelFormat, Mutable>::cend() const -> const_iterator { return const_iterator(storage_.data() + size_bytes(), pixelFormat()); } template <class PixelFormat, bool Mutable> constexpr auto ContinuousImageView<PixelFormat, Mutable>::getPixelData(unsigned int y, unsigned int x) const -> gsl::span<byte_type, PixelFormat::kBytesPerPixel> { Expects(y < height_); Expects(x < width_); const std::size_t offset = (y * width_ + x) * PixelFormat::kBytesPerPixel; return 
gsl::span<byte_type, PixelFormat::kBytesPerPixel>(storage_.data() + offset, PixelFormat::kBytesPerPixel); } template <class PixelFormat, bool Mutable> constexpr auto ContinuousImageView<PixelFormat, Mutable>::operator()(unsigned int y, unsigned int x) const -> reference { const gsl::span<byte_type, PixelFormat::kBytesPerPixel> pixel_data = getPixelData(y, x); if constexpr (Mutable) { return detail::PixelRef<PixelFormat>(pixel_data, storage_.pixelFormat()); } else { return storage_.pixelFormat().read(pixel_data); } } template <class PixelFormat, bool Mutable> constexpr ImageRowView<PixelFormat, Mutable> ContinuousImageView<PixelFormat, Mutable>::row(unsigned int y) const { Expects(y < height_); const std::size_t bytes_per_row = static_cast<std::size_t>(width_) * PixelFormat::kBytesPerPixel; const gsl::span<byte_type> row_data(storage_.data() + y * bytes_per_row, bytes_per_row); return ImageRowView<PixelFormat, Mutable>(row_data, width_, pixelFormat()); } } // namespace imageview
{ "alphanum_fraction": 0.719879051, "avg_line_length": 45.5759717314, "ext": "h", "hexsha": "bb8432b865371bd2fcb368739beff863303110fa", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "817e92ac1dcbffc7fb0ebb11afe4ee9836f37df0", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "alexanderbelous/imageview", "max_forks_repo_path": "include/imageview/ContinuousImageView.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "817e92ac1dcbffc7fb0ebb11afe4ee9836f37df0", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "alexanderbelous/imageview", "max_issues_repo_path": "include/imageview/ContinuousImageView.h", "max_line_length": 115, "max_stars_count": null, "max_stars_repo_head_hexsha": "817e92ac1dcbffc7fb0ebb11afe4ee9836f37df0", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "alexanderbelous/imageview", "max_stars_repo_path": "include/imageview/ContinuousImageView.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2811, "size": 12898 }
#pragma once

#include <cstddef>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <memory>
#include <numeric>
#include <set>
#include <utility>
#include <vector>

#include <absl/container/flat_hash_map.h>
#include <gsl/gsl>

#include "chainerx/array.h"
#include "chainerx/array_body.h"
#include "chainerx/array_node.h"
#include "chainerx/constant.h"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/graph.h"
#include "chainerx/macro.h"
#include "chainerx/op_node.h"
#include "chainerx/shape.h"

namespace chainerx {
namespace backward_builder_detail {

// This class is used by the BackwardBuilder to record retained inputs and outputs.
// The records are used to create outer graph edges (between op nodes and previous array nodes) when the builder is finalized.
class RetentionRecord {
public:
    explicit RetentionRecord(size_t size) : size_{size} { CHAINERX_ASSERT(size_ > 0); }

    size_t size() const { return size_; }

    // Marks the entry at `index` as retained. The flag vector is allocated
    // lazily on the first call, so an unused record costs no memory.
    void Record(size_t index) {
        if (flags_.empty()) {
            flags_.resize(size_);
        }
        gsl::at(flags_, index) = static_cast<int8_t>(true);
    }

    // Returns true iff Record() has been called at least once.
    bool IsAnyRecorded() const { return !flags_.empty(); }

    bool IsRecorded(size_t index) const { return static_cast<bool>(flags_[index]); }

private:
    size_t size_{};
    std::vector<int8_t> flags_{};  // binary flags (int8_t to avoid vector<bool>)
};

template <typename Tag>
class RetainedArrayToken {
public:
    RetainedArrayToken(internal::ArrayBody::Params array_params, size_t index) : array_params_{std::move(array_params)}, index_{index} {}

    ~RetainedArrayToken() = default;

    RetainedArrayToken(const RetainedArrayToken&) = default;
    RetainedArrayToken(RetainedArrayToken&&) noexcept = default;
    RetainedArrayToken& operator=(const RetainedArrayToken&) = default;
    // TODO(hvy): Make the move assignment operator noexcept.
    RetainedArrayToken& operator=(RetainedArrayToken&&) = default;  // NOLINT(performance-noexcept-move-constructor)

private:
    friend class chainerx::BackwardContext;

    // Returns the array index.
    size_t index() const { return index_; }

    const internal::ArrayBody::Params& array_params() const { return array_params_; }

    internal::ArrayBody::Params array_params_;

    size_t index_;
};

}  // namespace backward_builder_detail

// An object used by op implementations to bridge between BackwardBuilder::RetainInput() and BackwardContext::GetRetainedInput().
//
// See BackwardBuilder::RetainInput() for details.
using RetainedInputToken = backward_builder_detail::RetainedArrayToken<struct InputTag>;

// An object used by op implementations to bridge between BackwardBuilder::RetainOutput() and BackwardContext::GetRetainedOutput().
//
// See BackwardBuilder::RetainOutput() for details.
using RetainedOutputToken = backward_builder_detail::RetainedArrayToken<struct OutputTag>;

// A class that is used to define backward operations and connect the graph.
//
// This class is not thread safe.
class BackwardBuilder {
public:
    // Target is responsible for defining edges from the OpNode to the input ArrayNodes with a given BackwardFunction.
    // Note that Targets built from the same BackwardBuilder share some properties so they are not computed again.
    class Target {
    public:
        explicit operator bool() const { return is_definition_required(); }

        // Defines a backward function with respect to specified input arrays (target).
        void Define(const BackwardFunction& backward_func);

        bool is_definition_required() const { return !graph_to_input_array_nodes_.empty(); }

    private:
        friend class BackwardBuilder;  // Only BackwardBuilder can create Target

        using InputArrayNodes = std::vector<const std::shared_ptr<internal::ArrayNode>*>;

        Target(BackwardBuilder& builder, std::vector<size_t> input_indices);

        // Collect input ArrayNodes, grouped by graph considering IsBackpropRequired.
        // This function is called only once, in the constructor.
        absl::flat_hash_map<BackpropId, InputArrayNodes> CreateInputArrayNodesMap() const;

        BackwardBuilder& builder_;
        std::vector<size_t> input_indices_;
        // TODO(hvy): Consider using linear search since elements are usually few.
        absl::flat_hash_map<BackpropId, InputArrayNodes> graph_to_input_array_nodes_;
    };

    // TODO(niboshi): Add an overload to accept `const std::vector<Array>&` as `inputs` and `outputs`
    // Note that simply overloading with the above type will result in ambiguous calls.
    // One solution is to define a type that accepts all of the expected types of inputs.
    BackwardBuilder(const char* op_name, std::vector<ConstArrayRef> inputs, std::vector<ConstArrayRef> outputs);
    BackwardBuilder(const char* op_name, const Array& input, std::vector<ConstArrayRef> outputs)
        : BackwardBuilder{op_name, std::vector<ConstArrayRef>{input}, std::move(outputs)} {}
    BackwardBuilder(const char* op_name, std::vector<ConstArrayRef> inputs, const Array& output)
        : BackwardBuilder{op_name, std::move(inputs), std::vector<ConstArrayRef>{output}} {}
    BackwardBuilder(const char* op_name, const Array& input, const Array& output)
        : BackwardBuilder{op_name, std::vector<ConstArrayRef>{input}, std::vector<ConstArrayRef>{output}} {}
    // The builder must have been finalized (Finalize()) before destruction.
    ~BackwardBuilder() { CHAINERX_ASSERT(is_finalized_); }

    BackwardBuilder(const BackwardBuilder&) = delete;
    BackwardBuilder(BackwardBuilder&&) noexcept = default;
    BackwardBuilder& operator=(const BackwardBuilder&) = delete;
    BackwardBuilder& operator=(BackwardBuilder&&) = delete;

    // Creates a backward target for the specified inputs.
    Target CreateTarget(std::vector<size_t> input_indices) {
        // input_indices shouldn't have duplicates.
        CHAINERX_ASSERT((std::set<size_t>{input_indices.begin(), input_indices.end()}.size() == input_indices.size()));

        // Each input may be targeted at most once per builder.
        for (size_t input_index : input_indices) {
            CHAINERX_ASSERT(input_index < inputs_target_created_.size());
            CHAINERX_ASSERT(!inputs_target_created_[input_index]);
            inputs_target_created_[input_index] = true;
        }
        return Target{*this, std::move(input_indices)};
    }

    // Creates a backward target for the specified input.
    Target CreateTarget(size_t input_index) { return CreateTarget(std::vector<size_t>{input_index}); }

    // Creates a backward target for all the inputs.
    Target CreateTarget() {
        std::vector<size_t> input_indices;
        input_indices.resize(inputs_.size());
        std::iota(input_indices.begin(), input_indices.end(), size_t{0});

        return CreateTarget(std::move(input_indices));
    }

    // TODO(hvy): Write comment.
    RetainedInputToken RetainInput(size_t input_index);

    std::vector<RetainedInputToken> RetainInput(std::vector<size_t> indices);

    // Flags an output array to be retained for use in the backward pass.
    // Op implementations can use this function in combination with BackwardContext::GetRetainedOutput() to retrieve output arrays in the
    // backward pass.
    //
    // If an op implementation requires the output array of the forward pass in the backward pass, it should call
    // BackwardBuilder::RetainOutput() in the forward pass and keep its return value (either assign a variable or capture by
    // value in a lambda expression). In the backward pass, it should call BackwardContext::GetRetainedOutput() with this token to retrieve
    // the output array.
    //
    // Capturing the output array directly with lambda expression would cause cyclic reference and therefore would lead to memory leak.
    //
    // Reusing the token for higher-order backward functions results in undefined behavior.
    //
    // `output` must be one of the arrays specified in the constructor of BackwardBuilder as output arrays.
    // If invalid array is specified, ChainerxError will be thrown.
    RetainedOutputToken RetainOutput(size_t output_index);

    std::vector<RetainedOutputToken> RetainOutput(std::vector<size_t> indices);

    // Finalizes the builder.
    //
    // This function must be called when targets have been created for all inputs.
    void Finalize();

private:
    // Create an op node for a specific graph.
    // Edges from output nodes to the op node are connected.
    std::shared_ptr<internal::OpNode>& FindOrCreateOpNode(const BackpropId& backprop_id);

    // Add shared ptrs between op nodes and array nodes belonging to outer graphs.
    // This function is called once, when the builder is finalized.
    // These references are required to restore retained inputs/outputs.
    void AddEdgesFromOpNodeToArrayNodeOfOuterGraphsForRetention();

    void ConnectBackpropIds();

    const char* op_name_;

    Context& context_;

    // Input arrays of the op.
    std::vector<ConstArrayRef> inputs_;

    // Flags indicating whether CreateTarget has been called for each of the input arrays.
    // All of these flags must be true after all the backwards have been defined for a BackwardBuilder.
    // This can be checked by calling is_complete();
    std::vector<bool> inputs_target_created_;

    // Output arrays of the op.
    std::vector<ConstArrayRef> outputs_;

    // A collection of op nodes, each of which corresponds to a graph.
    // This record is increasingly populated as new graphs are encountered in multiple Define() calls.
    absl::flat_hash_map<BackpropId, std::shared_ptr<internal::OpNode>> op_node_map_;

    backward_builder_detail::RetentionRecord input_retention_record_;

    backward_builder_detail::RetentionRecord output_retention_record_;

    bool has_any_applicable_outputs_;

    bool is_finalized_{false};
};

}  // namespace chainerx
{ "alphanum_fraction": 0.7310769231, "avg_line_length": 41.4893617021, "ext": "h", "hexsha": "aafa29e403c7ae51b50050d55f36c4094b8a3aca", "lang": "C", "max_forks_count": 1150, "max_forks_repo_forks_event_max_datetime": "2022-03-29T02:29:32.000Z", "max_forks_repo_forks_event_min_datetime": "2017-06-02T03:39:46.000Z", "max_forks_repo_head_hexsha": "e9da1423255c58c37be9733f51b158aa9b39dc93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "zjzh/chainer", "max_forks_repo_path": "chainerx_cc/chainerx/backward_builder.h", "max_issues_count": 5998, "max_issues_repo_head_hexsha": "e9da1423255c58c37be9733f51b158aa9b39dc93", "max_issues_repo_issues_event_max_datetime": "2022-03-08T01:42:44.000Z", "max_issues_repo_issues_event_min_datetime": "2017-06-01T06:40:17.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "zjzh/chainer", "max_issues_repo_path": "chainerx_cc/chainerx/backward_builder.h", "max_line_length": 139, "max_stars_count": 3705, "max_stars_repo_head_hexsha": "e9da1423255c58c37be9733f51b158aa9b39dc93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "zjzh/chainer", "max_stars_repo_path": "chainerx_cc/chainerx/backward_builder.h", "max_stars_repo_stars_event_max_datetime": "2022-03-30T10:46:15.000Z", "max_stars_repo_stars_event_min_datetime": "2017-06-01T07:36:12.000Z", "num_tokens": 2202, "size": 9750 }
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <gsl/gsl> #include "core/optimizer/graph_transformer.h" #include "core/optimizer/rule_based_graph_transformer.h" #include "core/optimizer/rewrite_rule.h" namespace onnxruntime { struct FreeDimensionOverride; namespace transformer_utils { /** Generates all predefined rules for this level. If rules_to_enable is not empty, it returns the intersection of predefined rules and rules_to_enable. TODO: This is visible for testing at the moment, but we should rather make it private. */ std::vector<std::unique_ptr<RewriteRule>> GenerateRewriteRules(TransformerLevel level, const std::vector<std::string>& rules_to_enable = {}); /** Generates all predefined (both rule-based and non-rule-based) transformers for this level. If transformers_and_rules_to_enable is not empty, it returns the intersection between the predefined transformers/rules and the transformers_and_rules_to_enable. */ std::vector<std::unique_ptr<GraphTransformer>> GenerateTransformers(TransformerLevel level, gsl::span<const FreeDimensionOverride> free_dimension_overrides, const std::vector<std::string>& rules_and_transformers_to_enable = {}); /** Given a TransformerLevel, this method generates a name for the rule-based graph transformer of that level. */ std::string GenerateRuleBasedTransformerName(TransformerLevel level); } // namespace transformer_utils } // namespace onnxruntime
{ "alphanum_fraction": 0.7063679245, "avg_line_length": 48.4571428571, "ext": "h", "hexsha": "d9a0fb369b48ed148e6bc6eac2c3e95ba9b7175f", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-09-09T06:55:51.000Z", "max_forks_repo_forks_event_min_datetime": "2020-09-09T06:55:51.000Z", "max_forks_repo_head_hexsha": "0e799a03f2a99da6a1b87a2cd37facb420c482aa", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ajinkya933/onnxruntime", "max_forks_repo_path": "include/onnxruntime/core/optimizer/graph_transformer_utils.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "0e799a03f2a99da6a1b87a2cd37facb420c482aa", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ajinkya933/onnxruntime", "max_issues_repo_path": "include/onnxruntime/core/optimizer/graph_transformer_utils.h", "max_line_length": 139, "max_stars_count": 1, "max_stars_repo_head_hexsha": "a36810471b346ec862ac6e4de7f877653f49525e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "csteegz/onnxruntime", "max_stars_repo_path": "include/onnxruntime/core/optimizer/graph_transformer_utils.h", "max_stars_repo_stars_event_max_datetime": "2020-07-12T15:23:49.000Z", "max_stars_repo_stars_event_min_datetime": "2020-07-12T15:23:49.000Z", "num_tokens": 310, "size": 1696 }
#ifdef HAVE_LIBGSL /* interface_gsl.h * Easel's interfaces to the GNU Scientific Library * * SRE, Tue Jul 13 15:36:48 2004 * SVN $Id: interface_gsl.h 664 2011-02-27 17:08:36Z eddys $ * SVN $URL: https://svn.janelia.org/eddylab/eddys/easel/branches/hmmer/3.1/interface_gsl.h $ */ #ifndef eslINTERFACE_GSL_INCLUDED #define eslINTERFACE_GSL_INCLUDED #include <stdlib.h> #include <easel/easel.h> #include <easel/dmatrix.h> #include <gsl/gsl_math.h> #include <gsl/gsl_blas.h> #include <gsl/gsl_permutation.h> #include <gsl/gsl_eigen.h> extern int esl_GSL_MatrixInversion(ESL_DMATRIX *A, ESL_DMATRIX **ret_Ai); #endif /*eslINTERFACE_GSL_INCLUDED*/ #endif /*HAVE_LIBGSL*/
{ "alphanum_fraction": 0.7529585799, "avg_line_length": 27.04, "ext": "h", "hexsha": "ac090d60bbedbeacc26c8b3a0d99eef961c642be", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "6c1beb53922471218386b7ed24e72fb093fe457c", "max_forks_repo_licenses": [ "Linux-OpenIB" ], "max_forks_repo_name": "YJY-98/PROSAVA", "max_forks_repo_path": "Linux/easel/interface_gsl.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "6c1beb53922471218386b7ed24e72fb093fe457c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Linux-OpenIB" ], "max_issues_repo_name": "YJY-98/PROSAVA", "max_issues_repo_path": "Linux/easel/interface_gsl.h", "max_line_length": 93, "max_stars_count": null, "max_stars_repo_head_hexsha": "6c1beb53922471218386b7ed24e72fb093fe457c", "max_stars_repo_licenses": [ "Linux-OpenIB" ], "max_stars_repo_name": "YJY-98/PROSAVA", "max_stars_repo_path": "Linux/easel/interface_gsl.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 222, "size": 676 }
#ifndef _EM_CTX_H_ #define _EM_CTX_H_ 1 #include "em_defs.h" #include <petsc.h> #include <set> #include <map> #include <memory> #include <vector> class Mesh; struct EMContext { MPI_Comm world_comm, group_comm; int world_size, world_rank, group_size, group_rank, group_id; std::vector<Point> rx; std::vector<double> tx; std::vector<double> freqs; Eigen::VectorXcd rsp, obs, obserr; Eigen::VectorXi otype, fidx, tidx, ridx; Point top_corners[4]; Eigen::VectorXd ztop[4], lsig[4]; std::shared_ptr<Mesh> original_mesh, mesh; std::vector<int> relevant_edges; std::pair<int, int> local_vertices, local_edges; std::set<int> bdr_cells; int aniso_form; Eigen::MatrixXd rho; Eigen::VectorXd lb, ub; std::vector<std::string> rho_name; Vec w; Mat C, M, A, B; KSP A_ksp, B_ksp; PETScBlockVector s, csem_e, dual_e; std::map<int, PETScBlockVector> mt_e; PetscViewer LS_log; Eigen::VectorXd csem_error; std::map<int, Eigen::VectorXd> mt_error; Mat G; std::vector<PetscReal> v_coords; PetscBool use_ams; char iprefix[256], oprefix[256]; PetscReal max_rx_edge_length, refine_fraction, e_rtol, dual_rtol; PetscInt max_adaptive_refinements, max_dofs, refine_strategy, n_groups, K_max_it, pc_threshold, inner_pc_type, direct_solver_type; PetscClassId EMCTX_ID; PetscLogEvent CreateLS, AssembleMat, AssembleRHS, SetupAMS, CreatePC, SolveLS, EstimateError, RefineMesh, CalculateRSP; }; PetscErrorCode create_context(EMContext *); PetscErrorCode destroy_context(EMContext *); PetscErrorCode process_options(EMContext *); #endif
{ "alphanum_fraction": 0.7387218045, "avg_line_length": 22.8, "ext": "h", "hexsha": "9a61faf6faf8a7be30817dde506feb95f29e61d3", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9129e28610d7fcb83a88021528575dfeaadad502", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "emfem/emfem", "max_forks_repo_path": "src/em_ctx.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "9129e28610d7fcb83a88021528575dfeaadad502", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "emfem/emfem", "max_issues_repo_path": "src/em_ctx.h", "max_line_length": 132, "max_stars_count": 1, "max_stars_repo_head_hexsha": "9129e28610d7fcb83a88021528575dfeaadad502", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "emfem/emfem", "max_stars_repo_path": "src/em_ctx.h", "max_stars_repo_stars_event_max_datetime": "2021-08-03T12:22:37.000Z", "max_stars_repo_stars_event_min_datetime": "2021-08-03T12:22:37.000Z", "num_tokens": 472, "size": 1596 }
// Copyright Jean Pierre Cimalando 2019. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE.md or copy at // http://www.boost.org/LICENSE_1_0.txt) #pragma once #include <gsl/gsl> #include <string> std::string get_home_directory(); std::string get_current_directory(); bool is_path_absolute(gsl::cstring_span path); bool is_path_separator(char32_t character); void append_path_separator(std::string &path); std::string normalize_path_separators(gsl::cstring_span path); std::string make_path_canonical(gsl::cstring_span path); std::string expand_path_tilde(gsl::cstring_span path); gsl::cstring_span path_file_name(gsl::cstring_span path); gsl::cstring_span path_directory(gsl::cstring_span path); std::string get_display_path(gsl::cstring_span path); #if defined(_WIN32) std::string known_folder_path(int csidl, std::error_code &ec); std::string known_folder_path(int csidl); #endif
{ "alphanum_fraction": 0.777544596, "avg_line_length": 35.2962962963, "ext": "h", "hexsha": "8e95320a7bdfd47d96c5d58f1d3e5d29a5b729e1", "lang": "C", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-11-22T08:05:13.000Z", "max_forks_repo_forks_event_min_datetime": "2020-07-10T18:48:10.000Z", "max_forks_repo_head_hexsha": "9978db5c9fa8b6eebe558eee212a2c0ed5c9e1bb", "max_forks_repo_licenses": [ "BSL-1.0" ], "max_forks_repo_name": "jpcima/smf-dsp", "max_forks_repo_path": "sources/utility/paths.h", "max_issues_count": 19, "max_issues_repo_head_hexsha": "9978db5c9fa8b6eebe558eee212a2c0ed5c9e1bb", "max_issues_repo_issues_event_max_datetime": "2022-01-16T20:44:07.000Z", "max_issues_repo_issues_event_min_datetime": "2020-07-05T23:59:33.000Z", "max_issues_repo_licenses": [ "BSL-1.0" ], "max_issues_repo_name": "jpcima/smf-dsp", "max_issues_repo_path": "sources/utility/paths.h", "max_line_length": 62, "max_stars_count": 22, "max_stars_repo_head_hexsha": "9978db5c9fa8b6eebe558eee212a2c0ed5c9e1bb", "max_stars_repo_licenses": [ "BSL-1.0" ], "max_stars_repo_name": "jpcima/smf-dsp", "max_stars_repo_path": "sources/utility/paths.h", "max_stars_repo_stars_event_max_datetime": "2022-03-26T23:08:17.000Z", "max_stars_repo_stars_event_min_datetime": "2020-07-08T15:23:44.000Z", "num_tokens": 230, "size": 953 }
#pragma once #include "BoundingVolume.h" #include "Library.h" #include "Tile.h" #include "TileContext.h" #include "TileID.h" #include "TileRefine.h" #include "TilesetOptions.h" #include <CesiumAsync/AsyncSystem.h> #include <CesiumAsync/IAssetAccessor.h> #include <CesiumAsync/IAssetRequest.h> #include <gsl/span> #include <spdlog/fwd.h> #include <cstddef> #include <memory> namespace Cesium3DTilesSelection { /** * @brief The information that is passed to a {@link TileContentLoader} to * create a {@link TileContentLoadResult}. * * For many types of tile content, only the `pRequest` field is required. The * other members are used for content that can generate child tiles, like * external tilesets or composite tiles. These members are usually initialized * from * the corresponding members of the {@link Tile} that the content belongs to. */ struct CESIUM3DTILESSELECTION_API TileContentLoadInput { /** * @brief Creates a new, uninitialized instance for the given tile. * * The `asyncSystem`, `pLogger`, `pAssetAccessor` and `pRequest` will have * default values, and have to be initialized before this instance is passed * to one of the loader functions. * * @param tile The {@link Tile} that the content belongs to */ TileContentLoadInput(const Tile& tile); /** * @brief Creates a new instance * * @param asyncSystem The async system to use for tile content loading. * @param pLogger The logger that will be used * @param pAssetAccessor The asset accessor to make further requests with. * @param pRequest The original tile request and its response. * @param tile The {@link Tile} that the content belongs to. */ TileContentLoadInput( const CesiumAsync::AsyncSystem& asyncSystem, const std::shared_ptr<spdlog::logger>& pLogger, const std::shared_ptr<CesiumAsync::IAssetAccessor>& pAssetAccessor, const std::shared_ptr<CesiumAsync::IAssetRequest>& pRequest, const Tile& tile); /** * @brief Creates a new instance. * * @param asyncSystem The async system to use for tile content loading. 
* @param pLogger The logger that will be used * @param pAssetAccessor The asset accessor to make further requests with. * @param pRequest The original tile request and its response. * @param tileID The {@link TileID} * @param tileBoundingVolume The tile {@link BoundingVolume} * @param tileContentBoundingVolume The tile content {@link BoundingVolume} * @param tileRefine The {@link TileRefine} strategy * @param tileGeometricError The geometric error of the tile * @param tileTransform The tile transform */ TileContentLoadInput( const CesiumAsync::AsyncSystem& asyncSystem, const std::shared_ptr<spdlog::logger>& pLogger, const std::shared_ptr<CesiumAsync::IAssetAccessor>& pAssetAccessor, const std::shared_ptr<CesiumAsync::IAssetRequest>& pRequest, const TileID& tileID, const BoundingVolume& tileBoundingVolume, const std::optional<BoundingVolume>& tileContentBoundingVolume, TileRefine tileRefine, double tileGeometricError, const glm::dmat4& tileTransform, const TilesetContentOptions& contentOptions); /** * @brief The async system to use for tile content loading. */ CesiumAsync::AsyncSystem asyncSystem; /** * @brief The logger that receives details of loading errors and warnings. */ std::shared_ptr<spdlog::logger> pLogger; /** * @brief The asset accessor to make further requests with. */ std::shared_ptr<CesiumAsync::IAssetAccessor> pAssetAccessor; /** * @brief The asset request and response data for the tile. */ std::shared_ptr<CesiumAsync::IAssetRequest> pRequest; /** * @brief The {@link TileID}. */ TileID tileID; /** * @brief The tile {@link BoundingVolume}. */ BoundingVolume tileBoundingVolume; /** * @brief Tile content {@link BoundingVolume}. */ std::optional<BoundingVolume> tileContentBoundingVolume; /** * @brief The {@link TileRefine}. */ TileRefine tileRefine; /** * @brief The geometric error. */ double tileGeometricError; /** * @brief The tile transform */ glm::dmat4 tileTransform; /** * @brief Options for parsing content and creating Gltf models. 
*/ TilesetContentOptions contentOptions; }; } // namespace Cesium3DTilesSelection
{ "alphanum_fraction": 0.7136021873, "avg_line_length": 30.4791666667, "ext": "h", "hexsha": "b4f59680b7bfc2e2168e5145b6f3d8cbb13343c3", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1d9912307336c833b74b7e9b7bc715d0a4e6c7ec", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "JiangMuWen/cesium-native", "max_forks_repo_path": "Cesium3DTilesSelection/include/Cesium3DTilesSelection/TileContentLoadInput.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "1d9912307336c833b74b7e9b7bc715d0a4e6c7ec", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "JiangMuWen/cesium-native", "max_issues_repo_path": "Cesium3DTilesSelection/include/Cesium3DTilesSelection/TileContentLoadInput.h", "max_line_length": 78, "max_stars_count": 2, "max_stars_repo_head_hexsha": "1d9912307336c833b74b7e9b7bc715d0a4e6c7ec", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "JiangMuWen/cesium-native", "max_stars_repo_path": "Cesium3DTilesSelection/include/Cesium3DTilesSelection/TileContentLoadInput.h", "max_stars_repo_stars_event_max_datetime": "2021-10-02T17:45:15.000Z", "max_stars_repo_stars_event_min_datetime": "2021-10-02T17:45:12.000Z", "num_tokens": 1078, "size": 4389 }
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <virginian.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> int main(int argc, char **argv) { if(argc != 3) { fprintf(stderr, "%s <database name> <rows>\n", argv[0]); exit(1); } unlink(argv[1]); virginian virg; virginian *v = &virg; virg_init(v); virg_db_create(v, argv[1]); virg_table_create(v, "test", VIRG_INT); virg_table_addcolumn(v, 0, "uniformi", VIRG_INT); virg_table_addcolumn(v, 0, "normali5", VIRG_INT); virg_table_addcolumn(v, 0, "normali20", VIRG_INT); virg_table_addcolumn(v, 0, "uniformf", VIRG_FLOAT); virg_table_addcolumn(v, 0, "normalf5", VIRG_FLOAT); virg_table_addcolumn(v, 0, "normalf20", VIRG_FLOAT); int i; void *buff = malloc(4 * 6); int *buff_i = (int*)buff; float *buff_f = (float*)buff; const gsl_rng_type *type; gsl_rng *ran; type = gsl_rng_ranlxd2; ran = gsl_rng_alloc(type); gsl_rng_set(ran, time(0)); for(i = 0; i < atoi(argv[2]); i++) { buff_i[0] = (int)gsl_ran_flat(ran, -100, 100); buff_i[1] = (int)gsl_ran_gaussian(ran, 5); buff_i[2] = (int)gsl_ran_gaussian(ran, 20); buff_f[3] = (float)gsl_ran_flat(ran, -100, 100); buff_f[4] = (float)gsl_ran_gaussian(ran, 5); buff_f[5] = (float)gsl_ran_gaussian(ran, 20); virg_table_insert(v, 0, (char*)&i, buff, NULL); if(i % 10000 == 0) { printf("%i,", i); fflush(stdout); } } printf("\n"); free(buff); virg_db_close(v); virg_close(v); gsl_rng_free(ran); return 0; }
{ "alphanum_fraction": 0.6554508748, "avg_line_length": 22.5151515152, "ext": "c", "hexsha": "5323dc6968add59c11e0c8c36b64d1ba7cc4d5e1", "lang": "C", "max_forks_count": 30, "max_forks_repo_forks_event_max_datetime": "2021-03-30T23:53:15.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-01T15:12:21.000Z", "max_forks_repo_head_hexsha": "f5d84e6693f04cc4a0094707ce64da0b40ed7ca4", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "krunalpuri/virginian", "max_forks_repo_path": "db/generate.c", "max_issues_count": 3, "max_issues_repo_head_hexsha": "f5d84e6693f04cc4a0094707ce64da0b40ed7ca4", "max_issues_repo_issues_event_max_datetime": "2017-05-09T10:24:03.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-29T22:11:01.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "krunalpuri/virginian", "max_issues_repo_path": "db/generate.c", "max_line_length": 58, "max_stars_count": 77, "max_stars_repo_head_hexsha": "61b3578f7169c9f17e989938f93867c61a02908f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "bakks/virginian", "max_stars_repo_path": "db/generate.c", "max_stars_repo_stars_event_max_datetime": "2020-12-24T22:20:56.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-18T00:45:00.000Z", "num_tokens": 566, "size": 1486 }
#ifndef __INTEGRATOR_H #define __INTEGRATOR_H #include "autoparams.h" #include <gsl/gsl_integration.h> #include <gsl/gsl_errno.h> #include <stack> namespace integrator { double infty = 1000; class WorkspaceHandle; class Workspaces { public: static const size_t WORKSPACE_SIZE = 1000000; static const size_t NUM_INITIAL_WORKSPACES = 4; static WorkspaceHandle get_workspace_handle(); virtual ~Workspaces() { while (!workspace_stack_.empty()) { gsl_integration_workspace_free(workspace_stack_.top()); workspace_stack_.pop(); } } private: Workspaces(): // private constructor workspace_stack_() { for (int i = 0; i < NUM_INITIAL_WORKSPACES; i++) workspace_stack_.push( gsl_integration_workspace_alloc(WORKSPACE_SIZE)); } Workspaces(Workspaces const& copy); // not impl Workspaces& operator=(Workspaces const& copy); // not impl std::stack<gsl_integration_workspace*> workspace_stack_; }; class WorkspaceHandle { public: WorkspaceHandle(std::stack<gsl_integration_workspace*>* workspaces): workspaces_(workspaces), ws_(0) { if (!workspaces->empty()) { ws_ = workspaces_->top(); workspaces_->pop(); } else ws_ = gsl_integration_workspace_alloc(Workspaces::WORKSPACE_SIZE); } virtual ~WorkspaceHandle() { workspaces_->push(ws_); } gsl_integration_workspace* get() { return ws_; } private: std::stack<gsl_integration_workspace*>* workspaces_; gsl_integration_workspace* ws_; }; WorkspaceHandle Workspaces::get_workspace_handle() { static Workspaces workspaces; return WorkspaceHandle(&workspaces.workspace_stack_); } struct WrapperInfo { double l; double r; double ts; gsl_function fu; }; double wrapper_func(const double x, const void* p) { const WrapperInfo* wi = static_cast<const WrapperInfo*>(p); const double l = wi->l; const double r = wi->r; const double ts = wi->ts; return (r-l)/2 * 1./atan(ts) * 1./(1.+x*x) * wi->fu.function((r-l)/2*atan(x)/atan(ts)+(l+r)/2, wi->fu.params); } typedef double (*func_with_nonconst_args)(double, void*); double integrate(double (*func)(const double, const void*), 
const double a, const double b, const double eps, const void* const p=0) { WorkspaceHandle w = Workspaces::get_workspace_handle(); double result = 0; double error = 0; gsl_function fu = {(func_with_nonconst_args)func, const_cast<void*>(p)}; gsl_integration_qags(&fu, a, b, params::integ_epsabs, eps, Workspaces::WORKSPACE_SIZE, w.get(), &result, &error); return result; } double integrate_inf(double (*func)(double, const void*), const double eps, const void* const p=0) { WorkspaceHandle w = Workspaces::get_workspace_handle(); double result = 0; double error = 0; gsl_function fu = {(func_with_nonconst_args)func, const_cast<void*>(p)}; gsl_integration_qagi(&fu, params::integ_epsabs, eps, Workspaces::WORKSPACE_SIZE, w.get(), &result, &error); return result; } double integrate_infu(double (*func)(double, const void*), const double a, const double eps, const void* const p=0) { WorkspaceHandle w = Workspaces::get_workspace_handle(); double result = 0; double error = 0; gsl_function fu = {(func_with_nonconst_args)func, const_cast<void*>(p)}; gsl_integration_qagiu(&fu, a, params::integ_epsabs, eps, Workspaces::WORKSPACE_SIZE, w.get(), &result, &error); return result; } double integrate_infl(double (*func)(double, const void*), const double b, const double eps, const void* const p=0) { WorkspaceHandle w = Workspaces::get_workspace_handle(); double result = 0; double error = 0; gsl_function fu = {(func_with_nonconst_args)func, const_cast<void*>(p)}; gsl_integration_qagil(&fu, b, params::integ_epsabs, eps, Workspaces::WORKSPACE_SIZE, w.get(), &result, &error); return result; } double edge_emph_integrate(double (*func)(double, const void*), const double l, const double r, const double ts, const double a, const double b, const double eps, const void* const p=0) { WorkspaceHandle w = Workspaces::get_workspace_handle(); double result = 0; double error = 0; gsl_function fu = {(func_with_nonconst_args)func, const_cast<void*>(p)}; WrapperInfo wi = {l, r, ts, fu}; gsl_function wra_fu = 
{(func_with_nonconst_args)wrapper_func, &wi}; gsl_integration_qags(&wra_fu, tan(2./(r-l)*(a-(l+r)/2)*atan(ts)), tan(2./(r-l)*(b-(l+r)/2)*atan(ts)), params::integ_epsabs, eps, Workspaces::WORKSPACE_SIZE, //GSL_INTEG_GAUSS21, w.get(), &result, &error); return result; } }; #endif // __INTEGRATOR_H
{ "alphanum_fraction": 0.6491651579, "avg_line_length": 28.4057142857, "ext": "h", "hexsha": "5178ac2a2c6408a18fb5baf4298dd148b2c89530", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8f641f73bcac2700b476663fe656fcad7d63470d", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "ModelDBRepository/228604", "max_forks_repo_path": "simulation/integrator.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "8f641f73bcac2700b476663fe656fcad7d63470d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "ModelDBRepository/228604", "max_issues_repo_path": "simulation/integrator.h", "max_line_length": 91, "max_stars_count": null, "max_stars_repo_head_hexsha": "8f641f73bcac2700b476663fe656fcad7d63470d", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "ModelDBRepository/228604", "max_stars_repo_path": "simulation/integrator.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1266, "size": 4971 }
/*-------------------------------------------------------------------- * $Id$ * * This file is part of libRadtran. * Copyright (c) 1997-2012 by Arve Kylling, Bernhard Mayer, * Claudia Emde, Robert Buras * * ######### Contact info: http://www.libradtran.org ######### * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. *--------------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> #if HAVE_NETCDF4 #include <netcdf.h> #endif #include "uvspec.h" #include "cdisort.h" #include "ckdfu.h" #include "ascii.h" #if HAVE_NETCDF4 #include "netCDF_functions.h" #endif #include "numeric.h" #include "solver.h" #include "errors.h" #if HAVE_LIBGSL #include <gsl/gsl_math.h> #include <gsl/gsl_diff.h> #endif #ifndef PI #define PI 3.14159265358979323846264338327 #endif #define EPSILON 1E-6 #define s2day 3600.0 * 24.0 /* seconds to day */ #define mW2W 1.E-3; /* mW to W */ #define ERRCODE 2 #define ERR(e) \ { \ printf ("Error: %s\n", nc_strerror (e)); \ exit (ERRCODE); \ status++; \ } /* prototypes of internal functions */ static int cnvlv (double* x_spec, float* y_spec, int n_spec, double* x_slit, double* y_slit, int n_slit, int std); static float radiance2bt (float rad, float* wvnmlo, float* wvnmhi, float* filter, int n, int processing, int ivi); 
static int output2bt (input_struct input, output_struct* output, int iv, int is_3d); static int read_photon_file (char* filename, float* lambda_r, int nlamdba_r, float** fraction); static int select_wavelength_indices (float* lambda, int nlambda, float* lambda_lower, float* lambda_upper, int start_index, int end_index, int quiet, int raman, int* lower, int* upper); static int set_raman_wl_grid (wl_inp_struct* wl_inp, wl_out_struct* wl_out, char* filename, int quiet); static int find_raman_closest_wl_in_solar_file (float* wls, float* wle, char* filename, int quiet); static int set_transmittance_wl_grid (wl_inp_struct* wl_inp, wl_out_struct* wl_out, int quiet); static int set_transmittance_wl_grid_lowtran (wl_inp_struct* wl_inp, wl_out_struct* wl_out, int quiet); static int set_transmittance_wl_grid_reptran (input_struct input, float** lambda_lower, float** lambda_upper, wl_out_struct* wl_out); static int set_rte_wl_grid_reptran (input_struct input, output_struct* output); static int write_spectral3D (input_struct input, output_struct* output); static int spec2rgb (input_struct input, output_struct* output); static int spec2rgb3D (input_struct input, output_struct* output); static int raman_spec2spec (input_struct input, output_struct* output); static inline int float_equal (float a, float b); double linpol (double x1, double x2, double y1, double y2, double x); double nm_to_inv_cm (double wavelength_nm); double inv_cm_to_nm (double wavenumber_inv_cm); double polarizability_anisotropy_N2 (double nu); double polarizability_anisotropy_O2 (double nu); int pfraction_reptran (wl_out_struct* wl_out); /********************************************/ /* Setup the transmittance wavelength grid. 
*/ /********************************************/ int setup_wlgrid (input_struct input, output_struct* output) { int iv = 0, status = 0, monochromatic = 0; float *lambda_lower = NULL, *lambda_upper = NULL; float test_lower = 0, test_upper = 0; char function_name[] = "setup_wlgrid"; char file_name[] = "ancillary.c"; /* Check whether representative wavelengths will be used. */ if (input.ck_scheme == CK_REPTRAN || input.ck_scheme == CK_REPTRAN_CHANNEL) output->wl.use_reptran = 1; else output->wl.use_reptran = 0; /* in case of RGB conversion we need an internal wavelength grid */ if (input.processing == PROCESS_RGB || input.processing == PROCESS_RGBNORM) { /* reset wavelength range */ input.wl.start = -999; input.wl.end = -999; switch (input.source) { case SRC_SOLAR: case SRC_BLITZ: /* BCA */ case SRC_LIDAR: /* BCA */ /* and replace transmittance grid */ strcpy (input.filename[FN_WLTRANS], input.filename[FN_PATH]); strcat (input.filename[FN_WLTRANS], "solar_flux/rgb"); if (!input.quiet) { fprintf (stderr, " ... ignoring user-defined wavelength range and using\n"); fprintf (stderr, " ... internal wavelength grid from %s\n", input.filename[FN_WLTRANS]); } break; case SRC_THERMAL: /* and replace thermal bands file */ strcpy (input.filename[FN_WLBANDS], input.filename[FN_PATH]); strcat (input.filename[FN_WLBANDS], "solar_flux/rgb_bands"); if (!input.quiet) { fprintf (stderr, " ... ignoring user-defined wavelength range and using\n"); fprintf (stderr, " ... 
internal thermal bands from %s\n", input.filename[FN_WLBANDS]); } break; default: fprintf (stderr, "Error, unsupported source %d in %s (%s)\n", input.source, function_name, file_name); return -1; } } /* set default unit for band width */ output->bandwidth_unit = input.bandwidth_unit; if (output->bandwidth_unit == UNIT_NOT_DEFINED) { switch (input.source) { case SRC_SOLAR: case SRC_BLITZ: /* BCA */ case SRC_LIDAR: /* BCA */ output->bandwidth_unit = UNIT_PER_NM; break; case SRC_THERMAL: output->bandwidth_unit = UNIT_PER_CM_1; break; default: fprintf (stderr, "Error, unsupported source %d in %s (%s)\n", input.source, function_name, file_name); return -1; } } if (input.raman) { /* Internally the lower and upper wavelengths for Raman scattering are different from */ /* what the user specifies. This is setup in setup_wlgrid (ancillary.c). */ /* For Raman scattering only include wavelengths that the user asked. */ /* Internally we have to include more wavelengths to account for */ /* Raman scattered radiation */ /* The values 196.8269 and 194.3015 are the maximum shifts (in cm-1) for N2 and O2 */ /* as given in report: Accounting for Raman Scattering in DOAS, J.F. de Haan, */ /* SN-OMIE-KNMI-409, Version 1.0, May 18, 2003. See also functions: crs_raman_N2 and */ /* crs_raman_O2. 
*/ output->wl.delta_wvl_raman_extra = 1.0; output->wl.delta_wvl_raman_lower = input.wl.start - inv_cm_to_nm (nm_to_inv_cm (input.wl.start) + 196.8269); input.wl.start = inv_cm_to_nm (nm_to_inv_cm (input.wl.start) + 196.8269) - output->wl.delta_wvl_raman_extra; output->wl.delta_wvl_raman_upper = inv_cm_to_nm (nm_to_inv_cm (input.wl.end) - 194.3015) - input.wl.end; input.wl.end = inv_cm_to_nm (nm_to_inv_cm (input.wl.end) - 194.3015) + output->wl.delta_wvl_raman_extra; status = find_raman_closest_wl_in_solar_file (&input.wl.start, &input.wl.end, input.filename[FN_EXTRATERRESTRIAL], input.quiet); } if (input.wl.start > 0 && input.wl.end > 0) { output->wl.start = input.wl.start; output->wl.end = input.wl.end; } output->wl.type = WLGRID_NONE; if (strlen (input.filename[FN_FILTERFUNCTION]) > 0 && input.ck_scheme == CK_REPTRAN_CHANNEL) fprintf (stderr, "Error: Combining options 'mol_abs_param reptran_channel' and 'filter_function_file' is not allowed."); if (output->wl.use_reptran || input.ck_scheme == CK_CRS || input.ck_scheme == CK_LOWTRAN || input.ck_scheme == CK_RAMAN) { /* no real correlated-k */ if (strlen (input.filename[FN_WLBANDS]) > 0 && input.source == SRC_THERMAL) { /* thermal_bands_file */ output->wl.type = WLGRID_BANDS; output->wl.ignore_solar_file = 1; /* ignore solar file if FN_WLBANDS is used */ if (!input.quiet) fprintf (stderr, " ... 
reading thermal_bands_file from %s\n", input.filename[FN_WLBANDS]); /* read center wavelength and band limits [wavenumbers] from file */ status = read_3c_file_float (input.filename[FN_WLBANDS], &(output->wl.lambda_t), &(lambda_lower), &(lambda_upper), &output->wl.nlambda_t); if (status != 0) { fprintf (stderr, "Error %d opening %s\n", status, input.filename[FN_WLBANDS]); return status; } } else if (strlen (input.filename[FN_WLTRANS]) > 0) { /* transmittance_wl_file */ output->wl.type = WLGRID_USER; /* read internal wavelength grid from transmittance file */ status = read_1c_file_float (input.filename[FN_WLTRANS], &(output->wl.lambda_t), &output->wl.nlambda_t); if (status != 0) { fprintf (stderr, "Error %d opening %s\n", status, input.filename[FN_WLTRANS]); return status; } if (output->wl.use_reptran) { status = set_transmittance_wl_grid_reptran (input, &lambda_lower, &lambda_upper, &output->wl); if (status) return fct_err_out (status, "set_transmittance_wl_grid_reptran", ERROR_POSITION); } } else if (strlen (input.filename[FN_MOL_TAU_ABS]) > 0) { /* moltau_file */ output->wl.type = WLGRID_MOLABS; if (!input.quiet) { fprintf (stderr, " ... molecular_tau_file specified but computational wavelength grid\n"); fprintf (stderr, " ... not explicitely defined; reading the wavelength grid\n"); fprintf (stderr, " ... 
from molecular_tau_file %s\n", input.filename[FN_MOL_TAU_ABS]); } status = read_molecular_absorption_lambda (input.filename[FN_MOL_TAU_ABS], input.quiet, &output->wl.lambda_t, &output->wl.nlambda_t, &monochromatic); if (status != 0) { fprintf (stderr, "Error %d reading wavelength grid from %s\n", status, input.filename[FN_MOL_TAU_ABS]); return -1; } } if (output->wl.type == WLGRID_MOLABS && monochromatic == 1) { output->wl.lambda_t[0] = input.wl.start; } else if (output->wl.type == WLGRID_NONE || (output->wl.type == WLGRID_MOLABS && monochromatic == 0 && input.ck_scheme == CK_RAMAN)) { output->wl.type = WLGRID_UVSPEC; if (output->wl.use_reptran) { /* transmittance wavelength grid consists of band centers */ status = set_transmittance_wl_grid_reptran (input, &lambda_lower, &lambda_upper, &output->wl); if (status) return fct_err_out (status, "set_transmittance_wl_grid_reptran", ERROR_POSITION); } else if (input.ck_scheme == CK_CRS) { /* set up a reasonable wavelength grid for the radiative transfer calculation */ status = set_transmittance_wl_grid (&input.wl, &output->wl, input.quiet); if (status) return fct_err_out (status, "set_transmittance_wl_grid", ERROR_POSITION); } else if (input.ck_scheme == CK_RAMAN) { /* Set the internal radiative transfer grid equal to the grid specified in */ /* the extraterrestrial spectrum, because we use the absolute value of */ /* the solar source in all calculations. */ status = set_raman_wl_grid (&input.wl, &output->wl, input.filename[FN_EXTRATERRESTRIAL], input.quiet); /* For Raman scattering only include wavelengths that the user asked. */ /* Internally we have to include more wavelengths because we need */ /* these cross sections to account for Raman scattered radiation. 
*/ test_lower = output->wl.lambda_t[0] + output->wl.delta_wvl_raman_lower + output->wl.delta_wvl_raman_extra; test_upper = output->wl.lambda_t[output->wl.nlambda_t - 1] - output->wl.delta_wvl_raman_upper - output->wl.delta_wvl_raman_extra; for (iv = 0; iv < output->wl.nlambda_t; iv++) { if (output->wl.lambda_t[iv] < test_lower) output->wl.raman_start_id = iv + 1; if (output->wl.lambda_t[output->wl.nlambda_t - 1 - iv] > test_upper) output->wl.raman_end_id = output->wl.nlambda_t - iv - 2; } } else status = set_transmittance_wl_grid_lowtran (&input.wl, &output->wl, input.quiet); if (status != 0) { fprintf (stderr, "Error %d setting up wavelength grid\n", status); return status; } /* here we need to calculate the full wavelength range */ output->wl.nlambda_rte_lower = 0; output->wl.nlambda_rte_upper = output->wl.nlambda_t - 1; } } else { /* correlated-k */ output->wl.type = WLGRID_CK; /* read information about wavelength grid and quadrature points */ switch (input.ck_scheme) { case CK_KATO: case CK_KATO2: case CK_KATO2_96: case CK_KATO2ANDWANDJI: /* read Kato et al. 
[1999] tables */ status = kato_readtables (input.ck_scheme, &(output->ck), input.filename[FN_PATH], input.rte.mc.filename[FN_MC_PHOTONS]); if (status != 0) { fprintf (stderr, "Error %d returned by kato_readtables() in %s (%s)\n", status, function_name, file_name); return status; } break; case CK_FU: /* read Fu and Liou [1992/93] tables */ status = fu_readtables (&(output->ck), input.filename[FN_PATH], input.rte.mc.filename[FN_MC_PHOTONS]); if (status != 0) { fprintf (stderr, "Error %d returned by fu_readtables() in %s (%s)\n", status, function_name, file_name); return status; } break; case CK_AVHRR_KRATZ: /* read Kratz [1999] tables */ status = avhrr_kratz_readtables (&(output->ck), input.filename[FN_PATH], input.rte.mc.filename[FN_MC_PHOTONS]); if (status != 0) { fprintf (stderr, "Error %d returned by avhrr_kratz_readtables() in %s (%s)\n", status, function_name, file_name); return status; } break; case CK_FILE: /* read generic tables in CDF format */ status = ck_generic_readtables (&(output->ck), input.ck_scheme_filename, input.rte.mc.filename[FN_MC_PHOTONS]); if (status != 0) { fprintf (stderr, "Error %d returned by ck_generic_readtables() in %s (%s)\n", status, function_name, file_name); return status; } break; default: fprintf (stderr, "Error: unsupported correlated-k scheme\n"); return -1; } /* copy center wavelengths to transmittance grid */ /* and set wavenumber intervals */ output->wl.nlambda_t = output->ck.n_wvl; output->wl.lambda_t = (float*)calloc (output->wl.nlambda_t, sizeof (float)); for (iv = 0; iv < output->wl.nlambda_t; iv++) { output->wl.lambda_t[iv] = output->ck.wvlc[iv + 1]; } } /* end correlated-k */ /* setup array containing the band limits */ output->wl.wvnmlo_t = (float*)calloc (output->wl.nlambda_t, sizeof (float)); output->wl.wvnmhi_t = (float*)calloc (output->wl.nlambda_t, sizeof (float)); if (output->wl.type == WLGRID_CK) { for (iv = 0; iv < output->wl.nlambda_t; iv++) { output->wl.wvnmlo_t[iv] = output->ck.wvnlo[iv + 1]; 
output->wl.wvnmhi_t[iv] = output->ck.wvnhi[iv + 1]; } } else if (output->wl.type == WLGRID_BANDS && input.source == SRC_THERMAL) { /* band limits from thermal_bands_file */ for (iv = 0; iv < output->wl.nlambda_t; iv++) { output->wl.wvnmlo_t[iv] = 1.0E7 / lambda_upper[iv]; output->wl.wvnmhi_t[iv] = 1.0E7 / lambda_lower[iv]; } } else { /* the default bandwidth is 1cm-1 to get the emittance per cm-1; */ /* input.bandwidth can be set with thermal_bandwidth */ if (output->bandwidth_unit == UNIT_PER_CM_1) { for (iv = 0; iv < output->wl.nlambda_t; iv++) { output->wl.wvnmlo_t[iv] = 1.0E7 / output->wl.lambda_t[iv] - input.bandwidth / 2.0; output->wl.wvnmhi_t[iv] = output->wl.wvnmlo_t[iv] + input.bandwidth; } } else if (output->bandwidth_unit == UNIT_PER_NM) { for (iv = 0; iv < output->wl.nlambda_t; iv++) { output->wl.wvnmlo_t[iv] = 1.0E7 / (output->wl.lambda_t[iv] + input.bandwidth / 2.0); output->wl.wvnmhi_t[iv] = 1.0E7 / (output->wl.lambda_t[iv] - input.bandwidth / 2.0); } } else { fprintf (stderr, "Error, unsupported bandwidth_unit %d in %s (%s)\n", output->bandwidth_unit, function_name, file_name); return -1; } } /* free temporary wavelengths arrays */ if (lambda_lower != NULL) free (lambda_lower); if (lambda_upper != NULL) free (lambda_upper); if (output->wl.type == WLGRID_CK || output->wl.type == WLGRID_BANDS || output->wl.type == WLGRID_USER || (output->wl.type == WLGRID_MOLABS && monochromatic == 0)) { /* if start and end wavelength have not been set, use entire wavelength range */ if (input.wl.start < 0 || input.wl.end < 0) { output->wl.start = output->wl.lambda_t[0]; output->wl.end = output->wl.lambda_t[output->wl.nlambda_t - 1]; if (!input.quiet) fprintf (stderr, " ... 
setting wavelength range to %f - %fnm\n", output->wl.start, output->wl.end); } /* select wavelength range */ status = select_wavelength_indices (output->wl.lambda_t, output->wl.nlambda_t, &(output->wl.start), &(output->wl.end), input.wl.start_index, input.wl.end_index, input.quiet, input.raman, &(output->wl.nlambda_rte_lower), &(output->wl.nlambda_rte_upper)); if (status != 0) { fprintf (stderr, "Error %d at no correlated-k in setup_wlgrid (ancillary.c) \n", status); return status; } /* check if the end wavelength is larger than 850nm in which case */ /* we require user-selected molecular absorption properties */ /* if (output->wl.lambda_t[output->wl.nlambda_rte_upper]>850 && (input.ck_scheme==CK_CRS && strlen(input.filename[FN_MOL_TAU_ABS])==0)) { fprintf (stderr, "Error, you want to do a spectral calculation for wavelengths larger than 850 nm. While uvspec\n"); fprintf (stderr, " treats ozone absorption correctly, molecular absorption is NOT considered in monochromatic\n"); fprintf (stderr, " uvspec calculations, as absorption cross-section are highly variable with wavelength.\n"); fprintf (stderr, " To consider molecular absorption other than ozone you have two choices \n"); fprintf (stderr, " with uvspec:\n"); fprintf (stderr, " (1) Do a line-by-line calculation using 'molecular_tau_file' to specify\n"); fprintf (stderr, " the wavelength-dependent absorption profile; to calculate the\n"); fprintf (stderr, " latter, you need something like David Edwards' genln2.\n"); fprintf (stderr, " ATTENTION: line-by-line calculations are very time-consuming!\n"); fprintf (stderr, " (2) Use the correlated-k approximation which is the most accurate\n"); fprintf (stderr, " solution after the line-by-line calculation; use either the\n"); fprintf (stderr, " pre-defined parameterization that come with libRadtran or provide\n"); fprintf (stderr, " your own; both options are selected with 'mol_abs_param ...'\n"); fprintf (stderr, "\n"); return -1; } */ } /* now setup the ck 
structure for the LOWTRAN/SBDART table */ if (input.ck_scheme == CK_LOWTRAN) { /* read LOWTRAN/SBDART tables */ status = sbdart_readtables (&(output->ck), output->wl.nlambda_t, input.filename[FN_PATH], input.rte.mc.filename[FN_MC_PHOTONS], input.quiet); if (status != 0) { fprintf (stderr, "Error %d returned by sbdart_readtables()\n", status); return status; } } if (output->wl.type != WLGRID_CK && output->wl.use_reptran == 0) { /* check if the internal grid covers the required wavelength range */ if (output->wl.lambda_t[0] > output->wl.start || output->wl.lambda_t[output->wl.nlambda_t - 1] < output->wl.end) { fprintf (stderr, "Error, internal wavelength grid (%f - %f nm)\n", output->wl.lambda_t[0], output->wl.lambda_t[output->wl.nlambda_t - 1]); fprintf (stderr, "does not cover the user-defined wavelength range (%f - %f nm)\n", output->wl.start, output->wl.end); return -1; } } /* inform the user about wavelength selection */ if (!input.quiet) { if (output->wl.nlambda_rte_upper != output->wl.nlambda_t - 1 || output->wl.nlambda_rte_lower != 0) { if (fabs (input.wl.start - NOT_DEFINED_FLOAT) > EPSILON) fprintf (stderr, " user wavelength range: %f - %f nm\n", input.wl.start, input.wl.end); fprintf (stderr, " selected wavelength indices %d - %d\n", output->wl.nlambda_rte_lower, output->wl.nlambda_rte_upper); fprintf (stderr, " wavelength bands boundaries: %f - %f nm\n", output->wl.lambda_t[output->wl.nlambda_rte_lower], output->wl.lambda_t[output->wl.nlambda_rte_upper]); } } return 0; } /**********************************************************************/ /* Setup the wavelength grid for the radiative transfer calculations. 
*/
/**********************************************************************/

/* setup_rte_wlgrid: derive the radiative-transfer wavelength grid (_r    */
/* members of output->wl) from the transmittance grid (_t members).       */
/* With representative wavelengths (reptran) the _r grid is built by      */
/* set_rte_wl_grid_reptran(); otherwise the _r members simply alias the   */
/* _t arrays.  Also allocates/fills the Monte-Carlo photon fractions and, */
/* if spectral importance sampling is on, the alis wavelength table.      */
/* Returns 0 on success, a nonzero status on error.                       */
int setup_rte_wlgrid (input_struct input, output_struct* output)
{
  int i, iv, status;

  if (output->wl.use_reptran) {
    /* checking compatibility of representative wavelengths with selected wavelength grid */
    if (output->wl.type == WLGRID_BANDS)
      return err_out ("Error: Combining representative wavelengths and thermal_bands_file is not allowed.\n", -1);
    if (output->wl.type == WLGRID_MOLABS)
      return err_out ("Error: Combining representative wavelengths and molecular_tau_file is not allowed.\n", -1);

    if (output->wl.type == WLGRID_UVSPEC || output->wl.type == WLGRID_USER) {
      /* build the representative-wavelength (_r) grid and the band mapping */
      status = set_rte_wl_grid_reptran (input, output);
      if (status)
        return fct_err_out (status, "set_rte_wl_grid_reptran", ERROR_POSITION);

      /* every representative wavelength takes part in the RTE calculation */
      output->wl.nlambda_rte_lower = 0;
      output->wl.nlambda_rte_upper = output->wl.nlambda_r - 1;

      if (input.verbose) {
        /* dump the transmittance-wavelength -> representative-wavelength mapping */
        fprintf (stderr, " transmittance wavelength | radiative transfer wavelength | weight\n");
        for (iv = 0; iv < output->wl.nlambda_t; iv++)
          for (i = 0; i < output->wl.nlambda_in_reptran_band[output->wl.reptran_band_t[iv]]; i++)
            fprintf (stderr,
                     " %12.6f nm | %12.6f nm | %f \n",
                     output->wl.lambda_t[iv],
                     output->wl.lambda_r[output->wl.reptran_band[output->wl.reptran_band_t[iv]][i]],
                     output->wl.weight_reptran_band[output->wl.reptran_band_t[iv]][i]);
      }
    }
    else
      return err_out ("Error: Uncompatible wavelength grid type.\n", -1);
  }
  else {
    /* If representative wavelengths are not used, the wavelength grids for */
    /* radiative transfer and transmission are identical */
    /* NOTE(review): the _r pointers alias the _t arrays here — do not free */
    /* both; confirm ownership convention with the callers. */
    output->wl.nlambda_r = output->wl.nlambda_t;
    output->wl.lambda_r  = output->wl.lambda_t;
    output->wl.wvnmlo_r  = output->wl.wvnmlo_t;
    output->wl.wvnmhi_r  = output->wl.wvnmhi_t;
  }

  /* need to read the photons file */
  if (output->wl.use_reptran || input.ck_scheme == CK_CRS || input.ck_scheme == CK_RAMAN) {
    if (strlen (input.rte.mc.filename[FN_MC_PHOTONS]) > 0) {
      status = read_photon_file (input.rte.mc.filename[FN_MC_PHOTONS], output->wl.lambda_r, output->wl.nlambda_r, &(output->wl.pfraction));
      if (status != 0) {
        fprintf (stderr, "Error %d reading photon file name %s\n", status, input.rte.mc.filename[FN_MC_PHOTONS]);
        return status;
      }
    }
    else {
      /* no photon file: distribute photons evenly (or by reptran weights) */
      output->wl.pfraction = calloc (output->wl.nlambda_r, sizeof (float));

      if (output->wl.use_reptran)
        /* NOTE(review): the status returned by pfraction_reptran() is     */
        /* assigned but never checked — confirm whether that is intended.  */
        status = pfraction_reptran (&(output->wl));
      else
        for (iv = output->wl.nlambda_rte_lower; iv <= output->wl.nlambda_rte_upper; iv++)
          output->wl.pfraction[iv] = 1.0 / (float)(output->wl.nlambda_rte_upper - output->wl.nlambda_rte_lower + 1);
    }
  }

  if (input.rte.mc.spectral_is) {
    /* spectral importance sampling: copy the full _r grid into mc.alis */
    if (!input.quiet)
      fprintf (stderr, " ... using %d wavelengths for spectral importance sampling \n", output->wl.nlambda_r);

    output->mc.alis.nlambda_abs = output->wl.nlambda_r;
    output->mc.alis.lambda      = calloc (output->mc.alis.nlambda_abs, sizeof (float));
    for (iv = 0; iv < output->mc.alis.nlambda_abs; iv++) {
      output->mc.alis.lambda[iv] = output->wl.lambda_r[iv];
    }

    /* Initialization for number of concentrations */
    output->mc.alis.Nc = 1;
  }

  if (input.rte.mc.concentration_is)
    output->mc.alis.nlambda_abs = 1;

  return 0;
}

/* Linearly interpolate between (x1,y1) and (x2,y2) to get y at x. */
double linpol (double x1, double x2, double y1, double y2, double x)
{
  /* Linearly interpolate between two points to get wanted y value for x. */
  /* NOTE(review): the degenerate-interval guard requires x1 > 0 AND      */
  /* x2 > 0; if x1 == x2 with non-positive abscissas the slope below      */
  /* divides by ~0 — confirm callers never pass such input.               */
  double y;
  double a = 0, b = 0;

  if (x2 > 0 && x1 > 0 && fabs (x2 - x1) < 0.0000001) {
    y = y1;           /* points coincide: return the left value */
  }
  else {
    a = (y2 - y1) / (x2 - x1);  /* slope */
    b = y1 - a * x1;            /* intercept */
    y = a * x + b;
  }

  return y;
}

/***********************************************************************/
/* Interpolate old_y (on the old_x grid) to new_y (on the new_x grid), */
/* either with                                                         */
/* natural cubic splines (linear=0),                                   */
/* linear (linear=1), or                                               */
/* logarithmic (linear=2)                                              */
/* interpolation methods.                                              */
/* The new_y must already be allocated with the right size (n_new_x)!! */
/* If the input is sorted in descending order,                         */
/* 'descend' needs to be set to 1.
*/ /***********************************************************************/ int arb_wvn (int n_old_x, float* old_x, float* old_y, int n_new_x, float* new_x, float* new_y, int linear, int descend) { int i = 0, j = 0, status = 0; double *a0 = NULL, *a1 = NULL, *a2 = NULL, *a3 = NULL; double *x = NULL, *y = NULL; double* tmp_y = NULL; double ynew = 0; double tst1 = 0, tst2 = 0; double sum = 0; x = (double*)calloc (n_old_x, sizeof (double)); y = (double*)calloc (n_old_x, sizeof (double)); if (!descend) { for (i = 0; i < n_old_x; i++) { x[i] = (double)old_x[i]; y[i] = (double)old_y[i]; } } else { for (i = 0; i < n_old_x; i++) { x[i] = (double)old_x[n_old_x - 1 - i]; y[i] = (double)old_y[n_old_x - 1 - i]; } } /* check if the array is now sorted in ascending order */ for (i = 0; i < n_old_x - 1; i++) if (x[i] >= x[i + 1]) { fprintf (stderr, "Error, x not sorted in ascending order (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); fprintf (stderr, "x[%d] = %f, x[%d] = %f\n", i, x[i], i + 1, x[i + 1]); return -1; } /* check that range of x_new is <= range of x_old (if not checked -> segfault) */ /* check minimum */ for (i = 0; i < n_new_x; i++) if (new_x[i] < x[0]) { fprintf (stderr, "Error, x_new[%d] = %f < min(x_old) = %f, arb_wvn() (in ancillary.c) \n", i, new_x[i], x[0]); return -2; } /* check maximum */ for (i = 0; i < n_new_x; i++) if (new_x[i] > x[n_old_x - 1]) { fprintf (stderr, "Error, x_new[%d] = %f > max(x_old) = %f, arb_wvn() (in ancillary.c) \n", i, new_x[i], x[n_old_x - 1]); return -3; } /* special check for log_spline interpolation */ if (linear == 4) { sum = 0.0; for (i = 0; i < n_old_x; i++) { sum += y[i]; if (y[i] < 0.0) { fprintf (stderr, "Error, cannot use log_spline interpolation for profile with negative values\n"); return -4; } } if (sum == 0.0) { /* x is zero in each layer, but grid must be changed anyway */ free (x); free (y); for (i = 0; i < n_new_x; i++) new_y[i] = 0.0; return 0; } else { for (i = 0; i < n_old_x; i++) { if (y[i] 
> 0.0) y[i] = log (y[i]); else y[i] = -10.E+99; /* this is a little bit cheating, but it works */ } } } switch (linear) { case 0: /* spline */ case 4: /* log spline */ status = spline_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do spline interpolation\n"); fprintf (stderr, "spline_coeffc() returned status %d\n", status); return status; } break; case 1: /* linear */ status = linear_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do linear interpolation\n"); fprintf (stderr, "linear_coeffc() returned status %d\n", status); return status; } break; case 2: /* log */ tmp_y = (double*)calloc (n_new_x, sizeof (double)); for (i = 0; i < n_new_x; i++) { j = 0; while (new_x[i] > x[j]) j++; if (j > 0) j--; /* logarithmic interpolation (if reasonable) */ tst1 = fabs (y[j + 1] - y[j]); tst2 = (y[j + 1] < y[j] ? y[j + 1] : y[j]); if (tst1 <= 0.001 * y[j] || tst2 <= 0) /* linear */ tmp_y[i] = y[j] + (new_x[i] - x[j]) / (x[j + 1] - x[j]) * (y[j + 1] - y[j]); else /* logarithmic */ tmp_y[i] = exp (log (y[j]) + (new_x[i] - x[j]) / (x[j + 1] - x[j]) * (log (y[j + 1]) - log (y[j]))); } /* first interpolate, then copy because then source */ /* and target may be one and the same */ for (i = 0; i < n_new_x; i++) new_y[i] = (float)tmp_y[i]; free (tmp_y); break; case 3: /* linear mixing ratio */ fprintf (stderr, "Error, linear mixing interpolation not possible with arb_wvn.\n"); fprintf (stderr, "Please use interpolate_density instead. 
\n"); return -5; break; default: fprintf (stderr, "Error, unknown interpolation type %d\n", linear); return -6; } switch (linear) { case 0: /* spline */ case 1: /* linear */ case 4: /* log spline */ for (i = 0; i < n_new_x; i++) { if (linear == 1) status = calc_linear_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1); else status = calc_splined_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1, a2, a3); /* this check is new in 0.99-alpha-6; so far, no error */ /* was reported if a value could not be interpolated */ if (status != 0) { fprintf (stderr, "Error %d returned by calc_splined_value (%g)\n", status, new_x[i]); fprintf (stderr, " %d data points, x[0] = %g, x[%d] = %g\n", n_old_x, x[0], n_old_x - 1, x[n_old_x - 1]); return status; } new_y[i] = (float)ynew; } free (a0); free (a1); free (a2); free (a3); break; case 2: /* no need to do anything because interpolation has already been done above */ break; case 3: /* linear mixing ratio */ fprintf (stderr, "Error, linear mixing interpolation not possible with arb_wvn.\n"); fprintf (stderr, "Please use interpolate_density instead. 
\n"); return -1; break; default: fprintf (stderr, "Error, unknown interpolation type %d\n", linear); return -7; } if (linear == 4) for (i = 0; i < n_new_x; i++) new_y[i] = exp (new_y[i]); free (x); free (y); return 0; } int arb_wvn_double (int n_old_x, double* old_x, double* old_y, int n_new_x, double* new_x, double* new_y, int linear, int descend) { int i = 0, j = 0, status = 0; double *a0 = NULL, *a1 = NULL, *a2 = NULL, *a3 = NULL; double *x = NULL, *y = NULL; double* tmp_y = NULL; double ynew = 0; double tst1 = 0, tst2 = 0; double sum = 0; x = (double*)calloc (n_old_x, sizeof (double)); y = (double*)calloc (n_old_x, sizeof (double)); if (!descend) { for (i = 0; i < n_old_x; i++) { x[i] = (double)old_x[i]; y[i] = (double)old_y[i]; } } else { for (i = 0; i < n_old_x; i++) { x[i] = (double)old_x[n_old_x - 1 - i]; y[i] = (double)old_y[n_old_x - 1 - i]; } } /* check if the array is now sorted in ascending order */ for (i = 0; i < n_old_x - 1; i++) if (x[i] >= x[i + 1]) { fprintf (stderr, "Error, x not sorted in ascending order (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); fprintf (stderr, "x[%d] = %f, x[%d] = %f\n", i, x[i], i + 1, x[i + 1]); return -1; } /* check that range of x_new is <= range of x_old (if not checked -> segfault) */ /* check minimum */ for (i = 0; i < n_new_x; i++) if (new_x[i] < x[0]) { fprintf (stderr, "Error, x_new[%d] = %f < min(x_old) = %f, arb_wvn() (in ancillary.c) \n", i, new_x[i], x[0]); return -2; } /* check maximum */ for (i = 0; i < n_new_x; i++) if (new_x[i] > x[n_old_x - 1]) { fprintf (stderr, "Error, x_new[%d] = %f > max(x_old) = %f, arb_wvn() (in ancillary.c) \n", i, new_x[i], x[n_old_x - 1]); return -3; } /* special check for log_spline interpolation */ if (linear == 4) { sum = 0.0; for (i = 0; i < n_old_x; i++) { sum += y[i]; if (y[i] < 0.0) { fprintf (stderr, "Error, cannot use log_spline interpolation for profile with negative values\n"); return -4; } } if (sum == 0.0) { /* x is zero in each layer, but 
grid must be changed anyway */ free (x); free (y); for (i = 0; i < n_new_x; i++) new_y[i] = 0.0; return 0; } else { for (i = 0; i < n_old_x; i++) { if (y[i] > 0.0) y[i] = log (y[i]); else y[i] = -10.E+99; /* this is a little bit cheating, but it works */ } } } switch (linear) { case 0: /* spline */ case 4: /* log spline */ status = spline_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do spline interpolation\n"); fprintf (stderr, "spline_coeffc() returned status %d\n", status); return status; } break; case 1: /* linear */ status = linear_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do linear interpolation\n"); fprintf (stderr, "linear_coeffc() returned status %d\n", status); return status; } break; case 2: /* log */ tmp_y = (double*)calloc (n_new_x, sizeof (double)); for (i = 0; i < n_new_x; i++) { j = 0; while (new_x[i] > x[j]) j++; if (j > 0) j--; /* logarithmic interpolation (if reasonable) */ tst1 = fabs (y[j + 1] - y[j]); tst2 = (y[j + 1] < y[j] ? y[j + 1] : y[j]); if (tst1 <= 0.001 * y[j] || tst2 <= 0) /* linear */ tmp_y[i] = y[j] + (new_x[i] - x[j]) / (x[j + 1] - x[j]) * (y[j + 1] - y[j]); else /* logarithmic */ tmp_y[i] = exp (log (y[j]) + (new_x[i] - x[j]) / (x[j + 1] - x[j]) * (log (y[j + 1]) - log (y[j]))); } /* first interpolate, then copy because then source */ /* and target may be one and the same */ for (i = 0; i < n_new_x; i++) new_y[i] = (float)tmp_y[i]; free (tmp_y); break; case 3: /* linear mixing ratio */ fprintf (stderr, "Error, linear mixing interpolation not possible with arb_wvn.\n"); fprintf (stderr, "Please use interpolate_density instead. 
\n"); return -5; break; default: fprintf (stderr, "Error, unknown interpolation type %d\n", linear); return -6; } switch (linear) { case 0: /* spline */ case 1: /* linear */ case 4: /* log spline */ for (i = 0; i < n_new_x; i++) { if (linear == 1) status = calc_linear_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1); else status = calc_splined_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1, a2, a3); /* this check is new in 0.99-alpha-6; so far, no error */ /* was reported if a value could not be interpolated */ if (status != 0) { fprintf (stderr, "Error %d returned by calc_splined_value (%g)\n", status, new_x[i]); fprintf (stderr, " %d data points, x[0] = %g, x[%d] = %g\n", n_old_x, x[0], n_old_x - 1, x[n_old_x - 1]); return status; } new_y[i] = (float)ynew; } free (a0); free (a1); free (a2); free (a3); break; case 2: /* no need to do anything because interpolation has already been done above */ break; case 3: /* linear mixing ratio */ fprintf (stderr, "Error, linear mixing interpolation not possible with arb_wvn.\n"); fprintf (stderr, "Please use interpolate_density instead. \n"); return -1; break; default: fprintf (stderr, "Error, unknown interpolation type %d\n", linear); return -7; } if (linear == 4) for (i = 0; i < n_new_x; i++) new_y[i] = exp (new_y[i]); free (x); free (y); return 0; } /***************************************************************/ /* Interpolate from one wavelength grid to another, either */ /* with natural cubic splines (linear=0) or linear (linear=1); */ /* if the input is sorted in descending order, descend needs */ /* to be set to 1. In contrast to arb_wvn(), values that */ /* cannot be interpolated will be set to 0. 
*/ /***************************************************************/ int arb_wvn_zero (int n_old_x, float* old_x, float* old_y, int n_new_x, float* new_x, float* new_y, int linear, int descend) { int i = 0, status = 0; double *a0 = NULL, *a1 = NULL, *a2 = NULL, *a3 = NULL; double *x = NULL, *y = NULL; double ynew = 0; x = (double*)calloc (n_old_x, sizeof (double)); y = (double*)calloc (n_old_x, sizeof (double)); if (!descend) { for (i = 0; i < n_old_x; i++) { x[i] = (double)old_x[i]; y[i] = (double)old_y[i]; } } else { for (i = 0; i < n_old_x; i++) { x[i] = (double)old_x[n_old_x - 1 - i]; y[i] = (double)old_y[n_old_x - 1 - i]; } } switch (linear) { case 0: status = spline_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do spline interpolation\n"); fprintf (stderr, "spline_coeffc() returned status %d\n", status); return status; } break; case 1: status = linear_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do linear interpolation\n"); fprintf (stderr, "linear_coeffc() returned status %d\n", status); return status; } break; case 2: default: fprintf (stderr, "Error, unknown interpolation type %d\n", linear); return -1; } switch (linear) { case 0: case 1: for (i = 0; i < n_new_x; i++) { if (linear == 1) status = calc_linear_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1); else status = calc_splined_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1, a2, a3); if (status == 0) new_y[i] = (float)ynew; else new_y[i] = 0; } free (a0); free (a1); free (a2); free (a3); break; case 2: default: fprintf (stderr, "Error, unknown interpolation type %d\n", linear); return -1; } free (x); free (y); return 0; } /***************************************************************/ /* Interpolate from one wavelength grid to another, either */ /* with natural cubic splines (linear=0) or linear (linear=1); */ /* interpolate only to a selected range of the output grid */ /* 
(including n_new_x_lower and n_new_x_upper) */ /***************************************************************/ int arb_wvn2 (int n_old_x, float* old_x, float* old_y, int n_new_x_lower, int n_new_x_upper, float* new_x, float* new_y, int linear) { int i = 0, status = 0; double *a0 = NULL, *a1 = NULL, *a2 = NULL, *a3 = NULL; double *x = NULL, *y = NULL; double ynew = 0; x = (double*)calloc (n_old_x, sizeof (double)); y = (double*)calloc (n_old_x, sizeof (double)); for (i = 0; i < n_old_x; i++) { x[i] = (double)old_x[i]; y[i] = (double)old_y[i]; } if (linear) { status = linear_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do linear interpolation\n"); fprintf (stderr, "linear_coeffc() returned status %d\n", status); return status; } } else { status = spline_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do spline interpolation\n"); fprintf (stderr, "spline_coeffc() returned status %d\n", status); return status; } } for (i = n_new_x_lower; i <= n_new_x_upper; i++) { if (linear) status = calc_linear_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1); else status = calc_splined_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1, a2, a3); /* this check is new in 0.99-alpha-6; so far, no error */ /* was reported if a value could not be interpolated */ if (status != 0) { fprintf (stderr, "Error %d returned by calc_splined_value (%g)\n", status, new_x[i]); fprintf (stderr, " %d data points, x[0] = %g, x[%d] = %g\n", n_old_x, x[0], n_old_x - 1, x[n_old_x - 1]); return status; } new_y[i] = (float)ynew; } free (a0); free (a1); free (a2); free (a3); free (x); free (y); return 0; } int arb_wvn2_double (int n_old_x, double* old_x, double* old_y, int n_new_x_lower, int n_new_x_upper, double* new_x, double* new_y, int linear) { int i = 0, status = 0; double *a0 = NULL, *a1 = NULL, *a2 = NULL, *a3 = NULL; double *x = NULL, *y = NULL; double ynew = 0; x = (double*)calloc 
(n_old_x, sizeof (double)); y = (double*)calloc (n_old_x, sizeof (double)); for (i = 0; i < n_old_x; i++) { x[i] = (double)old_x[i]; y[i] = (double)old_y[i]; } if (linear) { status = linear_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do linear interpolation\n"); fprintf (stderr, "linear_coeffc() returned status %d\n", status); return status; } } else { status = spline_coeffc (x, y, n_old_x, &a0, &a1, &a2, &a3); if (status != 0) { fprintf (stderr, "arb_wvn: sorry cannot do spline interpolation\n"); fprintf (stderr, "spline_coeffc() returned status %d\n", status); return status; } } for (i = n_new_x_lower; i <= n_new_x_upper; i++) { if (linear) status = calc_linear_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1); else status = calc_splined_value ((double)new_x[i], &ynew, x, n_old_x, a0, a1, a2, a3); /* this check is new in 0.99-alpha-6; so far, no error */ /* was reported if a value could not be interpolated */ if (status != 0) { fprintf (stderr, "Error %d returned by calc_splined_value (%g)\n", status, new_x[i]); fprintf (stderr, " %d data points, x[0] = %g, x[%d] = %g\n", n_old_x, x[0], n_old_x - 1, x[n_old_x - 1]); return status; } new_y[i] = (double)ynew; } free (a0); free (a1); free (a2); free (a3); free (x); free (y); return 0; } /***************************************************/ /* Distribute the photons according to the weights */ /* when represenative wavelengths are used */ /***************************************************/ int pfraction_reptran (wl_out_struct* wl_out) { int i, i_t, i_band; float weight_sum; /* calculate the sum of the weights over all bands */ weight_sum = 0; for (i_t = 0; i_t < wl_out->nlambda_t; i_t++) { i_band = wl_out->reptran_band_t[i_t]; /* Test if previous _t wavelength was in the same band. */ /* If yes, go to next _t wavelength since we do not */ /* want to add the weights for a band multiple times. 
*/ if (i_t > 0 && (i_band == wl_out->reptran_band_t[i_t - 1])) continue; for (i = 0; i < wl_out->nlambda_in_reptran_band[i_band]; i++) weight_sum += wl_out->weight_reptran_band[i_band][i] * wl_out->extra_reptran_r[wl_out->reptran_band[i_band][i]]; } /* Calculate the pfraction */ for (i_t = 0; i_t < wl_out->nlambda_t; i_t++) { i_band = wl_out->reptran_band_t[i_t]; for (i = 0; i < wl_out->nlambda_in_reptran_band[i_band]; i++) wl_out->pfraction[wl_out->reptran_band[i_band][i]] = wl_out->weight_reptran_band[i_band][i] * wl_out->extra_reptran_r[wl_out->reptran_band[i_band][i]] / weight_sum; } return 0; } /**************************************************************************************************************/ /* Calculate results at transmission grid (results_t) from results at representative wavelengths (results_r). */ /* In case of solar source, the results are weighted with the weights (weight_reptran_band) and the */ /* extraterrestrial spectrum (extra_reptran_r) in accordance with the approach taken during finding */ /* the representative wavelengths. */ /* In case of thermal source, only the weights (weight_reptran_band) are relevant */ /* (extra_reptran_r was set to 1, it is neutral). 
*/
/* Set is_variance=1 for weighting variances, and is_variance=0 for weighting other quantities                */
/**************************************************************************************************************/
int weighting_reptran (const wl_out_struct* wl_out, int is_variance, float* results_r, float* results_t)
{
  int   it, k, band, ir;
  float w, contrib, acc, wsum;

  /* One output value per transmittance wavelength: combine the results    */
  /* of all representative wavelengths in the band this transmittance      */
  /* wavelength belongs to, weighted by band weight times the              */
  /* extraterrestrial value (unity for thermal source).                    */
  for (it = 0; it < wl_out->nlambda_t; it++) {
    band = wl_out->reptran_band_t[it];

    acc  = 0;
    wsum = 0;
    for (k = 0; k < wl_out->nlambda_in_reptran_band[band]; k++) {
      ir = wl_out->reptran_band[band][k];
      w  = wl_out->weight_reptran_band[band][k] * wl_out->extra_reptran_r[ir];

      /* variances are propagated with squared weights */
      contrib = results_r[ir] * w;
      acc += (is_variance ? contrib * w : contrib);

      wsum += w;
    }

    /* normalize: by the squared weight sum for variances, else linearly */
    results_t[it] = (is_variance ? acc / (wsum * wsum) : acc / wsum);
  }

  return 0;
}

/************************************************************************************************/
/* This function returns the name of the file with the representative wavelengths (if i_mol<=0) */
/* or the name of the corresponding absorption lookup table files (if i_mol>0).
*/ /************************************************************************************************/ int reptran_filename (input_struct input, int i_mol, char* filename) { int len; char* gas = NULL; if (strlen (input.filename[FN_REPTRAN]) > 0) strcpy (filename, input.filename[FN_REPTRAN]); else { strcpy (filename, input.filename[FN_PATH]); strcat (filename, "correlated_k/reptran/"); strcat (filename, "reptran_"); if (input.source == SRC_THERMAL) strcat (filename, "thermal_"); else if (input.source == SRC_SOLAR) strcat (filename, "solar_"); else return err_out ("Error: Unsupported source in reptran_filename().\n", -1); if (input.ck_scheme == CK_REPTRAN) { if (input.ck_reptran_option == REPTRAN_OPTION_FINE) strcat (filename, "fine"); else if (input.ck_reptran_option == REPTRAN_OPTION_MEDIUM) strcat (filename, "medium"); else if (input.ck_reptran_option == REPTRAN_OPTION_COARSE || input.ck_reptran_option == REPTRAN_OPTION_NONE) strcat (filename, "coarse"); else return err_out ("Error: Unknown ck_reptran_option in function reptran_filename().\n", -1); } else if (input.ck_scheme == CK_REPTRAN_CHANNEL) { // reptran filename contains only first part of channel name until first underscore; len = strlen (input.ck_reptran_channel) - strlen (strchr (input.ck_reptran_channel, '_')); if ( isdigit ( input.ck_reptran_channel [len - 1])) // reptran filename does not contain the last or the last two characters if there are digits (e.g. 
sentinel3 --> sentinel or sentinel2a --> sentinel) len = len - 1; else if (isdigit (input.ck_reptran_channel[len - 2])) len = len - 2; strncat (filename, input.ck_reptran_channel, len); } } if (i_mol > 0) { strcat (filename, ".lookup."); gas = gas_number2string (i_mol); strcat (filename, gas); if (filename[strlen (filename) - 1] == ' ') filename[strlen (filename) - 1] = '\0'; /* remove upto two space characters from the species names */ if (filename[strlen (filename) - 1] == ' ') filename[strlen (filename) - 1] = '\0'; free (gas); } strcat (filename, ".cdf"); return 0; } /**************************************************************/ /* Interpolate a given profile (x,y) to a new grid xnew; */ /* data are written to original array y and memory is */ /* automatically reallocated */ /* automatically check for negative values */ /* */ /* Ulrich Hamann */ /**************************************************************/ int interpolate_profile (float* x, float** y, int n, float* xnew, int nnew, int interpol_method, int quiet) { int status = 0; float* tmp = NULL; int lc = 0, lc2 = 0, first = 0; tmp = (float*)calloc (nnew, sizeof (float)); switch (interpol_method) { case INTERP_METHOD_SPLINE: case INTERP_METHOD_LINEAR: case INTERP_METHOD_LOG: case INTERP_METHOD_LOG_SPLINE: status = arb_wvn (n, x, *y, nnew, xnew, tmp, interpol_method, 1); break; case INTERP_METHOD_LINMIX: fprintf (stderr, "Error, linear mixing ratio not possible with interpolate_profile\n"); fprintf (stderr, "Please use interpolate_density instead!\n"); return -1; break; default: fprintf (stderr, "Error, unknown interpolation method in interpolate_profile\n"); return -1; } /* testing for negativ density values */ switch (interpol_method) { case INTERP_METHOD_SPLINE: for (lc = 0; lc < nnew; lc++) { if (tmp[lc] < 0.0) { if (!quiet) { if (first == 0) fprintf (stderr, "*** Warning: In interpolate_density, automatic correction of negative density to 0.0\n"); fprintf (stderr, "*** Warning: z[%d]= %5.1f, 
dens[%d]= %12.7e -> dens[%d]= 0.0\n", lc, x[lc], lc, tmp[lc], lc); tmp[lc] = 0.0; first = 1; } } } break; case INTERP_METHOD_LINEAR: case INTERP_METHOD_LOG: case INTERP_METHOD_LINMIX: case INTERP_METHOD_LOG_SPLINE: for (lc = 0; lc < nnew; lc++) { if (tmp[lc] < 0.0) { if (fabs (tmp[lc]) < EPSILON) { if (!quiet) { fprintf (stderr, "*** Warning, small negative density detected during interpolate_profile!\n"); fprintf (stderr, "*** in layer = %d, dens = %e.\n", lc, tmp[lc]); fprintf (stderr, "*** This may happen, when dens profiles contain 0.0 values.\n"); fprintf (stderr, "*** Setting to 0.0 automatically.\n"); } tmp[lc] = 0.0; } else { fprintf (stderr, "Error, negative density detected in interpolate_profile! lc=%d \n", lc); for (lc2 = 0; lc2 < nnew; lc2++) fprintf (stderr, " ### dens[%d]=%e\n", lc2, tmp[lc2]); return -1; } } } break; default: fprintf (stderr, "Error, unknown interpolation method in interpolate_profile\n"); return -1; } if (status != 0) { fprintf (stderr, "Error %d interpolating profile\n", status); return status; } free (*y); *y = tmp; return 0; } /**************************************************************/ /* Interpolate a given profile (x,y) to a new grid xnew; */ /* data are written to original array y and memory is */ /* automatically reallocated */ /* automatically check for negative values */ /* */ /* Ulrich Hamann */ /**************************************************************/ int interpolate_density (float* x, float** y, int n, float* xnew, int nnew, int interpol_method, float* dens_air_old, float* dens_air, int quiet) { int status = 0; float* tmp = calloc (nnew, sizeof (float)); int lc = 0, lc2 = 0; int first = 0; switch (interpol_method) { case INTERP_METHOD_SPLINE: case INTERP_METHOD_LINEAR: case INTERP_METHOD_LOG: case INTERP_METHOD_LOG_SPLINE: status = arb_wvn (n, x, *y, nnew, xnew, tmp, interpol_method, 1); break; case INTERP_METHOD_LINMIX: /* convert from density to mixing ratio */ for (lc = 0; lc < n; lc++) { if 
(dens_air_old[lc] <= 0.0) { fprintf (stderr, "Error, cannot use linmix interpolation, when air density is <= 0.0\n"); return -1; } else (*y)[lc] = (*y)[lc] / dens_air_old[lc]; } /*linear interpolation of the mixing ratio*/ status = arb_wvn (n, x, *y, nnew, xnew, tmp, INTERP_METHOD_LINEAR, 1); /* convert back to number density */ for (lc = 0; lc < nnew; lc++) tmp[lc] = tmp[lc] * dens_air[lc]; break; default: fprintf (stderr, "Error, unknown interpolation method in interpolate_profile\n"); return -1; } /* testing for negativ density values */ switch (interpol_method) { case INTERP_METHOD_SPLINE: for (lc = 0; lc < nnew; lc++) { if (tmp[lc] < 0.0) { if (!quiet) { if (first == 0) fprintf (stderr, "*** Warning: In interpolate_density, automatic correction of negative density to 0.0\n"); fprintf (stderr, "*** Warning: z[%3d]= %5.1f, dens[%3d]= %12.7e -> dens[%3d]= 0.0\n", lc, x[lc], lc, tmp[lc], lc); tmp[lc] = 0.0; first = 1; } } } break; case INTERP_METHOD_LINEAR: case INTERP_METHOD_LOG: case INTERP_METHOD_LINMIX: case INTERP_METHOD_LOG_SPLINE: for (lc = 0; lc < nnew; lc++) { if (tmp[lc] < 0.0) { if (fabs (tmp[lc]) < EPSILON) { if (!quiet) { fprintf (stderr, "*** Warning, small negative density detected during interpolate_density!\n"); fprintf (stderr, "*** in layer = %d, dens = %e, abs(dens)= %e\n", lc, tmp[lc], fabs (tmp[lc])); fprintf (stderr, "*** This may happen, when dens profiles contain 0.0 values.\n"); fprintf (stderr, "*** Setting to 0.0 automatically.\n"); } tmp[lc] = 0.0; } else { fprintf (stderr, "Error, negative density detected in interpolate_density!, lc=%3d \n", lc); for (lc2 = 0; lc2 < nnew; lc2++) fprintf (stderr, " ### z[%3d] = %7.2f, dens[%3d] = %e\n", lc2, xnew[lc2], lc2, tmp[lc2]); return -1; } } } break; default: fprintf (stderr, "Error, unknown interpolation method in interpolate_profile\n"); return -1; } if (status != 0) { fprintf (stderr, "Error %d interpolating density\n", status); return status; } free (*y); *y = tmp; return 0; } 
/*******************************************************************/
/* Interpolate all given atmospheric profiles to a new z-grid      */
/* profiles are written to original arrays and memory is           */
/* automatically reallocated                                       */
/*******************************************************************/

int interpolate_atmosphere (float* z, float**** p_p, float**** p_T, float***** p_dens, float**** p_Tavg, float***** p_densavg, int n, float* znew, int nnew, int interpol_method_press, int interpol_method_temper, int* interpol_method_gas, int quiet)
{
  /* n is old number of levels, nnew is new number of levels */
  /* "p_xxx" here means "pointer to xxx" */

  int status = 0;
  int lc = -999, i = -999;

  /* float BOLTZMANN = 1.38065e-23; */

  float* dens_air_old = NULL;

  /* Pressure */
  status = 0;
  status += interpolate_profile (z, p_p[0][0], n, znew, nnew, interpol_method_press, quiet);

  /* Temperature */
  status += interpolate_profile (z, p_T[0][0], n, znew, nnew, interpol_method_temper, quiet);

  /* copy old air number density; needed later by interpolate_density() for */
  /* the linear-mixing-ratio interpolation of the trace gases               */
  dens_air_old = calloc (n, sizeof (float));
  for (lc = 0; lc < n; lc++)
    dens_air_old[lc] = (*p_dens)[MOL_AIR][0][0][lc];

  /* Air */
  /* this is not 100% consistent with interpolation of p and T */
  status += interpolate_profile (z, &((*p_dens)[MOL_AIR][0][0]), n, znew, nnew, interpol_method_gas[MOL_AIR], quiet);

  /* /\* determine air number density from pressure and temperature *\/ */
  /* free((*p_dens)[MOL_AIR]); */
  /* (*p_dens)[MOL_AIR] = calloc (nnew, sizeof(float)); */
  /* for (lc=0; lc<nnew; lc++) */
  /* (*p_dens)[MOL_AIR][lc] = (*p_p)[lc] / (BOLTZMANN * (*p_T)[lc]) * 100.0 / 1e6; */

  /* Ozone, O2, water vapour, CO2, NO2, BRO, OClO, HCHO, O4 */
  for (i = 0; i < MOL_NN; i++) {
    if (i != MOL_AIR)
      status += interpolate_density (z, &((*p_dens)[i][0][0]), n, znew, nnew, interpol_method_gas[i], dens_air_old, (*p_dens)[MOL_AIR][0][0], quiet);
  }

  /* recalculate layer average temperature and densities */
  /* they are needed to convert heating rates to K_per_day */
  if (*p_Tavg != NULL)
    ASCII_free_float_3D ((*p_Tavg), 1, 1);
  ASCII_calloc_float_3D (&(*p_Tavg), 1, 1, nnew);
  status += average_dens ((*p_T)[0][0], (*p_dens)[MOL_AIR][0][0], znew, nnew, interpol_method_temper, &((*p_Tavg)[0][0]), NO);

  /* We need this check because otherwise the cloud overlap examples crash! */
  /* It seems that memory for the average densities is not allocated */
  /* correctly in that case. */
  if (*p_densavg != NULL) {
    for (i = 0; i < MOL_NN; i++) {
      if (*p_densavg != NULL)
        free ((*p_densavg)[i][0][0]);
      status += average_dens ((*p_dens)[i][0][0], (*p_dens)[MOL_AIR][0][0], znew, nnew, interpol_method_gas[i], &(*p_densavg)[i][0][0], YES);
    }
  }

  if (status != 0) {
    /* NOTE(review): dens_air_old is not freed on this error path — looks like a leak; confirm */
    fprintf (stderr, "Error %d interpolating atmosphere\n", status);
    return status;
  }

  free (dens_air_old);

  return 0;
}

/******************************************************************/
/* Define the internal wavelength grid for the radiative transfer */
/* calculation.                                                   */
/******************************************************************/

static int set_transmittance_wl_grid (wl_inp_struct* wl_inp, wl_out_struct* wl_out, int quiet)
{
  int iv = 0;
  float lambda = 0.0, lambdanew = 0.0;
  /* first-visit flags for the Schumann-Runge band and Herzberg regions; */
  /* static, reset between the counting pass and the filling pass below  */
  static int firstsr = 1, firsthz = 1;

  /* Determine number of wavelengths needed for transmittance calculation; */
  /* careful - if something is changed, it needs to be changed twice in the */
  /* following code! */
  lambda = wl_inp->start;
  iv = 0;
  if (wl_inp->start != wl_inp->end) {
    /* non-monochromatic calculation */
    /* pass 1: only COUNT the wavelengths; the stepping logic below must */
    /* stay identical to pass 2 which actually stores them               */
    while (lambda < wl_inp->end) {
      lambdanew = lambda;
      /* below Lyman alpha: 1 nm steps, clamped so 121 nm is hit exactly */
      if (lambda < 121.0) {
        lambdanew += 1.0;
        if (lambdanew > 121.0) {
          lambda = 121.0;
          lambdanew = 121.0;
        }
      }
      /* Lyman alpha */
      if (lambda >= 121.0 && lambda < 122.0)
        lambdanew += 0.01;
      /* Schumann-Runge continuum */
      if (lambda >= 122.0 && lambda < 130) {
        lambdanew += 0.1;
        if (lambdanew >= 1.0E7 / 57000.0)
          lambda += 0.1;
      }
      if (lambda >= 130.0 && lambda < 1.0E7 / 57000.0) {
        lambdanew += 0.5;
        if (lambdanew >= 1.0E7 / 57000.0)
          lambda += 0.5;
      }
      /* Schumann-Runge bands: stepping is done in wavenumber (0.5 cm-1) */
      if (lambda <= 1.0E7 / 49000.5 && lambda >= 1.0E7 / 57000.0) {
        if (firstsr) {
          if (iv > 0)
            lambdanew = 1.0E7 / 57000.0;
          else
            lambdanew = 1.0E7 / (floor (2.0 * 1.0E7 / lambda) / 2.0);
          firstsr = 0;
        }
        else
          lambdanew = 1.0E7 / (1.0E7 / lambda - 0.5);
      }
      /* Herzberg continuum, Hartley-Huggins bands: 0.5 nm steps */
      if (lambda > 1.0E7 / 49000.5 && lambda < 350.0) {
        if (firsthz) {
          lambdanew = ceil (2.0 * lambda) / 2.0;
          if (lambdanew == lambda)
            lambdanew += 0.5;
          firsthz = 0;
        }
        else
          lambdanew += 0.5;
      }
      if (lambda >= 350.0)
        lambdanew += 1.0;
      lambda = lambdanew;
      iv++;
    }
  }

  /* reset the first-visit flags for the second (filling) pass */
  firstsr = 1;
  firsthz = 1;

  wl_out->nlambda_t = iv + 1;
  wl_out->lambda_t = (float*)calloc (wl_out->nlambda_t, sizeof (float));

  /* Set wavelengths for radiative transfer calculation */
  /* pass 2: identical stepping to pass 1, now storing the grid */
  lambda = wl_inp->start;
  for (iv = 0; iv < wl_out->nlambda_t; iv++) {
    wl_out->lambda_t[iv] = lambda;
    lambdanew = lambda;
    if (lambda < 121.0) {
      lambdanew += 1.0;
      if (lambdanew > 121.0) {
        lambda = 121.0;
        lambdanew = 121.0;
      }
    }
    /* Lyman alpha */
    if (lambda >= 121.0 && lambda < 122.0)
      lambdanew += 0.01;
    /* Schumann-Runge continuum */
    if (lambda >= 122.0 && lambda < 130) {
      lambdanew += 0.1;
      if (lambdanew >= 1.0E7 / 57000.0)
        lambda += 0.1;
    }
    if (lambda >= 130.0 && lambda < 1.0E7 / 57000.0) {
      lambdanew += 0.5;
      if (lambdanew >= 1.0E7 / 57000.0)
        lambda += 0.5;
    }
    /* Schumann-Runge bands */
    if (lambda <= 1.0E7 / 49000.5 && lambda >= 1.0E7 / 57000.0) {
      if (firstsr) {
        if (iv > 0)
          lambdanew = 1.0E7 / 57000.0;
        else { /* start of spectrum */
          lambdanew = 1.0E7 / (floor (2.0 * 1.0E7 / lambda) / 2.0);
          if (lambdanew == lambda) /* avoid that we use the same wavelength twice */
            lambdanew = 1.0E7 / (1.0E7 / lambda - 0.5);
        }
        firstsr = 0;
      }
      else
        lambdanew = 1.0E7 / (1.0E7 / lambda - 0.5);
    }
    /* Herzberg continuum, Hartley-Huggins bands */
    if (lambda > 1.0E7 / 49000.5 && lambda < 350.0) {
      if (firsthz) {
        lambdanew = ceil (2.0 * lambda) / 2.0;
        if (lambdanew == lambda)
          lambdanew += 0.5;
        firsthz = 0;
      }
      else
        lambdanew += 0.5;
    }
    if (lambda >= 350.0)
      lambdanew += 1.0;
    lambda = lambdanew;
  }

  return 0;
}

/* Build the transmittance wavelength grid for Raman calculations from the   */
/* wavelengths found in the extraterrestrial irradiance file, restricted to  */
/* [wl_inp->start, wl_inp->end].                                             */
static int set_raman_wl_grid (wl_inp_struct* wl_inp, wl_out_struct* wl_out, char* filename, int quiet)
{
  int iv = 0, i = 0;
  int nlambda = 0, status = 0;
  int ivs = 0, ive = 0; /* Start and end indices */
  float *tmp_lambda = NULL, *tmp_fbeam = NULL;

  /* read extraterrestrial irradiance */
  status = read_2c_file_float (filename, &tmp_lambda, &tmp_fbeam, &nlambda);
  if (status != 0) {
    fprintf (stderr, "Error %d reading %s\n", status, filename);
    return status;
  }

  wl_out->nlambda_t = 0;
  for (iv = 0; iv < nlambda; iv++) {
    if (tmp_lambda[iv] < wl_inp->start)
      ivs = iv + 1;
    if (tmp_lambda[iv] < wl_inp->end)
      ive = iv + 1;
    if (tmp_lambda[iv] >= wl_inp->start && tmp_lambda[iv] <= wl_inp->end) {
      wl_out->nlambda_t++;
    }
  }

  /* NOTE(review): the count accumulated in the loop above is overwritten here, */
  /* so the in-loop nlambda_t++ is dead code — confirm before removing          */
  wl_out->nlambda_t = ive - ivs + 1;
  wl_out->lambda_t = (float*)calloc (wl_out->nlambda_t, sizeof (float));

  i = 0;
  for (iv = ivs; iv <= ive; iv++) {
    wl_out->lambda_t[i] = tmp_lambda[iv];
    i++;
  }

  free (tmp_lambda);
  free (tmp_fbeam);

  return 0;
}

/* Snap *wls / *wle to the closest enclosing wavelengths present in the      */
/* extraterrestrial irradiance file (in/out parameters).                     */
static int find_raman_closest_wl_in_solar_file (float* wls, float* wle, char* filename, int quiet)
{
  int iv = 0;
  int nlambda = 0, status = 0;
  int ivs = 0, ive = 0; /* Start and end indices */
  float *tmp_lambda = NULL, *tmp_fbeam = NULL;

  /* read extraterrestrial irradiance */
  status = read_2c_file_float (filename, &tmp_lambda, &tmp_fbeam, &nlambda);
  if (status != 0) {
    fprintf (stderr, "Error %d reading %s\n", status, filename);
    return status;
  }

  for (iv = 0; iv < nlambda; iv++) {
    if (tmp_lambda[iv] < *wls)
      ivs = iv;
    if (tmp_lambda[iv] < *wle)
      ive = iv + 1;
  }

  /* NOTE(review): if every file wavelength is below *wle, ive becomes nlambda */
  /* and tmp_lambda[ive] reads past the array — confirm input guarantees       */
  *wls = tmp_lambda[ivs];
  *wle = tmp_lambda[ive];

  free (tmp_lambda);
  free (tmp_fbeam);

  return 0;
}

/******************************************************************/
/* Define the internal wavelength grid for the radiative transfer */
/* calculation. LOWTRAN requires much less detail, in */
/* particular below 300nm */
/******************************************************************/

static int set_transmittance_wl_grid_lowtran (wl_inp_struct* wl_inp, wl_out_struct* wl_out, int quiet)
{
  int iv = 0;
  float lambda = 0.0, wvn_step_t = 0.0;

  /* Determine number of wavelengths needed for radiative transfer calculation */
  /* 0.5 nm steps below 350 nm, 1 nm steps above */
  lambda = wl_inp->start;
  iv = 1;
  while (lambda < wl_inp->end) {
    if (lambda < 350.0)
      wvn_step_t = 0.5;
    else
      wvn_step_t = 1.0;
    lambda += wvn_step_t;
    iv++;
  }

  wl_out->nlambda_t = iv;
  wl_out->lambda_t = (float*)calloc (wl_out->nlambda_t, sizeof (float));

  /* Set wavelengths for radiative transfer calculation */
  /* second pass with identical stepping, now storing the grid */
  lambda = wl_inp->start;
  for (iv = 0; iv < wl_out->nlambda_t; iv++) {
    wl_out->lambda_t[iv] = lambda;
    if (lambda < 350.0)
      wvn_step_t = 0.5;
    else
      wvn_step_t = 1.0;
    lambda += wvn_step_t;
  }

  return 0;
}

/***********************************************************************************/
/* Define the transmittance wavelength grid when using representative wavelengths. */
/* The transmittance wavelengths are the center wavelengths of the bands.
*/
/***********************************************************************************/

static int set_transmittance_wl_grid_reptran (input_struct input, float** lambda_lower, float** lambda_upper, wl_out_struct* wl_out)
{
#if HAVE_NETCDF4
  int status = 0;
  int nbands = 0;
  int max_len_band_name = 0;
  double* wvlmin;
  double* wvlmax;
  char** band_name;
  char cdf_filename[FILENAME_MAX] = "";
  int ncid = 0;
  int idd_nbands = 0;
  int idd_max_len_band_name = 0;
  int id_wvlmin = 0;
  int id_wvlmax = 0;
  int id_band_name = 0;
  size_t dimlen = 0;
  int i_band;
  int i_band_start = -1;
  int i_band_end = -1;
  int it;
  size_t start[] = {0, 0};
  size_t count[] = {1, 1};

  /* determine filename with the parameterization */
  status = reptran_filename (input, -1, cdf_filename);

  if (input.verbose)
    fprintf (stderr, " reading bands from %s.\n", cdf_filename);

  /* open netcdf file and read the minimum and maximum wavelengths of the bands */
  status = nc_open (cdf_filename, NC_NOWRITE, &ncid);
  if (status == NC_NOERR) {
    status = NC_NOERR;
    status += nc_inq_dimid (ncid, "nbands", &idd_nbands);
    status += nc_inq_dimid (ncid, "max_len_band_name", &idd_max_len_band_name);
    status += nc_inq_dimlen (ncid, idd_nbands, &dimlen);
    nbands = dimlen;
    status += nc_inq_dimlen (ncid, idd_max_len_band_name, &dimlen);
    max_len_band_name = dimlen;
    status += nc_inq_varid (ncid, "wvlmin", &id_wvlmin);
    status += nc_inq_varid (ncid, "wvlmax", &id_wvlmax);
    wvlmin = (double*)calloc (nbands, sizeof (double));
    wvlmax = (double*)calloc (nbands, sizeof (double));
    if (status != NC_NOERR)
      return err_out ("Error %d while reading the representative wavelengths file.\n", status);
    status += nc_get_var_double (ncid, id_wvlmin, wvlmin);
    status += nc_get_var_double (ncid, id_wvlmax, wvlmax);
    status += nc_inq_varid (ncid, "band_name", &id_band_name);
    ASCII_calloc_char (&band_name, nbands, max_len_band_name);
    /* read band names one row at a time (count = {1, max_len_band_name}) */
    count[1] = max_len_band_name;
    for (i_band = 0; i_band < nbands; i_band++) {
      start[0] = i_band;
      status += nc_get_vara_text (ncid, id_band_name, start, count, band_name[i_band]);
    }
    if (status != NC_NOERR)
      return err_out ("Error %d while reading the representative wavelengths file.\n", status);
    nc_close (ncid);
  }
  else {
    if (status) {
      fprintf (stderr, "**********************************************************************************\n");
      fprintf (stderr, "*Error: Data files for REPTRAN not found in directory data/correlated_k/reptran. *'\n");
      fprintf (stderr, "* Please check whether you have downloaded the required REPTRAN data files *\n");
      fprintf (stderr, "* from http://www.libradtran.org/doku.php?id=download and unzipped the data*\n");
      fprintf (stderr, "* in the libRadtran folder. *\n");
      fprintf (stderr, "**********************************************************************************\n");
    }
    return err_out ("Error %d while opening the representative wavelengths file.\n", status);
  }

  if (wl_out->type == WLGRID_UVSPEC) {
    if (input.ck_scheme == CK_REPTRAN_CHANNEL) {
      /* channel is searched here in the netcdf file */
      for (i_band = 0; i_band < nbands; i_band++)
        if (strncasecmp (input.ck_reptran_channel, band_name[i_band], strlen (input.ck_reptran_channel)) == 0) {
          i_band_start = i_band;
          i_band_end = i_band;
          /* setting the wavelength range to the wavelength range required for selected channel */
          wl_out->start = wvlmin[i_band];
          wl_out->end = wvlmax[i_band];
        }
      if (i_band_start < 0 || i_band_end < 0)
        return err_out ("Error: Channel not found in reptran_file.\n", -1);
    }
    else if (input.wl.start > 0) {
      /* select bands required for user-specified wavelength range */
      /* i_band_start: first band whose upper edge is above wl.start */
      i_band_start = nbands;
      for (i_band = nbands - 1; i_band >= 0; i_band--)
        if (input.wl.start < (float)wvlmax[i_band])
          i_band_start = i_band;
      /* i_band_end: last band whose lower edge is below wl.end */
      i_band_end = -1;
      for (i_band = 0; i_band < nbands; i_band++)
        if (input.wl.end > (float)wvlmin[i_band])
          i_band_end = i_band;
      if (input.wl.start == input.wl.end && i_band_start > i_band_end)
        /* special case when a) only a single wavelength was specified and b) this wavelength is a band boundary */
        i_band_end = i_band_start;
      if (i_band_start == nbands || i_band_end == -1 || input.wl.end > (float)wvlmax[nbands - 1] || input.wl.start < (float)wvlmin[0]) {
        fprintf (stderr, "*****************************************************************\n");
        fprintf (stderr, "Error: User-specified wavelength range not covered by REPTRAN.\n");
        fprintf (stderr, " Wavelength range covered by REPTRAN is from %f nm to %f nm\n", wvlmin[0], wvlmax[nbands - 1]);
        fprintf (stderr, "*****************************************************************\n");
        return err_out ("Error: User-specified wavelength range not covered by the representative wavelengths parameterization.\n", -1);
      }
    }
    else if (input.wl.start_index > 0) {
      /* band selection by 1-based wavelength index */
      i_band_start = input.wl.start_index - 1;
      i_band_end = input.wl.end_index - 1;
      if (i_band_end > nbands - 1)
        return err_out ("Error: Wavelength index too large for activated representative wavelengths parameterization.\n", -1);
      wl_out->start = wvlmin[i_band_start];
      wl_out->end = wvlmax[i_band_end];
    }
    else {
      fprintf (stderr, "***************************************************************************************\n");
      fprintf (stderr, "Error: No wavelength range selected. Please use options wavelength or wavelength_index.\n");
      fprintf (stderr, "***************************************************************************************\n");
      return err_out ("Error: No wavelength range selected.\n", -1);
      /* i_band_start=0; i_band_end=nbands-1; wl_out->start=wvlmin[i_band_start]; wl_out->end=wvlmax[i_band_end]; */
    }
    if (wl_out->start < 1.0e7 / 49000.5) {
      fprintf (stderr, "**************************************************************************************\n");
      fprintf (stderr, "Warning: The wavelength resolution of REPTRAN at wavelength < 204.1nm might be too low\n");
      fprintf (stderr, " for fully resolving the spectral absorption features.\n");
      fprintf (stderr, " Use 'mol_abs_param crs' if you need higher spectral resolution.\n");
      fprintf (stderr, "**************************************************************************************\n");
    }
    /* transmittance wavelengths are the band centers; also export band edges */
    wl_out->nlambda_t = i_band_end - i_band_start + 1;
    wl_out->lambda_t = (float*)calloc (wl_out->nlambda_t, sizeof (float));
    *lambda_lower = (float*)calloc (wl_out->nlambda_t, sizeof (float));
    *lambda_upper = (float*)calloc (wl_out->nlambda_t, sizeof (float));
    wl_out->reptran_band_t = (int*)calloc (wl_out->nlambda_t, sizeof (int));
    for (i_band = i_band_start; i_band <= i_band_end; i_band++) {
      wl_out->lambda_t[i_band - i_band_start] = (wvlmin[i_band] + wvlmax[i_band]) / 2;
      (*lambda_lower)[i_band - i_band_start] = wvlmin[i_band];
      (*lambda_upper)[i_band - i_band_start] = wvlmax[i_band];
      wl_out->reptran_band_t[i_band - i_band_start] = i_band;
    }
  }
  else if (wl_out->type == WLGRID_USER) {
    if (input.wl.start > 0)
      return err_out ("Error: Combination of 'mol_abs_param reptran' and 'wavelength_grid_file' with 'wavelength' not supported.\n", -1);
    /* only need to find suitable bands for the transmission wavelengths given by the user */
    wl_out->reptran_band_t = (int*)calloc (wl_out->nlambda_t, sizeof (int));
    for (it = 0; it < wl_out->nlambda_t; it++) {
      wl_out->reptran_band_t[it] = -1;
      for (i_band = 0; i_band < nbands; i_band++)
        if (wl_out->lambda_t[it] >= wvlmin[i_band] && wl_out->lambda_t[it] < wvlmax[i_band])
          wl_out->reptran_band_t[it] = i_band;
      if (wl_out->reptran_band_t[it] == -1)
        return err_out (
          "Error: Wavelengths in wavelength_grid_file not covered by range of representative wavelengths parameterization.\n",
          -1);
    }
  }

  if (input.verbose)
    fprintf (stderr, " %d wavelengths set by set_transmittance_wl_grid_reptran().\n", wl_out->nlambda_t);

  ASCII_free_char (band_name, nbands);
  free (wvlmin);
  free (wvlmax);

  return 0;
#else
  fprintf (stderr, " ***********************************************************************\n");
  fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n");
  fprintf (stderr, " * use any netCDF option. Please get netcdf and rebuild. *\n");
  fprintf (stderr, " ***********************************************************************\n");
  return -1;
#endif
}

/*****************************************************************************/
/* Define the rte wavelength grid when using the representative wavelengths.
*/
/*****************************************************************************/

static int set_rte_wl_grid_reptran (input_struct input, output_struct* output)
{
#if HAVE_NETCDF4
  int status = 0;
  int nbands = 0;
  int nwvl = 0;
  int max_nwvl_in_band = 0;
  char cdf_filename[FILENAME_MAX] = "";
  int ncid = 0;
  int idd_nwvl = 0;
  int idd_nbands = 0;
  int idd_max_nwvl_in_band = 0;
  int id_wvl = 0;
  int id_iwvl = 0;
  int id_weight_wvl = 0;
  int id_nwvl_in_band = 0;
  int id_extra = 0;
  int id_wvl_integral = 0;
  size_t dimlen = 0;
  double* wvl;
  int* wvl_active;
  int n_active;
  int i_wvl;
  int* index_in_lambda_r;
  double* extra;
  int no_extra;
  int i_band;
  int i;
  int* i_wvl_tmp;
  double* weight_wvl_tmp;
  int isp;
  int add_tau_wvl_to_lambda_r;
  double wvl_min_tmp;
  double wvl_max_tmp;
  double tau_wvl;

  /* determine file with the parameterization */
  status = reptran_filename (input, -1, cdf_filename);

  /* open this file for reading */
  status = nc_open (cdf_filename, NC_NOWRITE, &ncid);
  if (status == NC_NOERR) {
    /* read dimensions */
    status = 0;
    status += nc_inq_dimid (ncid, "nwvl", &idd_nwvl);
    status += nc_inq_dimid (ncid, "nbands", &idd_nbands);
    status += nc_inq_dimid (ncid, "max_nwvl_in_band", &idd_max_nwvl_in_band);
    status += nc_inq_dimlen (ncid, idd_nwvl, &dimlen);
    nwvl = dimlen;
    status += nc_inq_dimlen (ncid, idd_nbands, &dimlen);
    nbands = dimlen;
    status += nc_inq_dimlen (ncid, idd_max_nwvl_in_band, &dimlen);
    max_nwvl_in_band = dimlen;
    if (status != 0) {
      fprintf (stderr, "Error %d reading dimensions from %s\n", status, cdf_filename);
      return status;
    }
    /* NOTE(review): the plain assignments below overwrite status, so a failure */
    /* of an earlier nc_* call may go unnoticed until later — confirm intended  */
    status = nc_inq_varid (ncid, "wvl", &id_wvl);
    wvl = (double*)calloc (nwvl, sizeof (double));
    status = nc_get_var_double (ncid, id_wvl, wvl);
    status = nc_inq_varid (ncid, "nwvl_in_band", &id_nwvl_in_band);
    output->wl.nlambda_in_reptran_band = (int*)calloc (nbands, sizeof (int));
    status = nc_get_var_int (ncid, id_nwvl_in_band, (output->wl.nlambda_in_reptran_band));
    status = nc_inq_varid (ncid, "iwvl", &id_iwvl);
    status = nc_inq_varid (ncid, "iwvl_weight", &id_weight_wvl);
    status = ASCII_calloc_int (&(output->wl.reptran_band), nbands, max_nwvl_in_band);
    status = ASCII_calloc_double (&(output->wl.weight_reptran_band), nbands, max_nwvl_in_band);
    i_wvl_tmp = (int*)calloc (nbands * max_nwvl_in_band, sizeof (int));
    weight_wvl_tmp = (double*)calloc (nbands * max_nwvl_in_band, sizeof (double));
    status = nc_get_var_int (ncid, id_iwvl, i_wvl_tmp);
    status = nc_get_var_double (ncid, id_weight_wvl, weight_wvl_tmp);
    /* de-interleave the flat (column-major) netcdf arrays into per-band rows */
    for (i_band = 0; i_band < nbands; i_band++) {
      for (i_wvl = 0; i_wvl < max_nwvl_in_band; i_wvl++) {
        output->wl.reptran_band[i_band][i_wvl] = i_wvl_tmp[(i_band + i_wvl * nbands)];
        output->wl.weight_reptran_band[i_band][i_wvl] = weight_wvl_tmp[(i_band + i_wvl * nbands)];
      }
    }
    free (i_wvl_tmp);
    free (weight_wvl_tmp);
    /* presence of the "extra" variable distinguishes solar from thermal files */
    status = nc_inq_varid (ncid, "extra", &id_extra);
    if (status != 0) {
      no_extra = 1; /* representative wavelengths for thermal calculations */
      if (input.source != SRC_THERMAL) {
        fprintf (stderr, "Error: Representative wavelengths file %s was created for\n", cdf_filename);
        fprintf (stderr, " thermal source, but in the input file 'source' is not set to 'thermal'.");
        return -4;
      }
    }
    else {
      no_extra = 0; /* representative wavelengths for solar calculations */
      if (input.source != SRC_SOLAR) {
        fprintf (stderr, "Error: Representative wavelengths file %s was created for\n", cdf_filename);
        fprintf (stderr, " solar source, but in the input file 'source' is not set to 'solar'.");
        return -5;
      }
    }
    extra = (double*)calloc (nwvl, sizeof (double));
    if (no_extra) {
      status = 0;
      for (i_wvl = 0; i_wvl < nwvl; i_wvl++)
        extra[i_wvl] = 1; /* In case of thermal calculations extra is set to 1 */
    }
    else
      status = nc_get_var_double (ncid, id_extra, extra);
    status += nc_inq_varid (ncid, "wvl_integral", &id_wvl_integral);
    output->wl.width_of_reptran_band = (double*)calloc (nbands, sizeof (double));
    status += nc_get_var_double (ncid, id_wvl_integral, output->wl.width_of_reptran_band);
    if (status != 0) {
      fprintf (stderr, "Error %d reading %s\n", status, cdf_filename);
      return status;
    }
    nc_close (ncid);
  }
  else
    return status;

  /* find required ('active') representative wavelengths */
  wvl_active = (int*)calloc (nwvl, sizeof (int));
  for (i = 0; i < nwvl; i++)
    wvl_active[i] = 0;

  /* go through transmission wavelength grid */
  /* note: iwvl indices stored in reptran_band are 1-based, hence the "- 1" */
  for (i_wvl = 0; i_wvl < output->wl.nlambda_t; i_wvl++)
    for (i = 0; i < output->wl.nlambda_in_reptran_band[output->wl.reptran_band_t[i_wvl]]; i++)
      wvl_active[output->wl.reptran_band[output->wl.reptran_band_t[i_wvl]][i] - 1] = 1;

  /* count active wavelengths */
  n_active = 0;
  for (i_wvl = 0; i_wvl < nwvl; i_wvl++)
    if (wvl_active[i_wvl] == 1)
      n_active++;

  /* find out wavelength for tau scaling*/
  tau_wvl = -1;
  if (input.aer.modify[MODIFY_VAR_TAU550][MODIFY_TYPE_SET] >= 0) {
    if (input.aer.tau_wvl_lambda > 0)
      tau_wvl = input.aer.tau_wvl_lambda;
    else
      tau_wvl = 550;
  }
  for (isp = 0; isp < input.n_caoth; isp++) {
    if (input.caoth[isp].modify[MODIFY_VAR_TAU550][MODIFY_TYPE_SET] >= 0)
      tau_wvl = 550;
  }

  /* find out if tau_wvl needs to be added to wavelength grid */
  add_tau_wvl_to_lambda_r = 0;
  if (tau_wvl > 0) {
    /* find out wavelength range */
    wvl_min_tmp = 1e12;
    wvl_max_tmp = 0;
    for (i_wvl = 0; i_wvl < nwvl; i_wvl++) {
      if (wvl_active[i_wvl] == 1) {
        if (wvl_min_tmp > wvl[i_wvl])
          wvl_min_tmp = wvl[i_wvl];
        if (wvl_max_tmp < wvl[i_wvl])
          wvl_max_tmp = wvl[i_wvl];
      }
    }
    /* check if tau_wvl is outside the wavelength range */
    if (wvl_max_tmp < tau_wvl || wvl_min_tmp > tau_wvl) {
      add_tau_wvl_to_lambda_r = 1;
      n_active += 1;
    }
  }

  /* allocate required arrays */
  output->wl.nlambda_r = n_active;
  output->wl.lambda_r = (float*)calloc (output->wl.nlambda_r, sizeof (float));
  output->wl.iwvl_in_reptran_file_r = (int*)calloc (output->wl.nlambda_r, sizeof (int));
  output->wl.extra_reptran_r = (float*)calloc (output->wl.nlambda_r, sizeof (float));
  output->wl.wvnmlo_r = (float*)calloc (output->wl.nlambda_r, sizeof (float));
  output->wl.wvnmhi_r = (float*)calloc (output->wl.nlambda_r, sizeof (float));
  index_in_lambda_r = (int*)calloc (nwvl, sizeof (int));

  /* reset counter */
  n_active = 0;

  /* if tau_wvl needs to be added before the representative wavelengths */
  /* (hidden wavelength: iwvl index -1, zero extraterrestrial weight)   */
  if (add_tau_wvl_to_lambda_r == 1 && wvl_min_tmp > tau_wvl) {
    output->wl.lambda_r[n_active] = tau_wvl;
    output->wl.iwvl_in_reptran_file_r[n_active] = -1;
    output->wl.extra_reptran_r[n_active] = 0;
    n_active++;
    if (input.verbose)
      fprintf (stderr, " Added the hidden wavelength %f nm at which the aerosol/cloud amount is scaled.\n", tau_wvl);
  }

  /* add the required representative wavelengths */
  for (i_wvl = 0; i_wvl < nwvl; i_wvl++) {
    if (wvl_active[i_wvl] == 1) {
      output->wl.lambda_r[n_active] = wvl[i_wvl];
      output->wl.iwvl_in_reptran_file_r[n_active] = i_wvl;
      output->wl.extra_reptran_r[n_active] = extra[i_wvl];
      index_in_lambda_r[i_wvl] = n_active;
      n_active++;
    }
    else
      index_in_lambda_r[i_wvl] = -1;
  }

  /* if tau_wvl needs to be added after the representative wavelengths */
  if (add_tau_wvl_to_lambda_r == 1 && wvl_max_tmp < tau_wvl) {
    output->wl.lambda_r[n_active] = tau_wvl;
    output->wl.iwvl_in_reptran_file_r[n_active] = -1;
    output->wl.extra_reptran_r[n_active] = 0;
    n_active++;
    if (input.verbose)
      fprintf (stderr, " Added the hidden wavelength %f nm at which the aerosol amount is scaled.\n", tau_wvl);
  }

  /* calculate wavenumbers according to given bandwidth */
  if (output->bandwidth_unit == UNIT_PER_CM_1) {
    for (i_wvl = 0; i_wvl < n_active; i_wvl++) {
      output->wl.wvnmlo_r[i_wvl] = 1.0E7 / output->wl.lambda_r[i_wvl] - input.bandwidth / 2.0;
      output->wl.wvnmhi_r[i_wvl] = output->wl.wvnmlo_r[i_wvl] + input.bandwidth;
    }
  }
  else if (output->bandwidth_unit == UNIT_PER_NM) {
    for (i_wvl = 0; i_wvl < n_active; i_wvl++) {
      output->wl.wvnmlo_r[i_wvl] = 1.0E7 / (output->wl.lambda_r[i_wvl] + input.bandwidth / 2.0);
      output->wl.wvnmhi_r[i_wvl] = 1.0E7 / (output->wl.lambda_r[i_wvl] - input.bandwidth / 2.0);
    }
  }
  else {
    fprintf (stderr, "Error, unsupported bandwidth_unit %d in while setting up rte_wavelength_grid.\n", output->bandwidth_unit);
    return -1;
  }

  /* change reptran_band from pointing to wvl (netcdf variable) to pointing to lambda_r (internal grid) */
  for (i_band = 0; i_band < nbands; i_band++)
    for (i = 0; i < output->wl.nlambda_in_reptran_band[i_band]; i++)
      if (output->wl.reptran_band[i_band][i] > -1)
        output->wl.reptran_band[i_band][i] = index_in_lambda_r[output->wl.reptran_band[i_band][i] - 1];

  if (input.verbose)
    fprintf (stderr, " %d wavelengths set by set_rte_wl_grid_reptran().\n", output->wl.nlambda_r);

  free (wvl);
  free (wvl_active);
  free (extra);
  free (index_in_lambda_r);

  return 0;
#else
  fprintf (stderr, " ***********************************************************************\n");
  fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n");
  fprintf (stderr, " * use any netCDF option. Please get netcdf and rebuild. *\n");
  fprintf (stderr, " ***********************************************************************\n");
  return -1;
#endif
}

/**************************************************************/
/* Convolve the spectrum with a user-defined slit function */
/**************************************************************/
int convolve (input_struct input, output_struct* output)
{
  int is = 0, js = 0, ks = 0, iv = 0, iu = 0, ip = 0, ic = 0, j = 0, lev = 0;
  int status = 0;
  double* x_data = NULL;
  int n_slit = 0;
  double *x_slit = NULL, *y_slit = NULL;
  int nstokes = 0;
  nstokes = input.rte.mc.nstokes;
  /* read slit function from file */
  status = read_2c_file (input.filename[FN_SLITFUNCTION], &x_slit, &y_slit, &n_slit);
  if (status != 0) {
    fprintf (stderr, "Error %d reading file %s\n", status, input.filename[FN_SLITFUNCTION]);
    return status;
  }
  x_data = calloc (output->wl.nlambda_h, sizeof (double));
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    x_data[iv] = (double)output->wl.lambda_h[iv];
  if (input.rte.solver == SOLVER_POLRADTRAN) {
    for (lev = 0; lev < output->atm.nzout; lev++) {
      /* Convolve up_flux and down_flux*/
      for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
        status = cnvlv
(x_data, output->up_flux[lev][is], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting up_flux\n", status); return status; } status = cnvlv (x_data, output->down_flux[lev][is], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting down_flux\n", status); return status; } /* Convolve up_rad and down_rad*/ for (j = 0; j < input.rte.nphi; j++) { for (iu = 0; iu < input.rte.numu; iu++) { status = cnvlv (x_data, output->down_rad[lev][j][iu][is], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting down_rad\n", status); return status; } status = cnvlv (x_data, output->up_rad[lev][j][iu][is], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting up_rad\n", status); return status; } } } } } } else { /* convolve albmed and trnmed */ /* albmed */ for (iu = 0; iu < input.rte.numu; iu++) { status = cnvlv (x_data, output->albmed[iu], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting albmed\n", status); return status; } } /* trnmed */ for (iu = 0; iu < input.rte.numu; iu++) { status = cnvlv (x_data, output->trnmed[iu], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting trnmed\n", status); return status; } } for (lev = 0; lev < output->atm.nzout; lev++) { /* Convolve rfldir, rfldn, flup and uavg for all cases */ /* rfldir */ status = cnvlv (x_data, output->rfldir[lev], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting rfldir\n", status); return status; } /* rfldn */ status = cnvlv (x_data, output->rfldn[lev], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting rfldn\n", status); return status; } /* flup */ status = cnvlv (x_data, 
output->flup[lev], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting flup\n", status); return status; } /* uavg */ status = cnvlv (x_data, output->uavg[lev], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting uavg\n", status); return status; } /* 3D fields */ if (output->mc.sample.passback3D) for (is = output->islower; is <= output->isupper; is += output->isstep) for (js = output->jslower; js <= output->jsupper; js += output->jsstep) { /* rfldir3d */ status = cnvlv (x_data, output->rfldir3d[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting rfldir3d\n", status); return status; } /* rfldn3d */ status = cnvlv (x_data, output->rfldn3d[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting rfldn3d\n", status); return status; } /* flup3d */ status = cnvlv (x_data, output->flup3d[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting flup3d\n", status); return status; } /* uavgso3d */ status = cnvlv (x_data, output->uavgso3d[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting uavgso3d\n", status); return status; } /* uavgdn3d */ status = cnvlv (x_data, output->uavgdn3d[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting uavgdn3d\n", status); return status; } /* uavgup3d */ status = cnvlv (x_data, output->uavgup3d[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting uavgup3d\n", status); return status; } for (ic = 0; ic < output->mc.alis.Nc; ic++) { /* fl3d_is */ status = cnvlv (x_data, output->fl3d_is[lev][is][js][ic], output->wl.nlambda_h, x_slit, 
y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting fl3d_is\n", status); return status; } } for (ip = 0; ip < nstokes; ip++) { for (ic = 0; ic < output->mc.alis.Nc; ic++) { /* radiance3d */ status = cnvlv (x_data, output->radiance3d[lev][is][js][ip][ic], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting radiance3d\n", status); return status; } } } /* absback3d */ if (input.rte.mc.backward.absorption) { status = cnvlv (x_data, output->absback3d[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting absback3d\n", status); return status; } } /* variances */ if (input.rte.mc.std) { /* rfldir3d_var */ status = cnvlv (x_data, output->rfldir3d_var[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 1); if (status != 0) { fprintf (stderr, "Error %d convoluting rfldir3d_var\n", status); return status; } /* rfldn3d_var */ status = cnvlv (x_data, output->rfldn3d_var[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 1); if (status != 0) { fprintf (stderr, "Error %d convoluting rfldn3d_var\n", status); return status; } /* flup3d_var */ status = cnvlv (x_data, output->flup3d_var[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 1); if (status != 0) { fprintf (stderr, "Error %d convoluting flup3d_var\n", status); return status; } /* uavgso3d_var */ status = cnvlv (x_data, output->uavgso3d_var[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 1); if (status != 0) { fprintf (stderr, "Error %d convoluting uavgso3d_var\n", status); return status; } /* uavgdn3d_var */ status = cnvlv (x_data, output->uavgdn3d_var[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 1); if (status != 0) { fprintf (stderr, "Error %d convoluting uavgdn3d_var\n", status); return status; } /* uavgup3d_var */ status = cnvlv (x_data, output->uavgup3d_var[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 1); 
if (status != 0) { fprintf (stderr, "Error %d convoluting uavgup3d_var\n", status); return status; } for (ip = 0; ip < nstokes; ip++) { /* radiance3d_var */ status = cnvlv (x_data, output->radiance3d_var[lev][is][js][ip], output->wl.nlambda_h, x_slit, y_slit, n_slit, 1); if (status != 0) { fprintf (stderr, "Error %d convoluting radiance3d_var\n", status); return status; } } /* absback3d_var */ if (input.rte.mc.backward.absorption) { status = cnvlv (x_data, output->absback3d_var[lev][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 1); if (status != 0) { fprintf (stderr, "Error %d convoluting absback3d_var\n", status); return status; } } } } if (input.rte.solver != SOLVER_FTWOSTR) { /* Convolve uavgso, uavgdn, uavgup */ /* uavgso */ status = cnvlv (x_data, output->uavgso[lev], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting uavgso\n", status); return status; } /* uavgdn */ status = cnvlv (x_data, output->uavgdn[lev], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting uavgdn\n", status); return status; } /* uavgup */ status = cnvlv (x_data, output->uavgup[lev], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting uavgup\n", status); return status; } /* Convolve u0u */ for (iu = 0; iu < input.rte.numu; iu++) { status = cnvlv (x_data, output->u0u[lev][iu], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting u0u\n", status); return status; } } if (output->print_phi > 0) { /* Convolve uu */ for (j = 0; j < input.rte.nphi; j++) { for (iu = 0; iu < input.rte.numu; iu++) { status = cnvlv (x_data, output->uu[lev][j][iu], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting u0u\n", status); return status; } } } } } } /* 3D absorption */ if (output->mc.sample.passback3D && 
input.rte.mc.absorption != MCFORWARD_ABS_NONE) for (ks = 0; ks < output->atm.Nzcld; ks++) if (output->atm.threed[ks]) for (is = 0; is < output->atm.Nxcld; is++) for (js = 0; js < output->atm.Nycld; js++) { status = cnvlv (x_data, output->abs3d[ks][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (input.rte.mc.std) /* **CK added for forward mc_std */ status = cnvlv (x_data, output->abs3d_var[ks][is][js], output->wl.nlambda_h, x_slit, y_slit, n_slit, 0); if (status != 0) { fprintf (stderr, "Error %d convoluting abs3d\n", status); return status; } } } free (x_slit); free (y_slit); free (x_data); return 0; } /**************************************************************/ /* Convolve a spectrum with a slit function. */ /* Internal function, used by convolve(). */ /**************************************************************/ static int cnvlv (double* x_spec, float* y_spec, int n_spec, double* x_slit, double* y_slit, int n_slit, int std) { int iv = 0, status = 0; double* y_data = calloc (n_spec, sizeof (double)); double* tmp = NULL; for (iv = 0; iv < n_spec; iv++) y_data[iv] = (double)y_spec[iv]; status = int_convolute (x_spec, y_data, n_spec, x_slit, y_slit, n_slit, &tmp, std); if (status != 0) return status; for (iv = 0; iv < n_spec; iv++) y_spec[iv] = (float)tmp[iv]; free (y_data); free (tmp); return 0; } /**************************************************************/ /* Interpolate the high resolution spectrum *_h */ /* to a user-defined wavelength grid. 
*_s */
/**************************************************************/

/* Interpolate one high-resolution spectrum y (defined on the           */
/* wl.lambda_h grid, wl.nlambda_h points) in place onto the user grid   */
/* wl.lambda_s using arb_wvn(); print an error message naming the       */
/* quantity on failure.  Returns the status of arb_wvn().               */
static int spl_interp_inplace (output_struct *output, float *y, int linear, const char *quantity)
{
  int status = arb_wvn (output->wl.nlambda_h, output->wl.lambda_h, y,
                        output->wl.nlambda_s, output->wl.lambda_s, y, linear, 0);
  if (status != 0)
    fprintf (stderr, "Error %d interpolating %s\n", status, quantity);
  return status;
}

/* Interpolate all spectral results from the high-resolution grid       */
/* wl.lambda_h onto the user grid wl.lambda_s (read from the "spline"   */
/* file or built from spline_lambda_0/1/step).  On exit, wl.lambda_h    */
/* is replaced by the user grid.  Returns 0 on success.                 */
int spline_interpolate (input_struct input, output_struct *output)
{
  int is = 0, js = 0, ks = 0, iu = 0, iv = 0, ip = 0, ic = 0, j = 0, lev = 0;
  int status = 0;
  double *x_user = NULL;
  int linear = 0;

  if (strlen (input.filename[FN_SPLINE]) > 0) {
    /* read file with user x values */
    status = read_1c_file (input.filename[FN_SPLINE], &x_user, &output->wl.nlambda_s);
    if (status != 0) {
      fprintf (stderr, "Error %d reading spline_interpolate file %s (line %d, function %s in %s)\n", status, input.filename[FN_SPLINE], __LINE__, __func__, __FILE__);
      return (status);
    }

    output->wl.lambda_s = (float *) calloc (output->wl.nlambda_s, sizeof (float));
    for (iv = 0; iv < output->wl.nlambda_s; iv++)
      output->wl.lambda_s[iv] = (float) x_user[iv];

    /* BUGFIX: x_user (allocated by read_1c_file) was leaked before */
    free (x_user);
    x_user = NULL;

    /* Check wavelength ranges */
    status = 0;
    if (output->wl.start > output->wl.lambda_s[0]) {
      fprintf (stderr, "Error, spline wavelength %f is smaller than wvn %f\n", output->wl.lambda_s[0], output->wl.start);
      status--;
    }
    if (output->wl.end < output->wl.lambda_s[output->wl.nlambda_s - 1]) {
      fprintf (stderr, "Error, spline wavelength %f is greater than wvn %f\n", output->wl.lambda_s[output->wl.nlambda_s - 1], output->wl.end);
      status--;
    }
    if (status != 0)
      return status;
  }
  else {
    /* build an equidistant grid from spline_lambda_0 .. spline_lambda_1 */
    output->wl.nlambda_s = (int) ((input.spline_lambda_1 - input.spline_lambda_0) / input.spline_lambda_step + 1);
    output->wl.lambda_s  = (float *) calloc (output->wl.nlambda_s, sizeof (float));
    for (iv = 0; iv < output->wl.nlambda_s; iv++)
      output->wl.lambda_s[iv] = input.spline_lambda_0 + (float) iv * input.spline_lambda_step;
  }

  /* Allocation of the solar zenith angle */
  if ((output->sza_s = calloc (output->wl.nlambda_s, sizeof (float))) == NULL) {
    fprintf (stderr, "Error: Allocation of sza_s in spline_interpolate (ancillary.c)\n");
    return -1;
  }

  /* Interpolation of the solar zenith angle to output wavelength grid */
  /* (the only interpolation here that is not in place: sza_h -> sza_s) */
  status = arb_wvn (output->wl.nlambda_h, output->wl.lambda_h, output->sza_h,
                    output->wl.nlambda_s, output->wl.lambda_s, output->sza_s, linear, 0);
  if (status != 0) {
    fprintf (stderr, "Error %d returned by arb_wvn()\n", status);
    return status;
  }

  /* copy to sza_h for print_output */
  free (output->sza_h);
  output->sza_h = (float *) calloc (output->wl.nlambda_s, sizeof (float));
  for (iv = 0; iv < output->wl.nlambda_s; iv++)
    output->sza_h[iv] = (float) output->sza_s[iv];

  /* Interpolation to output wavelength grid */
  if (input.rte.solver == SOLVER_POLRADTRAN) {
    for (lev = 0; lev < output->atm.nzout; lev++) {
      /* Spline interpolate up_flux and down_flux */
      for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
        if ((status = spl_interp_inplace (output, output->up_flux[lev][is], linear, "up_flux")) != 0)
          return status;
        if ((status = spl_interp_inplace (output, output->down_flux[lev][is], linear, "down_flux")) != 0)
          return status;

        /* Spline interpolate up_rad and down_rad */
        for (j = 0; j < input.rte.nphi; j++)
          for (iu = 0; iu < input.rte.numu; iu++) {
            if ((status = spl_interp_inplace (output, output->down_rad[lev][j][iu][is], linear, "down_rad")) != 0)
              return status;
            if ((status = spl_interp_inplace (output, output->up_rad[lev][j][iu][is], linear, "up_rad")) != 0)
              return status;
          }
      }

      /* Spline interpolate heating rate */
      /* BUGFIX: these statuses were assigned but silently discarded */
      if (input.heating != HEAT_NONE) {
        if ((status = spl_interp_inplace (output, output->heat[lev], linear, "heat")) != 0)
          return status;
        if ((status = spl_interp_inplace (output, output->emis[lev], linear, "emis")) != 0)
          return status;
        if ((status = spl_interp_inplace (output, output->w_zout[lev], linear, "w_zout")) != 0)
          return status;
      }
    }
  }
  else {
    /* albmed */
    for (iu = 0; iu < input.rte.numu; iu++)
      if ((status = spl_interp_inplace (output, output->albmed[iu], linear, "albmed")) != 0)
        return status;

    /* trnmed */
    for (iu = 0; iu < input.rte.numu; iu++)
      if ((status = spl_interp_inplace (output, output->trnmed[iu], linear, "trnmed")) != 0)
        return status;

    for (lev = 0; lev < output->atm.nzout; lev++) {
      /* Spline interpolate rfldir, rfldn, flup, uavg and heat for all cases */
      if ((status = spl_interp_inplace (output, output->rfldir[lev], linear, "rfldir")) != 0)
        return status;
      if ((status = spl_interp_inplace (output, output->rfldn[lev], linear, "rfldn")) != 0)
        return status;
      if ((status = spl_interp_inplace (output, output->flup[lev], linear, "flup")) != 0)
        return status;
      if ((status = spl_interp_inplace (output, output->uavg[lev], linear, "uavg")) != 0)
        return status;

      /* heating rates */
      /* BUGFIX: these statuses were assigned but silently discarded */
      if (input.heating != HEAT_NONE) {
        if ((status = spl_interp_inplace (output, output->heat[lev], linear, "heat")) != 0)
          return status;
        if ((status = spl_interp_inplace (output, output->emis[lev], linear, "emis")) != 0)
          return status;
        if ((status = spl_interp_inplace (output, output->w_zout[lev], linear, "w_zout")) != 0)
          return status;
      }

      /* 3D fields */
      if (output->mc.sample.passback3D)
        for (is = output->islower; is <= output->isupper; is += output->isstep)
          for (js = output->jslower; js <= output->jsupper; js += output->jsstep) {
            if ((status = spl_interp_inplace (output, output->rfldir3d[lev][is][js], linear, "rfldir3d")) != 0)
              return status;
            if ((status = spl_interp_inplace (output, output->rfldn3d[lev][is][js], linear, "rfldn3d")) != 0)
              return status;
            if ((status = spl_interp_inplace (output, output->flup3d[lev][is][js], linear, "flup3d")) != 0)
              return status;
            if ((status = spl_interp_inplace (output, output->uavgso3d[lev][is][js], linear, "uavgso3d")) != 0)
              return status;
            if ((status = spl_interp_inplace (output, output->uavgdn3d[lev][is][js], linear, "uavgdn3d")) != 0)
              return status;
            if ((status = spl_interp_inplace (output, output->uavgup3d[lev][is][js], linear, "uavgup3d")) != 0)
              return status;

            if (input.rte.mc.concentration_is || input.rte.mc.spectral_is)
              for (ic = 0; ic < output->mc.alis.Nc; ic++)
                if ((status = spl_interp_inplace (output, output->fl3d_is[lev][is][js][ic], linear, "fl3d_is")) != 0)
                  return status;

            for (ip = 0; ip < input.rte.mc.nstokes; ip++)
              for (ic = 0; ic < output->mc.alis.Nc; ic++)
                if ((status = spl_interp_inplace (output, output->radiance3d[lev][is][js][ip][ic], linear, "radiance3d")) != 0)
                  return status;

            /* absback3d */
            if (input.rte.mc.backward.absorption)
              if ((status = spl_interp_inplace (output, output->absback3d[lev][is][js], linear, "absback3d")) != 0)
                return status;

            /* variances */
            if (input.rte.mc.std) {
              if ((status = spl_interp_inplace (output, output->rfldir3d_var[lev][is][js], linear, "rfldir3d_var")) != 0)
                return status;
              if ((status = spl_interp_inplace (output, output->rfldn3d_var[lev][is][js], linear, "rfldn3d_var")) != 0)
                return status;
              if ((status = spl_interp_inplace (output, output->flup3d_var[lev][is][js], linear, "flup3d_var")) != 0)
                return status;
              if ((status = spl_interp_inplace (output, output->uavgso3d_var[lev][is][js], linear, "uavgso3d_var")) != 0)
                return status;
              if ((status = spl_interp_inplace (output, output->uavgdn3d_var[lev][is][js], linear, "uavgdn3d_var")) != 0)
                return status;
              if ((status = spl_interp_inplace (output, output->uavgup3d_var[lev][is][js], linear, "uavgup3d_var")) != 0)
                return status;

              for (ip = 0; ip < input.rte.mc.nstokes; ip++)
                if ((status = spl_interp_inplace (output, output->radiance3d_var[lev][is][js][ip], linear, "radiance3d_var")) != 0)
                  return status;

              /* absback3d_var */
              if (input.rte.mc.backward.absorption)
                if ((status = spl_interp_inplace (output, output->absback3d_var[lev][is][js], linear, "absback3d_var")) != 0)
                  return status;
            }
          }

      if (input.rte.solver != SOLVER_FTWOSTR) {
        /* Spline interpolate uavgso, uavgdn, uavgup */
        if ((status = spl_interp_inplace (output, output->uavgso[lev], linear, "uavgso")) != 0)
          return status;
        if ((status = spl_interp_inplace (output, output->uavgdn[lev], linear, "uavgdn")) != 0)
          return status;
        if ((status = spl_interp_inplace (output, output->uavgup[lev], linear, "uavgup")) != 0)
          return status;

        /* Spline interpolate u0u */
        for (iu = 0; iu < input.rte.numu; iu++)
          if ((status = spl_interp_inplace (output, output->u0u[lev][iu], linear, "u0u")) != 0)
            return status;

        if (output->print_phi > 0)
          /* Spline interpolate uu */
          for (j = 0; j < input.rte.nphi; j++)
            for (iu = 0; iu < input.rte.numu; iu++)
              if ((status = spl_interp_inplace (output, output->uu[lev][j][iu], linear, "uu")) != 0)
                return status;
      }
    }

    if (output->mc.sample.passback3D && input.rte.mc.absorption != MCFORWARD_ABS_NONE)
      for (ks = 0; ks < output->atm.Nzcld; ks++)
        if (output->atm.threed[ks])
          for (is = 0; is < output->atm.Nxcld; is++)
            for (js = 0; js < output->atm.Nycld; js++) {
              /* BUGFIX: the abs3d status was overwritten by the abs3d_var */
              /* interpolation before it was checked; check each separately */
              if ((status = spl_interp_inplace (output, output->abs3d[ks][is][js], linear, "abs3d")) != 0)
                return status;
              if (input.rte.mc.std)  /* **CK added for forward mc_std */
                if ((status = spl_interp_inplace (output, output->abs3d_var[ks][is][js], linear, "abs3d_var")) != 0)
                  return status;
            }
  }

  /* from here on the user grid is the "high resolution" grid */
  free (output->wl.lambda_h);
  output->wl.nlambda_h = output->wl.nlambda_s;
  output->wl.lambda_h  = (float *) calloc (output->wl.nlambda_s, sizeof (float));
  for (iv = 0; iv < output->wl.nlambda_s; iv++)
    output->wl.lambda_h[iv] = (float) output->wl.lambda_s[iv];

  return status;
}

/****************************************************************************************************************/
/* Transfer radiative transfer results from radiative transfer wavelength grid to transmittance wavelength grid */
/****************************************************************************************************************/

int internal_to_transmittance_grid (input_struct input, output_struct* output)
{
  int status = 0;
  int lu = 0, is = 0, js = 0, ks = 0, iu = 0, ip = 0, ic = 0, j = 0;
  int isp = 0, i = 0, lc = 0;
  double ffactor = 0.0, rfactor = 0.0, hfactor = 0.0;
  int iv = 0;
  double unit_factor;
  char function_name[] = "internal_to_transmittance_grid";
  char file_name[] = "ancillary.c";

  /* Pointers to results are just copied, if no representative wavelengths are used */
  if (output->wl.use_reptran == 0) {

    if (input.rte.solver != SOLVER_POLRADTRAN) {
      output->albmed_t = 
output->albmed_r; output->trnmed_t = output->trnmed_r; } if (input.rte.solver == SOLVER_POLRADTRAN) { output->up_flux_t = output->up_flux_r; output->down_flux_t = output->down_flux_r; output->down_rad_t = output->down_rad_r; output->up_rad_t = output->up_rad_r; if (input.heating != HEAT_NONE) { output->heat_t = output->heat_r; output->emis_t = output->emis_r; output->w_zout_t = output->w_zout_r; } } else { output->flup_t = output->flup_r; output->rfldir_t = output->rfldir_r; output->rfldn_t = output->rfldn_r; output->uavg_t = output->uavg_r; output->uavgdn_t = output->uavgdn_r; output->uavgso_t = output->uavgso_r; output->uavgup_t = output->uavgup_r; output->sslidar_nphot_t = output->sslidar_nphot_r; output->sslidar_nphot_q_t = output->sslidar_nphot_q_r; output->sslidar_ratio_t = output->sslidar_ratio_r; if (input.heating != HEAT_NONE) { output->heat_t = output->heat_r; output->emis_t = output->emis_r; output->w_zout_t = output->w_zout_r; } /* radiances */ output->u0u_t = output->u0u_r; output->uu_t = output->uu_r; /* 3D fields */ if (output->mc.sample.passback3D) { output->rfldir3d_t = output->rfldir3d_r; output->rfldn3d_t = output->rfldn3d_r; output->flup3d_t = output->flup3d_r; output->uavgso3d_t = output->uavgso3d_r; output->uavgdn3d_t = output->uavgdn3d_r; output->uavgup3d_t = output->uavgup3d_r; output->radiance3d_t = output->radiance3d_r; if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) output->fl3d_is_t = output->fl3d_is_r; if (input.rte.mc.jacobian[DIM_1D]) output->jacobian_t = output->jacobian_r; if (input.rte.mc.backward.absorption) output->absback3d_t = output->absback3d_r; /* variances */ if (input.rte.mc.std) { output->rfldir3d_var_t = output->rfldir3d_var_r; output->rfldn3d_var_t = output->rfldn3d_var_r; output->flup3d_var_t = output->flup3d_var_r; output->uavgso3d_var_t = output->uavgso3d_var_r; output->uavgdn3d_var_t = output->uavgdn3d_var_r; output->uavgup3d_var_t = output->uavgup3d_var_r; output->radiance3d_var_t = 
output->radiance3d_var_r; if (input.rte.mc.backward.absorption) output->absback3d_var_t = output->absback3d_var_r; } } } /* absorption is defined on the 3D caoth grid, not on the user-defined grid */ if (output->mc.sample.passback3D && input.rte.mc.absorption != MCFORWARD_ABS_NONE) { /* **CK added bracket */ output->abs3d_t = output->abs3d_r; if (input.rte.mc.std) /* **CK added for forward mc_std */ output->abs3d_var_t = output->abs3d_var_r; } /* copy solar zenith angle */ output->atm.sza_t = output->atm.sza_r; output->triangle_results_t = output->triangle_results_r; } else { /* if representative wavelengths are used, the results at the radiative transfer wavelength grid need to be weighted */ output->atm.sza_t = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->atm.sza_r, output->atm.sza_t); if (input.rte.solver != SOLVER_POLRADTRAN) { output->albmed_t = calloc (input.rte.numu, sizeof (float*)); output->trnmed_t = calloc (input.rte.numu, sizeof (float*)); for (iu = 0; iu < input.rte.numu; iu++) { output->albmed_t[iu] = calloc (output->wl.nlambda_t, sizeof (float)); output->trnmed_t[iu] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->albmed_r[iu], output->albmed_t[iu]); status += weighting_reptran (&output->wl, 0, output->trnmed_r[iu], output->trnmed_t[iu]); } } if (input.heating != HEAT_NONE) { output->heat_t = calloc (output->atm.nzout, sizeof (float*)); output->emis_t = calloc (output->atm.nzout, sizeof (float*)); output->w_zout_t = calloc (output->atm.nzout, sizeof (float*)); } if (input.rte.solver == SOLVER_POLRADTRAN) { output->down_flux_t = calloc (output->atm.nzout, sizeof (float**)); output->up_flux_t = calloc (output->atm.nzout, sizeof (float**)); } else { output->flup_t = calloc (output->atm.nzout, sizeof (float*)); output->rfldir_t = calloc (output->atm.nzout, sizeof (float*)); output->rfldn_t = calloc (output->atm.nzout, sizeof (float*)); 
output->uavg_t = calloc (output->atm.nzout, sizeof (float*)); output->uavgdn_t = calloc (output->atm.nzout, sizeof (float*)); output->uavgso_t = calloc (output->atm.nzout, sizeof (float*)); output->uavgup_t = calloc (output->atm.nzout, sizeof (float*)); output->sslidar_nphot_t = calloc (output->atm.nzout, sizeof (float*)); output->sslidar_nphot_q_t = calloc (output->atm.nzout, sizeof (float*)); output->sslidar_ratio_t = calloc (output->atm.nzout, sizeof (float*)); if (output->mc.sample.passback3D) { output->rfldir3d_t = calloc (output->atm.nzout, sizeof (float***)); output->rfldn3d_t = calloc (output->atm.nzout, sizeof (float***)); output->flup3d_t = calloc (output->atm.nzout, sizeof (float***)); output->uavgso3d_t = calloc (output->atm.nzout, sizeof (float***)); output->uavgdn3d_t = calloc (output->atm.nzout, sizeof (float***)); output->uavgup3d_t = calloc (output->atm.nzout, sizeof (float***)); output->radiance3d_t = calloc (output->atm.nzout, sizeof (float****)); if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) { if ((status = ASCII_calloc_float_5D (&output->fl3d_is_t, output->atm.nzout, output->mc.sample.Nx, output->mc.sample.Ny, output->mc.alis.Nc, output->wl.nlambda_t)) != 0) return status; } if (input.rte.mc.jacobian[DIM_1D]) { if ((status = ASCII_calloc_float_7D (&output->jacobian_t, output->atm.nzout, 1, 1, input.n_caoth + 2, 2, output->atm.nlev - 1, output->wl.nlambda_t)) != 0) return status; } if (input.rte.mc.backward.absorption) output->absback3d_t = calloc (output->atm.nzout, sizeof (float***)); if (input.rte.mc.std) { output->rfldir3d_var_t = calloc (output->atm.nzout, sizeof (float***)); output->rfldn3d_var_t = calloc (output->atm.nzout, sizeof (float***)); output->flup3d_var_t = calloc (output->atm.nzout, sizeof (float***)); output->uavgso3d_var_t = calloc (output->atm.nzout, sizeof (float***)); output->uavgdn3d_var_t = calloc (output->atm.nzout, sizeof (float***)); output->uavgup3d_var_t = calloc (output->atm.nzout, sizeof 
(float***)); output->radiance3d_var_t = calloc (output->atm.nzout, sizeof (float****)); if (input.rte.mc.backward.absorption) output->absback3d_var_t = calloc (output->atm.nzout, sizeof (float***)); } } } if (input.rte.solver == SOLVER_POLRADTRAN) { output->down_rad_t = calloc (output->atm.nzout, sizeof (float****)); output->up_rad_t = calloc (output->atm.nzout, sizeof (float****)); } else { output->u0u_t = calloc (output->atm.nzout, sizeof (float**)); output->uu_t = calloc (output->atm.nzout, sizeof (float***)); } for (lu = 0; lu < output->atm.nzout; lu++) { if (input.heating != HEAT_NONE) { output->heat_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->emis_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->w_zout_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->heat_r[lu], output->heat_t[lu]); status += weighting_reptran (&output->wl, 0, output->emis_r[lu], output->emis_t[lu]); status += weighting_reptran (&output->wl, 0, output->w_zout_r[lu], output->w_zout_t[lu]); } if (input.rte.solver == SOLVER_POLRADTRAN) { output->down_flux_t[lu] = calloc (input.rte.polradtran[POLRADTRAN_NSTOKES], sizeof (float*)); output->up_flux_t[lu] = calloc (input.rte.polradtran[POLRADTRAN_NSTOKES], sizeof (float*)); for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) { output->down_flux_t[lu][is] = calloc (output->wl.nlambda_t, sizeof (float)); output->up_flux_t[lu][is] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->down_flux_r[lu][is], output->down_flux_t[lu][is]); status += weighting_reptran (&output->wl, 0, output->up_flux_r[lu][is], output->up_flux_t[lu][is]); } } else { output->flup_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->rfldir_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->rfldn_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->uavg_t[lu] = calloc (output->wl.nlambda_t, 
sizeof (float)); output->uavgdn_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->uavgso_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->uavgup_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->sslidar_nphot_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->sslidar_nphot_q_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); output->sslidar_ratio_t[lu] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->flup_r[lu], output->flup_t[lu]); status += weighting_reptran (&output->wl, 0, output->rfldir_r[lu], output->rfldir_t[lu]); status += weighting_reptran (&output->wl, 0, output->rfldn_r[lu], output->rfldn_t[lu]); status += weighting_reptran (&output->wl, 0, output->uavg_r[lu], output->uavg_t[lu]); status += weighting_reptran (&output->wl, 0, output->uavgdn_r[lu], output->uavgdn_t[lu]); status += weighting_reptran (&output->wl, 0, output->uavgso_r[lu], output->uavgso_t[lu]); status += weighting_reptran (&output->wl, 0, output->uavgup_r[lu], output->uavgup_t[lu]); status += weighting_reptran (&output->wl, 0, output->sslidar_nphot_r[lu], output->sslidar_nphot_t[lu]); status += weighting_reptran (&output->wl, 0, output->sslidar_nphot_q_r[lu], output->sslidar_nphot_q_t[lu]); status += weighting_reptran (&output->wl, 0, output->sslidar_ratio_r[lu], output->sslidar_ratio_t[lu]); /* 3D fields */ if (output->mc.sample.passback3D) { output->rfldir3d_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->rfldn3d_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->flup3d_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->uavgso3d_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->uavgdn3d_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->uavgup3d_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->radiance3d_t[lu] = calloc (output->mc.sample.Nx, sizeof (float****)); if 
(input.rte.mc.backward.absorption) output->absback3d_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); if (input.rte.mc.std) { output->rfldir3d_var_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->rfldn3d_var_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->flup3d_var_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->uavgso3d_var_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->uavgdn3d_var_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->uavgup3d_var_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); output->radiance3d_var_t[lu] = calloc (output->mc.sample.Nx, sizeof (float***)); if (input.rte.mc.backward.absorption) output->absback3d_var_t[lu] = calloc (output->mc.sample.Nx, sizeof (float**)); } for (is = output->islower; is <= output->isupper; is += output->isstep) { output->rfldir3d_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->rfldn3d_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->flup3d_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->uavgso3d_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->uavgdn3d_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->uavgup3d_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->radiance3d_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float***)); if (input.rte.mc.backward.absorption) output->absback3d_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); if (input.rte.mc.std) { output->rfldir3d_var_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->rfldn3d_var_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->flup3d_var_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->uavgso3d_var_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->uavgdn3d_var_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); output->uavgup3d_var_t[lu][is] = calloc 
(output->mc.sample.Ny, sizeof (float*)); output->radiance3d_var_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float**)); if (input.rte.mc.backward.absorption) output->absback3d_var_t[lu][is] = calloc (output->mc.sample.Ny, sizeof (float*)); } for (js = output->jslower; js <= output->jsupper; js++) { output->rfldir3d_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->rfldn3d_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->flup3d_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->uavgso3d_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->uavgdn3d_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->uavgup3d_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->rfldir3d_r[lu][is][js], output->rfldir3d_t[lu][is][js]); status += weighting_reptran (&output->wl, 0, output->rfldn3d_r[lu][is][js], output->rfldn3d_t[lu][is][js]); status += weighting_reptran (&output->wl, 0, output->flup3d_r[lu][is][js], output->flup3d_t[lu][is][js]); status += weighting_reptran (&output->wl, 0, output->uavgso3d_r[lu][is][js], output->uavgso3d_t[lu][is][js]); status += weighting_reptran (&output->wl, 0, output->uavgdn3d_r[lu][is][js], output->uavgdn3d_t[lu][is][js]); status += weighting_reptran (&output->wl, 0, output->uavgup3d_r[lu][is][js], output->uavgup3d_t[lu][is][js]); output->radiance3d_t[lu][is][js] = calloc (input.rte.mc.nstokes, sizeof (float**)); if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) { for (ic = 0; ic < output->mc.alis.Nc; ic++) { status += weighting_reptran (&output->wl, 0, output->fl3d_is_r[lu][is][js][ic], output->fl3d_is_t[lu][is][js][ic]); } } for (ip = 0; ip < input.rte.mc.nstokes; ip++) { output->radiance3d_t[lu][is][js][ip] = calloc (output->mc.alis.Nc, sizeof (float*)); for (ic = 0; ic < output->mc.alis.Nc; ic++) { output->radiance3d_t[lu][is][js][ip][ic] = calloc 
(output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->radiance3d_r[lu][is][js][ip][ic], output->radiance3d_t[lu][is][js][ip][ic]); } } if (input.rte.mc.jacobian[DIM_1D]) { for (isp = 0; isp < input.n_caoth + 2; isp++) { for (i = 0; i < 2; i++) { for (lc = 0; lc < output->atm.nlev - 1; lc++) { status += weighting_reptran (&output->wl, 0, output->jacobian_r[lu][is][js][isp][i][lc], output->jacobian_t[lu][is][js][isp][i][lc]); } } } } if (input.rte.mc.backward.absorption) { output->absback3d_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->absback3d_r[lu][is][js], output->absback3d_t[lu][is][js]); } /* variances */ if (input.rte.mc.std) { output->rfldir3d_var_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->rfldn3d_var_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->flup3d_var_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->uavgso3d_var_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->uavgdn3d_var_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); output->uavgup3d_var_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 1, output->rfldir3d_var_r[lu][is][js], output->rfldir3d_var_t[lu][is][js]); status += weighting_reptran (&output->wl, 1, output->rfldn3d_var_r[lu][is][js], output->rfldn3d_var_t[lu][is][js]); status += weighting_reptran (&output->wl, 1, output->flup3d_var_r[lu][is][js], output->flup3d_var_t[lu][is][js]); status += weighting_reptran (&output->wl, 1, output->uavgso3d_var_r[lu][is][js], output->uavgso3d_var_t[lu][is][js]); status += weighting_reptran (&output->wl, 1, output->uavgdn3d_var_r[lu][is][js], output->uavgdn3d_var_t[lu][is][js]); status += weighting_reptran (&output->wl, 1, output->uavgup3d_var_r[lu][is][js], output->uavgup3d_var_t[lu][is][js]); output->radiance3d_var_t[lu][is][js] = calloc 
(input.rte.mc.nstokes, sizeof (float*)); for (ip = 0; ip < input.rte.mc.nstokes; ip++) { output->radiance3d_var_t[lu][is][js][ip] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 1, output->radiance3d_var_r[lu][is][js][ip], output->radiance3d_var_t[lu][is][js][ip]); } if (input.rte.mc.backward.absorption) { output->absback3d_var_t[lu][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 1, output->absback3d_var_r[lu][is][js], output->absback3d_var_t[lu][is][js]); } } } } } } if (input.rte.solver == SOLVER_POLRADTRAN) { output->down_rad_t[lu] = calloc (input.rte.nphi, sizeof (float***)); output->up_rad_t[lu] = calloc (input.rte.nphi, sizeof (float***)); for (j = 0; j < input.rte.nphi; j++) { output->down_rad_t[lu][j] = calloc (input.rte.numu, sizeof (float**)); output->up_rad_t[lu][j] = calloc (input.rte.numu, sizeof (float**)); for (iu = 0; iu < input.rte.numu; iu++) { output->down_rad_t[lu][j][iu] = calloc (input.rte.polradtran[POLRADTRAN_NSTOKES], sizeof (float*)); output->up_rad_t[lu][j][iu] = calloc (input.rte.polradtran[POLRADTRAN_NSTOKES], sizeof (float*)); for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) { output->down_rad_t[lu][j][iu][is] = calloc (output->wl.nlambda_t, sizeof (float)); output->up_rad_t[lu][j][iu][is] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->down_rad_r[lu][j][iu][is], output->down_rad_t[lu][j][iu][is]); status += weighting_reptran (&output->wl, 0, output->up_rad_r[lu][j][iu][is], output->up_rad_t[lu][j][iu][is]); } } } } else { output->u0u_t[lu] = calloc (input.rte.numu, sizeof (float*)); for (iu = 0; iu < input.rte.numu; iu++) { output->u0u_t[lu][iu] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->u0u_r[lu][iu], output->u0u_t[lu][iu]); } output->uu_t[lu] = calloc (input.rte.nphi, sizeof (float**)); for (j = 0; j < 
input.rte.nphi; j++) { output->uu_t[lu][j] = calloc (input.rte.numu, sizeof (float*)); for (iu = 0; iu < input.rte.numu; iu++) { output->uu_t[lu][j][iu] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->uu_r[lu][j][iu], output->uu_t[lu][j][iu]); } } } } /* end of loop over layers */ if (output->mc.sample.passback3D && input.rte.mc.absorption != MCFORWARD_ABS_NONE) { output->abs3d_t = calloc (output->atm.Nzcld, sizeof (float***)); for (ks = 0; ks < output->atm.Nzcld; ks++) { if (output->atm.threed[ks]) { output->abs3d_t[ks] = calloc (output->atm.Nxcld, sizeof (float**)); for (is = 0; is < output->atm.Nxcld; is++) { output->abs3d_t[ks][is] = calloc (output->atm.Nycld, sizeof (float*)); for (js = 0; js < output->atm.Nycld; js++) { output->abs3d_t[ks][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 0, output->abs3d_r[ks][is][js], output->abs3d_t[ks][is][js]); if (input.rte.mc.std) { /* **CK added for forward mc_std */ output->abs3d_var_t[ks][is][js] = calloc (output->wl.nlambda_t, sizeof (float)); status += weighting_reptran (&output->wl, 1, output->abs3d_var_r[ks][is][js], output->abs3d_var_t[ks][is][js]); } } } } } } { // copy (and weight) triangle_result_r -> triangle_results_t int ierr; ierr = init_spectral_triangular_surface_result_struct (output->wl.nlambda_t, output->mc.triangular_surface.N_triangles, &(output->triangle_results_t)); CHKERR (ierr); // because the triangle_result output does not have wavelength as leading dimension // we have this wrapper here to copy and weight triangle_result_r -> triangle_results_t for (size_t id = 0; id < output->mc.triangular_surface.N_triangles; ++id) { const size_t Nvar = 6; float var_r[Nvar][output->wl.nlambda_r]; float var_t[Nvar][output->wl.nlambda_t]; for (size_t iv_r = 0; iv_r < output->wl.nlambda_r; ++iv_r) { var_r[0][iv_r] = output->triangle_results_r[iv_r]->ndir[id]; var_r[1][iv_r] = 
output->triangle_results_r[iv_r]->ndn[id]; var_r[2][iv_r] = output->triangle_results_r[iv_r]->nup[id]; var_r[3][iv_r] = output->triangle_results_r[iv_r]->edir[id]; var_r[4][iv_r] = output->triangle_results_r[iv_r]->edn[id]; var_r[5][iv_r] = output->triangle_results_r[iv_r]->eup[id]; } for (size_t ivar = 0; ivar < Nvar; ++ivar) { const int ierr = weighting_reptran (&output->wl, 0, var_r[ivar], var_t[ivar]); CHKERR (ierr); } for (size_t iv_t = 0; iv_t < output->wl.nlambda_t; ++iv_t) { output->triangle_results_t[iv_t]->ndir[id] = var_t[0][iv_t]; output->triangle_results_t[iv_t]->ndn[id] = var_t[1][iv_t]; output->triangle_results_t[iv_t]->nup[id] = var_t[2][iv_t]; output->triangle_results_t[iv_t]->edir[id] = var_t[3][iv_t]; output->triangle_results_t[iv_t]->edn[id] = var_t[4][iv_t]; output->triangle_results_t[iv_t]->eup[id] = var_t[5][iv_t]; } } } /* scale output to user-requested output unit (when no representative wavelengths are used, this was already done in solve_rte()) */ for (iv = 0; iv < output->wl.nlambda_t; iv++) { if (input.source == SRC_THERMAL && output->spectrum_unit == UNIT_PER_CM_1) { switch (input.output_unit) { case UNIT_PER_CM_1: unit_factor = 1; break; case UNIT_PER_NM: unit_factor = 1.0e+7 / (output->wl.lambda_t[iv] * output->wl.lambda_t[iv]); break; case UNIT_PER_BAND: unit_factor = output->wl.width_of_reptran_band[output->wl.reptran_band_t[iv]]; break; case UNIT_NOT_DEFINED: unit_factor = 1.0; break; default: fprintf (stderr, "Error: Program bug, unsupported output unit %d in %s (%s). 
\n", input.output_unit, function_name, file_name); return -1; } } else if (input.source == SRC_SOLAR && output->spectrum_unit == UNIT_PER_NM) { switch (input.output_unit) { case UNIT_PER_CM_1: unit_factor = (output->wl.lambda_t[iv] * output->wl.lambda_t[iv]) / 1.0e+7; break; case UNIT_PER_NM: unit_factor = 1; break; case UNIT_PER_BAND: unit_factor = output->wl.width_of_reptran_band[output->wl.reptran_band_t[iv]]; break; case UNIT_NOT_DEFINED: unit_factor = 1.0; break; default: fprintf (stderr, "Error: Program bug, unsupported output unit %d in %s (%s). \n", input.output_unit, function_name, file_name); return -1; } } else { fprintf (stderr, "Error: Program bug, unsupported comination of spectrum unit %d with source %d in %s (%s). \n", input.spectrum_unit, input.source, function_name, file_name); return -1; } ffactor = unit_factor; rfactor = unit_factor; hfactor = unit_factor; status = scale_output (input, &(output->rfldir_t), &(output->rfldn_t), &(output->flup_t), &(output->albmed_t), &(output->trnmed_t), &(output->uavgso_t), &(output->uavgdn_t), &(output->uavgup_t), &(output->uavg_t), &(output->u0u_t), &(output->uu_t), &(output->heat_t), &(output->emis_t), &(output->w_zout_t), &(output->down_flux_t), &(output->up_flux_t), &(output->down_rad_t), &(output->up_rad_t), &(output->rfldir3d_t), &(output->rfldn3d_t), &(output->flup3d_t), &(output->fl3d_is_t), &(output->uavgso3d_t), &(output->uavgdn3d_t), &(output->uavgup3d_t), &(output->radiance3d_t), &(output->jacobian_t), &(output->absback3d_t), &(output->rfldir3d_var_t), &(output->rfldn3d_var_t), &(output->flup3d_var_t), &(output->uavgso3d_var_t), &(output->uavgdn3d_var_t), &(output->uavgup3d_var_t), &(output->radiance3d_var_t), &(output->abs3d_var_t), &(output->absback3d_var_t), output->atm.nzout, output->atm.Nxcld, output->atm.Nycld, output->atm.Nzcld, output->mc.alis.Nc, output->atm.nlev - 1, output->atm.threed, output->mc.sample.passback3D, output->islower, output->isupper, output->jslower, output->jsupper, 
output->isstep, output->jsstep, &(output->abs3d_t), output->triangle_results_t, ffactor, rfactor, hfactor, iv); /* in ancillary.c */
      CHKERR (status);
    }   /* end of loop over transmittance wavelengths iv */
  }     /* end of representative-wavelength (else) branch */

  return status;
}


/**************************************************************/
/* Return the index of the entry of lambda_raw[0..n_crs-1]    */
/* closest (in absolute difference) to lambda.                */
/* Returns -1 if n_crs <= 0 (no candidates).                  */
/**************************************************************/

int closest (float lambda, float* lambda_raw, int n_crs)
{
  int iv = 0;
  float min_delta = FLT_MAX;   /* smallest |lambda - lambda_raw[iv]| seen so far */
  int result = -1;             /* stays -1 when the loop body never runs */

  for (iv = 0; iv < n_crs; iv++) {
    if (fabs (lambda - lambda_raw[iv]) < min_delta) {
      min_delta = fabs (lambda - lambda_raw[iv]);
      result = iv;
    }
  }

  return result;
}


/*****************************************************************************/
/* Interpolate the transmittance from the transmission wavelength grid to    */
/* high resolution grid.                                                     */
/*                                                                           */
/* Copies or interpolates every result array from the transmittance grid    */
/* (suffix _t, length wl.nlambda_t) to the high-resolution grid             */
/* (length wl.nlambda_h).  Two regimes:                                      */
/*   - copy:        single wavelength, representative wavelengths            */
/*                  (use_reptran), or a band-parameterization ck scheme;     */
/*                  each high-res index ivh is mapped to one index iv of     */
/*                  the transmittance grid and the values are copied.        */
/*   - interpolate: spectrally-resolved schemes (CRS / RAMAN / LOWTRAN);     */
/*                  every array is passed through arb_wvn().                 */
/* Returns 0 on success; CHKERR aborts on an arb_wvn failure.                */
/*****************************************************************************/

int interpolate_transmittance (input_struct input, output_struct* output)
{
  /* loop indices: lu = output level, is/js = 3D sample column, ks = 3D layer,
     iv = transmittance-grid wavelength, ivh = high-res wavelength,
     iu = umu, ip = Stokes component, j = phi, ic = ALIS concentration,
     isp = caoth species, i = jacobian direction, lc = model layer */
  int lu = 0, is = 0, js = 0, ks = 0, iv = 0, ivh = 0, iu = 0, ip = 0, j = 0, ic = 0;
  int isp = 0, i = 0, lc = 0;
  int linear = 1, status = 0;   /* linear (not spline) interpolation in arb_wvn */

  /* If only one wavelength is required, or in correlated-k mode, */
  /* arrays are not interpolated but copied                       */
  /* ??? need to add the condition that the extraterrestrial ???  */
  /* ??? spectrum was not read from file but set to 1 ???         */
  if (output->wl.nlambda_t == 1 || output->wl.use_reptran == 1 || (output->wl.use_reptran == 0 && input.ck_scheme != CK_CRS && input.ck_scheme != CK_RAMAN && input.ck_scheme != CK_LOWTRAN)) {

    /* ----- copy branch: one transmittance index iv per high-res index ivh ----- */
    for (ivh = 0; ivh < output->wl.nlambda_h; ivh++) {

      /* choose the source index iv on the transmittance grid */
      if (output->wl.nlambda_t == 1)
        iv = ivh;
      else if (output->wl.use_reptran == 1)
        iv = closest (output->wl.lambda_h[ivh], output->wl.lambda_t, output->wl.nlambda_t);
      else
        iv = output->wl.map_e2h[ivh];

      if (input.rte.solver != SOLVER_POLRADTRAN) {
        for (iu = 0; iu < input.rte.numu; iu++) {
          output->albmed[iu][ivh] = output->albmed_t[iu][iv];
          output->trnmed[iu][ivh] = output->trnmed_t[iu][iv];
        }
      }

      for (lu = 0; lu < output->atm.nzout; lu++) {

        if (input.rte.solver == SOLVER_POLRADTRAN) {
          /* polarized fluxes and radiances, per Stokes component */
          for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
            output->up_flux[lu][is][ivh]   = output->up_flux_t[lu][is][iv];
            output->down_flux[lu][is][ivh] = output->down_flux_t[lu][is][iv];
          }
          for (j = 0; j < input.rte.nphi; j++)
            for (iu = 0; iu < input.rte.numu; iu++)
              for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
                output->down_rad[lu][j][iu][is][ivh] = output->down_rad_t[lu][j][iu][is][iv];
                output->up_rad[lu][j][iu][is][ivh]   = output->up_rad_t[lu][j][iu][is][iv];
              }
          if (input.heating != HEAT_NONE) {
            output->heat[lu][ivh]   = output->heat_t[lu][iv];
            output->emis[lu][ivh]   = output->emis_t[lu][iv];
            output->w_zout[lu][ivh] = output->w_zout_t[lu][iv];
          }
        }
        else {
          /* scalar solver: irradiances, actinic fluxes, lidar diagnostics */
          output->flup[lu][ivh]            = output->flup_t[lu][iv];
          output->rfldir[lu][ivh]          = output->rfldir_t[lu][iv];
          output->rfldn[lu][ivh]           = output->rfldn_t[lu][iv];
          output->uavg[lu][ivh]            = output->uavg_t[lu][iv];
          output->uavgdn[lu][ivh]          = output->uavgdn_t[lu][iv];
          output->uavgso[lu][ivh]          = output->uavgso_t[lu][iv];
          output->uavgup[lu][ivh]          = output->uavgup_t[lu][iv];
          output->sslidar_nphot[lu][ivh]   = output->sslidar_nphot_t[lu][iv];
          output->sslidar_nphot_q[lu][ivh] = output->sslidar_nphot_q_t[lu][iv];
          output->sslidar_ratio[lu][ivh]   = output->sslidar_ratio_t[lu][iv];

          if (input.heating != HEAT_NONE) {
            output->heat[lu][ivh]   = output->heat_t[lu][iv];
            output->emis[lu][ivh]   = output->emis_t[lu][iv];
            output->w_zout[lu][ivh] = output->w_zout_t[lu][iv];
          }

          /* radiances */
          for (iu = 0; iu < input.rte.numu; iu++) {
            output->u0u[lu][iu][ivh] = output->u0u_t[lu][iu][iv];
            for (j = 0; j < input.rte.nphi; j++)
              output->uu[lu][j][iu][ivh] = output->uu_t[lu][j][iu][iv];
          }

          /* 3D fields */
          if (output->mc.sample.passback3D)
            for (is = output->islower; is <= output->isupper; is += output->isstep)
              for (js = output->jslower; js <= output->jsupper; js += output->jsstep) {
                output->rfldir3d[lu][is][js][ivh] = output->rfldir3d_t[lu][is][js][iv];
                output->rfldn3d[lu][is][js][ivh]  = output->rfldn3d_t[lu][is][js][iv];
                output->flup3d[lu][is][js][ivh]   = output->flup3d_t[lu][is][js][iv];
                output->uavgso3d[lu][is][js][ivh] = output->uavgso3d_t[lu][is][js][iv];
                output->uavgdn3d[lu][is][js][ivh] = output->uavgdn3d_t[lu][is][js][iv];
                output->uavgup3d[lu][is][js][ivh] = output->uavgup3d_t[lu][is][js][iv];
                for (ip = 0; ip < input.rte.mc.nstokes; ip++) {
                  for (ic = 0; ic < output->mc.alis.Nc; ic++) {
                    output->radiance3d[lu][is][js][ip][ic][ivh] = output->radiance3d_t[lu][is][js][ip][ic][iv];
                  }
                }
                if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) {
                  for (ic = 0; ic < output->mc.alis.Nc; ic++) {
                    output->fl3d_is[lu][is][js][ic][ivh] = output->fl3d_is_t[lu][is][js][ic][iv];
                  }
                }
                if (input.rte.mc.jacobian[DIM_1D]) {
                  for (isp = 0; isp < input.n_caoth + 2; isp++)
                    for (i = 0; i < 2; i++)
                      for (lc = 0; lc < output->atm.nlev - 1; lc++) {
                        output->jacobian[lu][is][js][isp][i][lc][ivh] = output->jacobian_t[lu][is][js][isp][i][lc][iv];
                      }
                }
                if (input.rte.mc.backward.absorption)
                  output->absback3d[lu][is][js][ivh] = output->absback3d_t[lu][is][js][iv];
                /* variances */
                if (input.rte.mc.std) {
                  output->rfldir3d_var[lu][is][js][ivh] = output->rfldir3d_var_t[lu][is][js][iv];
                  output->rfldn3d_var[lu][is][js][ivh]  = output->rfldn3d_var_t[lu][is][js][iv];
                  output->flup3d_var[lu][is][js][ivh]   = output->flup3d_var_t[lu][is][js][iv];
                  output->uavgso3d_var[lu][is][js][ivh] = output->uavgso3d_var_t[lu][is][js][iv];
                  output->uavgdn3d_var[lu][is][js][ivh] = output->uavgdn3d_var_t[lu][is][js][iv];
                  output->uavgup3d_var[lu][is][js][ivh] = output->uavgup3d_var_t[lu][is][js][iv];
                  for (ip = 0; ip < input.rte.mc.nstokes; ip++)
                    output->radiance3d_var[lu][is][js][ip][ivh] = output->radiance3d_var_t[lu][is][js][ip][iv];
                  if (input.rte.mc.backward.absorption)
                    output->absback3d_var[lu][is][js][ivh] = output->absback3d_var_t[lu][is][js][iv];
                }
              }
        }
      }   /* end of loop over output levels lu */

      /* absorption is defined on the 3D caoth grid, not on the user-defined grid */
      if (output->mc.sample.passback3D && input.rte.mc.absorption != MCFORWARD_ABS_NONE)
        for (ks = 0; ks < output->atm.Nzcld; ks++)
          if (output->atm.threed[ks])
            for (is = 0; is < output->atm.Nxcld; is++)
              for (js = 0; js < output->atm.Nycld; js++) { /* **CK added bracket */
                output->abs3d[ks][is][js][ivh] = output->abs3d_t[ks][is][js][iv];
                if (input.rte.mc.std) /* **CK added for forward mc_std */
                  output->abs3d_var[ks][is][js][ivh] = output->abs3d_var_t[ks][is][js][iv];
              }

      /* copy solar zenith angle */
      output->sza_h[ivh] = output->atm.sza_t[iv];

      if (output->triangle_results_t) {
        /* link triangle_results_t -> triangle_results_h; entries are shared
           pointers, not copies, so one _t entry may be referenced by several
           high-res wavelengths */
        if (!output->triangle_results_o)
          output->triangle_results_o = malloc (output->wl.nlambda_h * sizeof (t_triangle_radiation_field*));
        output->triangle_results_o[ivh] = output->triangle_results_t[iv];
      }
    }   /* end of loop over high-resolution wavelengths ivh */
  }
  else {
    /* interpolation to different wavelengths required */

    /* interpolate solar zenith angle */
    status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->atm.sza_t, output->wl.nlambda_h, output->wl.lambda_h, output->sza_h, linear, 0);
    CHKERR (status);

    if (input.rte.solver != SOLVER_POLRADTRAN) {
      for (iu = 0; iu < input.rte.numu; iu++) {
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->albmed_t[iu], output->wl.nlambda_h, output->wl.lambda_h, output->albmed[iu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->trnmed_t[iu], output->wl.nlambda_h, output->wl.lambda_h, output->trnmed[iu], linear, 0);
        CHKERR (status);
      }
    }

    for (lu = 0; lu < output->atm.nzout; lu++) {
      if (input.rte.solver == SOLVER_POLRADTRAN) {
        for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
          status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->down_flux_t[lu][is], output->wl.nlambda_h, output->wl.lambda_h, output->down_flux[lu][is], linear, 0);
          CHKERR (status);
          status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->up_flux_t[lu][is], output->wl.nlambda_h, output->wl.lambda_h, output->up_flux[lu][is], linear, 0);
          CHKERR (status);
          /* ??? */
          /* ??? is it not necessary to interpolate here also: down_rad and up_rad ??? */
          /* ??? */
          /* NOTE(review): this heating block sits inside the Stokes loop, so
             heat/emis/w_zout are interpolated NSTOKES times per level —
             redundant but harmless; confirm before moving it out */
          if (input.heating != HEAT_NONE) {
            status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->heat_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->heat[lu], linear, 0);
            CHKERR (status);
            status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->emis_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->emis[lu], linear, 0);
            CHKERR (status);
            status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->w_zout_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->w_zout[lu], linear, 0);
            CHKERR (status);
          }
        }
      }
      else {
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->flup_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->flup[lu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->rfldir_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->rfldir[lu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->rfldn_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->rfldn[lu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavg_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->uavg[lu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavgdn_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->uavgdn[lu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavgso_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->uavgso[lu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavgup_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->uavgup[lu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->sslidar_nphot_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->sslidar_nphot[lu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->sslidar_nphot_q_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->sslidar_nphot_q[lu], linear, 0);
        CHKERR (status);
        status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->sslidar_ratio_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->sslidar_ratio[lu], linear, 0);
        CHKERR (status);

        if (input.heating != HEAT_NONE) {
          status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->heat_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->heat[lu], linear, 0);
          CHKERR (status);
          status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->emis_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->emis[lu], linear, 0);
          CHKERR (status);
          status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->w_zout_t[lu], output->wl.nlambda_h, output->wl.lambda_h, output->w_zout[lu], linear, 0);
          CHKERR (status);
        }

        /* 3D fields */
        if (output->mc.sample.passback3D)
          for (is = output->islower; is <= output->isupper; is += output->isstep)
            for (js = output->jslower; js <= output->jsupper; js += output->jsstep) {
              status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->rfldir3d_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->rfldir3d[lu][is][js], linear, 0);
              CHKERR (status);
              status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->rfldn3d_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->rfldn3d[lu][is][js], linear, 0);
              CHKERR (status);
              status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->flup3d_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->flup3d[lu][is][js], linear, 0);
              CHKERR (status);
              status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavgso3d_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->uavgso3d[lu][is][js], linear, 0);
              CHKERR (status);
              status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavgdn3d_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->uavgdn3d[lu][is][js], linear, 0);
              CHKERR (status);
              status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavgup3d_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->uavgup3d[lu][is][js], linear, 0);
              CHKERR (status);
              for (ip = 0; ip < input.rte.mc.nstokes; ip++) {
                for (ic = 0; ic < output->mc.alis.Nc; ic++) {
                  status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->radiance3d_t[lu][is][js][ip][ic], output->wl.nlambda_h, output->wl.lambda_h, output->radiance3d[lu][is][js][ip][ic], linear, 0);
                  CHKERR (status);
                }
              }
              if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) {
                for (ic = 0; ic < output->mc.alis.Nc; ic++) {
                  /* NOTE(review): unlike all the other arb_wvn calls here,
                     this one is not followed by CHKERR; an interpolation
                     failure for fl3d_is would go unreported until the next
                     CHKERR — confirm whether that is intentional */
                  status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->fl3d_is_t[lu][is][js][ic], output->wl.nlambda_h, output->wl.lambda_h, output->fl3d_is[lu][is][js][ic], linear, 0);
                }
              }
              if (input.rte.mc.jacobian[DIM_1D])
                for (isp = 0; isp < input.n_caoth + 2; isp++)
                  for (i = 0; i < 2; i++)
                    for (lc = 0; lc < output->atm.nlev - 1; lc++) {
                      status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->jacobian_t[lu][is][js][isp][i][lc], output->wl.nlambda_h, output->wl.lambda_h, output->jacobian[lu][is][js][isp][i][lc], linear, 0);
                      CHKERR (status);
                    }
              if (input.rte.mc.backward.absorption) {
                status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->absback3d_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->absback3d[lu][is][js], linear, 0);
                CHKERR (status);
              }
              /* variances */
              if (input.rte.mc.std) {
                status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->rfldir3d_var_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->rfldir3d_var[lu][is][js], linear, 0);
                CHKERR (status);
                status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->rfldn3d_var_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->rfldn3d_var[lu][is][js], linear, 0);
                CHKERR (status);
                status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->flup3d_var_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->flup3d_var[lu][is][js], linear, 0);
                CHKERR (status);
                status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavgso3d_var_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->uavgso3d_var[lu][is][js], linear, 0);
                CHKERR (status);
                status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavgdn3d_var_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->uavgdn3d_var[lu][is][js], linear, 0);
                CHKERR (status);
                status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uavgup3d_var_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->uavgup3d_var[lu][is][js], linear, 0);
                CHKERR (status);
                for (ip = 0; ip < input.rte.mc.nstokes; ip++) {
                  status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->radiance3d_var_t[lu][is][js][ip], output->wl.nlambda_h, output->wl.lambda_h, output->radiance3d_var[lu][is][js][ip], linear, 0);
                  CHKERR (status);
                }
                if (input.rte.mc.backward.absorption) {
                  status += arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->absback3d_var_t[lu][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->absback3d_var[lu][is][js], linear, 0);
                  CHKERR (status);
                }
              }
            }
      }

      /* radiances */
      /* NOTE(review): from here on, results are assigned with "status ="
         instead of "status +=" as above; each call is still followed by
         CHKERR, so failures are caught, but the style is inconsistent */
      if (input.rte.solver == SOLVER_POLRADTRAN) {
        for (j = 0; j < input.rte.nphi; j++) {
          for (iu = 0; iu < input.rte.numu; iu++) {
            for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
              status = arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->down_rad_t[lu][j][iu][is], output->wl.nlambda_h, output->wl.lambda_h, output->down_rad[lu][j][iu][is], linear, 0);
              CHKERR (status);
              status = arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->up_rad_t[lu][j][iu][is], output->wl.nlambda_h, output->wl.lambda_h, output->up_rad[lu][j][iu][is], linear, 0);
              CHKERR (status);
            }
          }
        }
      }
      else {
        for (iu = 0; iu < input.rte.numu; iu++) {
          status = arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->u0u_t[lu][iu], output->wl.nlambda_h, output->wl.lambda_h, output->u0u[lu][iu], linear, 0);
          CHKERR (status);
        }
        for (j = 0; j < input.rte.nphi; j++) {
          for (iu = 0; iu < input.rte.numu; iu++) {
            status = arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->uu_t[lu][j][iu], output->wl.nlambda_h, output->wl.lambda_h, output->uu[lu][j][iu], linear, 0);
            CHKERR (status);
          }
        }
      }
    }   /* end of loop over output levels lu */

    /* 3D absorption lives on the caoth grid, interpolated per cloud cell */
    if (output->mc.sample.passback3D && input.rte.mc.absorption != MCFORWARD_ABS_NONE)
      for (ks = 0; ks < output->atm.Nzcld; ks++)
        if (output->atm.threed[ks])
          for (is = 0; is < output->atm.Nxcld; is++)
            for (js = 0; js < output->atm.Nycld; js++) {
              status = arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->abs3d_t[ks][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->abs3d[ks][is][js], linear, 0);
              CHKERR (status);
              if (input.rte.mc.std) { /* **CK added for forward mc_std */
                status = arb_wvn (output->wl.nlambda_t, output->wl.lambda_t, output->abs3d_var_t[ks][is][js], output->wl.nlambda_h, output->wl.lambda_h, output->abs3d_var[ks][is][js], linear, 0);
                CHKERR (status);
              }
            }
  }

  return 0;
}
/**************************************************************/ /* Multiply the transmittance with the extraterrestrial */ /* irradiance. */ /**************************************************************/ int multiply_extraterrestrial (input_struct input, output_struct* output) { int iv = 0, status = 0; double ffactor = 0, rfactor = 0, hfactor = 0; double watt_factor = 1.0; double hfactor2 = 0.0; /* Figure out if we have to convert from mW to W. This should */ /* cover the input stuff that comes with uvspec. Now, if the user */ /* decides to change the input solar flux it may get interesting... */ /* THIS mW to W CONVERTION IS USED FOR HEATING RATES, SHOULD IT BECOME DEFAULT? */ switch (input.ck_scheme) { case CK_FU: case CK_KATO: case CK_KATO2: case CK_KATO2_96: case CK_KATO2ANDWANDJI: case CK_AVHRR_KRATZ: watt_factor = 1.0; break; case CK_LOWTRAN: case CK_FILE: case CK_CRS: case CK_REPTRAN: case CK_REPTRAN_CHANNEL: case CK_RAMAN: switch (input.source) { case SRC_SOLAR: case SRC_BLITZ: /* BCA */ case SRC_LIDAR: /* BCA */ watt_factor = mW2W; break; case SRC_THERMAL: watt_factor = 1.0; break; default: fprintf (stderr, "Error, unknown source %d\n", input.source); return -1; } break; default: fprintf (stderr, "Error: unsupported correlated-k scheme %d\n", input.ck_scheme); return -1; break; } hfactor = watt_factor * s2day; hfactor2 = hfactor; /* multiply with extraterrestrial irradiance */ for (iv = 0; iv < output->wl.nlambda_h; iv++) { switch (input.source) { case SRC_THERMAL: ffactor = output->wl.filter[iv]; rfactor = output->wl.filter[iv]; break; case SRC_SOLAR: case SRC_BLITZ: /* BCA */ case SRC_LIDAR: /* BCA */ switch (input.processing) { case PROCESS_INT: case PROCESS_SUM: case PROCESS_RGB: case PROCESS_RGBNORM: ffactor = output->wl.fbeam[iv] * output->sunshine_fraction * output->wl.filter[iv]; rfactor = output->wl.fbeam[iv] * output->sunshine_fraction * output->wl.filter[iv]; break; case PROCESS_NONE: case PROCESS_RAMAN: switch (input.calibration) { case 
OUTCAL_ABSOLUTE: ffactor = output->wl.fbeam[iv] * output->sunshine_fraction * output->wl.filter[iv]; rfactor = output->wl.fbeam[iv] * output->sunshine_fraction * output->wl.filter[iv]; break; case OUTCAL_TRANSMITTANCE: ffactor = 1.0 * output->wl.filter[iv]; rfactor = 1.0 * output->wl.filter[iv]; break; case OUTCAL_REFLECTIVITY: ffactor = 1.0 / cos (output->sza_h[iv] * PI / 180.0) * output->wl.filter[iv]; rfactor = PI / cos (output->sza_h[iv] * PI / 180.0) * output->wl.filter[iv]; break; default: fprintf (stderr, "Error, unknown output calibration %d\n", input.calibration); return -1; } break; default: fprintf (stderr, "Error, unknown output processing %d\n", input.processing); return -1; } break; default: fprintf (stderr, "Error, unknown source %d\n", input.source); return -1; } switch (input.source) { case SRC_SOLAR: case SRC_LIDAR: /* BCA */ case SRC_BLITZ: /* BCA */ hfactor = output->wl.fbeam[iv] * output->sunshine_fraction * hfactor2; break; case SRC_THERMAL: break; default: fprintf (stderr, "Error, unknown source %d\n", input.source); return -1; } /*****************************************************************************************/ /* now scale irradiances with ffactor, radiances with rfactor, heating rate with hfactor */ /*****************************************************************************************/ status = scale_output (input, &(output->rfldir), &(output->rfldn), &(output->flup), &(output->albmed), &(output->trnmed), &(output->uavgso), &(output->uavgdn), &(output->uavgup), &(output->uavg), &(output->u0u), &(output->uu), &(output->heat), &(output->emis), &(output->w_zout), &(output->down_flux), &(output->up_flux), &(output->down_rad), &(output->up_rad), &(output->rfldir3d), &(output->rfldn3d), &(output->flup3d), &(output->fl3d_is), &(output->uavgso3d), &(output->uavgdn3d), &(output->uavgup3d), &(output->radiance3d), &(output->jacobian), &(output->absback3d), &(output->rfldir3d_var), &(output->rfldn3d_var), &(output->flup3d_var), 
&(output->uavgso3d_var), &(output->uavgdn3d_var), &(output->uavgup3d_var), &(output->radiance3d_var), &(output->abs3d_var), &(output->absback3d_var), output->atm.nzout, output->atm.Nxcld, output->atm.Nycld, output->atm.Nzcld, output->mc.alis.Nc, output->atm.nlyr, output->atm.threed, output->mc.sample.passback3D, output->islower, output->isupper, output->jslower, output->jsupper, output->isstep, output->jsstep, &(output->abs3d), output->triangle_results_o, ffactor, rfactor, hfactor, iv); if (status != 0) { fprintf (stderr, "Error %d returned by scale_output()\n", status); return status; } } return 0; } // helper function to convert a caoth3d to consecutive memory and write it to a netcdf file static int dump_caoth3d_var (int ncid, int isp, char* caothname, caoth3d_out_struct caoth3d, char* varname, float*** caoth_data) { const size_t Nx = caoth3d.Nx; const size_t Ny = caoth3d.Ny; const size_t Nz = caoth3d.nthreed; float data[Nz][Nx][Ny]; int retval; int status = 0; if (Nz == 0) return status; // nothing to do char varprefix[FILENAME_MAX]; char ncvarname[FILENAME_MAX]; char xlabel[FILENAME_MAX]; char ylabel[FILENAME_MAX]; char zlabel[FILENAME_MAX]; char isp_char[11]; sprintf (isp_char, "%d", isp); strcpy (varprefix, "caoth3d_"); strcat (varprefix, isp_char); strcat (varprefix, "_"); strcat (varprefix, caothname); strcpy (ncvarname, varprefix); strcat (ncvarname, varname); strcat (strcpy (xlabel, varprefix), "_nlyr"); strcat (strcpy (ylabel, varprefix), "_Nx"); strcat (strcpy (zlabel, varprefix), "_Ny"); size_t kk = 0; for (size_t k = 0; k < caoth3d.nlyr; ++k) { if (caoth3d.threed[k]) { for (size_t i = 0; i < Nx; ++i) { for (size_t j = 0; j < Ny; ++j) { data[kk][i][j] = caoth_data[k][i][j]; } } ++kk; } } if ((retval = write_netcdf_3D_contiguous_float (ncid, (float*)data, Nz, Nx, Ny, ncvarname, xlabel, ylabel, zlabel))) ERR (retval); return status; } /***********************************************************************************/ /* Function: optical_properties 
@61_30i@ */ /* Description: */ /* Calculates optical depth, single scattering albedo and */ /* phase function from absorption and scattering cross sections */ /* corresponding gas and particulate matter concentrations for */ /* one wavelength. */ /* Parameters: */ /* Return value: */ /* Example: */ /* Files: */ /* Known bugs: */ /* Author: */ /* @i61_30@ */ /***********************************************************************************/ int optical_properties (input_struct input, output_struct* output, double wvl, int ir, int iv1, int iv2, int iq, int verbose, int skip_optical_properties) { static int first = 1; //TODO: Is this good, static int? in function interpolate_profile same variable first int k = 0, lc = 0, ip = 0, ips = 0, iv = 0, iv1r = 0, iv2r = 0, isp = 0, ispo = 0; int status = 0; /* vectors for ALL caoths; 0 is reserved for MOL, 1 for AER */ double *babs_s = NULL, babs_tot = 0.0; double *bsca_s = NULL, bsca_tot = 0.0; double babs_mol_md = 0.0, babs_tot_md = 0.0; double bext_tot_md = 0.0; /* The absorption of gases minus the one specified with a matrix using the dens_file command (for airmass factor calculations). */ double *ssa_s_unsc = NULL, *bsca_s_unsc = NULL, *babs_s_unsc = NULL; double *bsca_s_unsc_int = NULL, *babs_s_unsc_int = NULL; double* gg_s_unsc = NULL; double* babs_s_int = NULL; double* bsca_s_int = NULL; double tau_babs_sum = 0.0; /* ulrike: to add up the dtau's of babsa,... 
for all layers above the level considered*/ double** mom_s = NULL; double bext_tot = 0.0; double p_mom[6] = {0, 0, 0, 0, 0, 0}; FILE* fpol = NULL; int nphamat = 0; /* Output Extinction; needed for ARLEM */ char extfilename[FILENAME_MAX] = ""; FILE* extfile = NULL; int nlev = output->atm.nlev_common; int nlyr = nlev - 1; int n_caoth = input.n_caoth + 2; /* mol and aer included */ int n_caoth_alloc = 0; int i_wc = 0, i_ic = 0; int i_wcn = 0, i_wck = 0; int i_icn = 0, i_ick = 0; double *mom_s_g1 = NULL, *mom_s_g2 = NULL; double rayleigh_mom2 = 0; double Delta = 0, Deltap = 0; double rayleigh_depol = 0.0, momk = 0.0; double *ssa_s = NULL, *dscale_s = NULL, *f_s = NULL, *ff_s = NULL, *g1_s = NULL, *g2_s = NULL, *dtau_s = NULL, *mom0_s = NULL; double wvl1 = 0, wvl2 = 0; int* tmp_ntheta_in = 0; float ** tmp_theta_in = NULL, *tmp_theta_new = NULL; double **tmp_mu_in = NULL, *tmp_mu_new = NULL; float ** tmp_phase_in = NULL, *tmp_phase_new = NULL; int n_in = 0, n_tot = 0, i = 0, ntheta_new = 0; double* interp_optprop_weight = NULL; int* phase_calloced = NULL; double one = 1.0; /* phase matrix element indices for polradtran */ int ip_simp[6] = {0, 0, 0, 0, 0, 0}; int ip_full[6] = {0, 1, 2, 3, 4, 5}; int ip_red[6] = {0, 1, 2, 3, 0, 2}; int** ip_act = NULL; n_caoth_alloc = n_caoth; i_wc = input.i_wc + 2; i_ic = input.i_ic + 2; /* in case either wc or ic is not existent, allocate a dummy */ /* caoth, which is zero, and can be used for verbose output */ if (i_ic == 1 || i_wc == 1) n_caoth_alloc++; /* set index of wc/ic to dummy caoth */ if (i_wc == 1) i_wc = n_caoth_alloc - 1; if (i_ic == 1) i_ic = n_caoth_alloc - 1; /* allocate caoth arrays */ babs_s = calloc (n_caoth_alloc, sizeof (double)); bsca_s = calloc (n_caoth_alloc, sizeof (double)); ssa_s = calloc (n_caoth_alloc, sizeof (double)); babs_s_int = calloc (n_caoth_alloc, sizeof (double)); bsca_s_int = calloc (n_caoth_alloc, sizeof (double)); mom_s = calloc (n_caoth_alloc, sizeof (double*)); mom_s_g1 = calloc (n_caoth_alloc, 
sizeof (double)); mom_s_g2 = calloc (n_caoth_alloc, sizeof (double)); dscale_s = calloc (n_caoth_alloc, sizeof (double)); f_s = calloc (n_caoth_alloc, sizeof (double)); ff_s = calloc (n_caoth_alloc, sizeof (double)); g1_s = calloc (n_caoth_alloc, sizeof (double)); g2_s = calloc (n_caoth_alloc, sizeof (double)); dtau_s = calloc (n_caoth_alloc, sizeof (double)); mom0_s = calloc (n_caoth_alloc, sizeof (double)); babs_s_unsc = calloc (n_caoth_alloc, sizeof (double)); bsca_s_unsc = calloc (n_caoth_alloc, sizeof (double)); ssa_s_unsc = calloc (n_caoth_alloc, sizeof (double)); babs_s_unsc_int = calloc (n_caoth_alloc, sizeof (double)); bsca_s_unsc_int = calloc (n_caoth_alloc, sizeof (double)); gg_s_unsc = calloc (n_caoth_alloc, sizeof (double)); ip_act = calloc (n_caoth_alloc, sizeof (int*)); /* */ if (input.rte.polradtran[POLRADTRAN_NSTOKES] == 1) nphamat = 1; else if (input.rte.polradtran[POLRADTRAN_NSTOKES] > 1 && input.rte.polradtran[POLRADTRAN_NSTOKES] < 5) nphamat = 6; else fprintf (stderr, "Input variable pol_nstokes is wrong! 
\n"); output->nphamat = nphamat; if (nphamat == 6) { /* phase matrix element indices for last two elements in case of spherical symmetric particles */ if (output->aer.optprop.nphamat == 4) ip_act[CAOTH_AER] = ip_red; else ip_act[CAOTH_AER] = ip_full; for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; /* phase matrix element indices for last two elements in case of spherical symmetric particles */ if (output->caoth[ispo].optprop.nphamat == 4) ip_act[isp] = ip_red; else ip_act[isp] = ip_full; } } /* end if nphamat == 6 */ else for (isp = CAOTH_AER; isp < n_caoth; isp++) ip_act[isp] = ip_simp; for (isp = 0; isp < n_caoth; isp++) mom_s[isp] = calloc (nphamat, sizeof (double)); iv = iv1; /* iv needed both for Raman and not Raman when calculating phase function moments */ if (input.raman) { iv1r = iv1 + output->wl.nlambda_rte_lower; iv2r = iv2 + output->wl.nlambda_rte_lower; } if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) { wvl1 = output->wl.lambda_r[iv1]; wvl2 = output->wl.lambda_r[iv2]; rayleigh_depol = linpol (wvl1, wvl2, output->rayleigh_depol[iv1r], output->rayleigh_depol[iv2r], wvl); } else { rayleigh_depol = output->rayleigh_depol[iv]; } /* 2nd moment for Rayleigh scattering, including depolarization */ rayleigh_mom2 = 0.2 * (1.0 - rayleigh_depol) / (2.0 + rayleigh_depol); /* Deltas in Eq. 2.16, Hansen and Travis, Space Science Rev., 16, 527-610, 1974.*/ Delta = (1.0 - rayleigh_depol) / (1.0 + 0.5 * rayleigh_depol); Deltap = (1.0 + 2.0 * rayleigh_depol) / (1.0 - rayleigh_depol); if (verbose) { fprintf (stderr, "... 
second moment for Rayleigh scattering: %f\n", rayleigh_mom2); fprintf (stderr, "*** optical_properties()\n"); if (output->cf.nlev == 0 || input.rte.solver == SOLVER_TWOMAXRND || input.rte.solver == SOLVER_TWOMAXRND3C || input.rte.solver == SOLVER_DYNAMIC_TWOSTREAM || input.rte.solver == SOLVER_DYNAMIC_TENSTREAM) { fprintf (stderr, " ------------------------------------------------------------------------------------------------------------------" "------------------------------------------------\n"); fprintf (stderr, " lc | z[km] | Rayleigh | Aerosol | Water cloud | Ice cloud " " | Molecular \n"); fprintf (stderr, " | | dtau | scatter. abs. asy. | scatter. abs. asy. | scatter. " "abs. asy. ff g1 g2 f | absorption \n"); fprintf (stderr, " ------------------------------------------------------------------------------------------------------------------" "------------------------------------------------\n"); } else { fprintf (stderr, " ------------------------------------------------------------------------------------------------------------------" "---------------------------\n"); fprintf (stderr, " lc | z[km] | Rayleigh | Aerosol | effective cloud (water and ice) " " | Molecular | Cloud \n"); fprintf (stderr, " | | dtau | scatter. abs. asy. | scatter. abs. asy. 
ff g1 g2 f " " | absorption | fraction\n"); fprintf (stderr, " ------------------------------------------------------------------------------------------------------------------" "---------------------------\n"); } } if (first != 0) { first = 0; /* allocate memory for profiles */ output->dtauc = (float*)calloc (nlyr, sizeof (float)); output->dtauc_md = (float*)calloc (nlyr, sizeof (float)); output->ssalb = (float*)calloc (nlyr, sizeof (float)); output->dtauc_clr = (float*)calloc (nlyr, sizeof (float)); output->ssalb_clr = (float*)calloc (nlyr, sizeof (float)); output->dtauc_cldk = (float*)calloc (nlyr, sizeof (float)); output->ssalb_cldk = (float*)calloc (nlyr, sizeof (float)); output->dtauc_cldn = (float*)calloc (nlyr, sizeof (float)); output->ssalb_cldn = (float*)calloc (nlyr, sizeof (float)); if (input.tipa == TIPA_DIR) output->tausol = (float*)calloc (nlyr, sizeof (float)); /* ulrike: for tipa dir */ if (input.raman) { //TODO: Delete this, never used output->ssalbR = (float*)calloc (nlyr, sizeof (float)); output->ssalbRL = (float*)calloc (nlyr, sizeof (float)); } if (input.rte.solver == SOLVER_MONTECARLO) { output->mc.z = (float*)calloc (nlev, sizeof (float)); if (!input.atmosphere3d) ASCII_calloc_float_3D (&output->mc.temper, 1, 1, nlyr + 1); /* CE: temper is defined on levels, so cahnged nlyr -> nlyr+1 ??*/ /* alternative: output->mc.caoth = calloc (n_caoth, sizeof(mc_caoth_struct)); */ output->mc.dt = (float**)calloc (n_caoth, sizeof (float*)); output->mc.om = (float**)calloc (n_caoth, sizeof (float*)); output->mc.g1 = (float**)calloc (n_caoth, sizeof (float*)); output->mc.g2 = (float**)calloc (n_caoth, sizeof (float*)); output->mc.ff = (float**)calloc (n_caoth, sizeof (float*)); output->mc.ds = (float**)calloc (n_caoth, sizeof (float*)); output->mc.re = (float**)calloc (n_caoth, sizeof (float*)); for (isp = 0; isp < n_caoth; isp++) { output->mc.dt[isp] = (float*)calloc (nlev, sizeof (float)); output->mc.om[isp] = (float*)calloc (nlev, sizeof (float)); 
output->mc.g1[isp] = (float*)calloc (nlev, sizeof (float)); output->mc.g2[isp] = (float*)calloc (nlev, sizeof (float)); output->mc.ff[isp] = (float*)calloc (nlev, sizeof (float)); output->mc.ds[isp] = (float*)calloc (nlev, sizeof (float)); output->mc.re[isp] = (float*)calloc (nlev, sizeof (float)); } output->mc.refind = (float*)calloc (nlev, sizeof (float)); /* SBCA clean this later */ output->mc.nmomaer = (int*)calloc (nlev, sizeof (int)); output->mc.momaer = (float***)calloc (nlev, sizeof (float**)); //TODO: size: nlyr , nphamat, ntheta, Speicherzugriffsfehler, wenn nlev output->mc.nthetaaer = calloc (nlev, sizeof (int*)); output->mc.thetaaer = calloc (nlev, sizeof (float**)); output->mc.muaer = calloc (nlev, sizeof (double**)); output->mc.phaseaer = calloc (nlev, sizeof (float**)); for (isp = 0; isp < n_caoth; isp++) { for (lc = 0; lc < nlev; lc++) { output->mc.ff[isp][lc] = 1.0; } } } /* output->atm.nmom+1 stores the maximum number of moments */ /* for all wavelengths and layers, minus 1 */ status = ASCII_calloc_float_3D (&output->pmom, nlyr, nphamat, output->atm.nmom + 1); if (status != 0) { fprintf (stderr, "Error %d allocating memory for output->pmom\n", status); return status; } output->pmom01_clr = calloc (nlyr, sizeof (float)); output->pmom01_cldk = calloc (nlyr, sizeof (float)); output->pmom01_cldn = calloc (nlyr, sizeof (float)); output->ntheta = calloc (nlyr, sizeof (int*)); output->theta = calloc (nlyr, sizeof (float**)); output->mu = calloc (nlyr, sizeof (double**)); output->phase = calloc (nlyr, sizeof (float**)); } /* end if first */ else { if (!skip_optical_properties) { /* need to free mu theta phase */ for (lc = 0; lc < nlyr; lc++) { if (output->ntheta[lc] != NULL) { for (ip = 0; ip < nphamat; ip++) { if (output->theta[lc][ip] != NULL) free (output->theta[lc][ip]); if (output->mu[lc][ip] != NULL) free (output->mu[lc][ip]); if (output->phase[lc][ip] != NULL) free (output->phase[lc][ip]); } free (output->ntheta[lc]); free (output->theta[lc]); free 
(output->mu[lc]); free (output->phase[lc]); } } } } if (!skip_optical_properties) { /* identify indices of "wcn" and "wck" CAOTHs (thiN and thicK clouds) for twomaxrnd3c */ if (input.rte.solver == SOLVER_TWOMAXRND3C) { i_wcn = -1; i_wck = -1; i_icn = -1; i_ick = -1; for (isp = 0; isp < input.n_caoth; isp++) { if (strcasecmp (output->caoth[isp].name, "wcn") == 0) i_wcn = isp; if (strcasecmp (output->caoth[isp].name, "wck") == 0) i_wck = isp; if (strcasecmp (output->caoth[isp].name, "ick") == 0) i_ick = isp; if (strcasecmp (output->caoth[isp].name, "icn") == 0) i_icn = isp; } if (i_wcn < 0 || i_wck < 0) { fprintf (stderr, "Error. Please define wck and wcn profiles for solver twomaxrnd3c!\n"); return -1; } if ((i_icn >= 0 && i_ick < 0) || (i_ick >= 0 && i_icn < 0)) { fprintf (stderr, "Error, please define both thin and thick ice clouds, icn and ick!\n"); return -1; } if (!input.quiet) if (i_icn >= 0 && i_ick >= 0) fprintf (stderr, " ... ice clouds for twomaxrnd3c found!\n"); /* add offset CAOTH_FIR because elements 0 and 1 are reserved for MOL and AER */ i_wcn += CAOTH_FIR; i_wck += CAOTH_FIR; if (i_icn > 0) i_icn += CAOTH_FIR; if (i_ick > 0) i_ick += CAOTH_FIR; if (input.verbose) fprintf (stderr, " ... twomaxrnd3c(): i_wcn=%d, i_wck=%d\n", i_wcn, i_wck); if (!input.quiet) if (i_icn > 0 && i_ick > 0) fprintf (stderr, " ... 
twomaxrnd3c(): i_icn=%d, i_ick=%d\n", i_icn, i_ick); } for (isp = 0; isp < n_caoth; isp++) { babs_s_int[isp] = 0.0; bsca_s_int[isp] = 0.0; } if (input.write_ext_to_file) { strcpy (extfilename, input.rte.mc.filename[FN_MC_BASENAME]); strcat (extfilename, ".ext_r"); if ((extfile = fopen (extfilename, "w")) == NULL) return -1; } for (lc = 0; lc < nlyr; lc++) { switch (input.rte.solver) { case SOLVER_SSSI: /* special treatment for SOLVER_SSSI: caoth single scattering albedo is */ /* set to 0 because caoth scattering is treated explicitely */ /* through the tabulated caoth reflectivity; caoth then only */ /* reduce radiance through Lambert-Beer */ for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; output->caoth[ispo].optprop.ssa[iv][lc] = 0; } break; default: break; } /* only in the Fu and Liou case, the Rayleigh scattering */ /* cross section depends on the subband */ switch (input.ck_scheme) { case CK_FU: bsca_s[CAOTH_MOL] = output->atm.optprop.tau_rayleigh_r[0][0][lc][iv][iq]; break; case CK_KATO: case CK_KATO2: case CK_KATO2_96: case CK_KATO2ANDWANDJI: case CK_AVHRR_KRATZ: case CK_FILE: case CK_LOWTRAN: case CK_CRS: case CK_REPTRAN: case CK_REPTRAN_CHANNEL: bsca_s[CAOTH_MOL] = output->atm.optprop.tau_rayleigh_r[0][0][lc][iv][0]; break; case CK_RAMAN: if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) bsca_s[CAOTH_MOL] = linpol (wvl1, wvl2, output->atm.optprop.tau_rayleigh_r[0][0][lc][iv1r][0], output->atm.optprop.tau_rayleigh_r[0][0][lc][iv2r][0], wvl); else bsca_s[CAOTH_MOL] = output->atm.optprop.tau_rayleigh_r[0][0][lc][iq][0]; break; default: fprintf (stderr, "Error: unsupported correlated-k scheme %d\n", input.ck_scheme); return -1; break; } switch (output->atm.molabs) { case MOLABS_CALC: case MOLABS_LOOKUP: if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) babs_s[CAOTH_MOL] = linpol (wvl1, wvl2, output->atm.optprop.tau_molabs_r[0][0][lc][iv1r][0], 
output->atm.optprop.tau_molabs_r[0][0][lc][iv2r][0], wvl); else if (input.raman_fast && ir == 0) babs_s[CAOTH_MOL] = output->atm.optprop.tau_molabs_r[0][0][lc][iv][0]; else babs_s[CAOTH_MOL] = output->atm.optprop.tau_molabs_r[0][0][lc][iv][iq]; if (input.rte.solver == SOLVER_SDISORT) babs_mol_md = output->atm.optprop.tau_molabs_md_r[0][0][lc][iv][iq]; else babs_mol_md = 0.0; break; case MOLABS_FILE_MONO: if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) babs_s[CAOTH_MOL] = linpol (wvl1, wvl2, output->atm.optprop.tau_molabs_r[0][0][lc][iv1r][0], output->atm.optprop.tau_molabs_r[0][0][lc][iv2r][0], wvl); else if (input.raman_fast && ir == 0) babs_s[CAOTH_MOL] = output->atm.optprop.tau_molabs_r[0][0][lc][iv][0]; else /* babs_s[CAOTH_MOL] = output->atm.optprop.tau_molabs_user[lc]; */ babs_s[CAOTH_MOL] = output->atm.optprop.tau_molabs_r[0][0][lc][iv][iq]; babs_mol_md = 0; break; case MOLABS_FILE_SPEC: if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) babs_s[CAOTH_MOL] = linpol (wvl1, wvl2, output->atm.optprop.tau_molabs_r[0][0][lc][iv1r][0], output->atm.optprop.tau_molabs_r[0][0][lc][iv2r][0], wvl); else if (input.raman_fast && ir == 0) babs_s[CAOTH_MOL] = output->atm.optprop.tau_molabs_r[0][0][lc][iq][0]; else babs_s[CAOTH_MOL] = output->atm.optprop.tau_molabs_r[0][0][lc][iv][iq]; babs_mol_md = 0; break; case MOLABS_NONE: babs_s[CAOTH_MOL] = 0; babs_mol_md = 0; break; default: fprintf (stderr, "Error, unknown molecular absorption option %d\n", output->atm.molabs); return -1; } if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) { ssa_s[CAOTH_AER] = linpol (wvl1, wvl2, output->aer.optprop.ssa[iv1r][lc], output->aer.optprop.ssa[iv2r][lc], wvl); f_s[CAOTH_AER] = linpol (wvl1, wvl2, output->aer.optprop.f[iv1r][lc], output->aer.optprop.f[iv2r][lc], wvl); ff_s[CAOTH_AER] = linpol (wvl1, wvl2, output->aer.optprop.ff[iv1r][lc], output->aer.optprop.ff[iv2r][lc], wvl); g1_s[CAOTH_AER] = linpol (wvl1, wvl2, 
output->aer.optprop.g1[iv1r][lc], output->aer.optprop.g1[iv2r][lc], wvl); g2_s[CAOTH_AER] = linpol (wvl1, wvl2, output->aer.optprop.g2[iv1r][lc], output->aer.optprop.g2[iv2r][lc], wvl); dtau_s[CAOTH_AER] = linpol (wvl1, wvl2, output->aer.optprop.dtau[iv1r][lc], output->aer.optprop.dtau[iv2r][lc], wvl); for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; ssa_s[isp] = linpol (wvl1, wvl2, output->caoth[ispo].optprop.ssa[iv1r][lc], output->caoth[ispo].optprop.ssa[iv2r][lc], wvl); f_s[isp] = linpol (wvl1, wvl2, output->caoth[ispo].optprop.f[iv1r][lc], output->caoth[ispo].optprop.f[iv2r][lc], wvl); ff_s[isp] = linpol (wvl1, wvl2, output->caoth[ispo].optprop.ff[iv1r][lc], output->caoth[ispo].optprop.ff[iv2r][lc], wvl); g1_s[isp] = linpol (wvl1, wvl2, output->caoth[ispo].optprop.g1[iv1r][lc], output->caoth[ispo].optprop.g1[iv2r][lc], wvl); g2_s[isp] = linpol (wvl1, wvl2, output->caoth[ispo].optprop.g2[iv1r][lc], output->caoth[ispo].optprop.g2[iv2r][lc], wvl); dtau_s[isp] = linpol (wvl1, wvl2, output->caoth[ispo].optprop.dtau[iv1r][lc], output->caoth[ispo].optprop.dtau[iv2r][lc], wvl); dscale_s[isp] = linpol (wvl1, wvl2, output->caoth[ispo].optprop.dscale[iv1r][lc], output->caoth[ispo].optprop.dscale[iv2r][lc], wvl); } //20120816ak refind is not used, commented // refind = linpol( wvl1, wvl2, output->atm.microphys.refind[iv1r][lc], // output->atm.microphys.refind[iv2r][lc], wvl); } else { ssa_s[CAOTH_AER] = output->aer.optprop.ssa[iv][lc]; f_s[CAOTH_AER] = output->aer.optprop.f[iv][lc]; ff_s[CAOTH_AER] = output->aer.optprop.ff[iv][lc]; g1_s[CAOTH_AER] = output->aer.optprop.g1[iv][lc]; g2_s[CAOTH_AER] = output->aer.optprop.g2[iv][lc]; dtau_s[CAOTH_AER] = output->aer.optprop.dtau[iv][lc]; for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; if (output->caoth[ispo].optprop.ssa != NULL) { ssa_s[isp] = output->caoth[ispo].optprop.ssa[iv][lc]; f_s[isp] = output->caoth[ispo].optprop.f[iv][lc]; ff_s[isp] = output->caoth[ispo].optprop.ff[iv][lc]; 
g1_s[isp] = output->caoth[ispo].optprop.g1[iv][lc]; g2_s[isp] = output->caoth[ispo].optprop.g2[iv][lc]; dtau_s[isp] = output->caoth[ispo].optprop.dtau[iv][lc]; dscale_s[isp] = output->caoth[ispo].optprop.dscale[iv][lc]; /* ?????????? */ } } //20120816ak refind is not used, commented // refind = output->atm.microphys.refind[iv][lc]; } for (isp = CAOTH_FIR; isp < n_caoth; isp++) ssa_s_unsc[isp] = ssa_s[isp] / (1.0 + f_s[isp] * (ssa_s[isp] - 1.0)); /* absorption by aerosols and caoths */ if (input.absorption) { for (isp = CAOTH_AER; isp < n_caoth; isp++) babs_s[isp] = (1.0 - ssa_s[isp]) * dtau_s[isp]; for (isp = CAOTH_FIR; isp < n_caoth; isp++) babs_s_unsc[isp] = (1.0 - ssa_s_unsc[isp]) * dtau_s[isp] / (1.0 - ssa_s_unsc[isp] * f_s[isp]); babs_tot = 0.0; for (isp = 0; isp < n_caoth; isp++) babs_tot += babs_s[isp]; babs_tot_md = babs_tot - babs_s[CAOTH_MOL] + babs_mol_md; if (babs_tot > FLT_MAX) babs_tot = FLT_MAX; if (babs_tot_md > FLT_MAX) babs_tot_md = FLT_MAX; } else { for (isp = 0; isp < n_caoth; isp++) babs_s[isp] = 0.0; babs_tot = 0.0; babs_tot_md = 0.0; babs_mol_md = 0.0; } /* scattering by non-molecules */ for (isp = CAOTH_AER; isp < n_caoth; isp++) bsca_s[isp] = ssa_s[isp] * dtau_s[isp]; for (isp = CAOTH_FIR; isp < n_caoth; isp++) bsca_s_unsc[isp] = ssa_s_unsc[isp] * dtau_s[isp] / (1.0 - ssa_s_unsc[isp] * f_s[isp]); /* switch scattering off, if user wants so */ if (input.aer.no_scattering) { bsca_s[CAOTH_AER] = 0.0; bsca_s_unsc[CAOTH_AER] = 0.0; ssa_s[CAOTH_AER] = 0.0; } for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; if (input.caoth[ispo].no_scattering) { bsca_s[isp] = 0.0; bsca_s_unsc[isp] = 0.0; ssa_s[isp] = 0.0; } } /* Scattering coefficient */ if (input.scattering) { bsca_tot = 0.0; for (isp = 0; isp < n_caoth; isp++) bsca_tot += bsca_s[isp]; if (bsca_tot > FLT_MAX) bsca_tot = FLT_MAX; if (bsca_tot < FLT_MIN) bsca_tot = FLT_MIN; } else { for (isp = 0; isp < n_caoth; isp++) { bsca_s[isp] = 0.0; bsca_s_unsc[isp] = 0.0; ssa_s[isp] = 
0.0; } bsca_tot = FLT_MIN; } /* Extinction coefficient */ bext_tot = babs_tot + bsca_tot; if (bext_tot > FLT_MAX) bext_tot = FLT_MAX; if (bext_tot < FLT_MIN) bext_tot = FLT_MIN; bext_tot_md = babs_tot_md + bsca_tot; if (bext_tot_md > FLT_MAX) bext_tot_md = FLT_MAX; if (bext_tot_md < FLT_MIN) bext_tot_md = FLT_MIN; for (isp = CAOTH_FIR; isp < n_caoth; isp++) gg_s_unsc[isp] = g1_s[isp] * (1.0 - f_s[isp]) + f_s[isp]; if (input.write_ext_to_file) { if (verbose && lc == 0) printf ("...saving Extinction Data\n"); /* ??? do we need this here: */ /* if (output->cf.nlev == 0) { */ /* fprintf (extfile, "%5d %8.4f %9.6f %9.6f %9.6f %5.3f %11.6f %11.6f %5.3f %11.6f %11.6f %5.3f %5.3f %5.3f %6.3f %5.3f %11.6f\n", */ fprintf (extfile, "%5d %8.4f %11.6e %11.6e %11.6e %5.3f %11.6e %11.6e %5.3f %11.6e %11.6e %5.3f %5.3f %5.3f %6.3f %5.3f %11.6e\n", lc, output->atm.zd[lc + 1] + output->alt.altitude, /* Rayleigh */ bsca_s[CAOTH_MOL], /* Aerosol, averaging of delta scaling factors for mixture of aerosol types not yet implemented !!!*/ bsca_s[CAOTH_AER], babs_s[CAOTH_AER], g1_s[CAOTH_AER] * ff_s[CAOTH_AER] + (1.0 - ff_s[CAOTH_AER]) * g2_s[CAOTH_AER], /* Water cloud */ bsca_s_unsc[i_wc], babs_s_unsc[i_wc], gg_s_unsc[i_wc] * ff_s[i_wc] + (1.0 - ff_s[i_wc]) * g2_s[i_wc], /* ice cloud */ bsca_s_unsc[i_ic], babs_s_unsc[i_ic], gg_s_unsc[i_ic] * ff_s[i_ic] + (1.0 - ff_s[i_ic]) * g2_s[i_ic], ff_s[i_ic], gg_s_unsc[i_ic], g2_s[i_ic], f_s[i_ic], /* Molecules */ babs_s[CAOTH_MOL]); } /* CE: Modified verbose output. 
Un-deltascaled optical properties are printed.*/ if (verbose) { if (output->cf.nlev == 0 || input.rte.solver == SOLVER_TWOMAXRND || input.rte.solver == SOLVER_TWOMAXRND3C || input.rte.solver == SOLVER_DYNAMIC_TWOSTREAM || input.rte.solver == SOLVER_DYNAMIC_TENSTREAM) { fprintf (stderr, "%5d | %8.4f | %9.6e | %9.6f %9.6f %5.3f | %11.6f %11.6f %5.3f | %11.6f %11.6f %5.3f %5.3f %5.3f %6.3f %5.3f | " "%11.6e\n", lc, output->atm.zd[lc + 1] + output->alt.altitude, /* Rayleigh */ bsca_s[CAOTH_MOL], /* Aerosol, averaging of delta scaling factors for mixture of aerosol types not yet implemented !!!*/ bsca_s[CAOTH_AER], babs_s[CAOTH_AER], g1_s[CAOTH_AER] * ff_s[CAOTH_AER] + (1.0 - ff_s[CAOTH_AER]) * g2_s[CAOTH_AER], /* Water cloud */ bsca_s_unsc[i_wc], babs_s_unsc[i_wc], gg_s_unsc[i_wc] * ff_s[i_wc] + (1.0 - ff_s[i_wc]) * g2_s[i_wc], /* ice cloud */ bsca_s_unsc[i_ic], babs_s_unsc[i_ic], gg_s_unsc[i_ic] * ff_s[i_ic] + (1.0 - ff_s[i_ic]) * g2_s[i_ic], ff_s[i_ic], gg_s_unsc[i_ic], g2_s[i_ic], f_s[i_ic], /* Molecules */ babs_s[CAOTH_MOL]); } else { fprintf (stderr, "%5d | %8.4f | %9.6e | %9.6f %9.6f %5.3f | %11.6f %11.6f %5.3f %5.3f %5.3f %6.3f %5.3f | %11.6e | %.3f\n", lc, output->atm.zd[lc + 1] + output->alt.altitude, /* Rayleigh */ bsca_s[CAOTH_MOL], /* Aerosol, averaging of delta scaling factors for mixture of aerosol types not yet implemented !!!*/ bsca_s[CAOTH_AER], babs_s[CAOTH_AER], g1_s[CAOTH_AER] * ff_s[CAOTH_AER] + (1.0 - ff_s[CAOTH_AER]) * g2_s[CAOTH_AER], /* effective cloud, both */ bsca_s_unsc[i_wc], babs_s_unsc[i_wc], gg_s_unsc[i_wc] * ff_s[i_wc] + (1.0 - ff_s[i_wc]) * g2_s[i_wc], ff_s[i_wc], gg_s_unsc[i_ic], g2_s[i_wc], f_s[i_ic], /* question to CE: does this make sense??? mixing wc and ic properties! */ /* Molecules, Cloudfraction */ babs_s[CAOTH_MOL], output->cf.cf[lc]); } } /* open polradtran input file and write extinction and scattering coefficients; */ /* Legendre moments of the scattering phase function is added later - */ /* therefore don't close yet! 
*/ if (input.rte.solver == SOLVER_POLRADTRAN) { if ((fpol = fopen (&output->atm.pol_scat_files[lc * 64], "w")) == NULL) return 1; fprintf (fpol, "%e\n", bext_tot); fprintf (fpol, "%e\n", bsca_tot); fprintf (fpol, "%e\n", bsca_tot / bext_tot); fprintf (fpol, "%d\n", input.rte.nstr); } for (isp = 0; isp < n_caoth; isp++) { mom_s_g1[isp] = bsca_s[isp]; mom_s_g2[isp] = bsca_s[isp]; } output->pmom[lc][0][0] = 1.0; /* zero'th moment of of the scattering matrix */ if (input.rte.solver == SOLVER_POLRADTRAN) { /* Initialization */ p_mom[0] = 1.0; for (ip = 1; ip < 6; ip++) p_mom[ip] = 0.0; if (nphamat == 6) { if (output->aer.optprop.nmom[iv][lc] > 0) { /* include polarization by aerosols */ for (ip = 1; ip < 6; ip++) p_mom[ip] = mom_s_g1[CAOTH_AER] / bsca_tot * output->aer.optprop.moment[iv][lc][ip_act[CAOTH_AER][ip]][0]; } for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; if (output->caoth[ispo].optprop.nmom[iv][lc] > 0) { /* include polarization by caoth */ for (ip = 1; ip < 6; ip++) p_mom[ip] += mom_s_g1[isp] / bsca_tot * output->caoth[ispo].optprop.moment[iv][lc][ip_act[isp][ip]][0]; } } /* Rayleigh depolarization */ if (output->atm.rayleigh) { /* if statement should not be necessary */ p_mom[1] += mom_s_g1[CAOTH_MOL] / bsca_tot * -0.5 * Delta; p_mom[4] += mom_s_g1[CAOTH_MOL] / bsca_tot * Delta; } } /* end if nphamat == 6 */ fprintf (fpol, "%d", 0); for (ip = 0; ip < 6; ip++) fprintf (fpol, " %f", p_mom[ip]); fprintf (fpol, "\n"); } /* check that raman interpolation can work */ if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) { if (output->aer.optprop.nmom[iv1r][lc] != output->aer.optprop.nmom[iv2r][lc]) { status = -1; fprintf (stderr, "Number of aerosol moments must be equal for all wavelengths\n"); fprintf (stderr, "when Raman scattering is included.\n"); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; if 
(output->caoth[ispo].optprop.nmom[iv1][lc] != output->caoth[ispo].optprop.nmom[iv2][lc]) { status = -1; fprintf (stderr, "Number of %s moments must be equal for all wavelengths\n", output->caoth[ispo].fullname); fprintf (stderr, "when Raman scattering is included.\n"); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } } } /* zeroth moment needed for normalization */ if (output->aer.optprop.nmom[iv][lc] > 0) { if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) mom0_s[CAOTH_AER] = linpol (wvl1, wvl2, output->aer.optprop.moment[iv1][lc][0][0], output->aer.optprop.moment[iv2][lc][0][0], wvl); else mom0_s[CAOTH_AER] = output->aer.optprop.moment[iv][lc][0][0]; } for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; if (output->caoth[ispo].optprop.nmom != NULL) { /* rather ask if !montecarlo, or if mom0_s needed, SBCA */ if (output->caoth[ispo].optprop.nmom[iv][lc] > 0) { if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) mom0_s[isp] = linpol (wvl1, wvl2, output->caoth[ispo].optprop.moment[iv1][lc][0][0], output->caoth[ispo].optprop.moment[iv2][lc][0][0], wvl); else mom0_s[isp] = output->caoth[ispo].optprop.moment[iv][lc][0][0]; } } } for (k = 1; k <= output->atm.nmom; k++) { /* aerosol */ if (output->aer.optprop.nmom[iv][lc] > 0) { if (k < output->aer.optprop.nmom[iv][lc]) { if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) momk = linpol (wvl1, wvl2, output->aer.optprop.moment[iv1r][lc][0][k], output->aer.optprop.moment[iv2r][lc][0][k], wvl); else momk = output->aer.optprop.moment[iv][lc][0][k]; mom_s[CAOTH_AER][0] = bsca_s[CAOTH_AER] * momk / mom0_s[CAOTH_AER]; if (input.rte.solver == SOLVER_POLRADTRAN) { for (ip = 1; ip < nphamat; ip++) mom_s[CAOTH_AER][ip] = bsca_s[CAOTH_AER] * output->aer.optprop.moment[iv][lc][ip_act[CAOTH_AER][ip]][k]; } } else { mom_s[CAOTH_AER][0] = 0; if (input.rte.solver == SOLVER_POLRADTRAN) for (ip = 0; ip < 
nphamat; ip++) mom_s[CAOTH_AER][ip] = 0; } } else { /* double Henyey-Greenstein */ mom_s_g1[CAOTH_AER] *= g1_s[CAOTH_AER]; mom_s_g2[CAOTH_AER] *= g2_s[CAOTH_AER]; mom_s[CAOTH_AER][0] = (mom_s_g1[CAOTH_AER] * ff_s[CAOTH_AER] + (1.0 - ff_s[CAOTH_AER]) * mom_s_g2[CAOTH_AER]); } /* caoth */ for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; if (output->caoth[ispo].optprop.nmom != NULL) { /* dito, SBCA */ if (output->caoth[ispo].optprop.nmom[iv][lc] > 0) { if (k < output->caoth[ispo].optprop.nmom[iv][lc]) { if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) momk = linpol (wvl1, wvl2, output->caoth[ispo].optprop.moment[iv1r][lc][0][k], output->caoth[ispo].optprop.moment[iv2r][lc][0][k], wvl); else momk = output->caoth[ispo].optprop.moment[iv][lc][0][k]; mom_s[isp][0] = bsca_s[isp] * momk / mom0_s[isp]; if (input.rte.solver == SOLVER_POLRADTRAN) { for (ip = 1; ip < output->caoth[ispo].optprop.nphamat; ip++) mom_s[isp][ip] = bsca_s[isp] * output->caoth[ispo].optprop.moment[iv][lc][ip_act[isp][ip]][k]; } } else { mom_s[isp][0] = 0; if (input.rte.solver == SOLVER_POLRADTRAN) for (ip = 1; ip < nphamat; ip++) mom_s[isp][ip] = 0; } } else { /* double Henyey-Greenstein */ mom_s_g1[isp] *= g1_s[isp]; mom_s_g2[isp] *= g2_s[isp]; mom_s[isp][0] = (mom_s_g1[isp] * ff_s[isp] + (1.0 - ff_s[isp]) * mom_s_g2[isp]); } } } if (k == 2) /* Rayleigh scattering, including depolarization */ mom_s[CAOTH_MOL][0] = bsca_s[CAOTH_MOL] * rayleigh_mom2; else mom_s[CAOTH_MOL][0] = 0.0; /* sum all moments, and normalize with bsca_tot */ output->pmom[lc][0][k] = 0.0; if (k == 1) { output->pmom01_clr[lc] = 0.0; output->pmom01_cldk[lc] = 0.0; output->pmom01_cldn[lc] = 0.0; } for (isp = 0; isp < n_caoth; isp++) { output->pmom[lc][0][k] += mom_s[isp][0]; if (input.rte.solver == SOLVER_TWOMAXRND || input.rte.solver == SOLVER_DYNAMIC_TWOSTREAM || input.rte.solver == SOLVER_DYNAMIC_TENSTREAM) if (isp != i_wc && isp != i_ic && k == 1) output->pmom01_clr[lc] += 
mom_s[isp][0]; if (input.rte.solver == SOLVER_TWOMAXRND3C) { // clear := everything except thin and thick water and ice clouds if (k == 1) if (isp != i_wcn && isp != i_wck && isp != i_icn && isp != i_ick) output->pmom01_clr[lc] += mom_s[isp][0]; // wck := everything except wcn and icn if (k == 1) if (isp != i_wcn && isp != i_icn) output->pmom01_cldk[lc] += mom_s[isp][0]; // wcn := everything except wck and ick if (k == 1) if (isp != i_wck && isp != i_ick) output->pmom01_cldn[lc] += mom_s[isp][0]; } } output->pmom[lc][0][k] /= bsca_tot; if (input.rte.solver == SOLVER_TWOMAXRND || input.rte.solver == SOLVER_DYNAMIC_TWOSTREAM || input.rte.solver == SOLVER_DYNAMIC_TENSTREAM) if (k == 1) { if (bsca_tot - bsca_s[i_wc] - bsca_s[i_ic] > 0) output->pmom01_clr[lc] /= (bsca_tot - bsca_s[i_wc] - bsca_s[i_ic]); else output->pmom01_clr[lc] = 0.0; } if (input.rte.solver == SOLVER_TWOMAXRND3C) if (k == 1) { double bsca_clr = bsca_tot - bsca_s[i_wck] - bsca_s[i_wcn]; double bsca_cldk = bsca_tot - bsca_s[i_wcn]; double bsca_cldn = bsca_tot - bsca_s[i_wck]; if (i_icn > 0 && i_ick > 0) { bsca_clr -= (bsca_s[i_ick] - bsca_s[i_icn]); bsca_cldn -= bsca_s[i_ick]; bsca_cldk -= bsca_s[i_icn]; } if (bsca_clr > 0) output->pmom01_clr[lc] /= bsca_clr; else output->pmom01_clr[lc] = 0.0; if (bsca_cldk > 0) output->pmom01_cldk[lc] /= bsca_cldk; else output->pmom01_cldk[lc] = 0.0; if (bsca_cldn > 0) output->pmom01_cldn[lc] /= bsca_cldn; else output->pmom01_cldn[lc] = 0.0; } if (input.rte.solver == SOLVER_POLRADTRAN) { for (ip = 0; ip < 6; ip++) p_mom[ip] = 0.0; /* already contains ip=0 element for rayleigh scattering (k==2) */ for (ip = 0; ip < nphamat; ip++) { for (isp = 0; isp < n_caoth; isp++) p_mom[ip] += mom_s[isp][ip]; p_mom[ip] *= (2 * k + 1) / bsca_tot; } /* add rayleigh depol for ip>0 */ if (output->atm.rayleigh && nphamat == 6) { /* if statement (atm.rayleigh) should not be necessary */ if (k == 1) { p_mom[2] += bsca_s[CAOTH_MOL] / bsca_tot * 1.5 * Delta; p_mom[5] += bsca_s[CAOTH_MOL] / 
bsca_tot * 1.5 * Delta * Deltap; } if (k == 2) { p_mom[1] += bsca_s[CAOTH_MOL] / bsca_tot * 0.5 * Delta; p_mom[4] += bsca_s[CAOTH_MOL] / bsca_tot * 0.5 * Delta; } } fprintf (fpol, "%d", k); for (ip = 0; ip < 6; ip++) fprintf (fpol, " %f", p_mom[ip]); fprintf (fpol, "\n"); } } /* end loop k */ if (input.rte.solver == SOLVER_POLRADTRAN) fclose (fpol); /* phase functions */ /* this should NOT be performed in case of MYSTIC solver! Basically, this should ONLY be done in case of SOLVER_FDISORT2/SOLVER_DISORT with new ICM, or with SOLVER_SSLIDAR */ if (((input.rte.solver == SOLVER_FDISORT2 || input.rte.solver == SOLVER_DISORT) && input.rte.disort_icm == DISORT_ICM_PHASE) || input.rte.solver == SOLVER_SSLIDAR) { /* the following has strong resemblance with the function sort_and_add_weighted_phase in phasetable.c */ n_in = 99; /* maximal possible number of n_ins to be defined */ tmp_ntheta_in = calloc (n_in, sizeof (int)); tmp_theta_in = calloc (n_in, sizeof (float*)); tmp_mu_in = calloc (n_in, sizeof (double*)); tmp_phase_in = calloc (n_in, sizeof (float*)); phase_calloced = calloc (n_in, sizeof (int)); interp_optprop_weight = calloc (n_in, sizeof (double)); output->ntheta[lc] = calloc (nphamat, sizeof (int)); output->theta[lc] = calloc (nphamat, sizeof (float*)); output->mu[lc] = calloc (nphamat, sizeof (double*)); output->phase[lc] = calloc (nphamat, sizeof (float*)); /* for sslidar, we want this not only for P_11, but also for P_12 and P_22 */ for (ip = 0; ip < nphamat; ip++) { /* find out number of phase functions already existent */ n_in = 0; n_tot = 0; if (bsca_s[CAOTH_AER] > 0.0) { ips = ip_act[CAOTH_AER][ip]; if (output->aer.optprop.ntheta[iv][lc][ips] > 0) { if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) { /* for raman, FIXME, THIS MAY NEEDED TO BE IMPROVED AKY20111605 */ tmp_ntheta_in[n_in] = output->aer.optprop.ntheta[iv1r][lc][ips]; tmp_theta_in[n_in] = output->aer.optprop.theta[iv1r][lc][ips]; tmp_mu_in[n_in] = 
output->aer.optprop.mu[iv1r][lc][ips]; tmp_phase_in[n_in] = output->aer.optprop.phase[iv1r][lc][ips]; interp_optprop_weight[n_in] = bsca_s[CAOTH_AER] / bsca_tot * (wvl2 - wvl) / (wvl2 - wvl1); n_tot += tmp_ntheta_in[n_in]; n_in++; } else { tmp_ntheta_in[n_in] = output->aer.optprop.ntheta[iv][lc][ips]; tmp_theta_in[n_in] = output->aer.optprop.theta[iv][lc][ips]; tmp_mu_in[n_in] = output->aer.optprop.mu[iv][lc][ips]; tmp_phase_in[n_in] = output->aer.optprop.phase[iv][lc][ips]; interp_optprop_weight[n_in] = bsca_s[CAOTH_AER] / bsca_tot; n_tot += tmp_ntheta_in[n_in]; n_in++; } } else { if (output->aer.optprop.nmom[iv][lc] > 0) { /* only moments defined */ fprintf (stderr, "Error, you need to specify 'disort_intcor moments' in order to use these aerosol phase functions !\n"); return -1; } else { if (ip > 0) { fprintf (stderr, "Error, you are trying to combine aerosol Henyey-Greenstein with polarisation! This does not work!\n"); return -1; } /* HG */ status = create_phase_from_HG (g1_s[CAOTH_AER], &(tmp_ntheta_in[n_in]), &(tmp_theta_in[n_in]), &(tmp_mu_in[n_in]), &(tmp_phase_in[n_in]), input.rte.solver == SOLVER_SSLIDAR); if (status) return fct_err_out (status, "create_phase_from_HG", ERROR_POSITION); phase_calloced[n_in] = 1; interp_optprop_weight[n_in] = bsca_s[CAOTH_AER] / bsca_tot * ff_s[CAOTH_AER]; n_tot += tmp_ntheta_in[n_in]; n_in++; /* double HG */ if (ff_s[CAOTH_AER] < 1.) { status = create_phase_from_HG (g2_s[CAOTH_AER], &(tmp_ntheta_in[n_in]), &(tmp_theta_in[n_in]), &(tmp_mu_in[n_in]), &(tmp_phase_in[n_in]), input.rte.solver == SOLVER_SSLIDAR); if (status) return fct_err_out (status, "create_phase_from_HG", ERROR_POSITION); phase_calloced[n_in] = 1; interp_optprop_weight[n_in] = bsca_s[CAOTH_AER] / bsca_tot * (1. 
- ff_s[CAOTH_AER]); n_tot += tmp_ntheta_in[n_in]; n_in++; } } } } /* if bsca_s[CAOTH_AER] > 0 */ for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; if (bsca_s[isp] > 0.0) { ips = ip_act[isp][ip]; if (output->caoth[ispo].optprop.ntheta[iv][lc][ips] > 0) { if ((input.raman && !input.raman_fast) || (input.raman_fast && ir == 1)) { /* for raman, FIXME, THIS MAY NEEDED TO BE IMPROVED AKY20111605 */ tmp_ntheta_in[n_in] = output->caoth[ispo].optprop.ntheta[iv1r][lc][ips]; tmp_theta_in[n_in] = output->caoth[ispo].optprop.theta[iv1r][lc][ips]; tmp_mu_in[n_in] = output->caoth[ispo].optprop.mu[iv1r][lc][ips]; tmp_phase_in[n_in] = output->caoth[ispo].optprop.phase[iv1r][lc][ips]; interp_optprop_weight[n_in] = bsca_s[isp] / bsca_tot * (wvl2 - wvl) / (wvl2 - wvl1); n_tot += tmp_ntheta_in[n_in]; n_in++; } else { tmp_ntheta_in[n_in] = output->caoth[ispo].optprop.ntheta[iv][lc][ips]; tmp_theta_in[n_in] = output->caoth[ispo].optprop.theta[iv][lc][ips]; tmp_mu_in[n_in] = output->caoth[ispo].optprop.mu[iv][lc][ips]; tmp_phase_in[n_in] = output->caoth[ispo].optprop.phase[iv][lc][ips]; interp_optprop_weight[n_in] = bsca_s[isp] / bsca_tot; n_tot += tmp_ntheta_in[n_in]; n_in++; } } else { if (output->caoth[ispo].optprop.nmom[iv][lc] > 0) { /* only moments defined */ fprintf (stderr, "Error, you need to specify 'disort_intcor moments' in order to use these %s phase functions !\n", output->caoth[ispo].fullname); return -1; } else { if (ip > 0) { fprintf (stderr, "Error, you are trying to combine %s Henyey-Greenstein with polarisation! 
This does not work!\n", output->caoth[ispo].fullname); return -1; } /* HG */ status = create_phase_from_HG (g1_s[isp], &(tmp_ntheta_in[n_in]), &(tmp_theta_in[n_in]), &(tmp_mu_in[n_in]), &(tmp_phase_in[n_in]), input.rte.solver == SOLVER_SSLIDAR); if (status) return fct_err_out (status, "create_phase_from_HG", ERROR_POSITION); phase_calloced[n_in] = 1; interp_optprop_weight[n_in] = bsca_s[isp] / bsca_tot * ff_s[isp]; n_tot += tmp_ntheta_in[n_in]; n_in++; /* double HG */ if (ff_s[isp] < 1.) { status = create_phase_from_HG (g2_s[isp], &(tmp_ntheta_in[n_in]), &(tmp_theta_in[n_in]), &(tmp_mu_in[n_in]), &(tmp_phase_in[n_in]), input.rte.solver == SOLVER_SSLIDAR); if (status) return fct_err_out (status, "create_phase_from_HG", ERROR_POSITION); phase_calloced[n_in] = 1; interp_optprop_weight[n_in] = bsca_s[isp] / bsca_tot * (1. - ff_s[isp]); n_tot += tmp_ntheta_in[n_in]; n_in++; } } } } /* end if (bsca_s[isp] > 0.0) */ } /* end for isp */ /* rayleigh */ if (bsca_s[CAOTH_MOL] > 0.0) { status = create_phase_from_Rayleigh (rayleigh_depol, &(tmp_ntheta_in[n_in]), &(tmp_theta_in[n_in]), &(tmp_mu_in[n_in]), &(tmp_phase_in[n_in]), input.rte.solver == SOLVER_SSLIDAR, ip); phase_calloced[n_in] = 1; interp_optprop_weight[n_in] = bsca_s[CAOTH_MOL] / bsca_tot; n_tot += tmp_ntheta_in[n_in]; n_in++; } if (input.rte.solver == SOLVER_SSLIDAR) { /* allocate theta dimension */ output->theta[lc][ip] = calloc (1, sizeof (float)); output->mu[lc][ip] = calloc (1, sizeof (double)); output->phase[lc][ip] = calloc (1, sizeof (float)); /* set trivial values for "phase function" */ output->ntheta[lc][ip] = 1; output->theta[lc][ip][0] = 0.0; output->mu[lc][ip][0] = -1.0; /* interpolate backscatter direction */ output->phase[lc][ip][0] = 0.0; for (i = 0; i < n_in; i++) output->phase[lc][ip][0] += interp_optprop_weight[i] * tmp_phase_in[i][0]; } else { tmp_theta_new = calloc (n_tot, sizeof (float)); tmp_mu_new = calloc (n_tot, sizeof (double)); output->ntheta[lc][ip] = sort_theta_and_mu (n_in, 
tmp_ntheta_in, tmp_theta_in, tmp_theta_new, tmp_mu_in, tmp_mu_new); if (output->ntheta[lc][ip] == -1) return fct_err_out (-1, "sort_theta_and_mu", ERROR_POSITION); /* allocate theta dimension */ output->theta[lc][ip] = calloc (output->ntheta[lc][ip], sizeof (float)); output->mu[lc][ip] = calloc (output->ntheta[lc][ip], sizeof (double)); output->phase[lc][ip] = calloc (output->ntheta[lc][ip], sizeof (float)); /* copy new theta grid into target */ for (i = 0; i < output->ntheta[lc][ip]; i++) { output->theta[lc][ip][i] = tmp_theta_new[i]; output->mu[lc][ip][i] = tmp_mu_new[i]; } /* interpolate phase */ status = interpolate_phase_weighted (n_in, tmp_ntheta_in, tmp_mu_in, tmp_phase_in, interp_optprop_weight, output->ntheta[lc][ip], output->mu[lc][ip], output->phase[lc][ip], 0); if (status) return fct_err_out (status, "interpolate_phase_weighted", ERROR_POSITION); free (tmp_theta_new); free (tmp_mu_new); for (i = 0; i < n_in; i++) { if (phase_calloced[i]) { free (tmp_theta_in[i]); free (tmp_mu_in[i]); free (tmp_phase_in[i]); } } } /* end else (SOLVER_SSLIDAR) */ } /* end for ip */ free (tmp_ntheta_in); free (tmp_theta_in); free (tmp_mu_in); free (tmp_phase_in); free (interp_optprop_weight); free (phase_calloced); if (input.rte.solver != SOLVER_SSLIDAR) normalize_phase (output->mu[lc], output->phase[lc], NULL, output->ntheta[lc], 1, 0); } /* end (disort && disort_icm phase) || sslidar */ /* total column for verbose output */ /* CE: print un-deltascaled optical thickness. For Aerosol the averaging of the delta scaling factor is not yet implemented !!! 
*/ for (isp = 0; isp < n_caoth; isp++) { babs_s_int[isp] += babs_s[isp]; bsca_s_int[isp] += bsca_s[isp]; } for (isp = CAOTH_FIR; isp < n_caoth; isp++) { babs_s_unsc_int[isp] += babs_s_unsc[isp]; bsca_s_unsc_int[isp] += bsca_s_unsc[isp]; } /* Optical properties for the mc model */ if (input.rte.solver == SOLVER_MONTECARLO) { for (isp = 0; isp < n_caoth; isp++) output->mc.dt[isp][nlyr - 1 - lc] = bsca_s[isp] + babs_s[isp]; if (bsca_s[CAOTH_MOL] + babs_s[CAOTH_MOL] > 0.0) output->mc.om[CAOTH_MOL][nlyr - 1 - lc] = bsca_s[CAOTH_MOL] / (bsca_s[CAOTH_MOL] + babs_s[CAOTH_MOL]); else output->mc.om[CAOTH_MOL][nlyr - 1 - lc] = 0.0; /* in case absorption is turned off, molecular absorption is turned off somewhere else. Look for input.molabs */ if (input.absorption) for (isp = CAOTH_AER; isp < n_caoth; isp++) output->mc.om[isp][nlyr - 1 - lc] = ssa_s[isp]; else for (isp = CAOTH_AER; isp < n_caoth; isp++) output->mc.om[isp][nlyr - 1 - lc] = 1.0; for (isp = CAOTH_AER; isp < n_caoth; isp++) { output->mc.g1[isp][nlyr - 1 - lc] = g1_s[isp]; output->mc.g2[isp][nlyr - 1 - lc] = g2_s[isp]; output->mc.ff[isp][nlyr - 1 - lc] = ff_s[isp]; } for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; /* effective droplet radius */ output->mc.ds[isp][nlyr - 1 - lc] = dscale_s[isp]; if (output->caoth[ispo].microphys.effr_layer != NULL) output->mc.re[isp][nlyr - 1 - lc] = output->caoth[ispo].microphys.effr_layer[lc]; } /* ??? consequently, one should apply input.atm.interpol_method_refind; for reasons */ /* ??? of lazyness we simply average the refractive index at the adjacent levels */ /* ??? 
to get the layer property for MYSTIC */ output->mc.refind[nlyr - 1 - lc] = 0.5 * (output->atm.microphys.refind[iv][lc] + output->atm.microphys.refind[iv][lc + 1]); if (input.rte.mc.spectral_is) { for (isp = 0; isp < n_caoth; isp++) { output->mc.alis.dt[iv][isp][nlyr - 1 - lc] = output->mc.dt[isp][nlyr - 1 - lc]; output->mc.alis.om[iv][isp][nlyr - 1 - lc] = output->mc.om[isp][nlyr - 1 - lc]; } } } /* Set optical depth and single scattering albedo */ output->dtauc[lc] = bext_tot; output->dtauc_md[lc] = bext_tot_md; output->ssalb[lc] = bsca_tot / bext_tot; if (input.rte.solver == SOLVER_TWOMAXRND || input.rte.solver == SOLVER_DYNAMIC_TWOSTREAM || input.rte.solver == SOLVER_DYNAMIC_TENSTREAM) { output->dtauc_clr[lc] = bext_tot - bsca_s[i_wc] - babs_s[i_wc] - bsca_s[i_ic] - babs_s[i_ic]; if (output->dtauc_clr[lc] > 0) output->ssalb_clr[lc] = (bsca_tot - bsca_s[i_wc] - bsca_s[i_ic]) / output->dtauc_clr[lc]; else output->ssalb_clr[lc] = 1.0; } if (input.rte.solver == SOLVER_TWOMAXRND3C) { double bsca_clr = 0; double bsca_cldn = 0; double bsca_cldk = 0; // clear := everything except thin and thick water and ice clouds output->dtauc_clr[lc] = bext_tot - bsca_s[i_wcn] - babs_s[i_wcn] - bsca_s[i_wck] - babs_s[i_wck]; if (i_icn > 0 && i_ick > 0) output->dtauc_clr[lc] -= (bsca_s[i_icn] + babs_s[i_icn] + bsca_s[i_ick] + babs_s[i_ick]); bsca_clr = bsca_tot - bsca_s[i_wcn] - bsca_s[i_wck]; if (i_icn > 0 && i_ick > 0) bsca_clr -= (bsca_s[i_icn] + bsca_s[i_ick]); if (output->dtauc_clr[lc] > 0) output->ssalb_clr[lc] = bsca_clr / output->dtauc_clr[lc]; else output->ssalb_clr[lc] = 1.0; // wck := everything except thin water and ice clouds output->dtauc_cldk[lc] = bext_tot - bsca_s[i_wcn] - babs_s[i_wcn]; if (i_icn > 0 && i_ick > 0) output->dtauc_cldk[lc] -= (bsca_s[i_icn] - babs_s[i_icn]); bsca_cldk = bsca_tot - bsca_s[i_wcn]; if (i_icn > 0 && i_ick > 0) bsca_cldk -= bsca_s[i_icn]; if (output->dtauc_cldk[lc] > 0) output->ssalb_cldk[lc] = bsca_cldk / output->dtauc_cldk[lc]; else 
output->ssalb_cldk[lc] = 1.0; // wcn := everything except thick water and ice clouds output->dtauc_cldn[lc] = bext_tot - bsca_s[i_wck] - babs_s[i_wck]; if (i_icn > 0 && i_ick > 0) output->dtauc_cldn[lc] -= (bsca_s[i_ick] + babs_s[i_ick]); bsca_cldn = bsca_tot - bsca_s[i_wck]; if (i_icn > 0 && i_ick > 0) bsca_cldn -= bsca_s[i_ick]; if (output->dtauc_cldn[lc] > 0) output->ssalb_cldn[lc] = bsca_cldn / output->dtauc_cldn[lc]; else output->ssalb_cldn[lc] = 1.0; } /* ulrike: add dtau_mol, dtau_aer and tau_wc and tau_ic (the latter two are obtained from tipa (dir), see solve_rte.c (tipa_calcdtau); With tausol[lc] the direct radiation is then calculated and used for the calculation of the diffuse radiation */ /* note: tausol is the SUM over all dtau's in layers above the level (lc) considered!!!*/ if (input.tipa == TIPA_DIR) { if (!input.quiet && lc == 0) fprintf (stderr, " ... (tipa dir) calculate total tau(sol) for every level\n"); for (isp = CAOTH_FIR; isp < n_caoth; isp++) tau_babs_sum += babs_s[isp]; output->tausol[lc] = tau_babs_sum; /* add dtau for caoth at the levels where caoth (due to tipa dir) contribute */ /* output->caoth.tipa.taudircld is the tau of caoth along the beam, i.e. a sum over all layers up to 'caoth top' */ for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; if ((nlyr - lc) <= output->caoth[ispo].tipa.nztilt && output->caoth[ispo].tipa.nztilt > 0) output->tausol[lc] += output->caoth[ispo].tipa.taudircld[iv][nlyr - lc - 1]; } } if (1.0 - output->ssalb[lc] < FLT_MIN) output->ssalb[lc] = 1.0 - FLT_MIN; } /* endfor (lc=0; lc<nlyr; lc++) */ /* end writing extinction file for ARLEM */ if (input.write_ext_to_file) fclose (extfile); if ((input.rte.solver == SOLVER_FDISORT2 || input.rte.solver == SOLVER_DISORT) && input.rte.disort_icm == DISORT_ICM_PHASE) { /* we need to generalize the mu grid for the disort_icm phase, the explicit phase function is needed. 
It must be defined on a mu grid which has to be identical for all atmospheric layers. Before this point, the mu grid is not identical, now, we define a common mu grid. */ tmp_ntheta_in = calloc (nlyr, sizeof (int)); tmp_theta_in = calloc (nlyr, sizeof (float*)); tmp_mu_in = calloc (nlyr, sizeof (double*)); /* point to mu grid of each layer which contains phase function */ n_tot = 0; n_in = 0; for (lc = 0; lc < nlyr; lc++) { if (output->ntheta[lc][0] > 0) { n_tot += output->ntheta[lc][0]; tmp_ntheta_in[n_in] = output->ntheta[lc][0]; tmp_theta_in[n_in] = output->theta[lc][0]; tmp_mu_in[n_in] = output->mu[lc][0]; n_in++; } } tmp_theta_new = calloc (n_tot, sizeof (float)); tmp_mu_new = calloc (n_tot, sizeof (double)); /* find common mu grid */ ntheta_new = sort_theta_and_mu (n_in, tmp_ntheta_in, tmp_theta_in, tmp_theta_new, tmp_mu_in, tmp_mu_new); if (ntheta_new == -1) return fct_err_out (-1, "sort_theta_and_mu", ERROR_POSITION); if (verbose) fprintf (stderr, "The phase function for SOLVER_FDISORT2/cdisort is using %d grid points ...\n", output->ntheta[0][0]); for (lc = 0; lc < nlyr; lc++) { /* interpolate phase */ tmp_phase_new = calloc (ntheta_new, sizeof (float)); status = interpolate_phase_weighted (1, &(output->ntheta[lc][0]), &(output->mu[lc][0]), &(output->phase[lc][0]), &one, ntheta_new, tmp_mu_new, tmp_phase_new, 0); if (status) return fct_err_out (-1, "interpolate_phase_weighted", ERROR_POSITION); free (output->phase[lc][0]); output->phase[lc][0] = tmp_phase_new; } /* actually, only lc=0 is used from now on, but to avoid programming errors, we redefine all ntheta's, theta's, and mu's */ for (lc = 0; lc < nlyr; lc++) { output->ntheta[lc][0] = ntheta_new; free (output->theta[lc][0]); free (output->mu[lc][0]); /* allocate theta dimension */ output->theta[lc][0] = calloc (output->ntheta[0][0], sizeof (float)); output->mu[lc][0] = calloc (output->ntheta[0][0], sizeof (double)); /* copy new theta grid into target */ for (i = 0; i < output->ntheta[0][0]; i++) { 
output->theta[lc][0][i] = tmp_theta_new[i]; output->mu[lc][0][i] = tmp_mu_new[i]; } } free (tmp_ntheta_in); free (tmp_theta_in); free (tmp_mu_in); free (tmp_theta_new); free (tmp_mu_new); /* phase function needs to be renormalized */ for (lc = 0; lc < nlyr; lc++) normalize_phase (output->mu[0], output->phase[lc], NULL, output->ntheta[0], 1, 0); } /* end if disort && disort_icm phase */ if (input.rte.solver == SOLVER_MONTECARLO) { for (isp = 0; isp < n_caoth; isp++) { output->mc.dt[isp][nlyr] = 0.0; output->mc.om[isp][nlyr] = 0.0; } /* loop includes nlyr because altitude and temperature */ /* are defined per level */ for (lc = 0; lc <= nlyr; lc++) { output->mc.z[nlyr - lc] = output->atm.zd[lc]; if (!input.atmosphere3d) output->mc.temper[0][0][nlyr - lc] = output->atm.microphys.temper[0][0][lc]; } /* loop stops at nlyr-1 because aerosol properties */ /* are defined per layer*/ output->mc.nphamataer = output->aer.optprop.nphamat; for (lc = 0; lc < nlyr; lc++) { output->mc.nmomaer[nlyr - lc - 1] = output->aer.optprop.nmom[iv][lc]; output->mc.momaer[nlyr - lc - 1] = calloc (output->aer.optprop.nphamat, sizeof (float*)); for (ip = 0; ip < output->aer.optprop.nphamat; ip++) { output->mc.momaer[nlyr - lc - 1][ip] = calloc (output->mc.nmomaer[nlyr - lc - 1], sizeof (float)); /* ??? should free this memory later ??? */ for (k = 0; k < output->mc.nmomaer[nlyr - lc - 1]; k++) { output->mc.momaer[nlyr - lc - 1][ip][k] = output->aer.optprop.moment[iv][lc][ip][k] / output->aer.optprop.moment[iv][lc][0][0]; } } } /* we need to copy phase functions too */ if (output->aer.optprop.ntheta[iv] != NULL) { for (lc = 0; lc < nlyr; lc++) { output->mc.nthetaaer[nlyr - lc - 1] = output->aer.optprop.ntheta[iv][lc]; output->mc.thetaaer[nlyr - lc - 1] = output->aer.optprop.theta[iv][lc]; //TODO: Wird nicht benutzt? 
output->mc.muaer[nlyr - lc - 1] = output->aer.optprop.mu[iv][lc]; output->mc.phaseaer[nlyr - lc - 1] = output->aer.optprop.phase[iv][lc]; } } } #if HAVE_SSSI if (input.rte.solver == SOLVER_SSSI) { /* Total caoth optical thickness and top caoth level */ /* for the SOLVER_SSSI approximation */ output->sssi.tautot = 0.0; for (isp = 0; isp < n_caoth; isp++) output->sssi.tautot += bsca_s_int[isp]; for (lc = 0; lc < nlyr; lc++) { for (isp = CAOTH_FIR; isp < n_caoth; isp++) { ispo = isp - CAOTH_FIR; if (output->caoth[ispo].optprop.dtau[iv][lc] > 0.0) break; } if (isp != n_caoth) break; } output->sssi.lctop = lc; /* the uppermost layer determines cloud phase */ output->sssi.type = (output->caoth[i_wc].optprop.dtau[iv][lc] > output->caoth[i_ic].optprop.dtau[iv][lc] ? ISCCP_WATER : ISCCP_ICE); } #endif /* output vertical (total) sum */ if (verbose) { if (output->cf.nlev == 0 || input.rte.solver == SOLVER_TWOMAXRND || input.rte.solver == SOLVER_TWOMAXRND3C || input.rte.solver == SOLVER_DYNAMIC_TWOSTREAM || input.rte.solver == SOLVER_DYNAMIC_TENSTREAM) { fprintf (stderr, " ----------------------------------------------------------------------------------------------------------------" "-------------------------------------------------\n"); fprintf (stderr, "%5s | %7.3f | %12.6e | %9.6f %9.6f %5.3f | %11.6f %11.6f %5.3f | %11.6f %11.6f %5.3f %5.3f %5.3f %6.3f %5.3f | " "%11.6f\n", "sum", 0.0 / 0.0, bsca_s_int[CAOTH_MOL], bsca_s_int[CAOTH_AER], babs_s_int[CAOTH_AER], 0.0 / 0.0, bsca_s_unsc_int[i_wc], babs_s_unsc_int[i_wc], 0.0 / 0.0, bsca_s_unsc_int[i_ic], babs_s_unsc_int[i_ic], 0.0 / 0.0, 0.0 / 0.0, 0.0 / 0.0, 0.0 / 0.0, 0.0 / 0.0, babs_s_int[CAOTH_MOL]); fprintf (stderr, " ----------------------------------------------------------------------------------------------------------------" "-------------------------------------------------\n"); } else { fprintf (stderr, " 
----------------------------------------------------------------------------------------------------------------" "----------------------\n"); fprintf (stderr, "%5s | %7.3f | %12.6e | %9.6f %9.6f %5.3f | %11.6f %11.6f %5.3f %5.3f %5.3f %6.3f %5.3f | %11.6f\n", "sum", 0.0 / 0.0, bsca_s_int[CAOTH_MOL], bsca_s_int[CAOTH_AER], babs_s_int[CAOTH_AER], 0.0 / 0.0, bsca_s_unsc_int[i_wc], babs_s_unsc_int[i_wc], 0.0 / 0.0, 0.0 / 0.0, 0.0 / 0.0, 0.0 / 0.0, 0.0 / 0.0, babs_s_int[CAOTH_MOL]); fprintf (stderr, " ----------------------------------------------------------------------------------------------------------------" "---------------------\n"); } } } /* if (!skip_optical_properties) { */ else { if (verbose) fprintf (stderr, " *** skip calculation of optical properties! iv = %4d, iq = %4d \n", iv, iq); } /* print only integrated optical properties to stderr */ /* fprintf (stderr, "%.0f %7.3f %12.6e %9.6f %9.6f %5.3f %11.6f %11.6f %5.3f %11.6f %11.6f %5.3f %5.3f %5.3f %6.3f %5.3f %11.6f\n", */ /* 0.0, 0.0/0.0, */ /* bsca_s_int[CAOTH_MOL], */ /* bsca_s_int[CAOTH_AER], babs_s_int[CAOTH_AER], 0.0/0.0, */ /* bsca_s_unsc_int[i_wc], babs_s_unsc_int[i_wc], 0.0/0.0, */ /* bsca_s_unsc_int[i_ic], babs_s_unsc_int[i_ic], 0.0/0.0, */ /* 0.0/0.0, 0.0/0.0, 0.0/0.0, 0.0/0.0, */ /* babs_s_int[CAOTH_MOL]); */ for (isp = 0; isp < n_caoth; isp++) free (mom_s[isp]); free (babs_s); free (bsca_s); free (ssa_s); free (babs_s_int); free (bsca_s_int); free (mom_s); free (mom_s_g1); free (mom_s_g2); free (dscale_s); free (f_s); free (ff_s); free (g1_s); free (g2_s); free (dtau_s); free (mom0_s); free (babs_s_unsc); free (bsca_s_unsc); free (ssa_s_unsc); free (babs_s_unsc_int); free (bsca_s_unsc_int); free (gg_s_unsc); free (ip_act); #if HAVE_NETCDF4 // write optical properties to file for test suite // if (input.test_optical_properties) { status = 0; int ncid, retval; /********** Create netcdf file **********/ if ((retval = nc_create ("test.optical_properties.nc", NC_CLOBBER, &ncid))) ERR (retval); if 
((retval = nc_enddef (ncid))) ERR (retval); if ((retval = write_netcdf_3Dfloat (ncid, output->pmom, nlyr, nphamat, output->atm.nmom + 1, "output_pmom", "nlyr", "nphamat", "nmom+1"))) ERR (retval); if (((input.rte.solver == SOLVER_FDISORT2 || input.rte.solver == SOLVER_DISORT) && input.rte.disort_icm == DISORT_ICM_PHASE) || input.rte.solver == SOLVER_SSLIDAR) { if ((retval = write_netcdf_2Dint (ncid, output->ntheta, nlyr, nphamat, "output_ntheta", "nlyr", "nphamat"))) ERR (retval); if ((retval = write_netcdf_3Dirrfloat (ncid, output->phase, nlyr, nphamat, output->ntheta, "output_phase", "nlyr", "nphamat", "output_ntheta"))) ERR (retval); if ((retval = write_netcdf_3Dirrfloat (ncid, output->theta, nlyr, nphamat, output->ntheta, "output_theta", "nlyr", "nphamat", "output_ntheta"))) ERR (retval); if ((retval = write_netcdf_3Dirrdouble (ncid, output->mu, nlyr, nphamat, output->ntheta, "output_mu", "nlyr", "nphamat", "output_ntheta"))) ERR (retval); if ((retval = write_netcdf_1Dfloat (ncid, output->dtauc, nlyr, "output_dtauc", "nlyr"))) ERR (retval); if ((retval = write_netcdf_1Dfloat (ncid, output->dtauc_md, nlyr, "output_dtauc_md", "nlyr"))) ERR (retval); if ((retval = write_netcdf_1Dfloat (ncid, output->ssalb, nlyr, "output_ssalb", "nlyr"))) ERR (retval); } if (input.tipa == TIPA_DIR) { if ((retval = write_netcdf_1Dfloat (ncid, output->tausol, nlyr, "output_tausol", "nlyr"))) ERR (retval); } #if HAVE_SSSI if (input.rte.solver == SOLVER_SSSI) { if ((retval = write_netcdf_float (ncid, output->sssi.tautot, "output_sssi.tautot"))) ERR (retval); if ((retval = write_netcdf_int (ncid, output->sssi.lctop, "output_sssi.lctop"))) ERR (retval); if ((retval = write_netcdf_int (ncid, output->sssi.type, "output_sssi.type"))) ERR (retval); } #endif if (input.rte.solver == SOLVER_MONTECARLO) { if (output->mc.alis.dt != NULL) { if ((retval = write_netcdf_2Ddouble (ncid, output->mc.alis.dt[iv], n_caoth, nlev, "output_mc.alis.dt[iv]", "ncaoth", "nlev"))) ERR (retval); } if 
(output->mc.alis.om != NULL) { if ((retval = write_netcdf_2Ddouble (ncid, output->mc.alis.om[iv], n_caoth, nlev, "output_mc.alis.om[iv]", "ncaoth", "nlev"))) ERR (retval); } if ((retval = write_netcdf_2Dfloat (ncid, output->mc.dt, n_caoth, nlev, "output_dt", "ncaoth", "nlev"))) ERR (retval); if ((retval = write_netcdf_2Dfloat (ncid, output->mc.om, n_caoth, nlev, "output_om", "ncaoth", "nlev"))) ERR (retval); if ((retval = write_netcdf_2Dfloat (ncid, output->mc.g1, n_caoth, nlev, "output_g1", "ncaoth", "nlev"))) ERR (retval); if ((retval = write_netcdf_2Dfloat (ncid, output->mc.g2, n_caoth, nlev, "output_g2", "ncaoth", "nlev"))) ERR (retval); if ((retval = write_netcdf_2Dfloat (ncid, output->mc.ff, n_caoth, nlev, "output_ff", "ncaoth", "nlev"))) ERR (retval); if ((retval = write_netcdf_2Dfloat (ncid, output->mc.ds, n_caoth, nlev, "output_ds", "ncaoth", "nlev"))) ERR (retval); if ((retval = write_netcdf_2Dfloat (ncid, output->mc.re, n_caoth, nlev, "output_re", "ncaoth", "nlev"))) ERR (retval); if ((retval = write_netcdf_1Dfloat (ncid, output->mc.refind, nlev, "output_mc.refind", "nlev"))) ERR (retval); if ((retval = write_netcdf_1Dfloat (ncid, output->mc.z, nlev, "output_mc.z", "nlev"))) ERR (retval); if ((retval = write_netcdf_1Dfloat (ncid, output->mc.temper[0][0], nlev, "output_mc.temper", "nlev"))) ERR (retval); if ((retval = write_netcdf_int (ncid, output->mc.nphamataer, "output_mc.nphamataer"))) ERR (retval); if ((retval = write_netcdf_1Dint (ncid, output->mc.nmomaer, nlev, "output_mc.nmomaer", "nlev"))) ERR (retval); if ((retval = write_netcdf_3Dirr_row_float (ncid, output->mc.momaer, nlev, output->aer.optprop.nphamat, output->mc.nmomaer, "output_mc.momaer", "nlev", "output_aer.optprop.nphamat", "output_mc.nmomaer"))) ERR (retval); if (output->mc.nthetaaer != NULL) { if ((retval = write_netcdf_2Dint (ncid, output->mc.nthetaaer, nlyr, output->aer.optprop.nphamat, "output_mc.nthetaaer", "nlyr", "output_aer.optprop.nphamat"))) ERR (retval); if ((retval = 
write_netcdf_3Dirrfloat (ncid, output->mc.thetaaer, nlyr, output->aer.optprop.nphamat, output->mc.nthetaaer, "output_mc.thetaaer", "nlyr", "output_aer.optprop.nphamat", "output_mc.nthetaaer"))) ERR (retval); if ((retval = write_netcdf_3Dirrdouble (ncid, output->mc.muaer, nlyr, output->aer.optprop.nphamat, output->mc.nthetaaer, "output_mc.muaer", "nlyr", "output_aer.optprop.nphamat", "output_mc.nthetaaer"))) ERR (retval); if ((retval = write_netcdf_3Dirrfloat (ncid, output->mc.phaseaer, nlyr, output->aer.optprop.nphamat, output->mc.nthetaaer, "output_mc.phaseaer", "nlyr", "output_aer.optprop.nphamat", "output_mc.nthetaaer"))) ERR (retval); } } CHKERR (status); // Dump 3D optical properties in caoths for (isp = 0; isp < input.n_caoth; isp++) { status = dump_caoth3d_var (ncid, isp, input.caoth[isp].name, output->caoth3d[isp], "_lwc", output->caoth3d[isp].lwc); CHKERR (status); status = dump_caoth3d_var (ncid, isp, input.caoth[isp].name, output->caoth3d[isp], "_reff", output->caoth3d[isp].reff); CHKERR (status); status = dump_caoth3d_var (ncid, isp, input.caoth[isp].name, output->caoth3d[isp], "_ext", output->caoth3d[isp].ext); CHKERR (status); status = dump_caoth3d_var (ncid, isp, input.caoth[isp].name, output->caoth3d[isp], "_ssa", output->caoth3d[isp].ssa); CHKERR (status); status = dump_caoth3d_var (ncid, isp, input.caoth[isp].name, output->caoth3d[isp], "_g1", output->caoth3d[isp].g1); CHKERR (status); status = dump_caoth3d_var (ncid, isp, input.caoth[isp].name, output->caoth3d[isp], "_g2", output->caoth3d[isp].g2); CHKERR (status); status = dump_caoth3d_var (ncid, isp, input.caoth[isp].name, output->caoth3d[isp], "_ff", output->caoth3d[isp].ff); CHKERR (status); status = dump_caoth3d_var (ncid, isp, input.caoth[isp].name, output->caoth3d[isp], "_f", output->caoth3d[isp].f); CHKERR (status); } // end Dump 3D optical properties in caoths /********** Close the file. This frees up any internal netCDF resources * associated with the file, and flushes any buffers. 
**********/ if ((retval = nc_close (ncid))) ERR (retval); } // finish writing optical properties to file for test suite // return 0; #else fprintf (stderr, " ***********************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use any netCDF option. Please get netcdf and rebuild. *\n"); fprintf (stderr, " ***********************************************************************\n"); return -1; #endif } /***************************************/ /* Post - processing of the output */ /* sum or integration over wavelength, */ /* calculation of heating rates or */ /* conversion to RGB */ /***************************************/ int processing1D (input_struct input, output_struct* output) { int status = 0, iv = 0; char function_name[] = "processing1D"; char file_name[] = "ancillary.c"; switch (input.processing) { case PROCESS_NONE: if (input.calibration == OUTCAL_BRIGHTNESS) { if (!input.quiet) fprintf (stderr, " ... 
converting radiances to brightness temperatures\n"); /* convert irradiances / radiances to brightness temperatures */ for (iv = 0; iv < output->wl.nlambda_h; iv++) status = output2bt (input, output, iv, 0); if (status != 0) { fprintf (stderr, "Error %d returned by output2bt()\n", status); return status; } } break; case PROCESS_SUM: status = sum1D (input, output); /* multiply with extraterrestrial and sum up (function in ancillary.c) */ if (status != 0) { fprintf (stderr, "Error %d summing 1D output over wavelength in %s (%s)\n", status, function_name, file_name); return status; } break; case PROCESS_INT: status = integrate1D (input, output); /* multiply with extraterrestrial and integrate (function in ancillary.c) */ if (status != 0) { fprintf (stderr, "Error %d integrating 1D output over wavelength in %s (%s)\n", status, function_name, file_name); return status; } break; case PROCESS_RGB: case PROCESS_RGBNORM: status = spec2rgb (input, output); /* convert spectrum to red,green,blue space (function in ancillary.c) */ if (status != 0) { fprintf (stderr, "Error %d converting spectral output to RGB in %s (%s)\n", status, function_name, file_name); return status; } break; case PROCESS_RAMAN: status = raman_spec2spec (input, output); /* convert Raman spectrum with extra wavelengths to user spectrum */ if (status != 0) { fprintf (stderr, "Error %d converting Ra manspectral output to user spectrum in %s (%s)\n", status, function_name, file_name); return status; } break; default: fprintf (stderr, "Error, unknown processing scheme %d in %s (%s)\n", input.processing, function_name, file_name); return -1; } return 0; } /***********************************************************************************/ /* Sum the 1D results over wavelength and write data to the first array element */ /***********************************************************************************/ int sum1D (input_struct input, output_struct* output) { int lev = 0, iv = 0, iu = 0, j = 0, is = 0; int 
status = 0;

/* calculate incident flux (output->incident) and sum extraterrestrial
   irradiance (wl.fbeam[0]): the extraterrestrial irradiance is weighted
   with the filter function and cos(SZA) for the incident flux, while the
   first wavelength slot of wl.fbeam is re-used to accumulate the
   filter-weighted sum of the extraterrestrial irradiance itself */
for (iv = 0; iv < output->wl.nlambda_h; iv++) {
  output->incident += output->wl.filter[iv] * output->wl.fbeam[iv] * cos (output->sza_h[iv] * PI / 180.0);
  if (iv != 0)
    output->wl.fbeam[0] += output->wl.filter[iv] * output->wl.fbeam[iv];
  else /* iv == 0: overwrite the slot with its own weighted value first */
    output->wl.fbeam[0] = output->wl.filter[0] * output->wl.fbeam[0];
}

/* callocate integrated values */
status = calloc_int_values (input, output);
if (status != 0) {
  fprintf (stderr, "error allocating integrated values, status %d\n", status);
  return status;
}

/* fluxes and radiances: accumulate each spectral quantity over wavelength
   into the corresponding *_int field (accumulation done in double, see the
   explicit casts below) */
for (lev = 0; lev < output->atm.nzout; lev++) {
  for (iv = 0; iv < output->wl.nlambda_h; iv++) {
    output->rfldir_int[lev] += (double)output->rfldir[lev][iv];
    output->rfldn_int[lev] += (double)output->rfldn[lev][iv];
    output->flup_int[lev] += (double)output->flup[lev][iv];
    output->uavg_int[lev] += (double)output->uavg[lev][iv];
    output->uavgso_int[lev] += (double)output->uavgso[lev][iv];
    output->uavgdn_int[lev] += (double)output->uavgdn[lev][iv];
    output->uavgup_int[lev] += (double)output->uavgup[lev][iv];
    if (input.heating != HEAT_NONE) {
      /* heating-rate related quantities only when heating was requested */
      output->heat_int[lev] += (double)output->heat[lev][iv];
      output->emis_int[lev] += (double)output->emis[lev][iv];
      output->w_zout_int[lev] += (double)output->w_zout[lev][iv];
    }
    for (iu = 0; iu < input.rte.numu; iu++) {
      output->u0u_int[lev][iu] += output->u0u[lev][iu][iv];
      for (j = 0; j < input.rte.nphi; j++)
        output->uu_int[lev][j][iu] += output->uu[lev][j][iu][iv];
    }
  }
}

/* polarized fluxes and radiances (POLRADTRAN solver only): sum every
   Stokes component over wavelength */
if (input.rte.solver == SOLVER_POLRADTRAN)
  for (iv = 0; iv < output->wl.nlambda_h; iv++) {
    for (lev = 0; lev < output->atm.nzout; lev++)
      for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
        output->down_flux_int[lev][is] += output->down_flux[lev][is][iv];
        output->up_flux_int[lev][is] += output->up_flux[lev][is][iv];
        for (iu = 0; iu < input.rte.numu; iu++)
          for (j = 0; j <
input.rte.nphi; j++) { output->down_rad_int[lev][j][iu][is] += output->down_rad[lev][j][iu][is][iv]; output->up_rad_int[lev][j][iu][is] += output->up_rad[lev][j][iu][is][iv]; } } } /* albedo, transmittance of total atmosphere */ for (iu = 0; iu < input.rte.numu; iu++) { for (iv = 0; iv < output->wl.nlambda_h; iv++) { output->albmed_int[iu] += (double)output->albmed[iu][iv]; output->trnmed_int[iu] += (double)output->trnmed[iu][iv]; } } /* store integrated values in the first entry of the wavelength index, if any result fields are defined */ if (output->wl.nlambda_h > 0) status += double2float_integrated_values (input, output); if (status != 0) { fprintf (stderr, "Error, conversion from double to float not possible"); fprintf (stderr, "sum1D(), ancillary.c, status %d\n", status); return status; } status += scaling_integrated_values (input, output); if (status != 0) { fprintf (stderr, "Error, scaling integrated values.\n"); fprintf (stderr, "scaling_integrated_values(), in ancillary.c, status %d\n", status); return status; } return status; } /**************************************************************************************/ /* Integrate the 1D results over wavelength and write data to the first array element */ /**************************************************************************************/ int integrate1D (input_struct input, output_struct* output) { int lev = 0, iv = 0, iu = 0, j = 0, is = 0; int status = 0, used_unit = 0; double *xint = NULL, *yint = NULL; double* cos_SZA = NULL; char function_name[] = "integrate1D"; char file_name[] = "ancillary.c"; /* callocate integrated values */ status = calloc_int_values (input, output); if (status != 0) { fprintf (stderr, "error allocating integrated values, status %d\n", status); return status; } /* calculate incident flux */ xint = (double*)calloc (output->wl.nlambda_h, sizeof (double)); if (xint == NULL) error_calloc ("xint", "integrate1D", &(status)); yint = (double*)calloc (output->wl.nlambda_h, sizeof 
(double));
if (yint == NULL)
  error_calloc ("yint", "integrate1D", &(status));
if ((cos_SZA = calloc (output->wl.nlambda_h, sizeof (double))) == NULL) {
  fprintf (stderr, "Error, allocating memory for cos_SZA in %s (%s)\n", function_name, file_name);
  return -1;
}

/* cos(SZA) weighting factor per wavelength: solar-type sources use the
   cosine of the solar zenith angle, thermal sources use 1.0 */
switch (input.source) {
case SRC_SOLAR:
case SRC_LIDAR: /* BCA */
case SRC_BLITZ: /* BCA */
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    cos_SZA[iv] = cos (output->sza_h[iv] * PI / 180.0);
  break;
case SRC_THERMAL:
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    cos_SZA[iv] = 1.0;
  break;
default:
  fprintf (stderr, "Error, unknown source %d in %s (%s)\n", input.source, function_name, file_name);
  return -1;
}

used_unit = input.output_unit;
if (used_unit == UNIT_NOT_DEFINED)
  /* if no output unit is specified, assume the same units as the input spectrum */
  used_unit = output->spectrum_unit;

/* set up the integration abscissa xint (wavelength or wavenumber) and the
   filter-weighted integrand yint for the incident flux */
switch (used_unit) {
case UNIT_PER_NM:
  if (input.verbose)
    fprintf (stderr, " *** integration in wavelength space \n");
  for (iv = 0; iv < output->wl.nlambda_h; iv++) {
    xint[iv] = (double)output->wl.lambda_h[iv];
    yint[iv] = output->wl.filter[iv] * output->wl.fbeam[iv] * cos_SZA[iv];
  }
  break;
case UNIT_PER_CM_1:
  /* integration over wavenumber k, minus in order to get ascending wavenumbers */
  if (input.verbose)
    fprintf (stderr, " *** integration in wavenumber space \n");
  for (iv = 0; iv < output->wl.nlambda_h; iv++) {
    xint[iv] = -(double)1.0e+7 / output->wl.lambda_h[iv]; /* k = 10**7 / lambda */ /* 10**7 == nm -> cm */
    yint[iv] = output->wl.filter[iv] * output->wl.fbeam[iv] * cos_SZA[iv];
  }
  break;
case UNIT_PER_BAND:
  fprintf (stderr, "Error, combination 'output_process integrate' and 'output_process per_band',\n");
  fprintf (stderr, " or combination 'output_process integrate' and an input spectrum defined in \n");
  fprintf (stderr, " (wavelength or wavenumber) bands does not make sense \n");
  fprintf (stderr, " please use 'output_process sum', when dealing with band parametrisations. \n\n");
  return -1;
  break;
case UNIT_NOT_DEFINED:
  fprintf (stderr, "Error, in order to use 'output_process integrate' it is nessesary to specify the \n");
  fprintf (stderr, " unit of the extraterrestrial spectrum with 'source solar filename unit' or\n");
  fprintf (stderr, " unit of the output with 'output_process per_nm' or 'output_process per_cm-1' \n\n");
  return -1;
  break;
default:
  fprintf (stderr, "Error: Program bug, unsupported unit of extraterrestial flux %d or output unit %d\n", output->spectrum_unit, input.output_unit);
  return -1;
}

output->incident = integrate (xint, yint, output->wl.nlambda_h);

/* sum extraterrestrial irradiance */
for (iv = 0; iv < output->wl.nlambda_h; iv++)
  yint[iv] = output->wl.filter[iv] * output->wl.fbeam[iv];

/* the unit factor should always be 1 here == output always in W/(m2 nm),
   therefore it is not included here in the code */
output->wl.fbeam[0] = (float)integrate (xint, yint, output->wl.nlambda_h);

/* fluxes and radiances: for each quantity, copy the spectrum into yint and
   integrate it over the abscissa xint set up above */
for (iu = 0; iu < input.rte.numu; iu++) {
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    yint[iv] = output->albmed[iu][iv];
  output->albmed_int[iu] = integrate (xint, yint, output->wl.nlambda_h);
}
for (iu = 0; iu < input.rte.numu; iu++) {
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    yint[iv] = output->trnmed[iu][iv];
  output->trnmed_int[iu] = integrate (xint, yint, output->wl.nlambda_h);
}
for (lev = 0; lev < output->atm.nzout; lev++) {
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    yint[iv] = output->rfldir[lev][iv];
  output->rfldir_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    yint[iv] = output->rfldn[lev][iv];
  output->rfldn_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    yint[iv] = output->flup[lev][iv];
  output->flup_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    yint[iv] = output->uavg[lev][iv];
  output->uavg_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    yint[iv] = output->uavgso[lev][iv];
  output->uavgso_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    yint[iv] = output->uavgdn[lev][iv];
  output->uavgdn_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
  for (iv = 0; iv < output->wl.nlambda_h; iv++)
    yint[iv] = output->uavgup[lev][iv];
  output->uavgup_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
  if (input.heating != HEAT_NONE) {
    /* heating-rate related quantities only when heating was requested */
    for (iv = 0; iv < output->wl.nlambda_h; iv++)
      yint[iv] = output->heat[lev][iv];
    output->heat_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
    for (iv = 0; iv < output->wl.nlambda_h; iv++)
      yint[iv] = output->emis[lev][iv];
    output->emis_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
    for (iv = 0; iv < output->wl.nlambda_h; iv++)
      yint[iv] = output->w_zout[lev][iv];
    output->w_zout_int[lev] = integrate (xint, yint, output->wl.nlambda_h);
  }
  for (iu = 0; iu < input.rte.numu; iu++) {
    for (iv = 0; iv < output->wl.nlambda_h; iv++)
      yint[iv] = (double)output->u0u[lev][iu][iv];
    output->u0u_int[lev][iu] = integrate (xint, yint, output->wl.nlambda_h);
    for (j = 0; j < input.rte.nphi; j++) {
      for (iv = 0; iv < output->wl.nlambda_h; iv++)
        yint[iv] = (double)output->uu[lev][j][iu][iv];
      output->uu_int[lev][j][iu] = integrate (xint, yint, output->wl.nlambda_h);
    }
  }
}

/* polarized fluxes and radiances (POLRADTRAN solver only): integrate every
   Stokes component over the same abscissa */
if (input.rte.solver == SOLVER_POLRADTRAN) {
  for (lev = 0; lev < output->atm.nzout; lev++) {
    for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
      for (iv = 0; iv < output->wl.nlambda_h; iv++)
        yint[iv] = output->down_flux[lev][is][iv];
      output->down_flux_int[lev][is] = (float)integrate (xint, yint, output->wl.nlambda_h);
      for (iv = 0; iv < output->wl.nlambda_h; iv++)
        yint[iv] = output->up_flux[lev][is][iv];
      output->up_flux_int[lev][is] = (float)integrate (xint, yint, output->wl.nlambda_h);
      for (iu = 0; iu < input.rte.numu; iu++) {
        for (j = 0; j <
input.rte.nphi; j++) { for (iv = 0; iv < output->wl.nlambda_h; iv++) yint[iv] = output->down_rad[lev][j][iu][is][iv]; output->down_rad_int[lev][j][iu][is] = (float)integrate (xint, yint, output->wl.nlambda_h); for (iv = 0; iv < output->wl.nlambda_h; iv++) yint[iv] = output->up_rad[lev][j][iu][is][iv]; output->up_rad_int[lev][j][iu][is] = (float)integrate (xint, yint, output->wl.nlambda_h); } } } } } free (xint); free (yint); /* store integrated values in the first entry of the wavelength index */ if (output->wl.nlambda_h > 0) status += double2float_integrated_values (input, output); if (status != 0) { fprintf (stderr, "Error, conversion from double to float not possible"); fprintf (stderr, "integrate1D(), ancillary.c, status %d\n", status); return status; } status += scaling_integrated_values (input, output); if (status != 0) { fprintf (stderr, "Error, scaling integrated values.\n"); fprintf (stderr, "integrate1D(), in ancillary.c, status %d\n", status); return status; } return status; } static int write_triangular_surface_results (const input_struct input, const output_struct* output) { if (!output->triangle_results_o) return 0; if (!output->triangle_results_o[0]->N_triangles) return 0; const t_triangle_radiation_field* out = output->triangle_results_o[0]; const int ldebug = 0; if (ldebug) { for (size_t i = 0; i < out->N_triangles; ++i) { fprintf (stderr, "Surface fluxes on triangle %lu => %f %f %f\n", i, out->edir[i], out->edn[i], out->eup[i]); } } { // write netcdf int ierr; int ncid; char filename[FILENAME_MAX] = ""; strcpy (filename, input.rte.mc.filename[FN_MC_BASENAME]); strcat (filename, ".flx.triangle.nc"); // Create the file. 
overwrite this file, if it already exists if ((ierr = nc_create (filename, NC_CLOBBER, &ncid))) CHKERR (ierr); if ((ierr = nc_enddef (ncid))) CHKERR (ierr); if ((ierr = write_netcdf_1Dsize_t (ncid, out->ndir, out->N_triangles, "count_edir", "Ntriangles"))) CHKERR (ierr); if ((ierr = write_netcdf_1Dsize_t (ncid, out->ndn, out->N_triangles, "count_edn", "Ntriangles"))) CHKERR (ierr); if ((ierr = write_netcdf_1Dsize_t (ncid, out->nup, out->N_triangles, "count_eup", "Ntriangles"))) CHKERR (ierr); if ((ierr = write_netcdf_1Ddouble (ncid, out->edir, out->N_triangles, "edir", "Ntriangles"))) CHKERR (ierr); if ((ierr = write_netcdf_1Ddouble (ncid, out->edn, out->N_triangles, "edn", "Ntriangles"))) CHKERR (ierr); if ((ierr = write_netcdf_1Ddouble (ncid, out->eup, out->N_triangles, "eup", "Ntriangles"))) CHKERR (ierr); if ((ierr = nc_close (ncid))) CHKERR (ierr); } return 0; } /***************************************/ /* Post - processing of the output */ /* sum or integration over wavelength, */ /* calculation of heating rates or */ /* conversion to RGB */ /***************************************/ int processing3D (input_struct input, output_struct* output) { int status = 0; int is = 0, js = 0, ks = 0, iv = 0, lc = 0; float scale_factor_abs3d = 1.0; float dz = NOT_DEFINED_FLOAT; float c_p = NOT_DEFINED_FLOAT; float rho_air = NOT_DEFINED_FLOAT; char function_name[] = "processing3D"; char file_name[] = "ancillary.c"; if (input.rte.mc.locest) return 0; output->wl.nlambda_h_print3D = output->wl.nlambda_h; switch (input.processing) { case PROCESS_NONE: if (input.calibration == OUTCAL_BRIGHTNESS) { if (!input.quiet) fprintf (stderr, " ... 
converting radiances to brightness temperatures\n");
      /* convert irradiances / radiances to brightness temperatures */
      for (iv = 0; iv < output->wl.nlambda_h; iv++)
        status = output2bt (input, output, iv, 1);
      if (status != 0) {
        fprintf (stderr, "Error %d returned by output2bt()\n", status);
        return status;
      }
    }
    break;
  case PROCESS_SUM:
    status = sum3D (input, output);
    if (status != 0) {
      fprintf (stderr, "Error %d summing 3D output over wavelength in %s (%s)\n", status, function_name, file_name);
      return status;
    }
    /* summed result occupies a single wavelength slot for printing */
    output->wl.nlambda_h_print3D = 1;
    break;
  case PROCESS_INT:
    status = integrate3D (input, output);
    if (status != 0) {
      fprintf (stderr, "Error %d integrating 3D output over wavelength in %s (%s)\n", status, function_name, file_name);
      return status;
    }
    output->wl.nlambda_h_print3D = 1;
    break;
  case PROCESS_RGB:
  case PROCESS_RGBNORM:
    status = spec2rgb3D (input, output); /* convert spectrum to red,green,blue space (function in ancillary.c) */
    if (status != 0) {
      fprintf (stderr, "Error %d converting spectral output to RGB in %s (%s)\n", status, function_name, file_name);
      return status;
    }
    /* three printed "wavelengths" after RGB conversion */
    output->wl.nlambda_h_print3D = 3;
    break;
  default:
    fprintf (stderr, "Error, unknown processing scheme %d in %s (%s)\n", input.processing, function_name, file_name);
    return -1;
  }

  /* convert unit of absorbed irradiance */
  switch (input.rte.mc.abs_unit) {
  case MCABS_UNIT_W_PER_M2_AND_DZ:
    /* default -> no change */
    break;
  case MCABS_UNIT_W_PER_M3:
  case MCABS_UNIT_K_PER_DAY:
    /* forward tracing: rescale the absorbed-irradiance field abs3d */
    if (input.rte.mc.absorption != MCFORWARD_ABS_NONE) {
      for (ks = 0; ks < output->atm.Nzcld; ks++)
        if (output->atm.threed[ks]) { /* only for 3D layers, BM07122005 */
          scale_factor_abs3d = 1.0;
          /* find model level corresponding to the user level - there must be a better way to do that! */
          if (input.rte.mc.abs_unit == MCABS_UNIT_K_PER_DAY) {
            rho_air = output->atm.microphys.dens_avg[MOL_AIR][0][0][output->atm.Nzcld - 1 - ks] * 1.e+6 * 1.e-3 * input.atm.mol_mass[MOL_AIR] / AVOGADRO; /* 1.e+6: convert from cm-3 to m-3; 1.e-3: convert g -> kg */
            status = specific_heat_capacity_moist_air (output->atm.microphys.temper_avg[0][0][output->atm.Nzcld - 1 - ks], output->atm.microphys.dens_avg[MOL_AIR][0][0][output->atm.Nzcld - 1 - ks], output->atm.microphys.dens_avg[MOL_H2O][0][0][output->atm.Nzcld - 1 - ks], &(c_p), input.quiet);
            if (status != 0) {
              fprintf (stderr, "Error, calculating 'c_p' of moist air in %s (%s)\n", function_name, file_name);
              return -1;
            }
            /* K_per_day: divide by (c_p * rho_air), convert seconds -> days */
            scale_factor_abs3d = scale_factor_abs3d * s2day / (c_p * rho_air);
          }
          dz = (output->atm.zd[output->atm.Nzcld - ks - 1] - output->atm.zd[output->atm.Nzcld - ks]) * 1000.0; /* 1000 == km -> m */
          scale_factor_abs3d = scale_factor_abs3d / dz;
          for (is = 0; is < output->atm.Nxcld; is++)
            for (js = 0; js < output->atm.Nycld; js++)
              for (iv = 0; iv < output->wl.nlambda_h_print3D; iv++) { /* **CK added bracket */
                output->abs3d[ks][is][js][iv] *= scale_factor_abs3d;
                if (input.rte.mc.std) /* **CK for forward mc_std: variance scales with the square */
                  output->abs3d_var[ks][is][js][iv] *= (scale_factor_abs3d * scale_factor_abs3d);
              }
        }
    }
    /* backward tracing: rescale absback3d on the zout levels instead */
    if (input.rte.mc.backward.absorption) {
      for (ks = 0; ks < output->atm.nzout; ks++) {
        scale_factor_abs3d = 1.0;
        /* determine the model layer corresponding to our zout layer; */
        /* there must be a better way? */
        for (lc = 0; lc < output->atm.nlev; lc++)
          if (float_equal (output->atm.zd[lc], output->atm.zout_sur[ks]))
            break;
        /* fall back to the lowest level if no match was found */
        if (lc >= output->atm.nlev)
          lc = output->atm.nlev - 1;
        if (input.rte.mc.abs_unit == MCABS_UNIT_K_PER_DAY) {
          rho_air = output->atm.microphys.dens_avg[MOL_AIR][0][0][lc - 1] * 1.e+6 * 1.e-3 * input.atm.mol_mass[MOL_AIR] / AVOGADRO; /* 1.e+6: convert from cm-3 to m-3; 1.e-3: convert g -> kg */
          status = specific_heat_capacity_moist_air (output->atm.microphys.temper_avg[0][0][lc - 1], output->atm.microphys.dens_avg[MOL_AIR][0][0][lc - 1], output->atm.microphys.dens_avg[MOL_H2O][0][0][lc - 1], &(c_p), input.quiet);
          if (status != 0) {
            fprintf (stderr, "Error, calculating 'c_p' of moist air in %s (%s)\n", function_name, file_name);
            return -1;
          }
          scale_factor_abs3d = scale_factor_abs3d * s2day / (c_p * rho_air);
        }
        dz = (output->atm.zd[lc - 1] - output->atm.zd[lc]) * 1000.0; /* 1000 == km -> m */
        scale_factor_abs3d = scale_factor_abs3d / dz;
        if (!input.quiet && input.ipa3d != 1) /*ulrike added && input.ipa3d!=1*/
          fprintf (stderr, "converting to heating rate, level %d %.3f - %.3f km, dens=%g, temper=%.3f, scale_factor %f\n", lc, output->atm.zd[lc], output->atm.zd[lc - 1], output->atm.microphys.dens_avg[MOL_AIR][0][0][lc - 1], output->atm.microphys.temper_avg[0][0][lc - 1], scale_factor_abs3d);
        /* ulrike 4.5.2010: absback3d is used to save the heating rates in case of ipa3d and absback3d is written into mc.abs.spc when mc_backward_output heat K_per_day is specified in the input-file; however, no further scaling of absback3d is necessary (scaling of absback3d was done in scale_output in ancillary.c) */
        if (input.ipa3d != 1)
          for (is = output->islower; is <= output->isupper; is += output->isstep)
            for (js = output->jslower; js <= output->jsupper; js += output->jsstep)
              for (iv = 0; iv < output->wl.nlambda_h_print3D; iv++) {
                output->absback3d[ks][is][js][iv] *= scale_factor_abs3d;
                /* 27.02.2013 **CK **BM: for thermal backward heating rates std */
                if (input.rte.mc.std)
output->absback3d_var[ks][is][js][iv] *= (scale_factor_abs3d * scale_factor_abs3d); } } /* endfor (ks=0; ks<output->atm.nzout; ks++) */ } /* endif (input.rte.mc.backward.absorption) */ break; default: fprintf (stderr, "Error, unknown abs_unit %d in %s (%s)\n", input.rte.mc.abs_unit, function_name, file_name); return -1; } /* write 3D data to files */ status = write_spectral3D (input, output); if (status != 0) { fprintf (stderr, "Error %d writing spectral 3D output in %s (%s)\n", status, function_name, file_name); return status; } status = write_triangular_surface_results (input, output); CHKERR (status); return 0; } /***********************************************************************************/ /* Sum the 3D results over wavelength and print data to mc.sum */ /***********************************************************************************/ static int write_spectral3D (input_struct input, output_struct* output) { int status = 0; int doflx = 1; /* switch that turns off writing of flx files. 
Should be determined automatically */ int is = 0, js = 0, ks = 0, iv = 0, ip = 0, ic = 0, isp = 0, kc = 0, ivs = 0; char flxfilename[FILENAME_MAX] = ""; char radfilename[FILENAME_MAX] = ""; char absfilename[FILENAME_MAX] = ""; char jacfilename[FILENAME_MAX] = ""; char flxisfilename[FILENAME_MAX] = ""; char flxvarfilename[FILENAME_MAX] = ""; char radvarfilename[FILENAME_MAX] = ""; char absvarfilename[FILENAME_MAX] = ""; /* 27.02.2013 **CK **BM: add new variable for thermal backward heating rates std */ FILE *fflx = NULL, *fflxvar = NULL, *fabs = NULL, *fabsvar = NULL, *frad = NULL, *fradvar = NULL, *fjac = NULL, *fflxis = NULL; /* 27.02.2013 **CK **BM: add *fabsvar=NULL for thermal backward heating rates std */ char function_name[] = "write_spectral3D"; char file_name[] = "ancillary.c"; /* generate output file names */ strcpy (flxfilename, input.rte.mc.filename[FN_MC_BASENAME]); strcpy (absfilename, input.rte.mc.filename[FN_MC_BASENAME]); strcpy (radfilename, input.rte.mc.filename[FN_MC_BASENAME]); strcpy (flxvarfilename, input.rte.mc.filename[FN_MC_BASENAME]); strcpy (radvarfilename, input.rte.mc.filename[FN_MC_BASENAME]); strcpy (absvarfilename, input.rte.mc.filename[FN_MC_BASENAME]); /* 27.02.2013 **CK **BM: added for thermal backward heating rates std */ strcat (flxfilename, ".flx.spc"); strcat (radfilename, ".rad.spc"); strcat (flxvarfilename, ".flx.std.spc"); strcat (radvarfilename, ".rad.std.spc"); if (input.rte.mc.jacobian[DIM_1D]) { strcpy (jacfilename, input.rte.mc.filename[FN_MC_BASENAME]); strcat (jacfilename, ".jac.spc"); } if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) { strcpy (flxisfilename, input.rte.mc.filename[FN_MC_BASENAME]); strcat (flxisfilename, ".flx.is.spc"); } /* extension for absorption/heating/actinic etc. 
file */ switch (input.rte.mc.absorption) { case MCFORWARD_ABS_ACTINIC: strcat (absfilename, ".act.spc"); strcat (absvarfilename, ".act.std.spc"); /* 27.02.2013 **CK **BM: added for thermal backward heating rates std */ break; case MCFORWARD_ABS_ABSORPTION: case MCFORWARD_ABS_EMISSION: case MCFORWARD_ABS_HEATING: case MCFORWARD_ABS_NONE: strcat (absfilename, ".abs.spc"); strcat (absvarfilename, ".abs.std.spc"); /* 27.02.2013 **CK **BM: added for thermal backward heating rates std */ break; default: fprintf (stderr, "Error, unknown absorption type %d\n", input.rte.mc.absorption); break; } if (doflx == 1) { if ((fflx = fopen (flxfilename, "w")) == NULL) { fprintf (stderr, "Error opening %s for writing in %s (%s)\n", flxfilename, function_name, file_name); return -1; } if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) { if ((fflxis = fopen (flxisfilename, "w")) == NULL) { fprintf (stderr, "Error opening %s for writing in %s (%s)\n", flxisfilename, function_name, file_name); return -1; } } } if ((frad = fopen (radfilename, "w")) == NULL) { fprintf (stderr, "Error opening %s for writing in %s (%s)\n", radfilename, function_name, file_name); return -1; } if (input.rte.mc.jacobian[DIM_1D]) { if ((fjac = fopen (jacfilename, "w")) == NULL) { fprintf (stderr, "Error opening %s for writing in %s (%s)\n", jacfilename, function_name, file_name); return -1; } } /* variances */ if (input.rte.mc.std) { if (doflx == 1) { if ((fflxvar = fopen (flxvarfilename, "w")) == NULL) { fprintf (stderr, "Error opening %s for writing in %s (%s)\n", flxvarfilename, function_name, file_name); return -1; } } if ((fradvar = fopen (radvarfilename, "w")) == NULL) { fprintf (stderr, "Error opening %s for writing in %s (%s)\n", radvarfilename, function_name, file_name); return -1; } } for (iv = 0; iv < output->wl.nlambda_h_print3D; iv++) for (ks = 0; ks < output->atm.nzout; ks++) for (is = output->islower; is <= output->isupper; is += output->isstep) for (js = output->jslower; js <= 
output->jsupper; js += output->jsstep) { if (doflx == 1) { fprintf (fflx, "%9.5f %4d %4d %4d %.8e %.8e %.8e %.8e %.8e %.8e\n", output->wl.lambda_h[iv], is, js, ks, output->rfldir3d[ks][is][js][iv], output->rfldn3d[ks][is][js][iv], output->flup3d[ks][is][js][iv], output->uavgso3d[ks][is][js][iv], output->uavgdn3d[ks][is][js][iv], output->uavgup3d[ks][is][js][iv]); if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) { for (ic = 0; ic < output->mc.alis.Nc; ic++) { fprintf (fflxis, "%9.5f %4d %4d %4d %4d %g\n", output->wl.lambda_h[iv], ic, is, js, ks, output->fl3d_is[ks][is][js][ic][iv]); } } } /* FIXCE reformat output for concentration_is ???? */ for (ip = 0; ip < input.rte.mc.nstokes; ip++) for (ic = 0; ic < output->mc.alis.Nc; ic++) fprintf (frad, "%9.5f %4d %4d %4d %g\n", output->wl.lambda_h[iv], is, js, ks, output->radiance3d[ks][is][js][ip][ic][iv]); if (input.rte.mc.jacobian[DIM_1D]) { for (kc = 0; kc < output->atm.nlev - 1; kc++) { fprintf (fjac, "%9.5f %4d %4d %4d %4d ", output->wl.lambda_h[iv], is, js, ks, kc); for (isp = 0; isp < input.n_caoth + 2; isp++) { fprintf (fjac, "%.6e %.6e ", output->jacobian[ks][is][js][isp][0][kc][iv], output->jacobian[ks][is][js][isp][1][kc][iv]); } fprintf (fjac, "\n"); } } /* variances */ if (input.rte.mc.std) { if (doflx == 1) fprintf (fflxvar, "%9.5f %4d %4d %4d %.8e %.8e %.8e %.8e %.8e %.8e\n", output->wl.lambda_h[iv], is, js, ks, sqrt (output->rfldir3d_var[ks][is][js][iv]), sqrt (output->rfldn3d_var[ks][is][js][iv]), sqrt (output->flup3d_var[ks][is][js][iv]), sqrt (output->uavgso3d_var[ks][is][js][iv]), sqrt (output->uavgdn3d_var[ks][is][js][iv]), sqrt (output->uavgup3d_var[ks][is][js][iv])); for (ip = 0; ip < input.rte.mc.nstokes; ip++) fprintf (fradvar, "%9.5f %4d %4d %4d %g\n", output->wl.lambda_h[iv], is, js, ks, sqrt (output->radiance3d_var[ks][is][js][ip][iv])); } } if (doflx == 1) fclose (fflx); fclose (frad); if (input.rte.mc.concentration_is) fclose (fflxis); if (input.rte.mc.jacobian[DIM_1D]) fclose 
(fjac); if (input.rte.mc.std) { if (doflx == 1) fclose (fflxvar); fclose (fradvar); } if (output->mc.sample.passback3D && (input.rte.mc.absorption != MCFORWARD_ABS_NONE || input.rte.mc.backward.absorption)) { if ((fabs = fopen (absfilename, "w")) == NULL) { fprintf (stderr, "Error opening %s for writing in %s (%s)\n", absfilename, function_name, file_name); return -1; } /* 27.02.2013 **CK **BM: added for thermal backward heating rates std */ if (input.rte.mc.std) if ((fabsvar = fopen (absvarfilename, "w")) == NULL) { fprintf (stderr, "Error opening %s for writing in %s (%s)\n", radvarfilename, function_name, file_name); return -1; } if (input.rte.mc.absorption != MCFORWARD_ABS_NONE) for (ks = 0; ks < output->atm.Nzcld; ks++) if (output->atm.threed[ks]) /* only for 3D layers, BM07122005 */ for (is = 0; is < output->atm.Nxcld; is++) for (js = 0; js < output->atm.Nycld; js++) for (iv = 0; iv < output->wl.nlambda_h_print3D; iv++) { fprintf (fabs, "%9.5f %4d %4d %4d %.8e\n", output->wl.lambda_h[iv], is, js, ks, output->abs3d[ks][is][js][iv]); /* 27.02.2013 **CK **BM: added for thermal backward heating rates std */ if (input.rte.mc.std) { fprintf (fabsvar, "%9.5f %4d %4d %4d %.8e\n", output->wl.lambda_h[iv], is, js, ks, sqrt (output->abs3d_var[ks][is][js][iv])); } } if (input.rte.mc.backward.absorption) for (iv = 0; iv < output->wl.nlambda_h_print3D; iv++) for (ks = 0; ks < output->atm.nzout; ks++) for (is = output->islower; is <= output->isupper; is += output->isstep) for (js = output->jslower; js <= output->jsupper; js += output->jsstep) { fprintf (fabs, "%9.5f %4d %4d %4d %.8e\n", output->wl.lambda_h[iv], is, js, ks, output->absback3d[ks][is][js][iv]); /* 27.02.2013 **CK **BM: added for thermal backward heating rates std */ if (input.rte.mc.std) { fprintf (fabsvar, "%9.5f %4d %4d %4d %.8e\n", output->wl.lambda_h[iv], is, js, ks, sqrt (output->absback3d_var[ks][is][js][iv])); } } fclose (fabs); /* 27.02.2013 **CK **BM: added for thermal backward heating rates std */ if 
(input.rte.mc.std) fclose (fabsvar); } return status; } /***********************************************************************************/ /* Sum the 3D results over wavelength and write data to the first array element */ /***********************************************************************************/ int sum3D (input_struct input, output_struct* output) { int iv = 0, is = 0, js = 0, ks = 0, ip = 0, ic = 0, isp = 0, i = 0, lc = 0; double ffactor = 0, ffactor2 = 0, rfactor = 0, rfactor2 = 0; float incident = 0, fbeam = 0; int status = 0; /* calculate incident flux (incident) and sum extraterrestrial irradiance (fbeam) */ for (iv = 0; iv < output->wl.nlambda_h; iv++) { incident += output->wl.filter[iv] * output->wl.fbeam[iv] * cos (output->sza_h[iv] * PI / 180.0); fbeam += output->wl.filter[iv] * output->wl.fbeam[iv]; } /* default scaling factors */ ffactor = 1.0; /* irradiance multiplicator */ rfactor = 1.0; /* radiance multiplicator */ /* if transmittance, then divide by the extraterrestrial flux */ if (input.source != SRC_THERMAL) { switch (input.calibration) { case OUTCAL_ABSOLUTE: break; case OUTCAL_TRANSMITTANCE: case OUTCAL_BRIGHTNESS: ffactor = 1.0 / fbeam; /* irradiance multiplicator */ rfactor = 1.0 / fbeam; /* radiance multiplicator */ break; case OUTCAL_REFLECTIVITY: ffactor = 1.0 / incident; rfactor = PI / incident; break; default: fprintf (stderr, "Error, unknown output calibration %d\n", input.calibration); return -1; } } /* fluxes and radiances */ for (ks = 0; ks < output->atm.nzout; ks++) for (is = output->islower; is <= output->isupper; is += output->isstep) for (js = output->jslower; js <= output->jsupper; js += output->jsstep) { for (iv = 1; iv < output->wl.nlambda_h; iv++) { output->rfldir3d[ks][is][js][0] += output->rfldir3d[ks][is][js][iv]; output->rfldn3d[ks][is][js][0] += output->rfldn3d[ks][is][js][iv]; output->flup3d[ks][is][js][0] += output->flup3d[ks][is][js][iv]; output->uavgso3d[ks][is][js][0] += 
output->uavgso3d[ks][is][js][iv]; output->uavgdn3d[ks][is][js][0] += output->uavgdn3d[ks][is][js][iv]; output->uavgup3d[ks][is][js][0] += output->uavgup3d[ks][is][js][iv]; for (ip = 0; ip < input.rte.mc.nstokes; ip++) for (ic = 0; ic < output->mc.alis.Nc; ic++) output->radiance3d[ks][is][js][ip][ic][0] += output->radiance3d[ks][is][js][ip][ic][iv]; if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) for (ic = 0; ic < output->mc.alis.Nc; ic++) { /* fprintf(stderr, "sum3D %f is %f \n", output->rfldn3d[ks][is][js][iv], output->fl3d_is [ks][is][js][ic][iv]); */ output->fl3d_is[ks][is][js][ic][0] += output->fl3d_is[ks][is][js][ic][iv]; } if (input.rte.mc.jacobian[DIM_1D]) for (isp = 0; isp < input.n_caoth + 2; isp++) for (i = 0; i < 2; i++) for (lc = 0; lc < output->atm.nlev - 1; lc++) { /* fprintf(stderr, "sum3D %f \n", output->jacobian [ks][is][js][isp][i][lc][iv]); */ output->jacobian[ks][is][js][isp][i][lc][0] += output->jacobian[ks][is][js][isp][i][lc][iv]; } if (input.rte.mc.backward.absorption) output->absback3d[ks][is][js][0] += output->absback3d[ks][is][js][iv]; /* variances */ if (input.rte.mc.std) { output->rfldir3d_var[ks][is][js][0] += output->rfldir3d_var[ks][is][js][iv]; output->rfldn3d_var[ks][is][js][0] += output->rfldn3d_var[ks][is][js][iv]; output->flup3d_var[ks][is][js][0] += output->flup3d_var[ks][is][js][iv]; output->uavgso3d_var[ks][is][js][0] += output->uavgso3d_var[ks][is][js][iv]; output->uavgdn3d_var[ks][is][js][0] += output->uavgdn3d_var[ks][is][js][iv]; output->uavgup3d_var[ks][is][js][0] += output->uavgup3d_var[ks][is][js][iv]; for (ip = 0; ip < input.rte.mc.nstokes; ip++) output->radiance3d_var[ks][is][js][ip][0] += output->radiance3d_var[ks][is][js][ip][iv]; if (input.rte.mc.backward.absorption) output->absback3d_var[ks][is][js][0] += output->absback3d_var[ks][is][js][iv]; } } /* calibrate */ if (output->wl.nlambda_h > 0) { output->rfldir3d[ks][is][js][0] *= ffactor; output->rfldn3d[ks][is][js][0] *= ffactor; 
output->flup3d[ks][is][js][0] *= ffactor; output->uavgso3d[ks][is][js][0] *= ffactor; output->uavgdn3d[ks][is][js][0] *= ffactor; output->uavgup3d[ks][is][js][0] *= ffactor; if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) for (ic = 0; ic < output->mc.alis.Nc; ic++) output->fl3d_is[ks][is][js][ic][0] *= ffactor; for (ip = 0; ip < input.rte.mc.nstokes; ip++) for (ic = 0; ic < output->mc.alis.Nc; ic++) output->radiance3d[ks][is][js][ip][ic][0] *= rfactor; if (input.rte.mc.jacobian[DIM_1D]) { for (isp = 0; isp < input.n_caoth + 2; isp++) for (i = 0; i < 2; i++) for (lc = 0; lc < output->atm.nlev - 1; lc++) if (input.rte.mc.backward.output == MCBACKWARD_EDN || input.rte.mc.backward.output == MCBACKWARD_EDNPV || input.rte.mc.backward.output == MCBACKWARD_EUP) output->jacobian[ks][is][js][isp][i][lc][0] *= ffactor; else // radiance output->jacobian[ks][is][js][isp][i][lc][0] *= rfactor; } if (input.rte.mc.backward.absorption) output->absback3d[ks][is][js][0] *= ffactor; /* variances */ if (input.rte.mc.std) { ffactor2 = ffactor * ffactor; rfactor2 = rfactor * rfactor; output->rfldir3d_var[ks][is][js][0] *= ffactor2; output->rfldn3d_var[ks][is][js][0] *= ffactor2; output->flup3d_var[ks][is][js][0] *= ffactor2; output->uavgso3d_var[ks][is][js][0] *= ffactor2; output->uavgdn3d_var[ks][is][js][0] *= ffactor2; output->uavgup3d_var[ks][is][js][0] *= ffactor2; for (ip = 0; ip < input.rte.mc.nstokes; ip++) output->radiance3d_var[ks][is][js][ip][0] *= rfactor2; if (input.rte.mc.backward.absorption) output->absback3d_var[ks][is][js][0] *= ffactor2; } } } if (output->mc.sample.passback3D && input.rte.mc.absorption != MCFORWARD_ABS_NONE) for (ks = 0; ks < output->atm.Nzcld; ks++) if (output->atm.threed[ks]) /* only for 3D layers, BM07122005 */ for (is = 0; is < output->atm.Nxcld; is++) for (js = 0; js < output->atm.Nycld; js++) { for (iv = 1; iv < output->wl.nlambda_h; iv++) { /* **CK added bracket and var-line for forward mc_std */ output->abs3d[ks][is][js][0] += 
output->abs3d[ks][is][js][iv]; if (input.rte.mc.std) output->abs3d_var[ks][is][js][0] += output->abs3d_var[ks][is][js][iv]; } output->abs3d[ks][is][js][0] *= ffactor; if (input.rte.mc.std) output->abs3d_var[ks][is][js][0] *= ffactor2; /* **CK added for forward mc_std */ } /* finally, convert to brightness temperature if requested */ if (input.source == SRC_THERMAL && input.calibration == OUTCAL_BRIGHTNESS) { if (!input.quiet) fprintf (stderr, " ... converting 3D radiances and fluxes to brightness temperatures\n"); /* convert irradiances / radiances to brightness temperatures */ iv = 0; status = output2bt (input, output, iv, 1); if (status != 0) { fprintf (stderr, "Error %d returned by output2bt()\n", status); return status; } } if (output->triangle_results_o) { // sum up triangle_results for (iv = 1; iv < output->wl.nlambda_h; iv++) { status = add_triangular_surface_result (ffactor, output->triangle_results_o[iv], output->triangle_results_o[0]); CHKERR (status); } } return status; } /**************************************************************************************/ /* Integrate the 3D results over wavelength and write data to the first array element */ /**************************************************************************************/ int integrate3D (input_struct input, output_struct* output) { int iv = 0, is = 0, js = 0, ks = 0, ip = 0, ic = 0, isp = 0, i = 0, lc = 0; int status = 0, used_unit = 0; double* cos_SZA = NULL; double ffactor = 0, ffactor2 = 0, rfactor = 0, rfactor2 = 0; float * xint = NULL, *yint = NULL; float incident = 0, fbeam = 0; char function_name[] = "integrate3D"; char file_name[] = "ancillary.c"; /* calculate integrated incident flux */ if ((xint = (float*)calloc (output->wl.nlambda_h, sizeof (float))) == NULL) error_calloc ("xint", "integrate3D", &(status)); if ((yint = (float*)calloc (output->wl.nlambda_h, sizeof (float))) == NULL) error_calloc ("yint", "integrate3D", &(status)); if ((cos_SZA = calloc (output->wl.nlambda_h, sizeof 
(double))) == NULL) { fprintf (stderr, "Error, allocating memory for cos_SZA in %s (%s)\n", function_name, file_name); return -1; } switch (input.source) { case SRC_SOLAR: case SRC_LIDAR: /* BCA */ case SRC_BLITZ: /* BCA */ for (iv = 0; iv < output->wl.nlambda_h; iv++) cos_SZA[iv] = cos (output->sza_h[iv] * PI / 180.0); break; case SRC_THERMAL: for (iv = 0; iv < output->wl.nlambda_h; iv++) cos_SZA[iv] = 1.0; break; default: fprintf (stderr, "Error, unknown source %d in %s (%s)\n", input.source, function_name, file_name); return -1; } used_unit = input.output_unit; if (used_unit == UNIT_NOT_DEFINED) /* if no output unit is specified, assume the same units as the input spectrum */ used_unit = output->spectrum_unit; switch (used_unit) { case UNIT_PER_NM: if (input.verbose) fprintf (stderr, " *** integration in wavelength space \n"); for (iv = 0; iv < output->wl.nlambda_h; iv++) { xint[iv] = (double)output->wl.lambda_h[iv]; yint[iv] = output->wl.filter[iv] * output->wl.fbeam[iv] * cos_SZA[iv]; } break; case UNIT_PER_CM_1: /* integration over wavenumber k, minus in order to get ascending wavenumbers */ if (input.verbose) fprintf (stderr, " *** integration in wavenumber space \n"); for (iv = 0; iv < output->wl.nlambda_h; iv++) { xint[iv] = -(double)1.0e+7 / output->wl.lambda_h[iv]; /* k = 10**7 / lambda */ /* 10**7 == nm -> cm */ yint[iv] = output->wl.filter[iv] * output->wl.fbeam[iv] * cos_SZA[iv]; } break; case UNIT_PER_BAND: fprintf (stderr, "Error, combination 'output_process integrate' and 'output_process per_band',\n"); fprintf (stderr, " or combination 'output_process integrate' and an input spectrum defined in \n"); fprintf (stderr, " (wavelength or wavenumber) bands does not make sense \n"); fprintf (stderr, " please use 'output_process sum', when dealing with band parametrisations. 
\n\n"); return -1; break; case UNIT_NOT_DEFINED: fprintf (stderr, "Error, in order to use 'output_process integrate' it is nessesary to specify the \n"); fprintf (stderr, " unit of the extraterrestrial spectrum with 'source solar filename unit' or\n"); fprintf (stderr, " unit of the output with 'output_process per_nm' or 'output_process per_cm-1' \n\n"); return -1; break; default: fprintf (stderr, "Error: Program bug, unsupported unit of extraterrestrial flux %d or output unit %d\n", output->spectrum_unit, input.output_unit); return -1; } incident = integrate_float (xint, yint, output->wl.nlambda_h); /* calculate integrated extraterrestrial irradiance */ for (iv = 0; iv < output->wl.nlambda_h; iv++) yint[iv] = output->wl.filter[iv] * output->wl.fbeam[iv]; fbeam = integrate_float (xint, yint, output->wl.nlambda_h); /* default scaling factors */ ffactor = 1.0; /* irradiance multiplicator */ rfactor = 1.0; /* radiance multiplicator */ /* if transmittance, then divide by the extraterrestrial flux */ if (input.source != SRC_THERMAL) { switch (input.calibration) { case OUTCAL_ABSOLUTE: break; case OUTCAL_TRANSMITTANCE: case OUTCAL_BRIGHTNESS: ffactor = 1.0 / fbeam; /* irradiance multiplicator */ rfactor = 1.0 / fbeam; /* radiance multiplicator */ break; case OUTCAL_REFLECTIVITY: ffactor = 1.0 / incident; rfactor = PI / incident; break; default: fprintf (stderr, "Error, unknown output calibration %d\n", input.calibration); return -1; } } /* fluxes and radiances */ for (ks = 0; ks < output->atm.nzout; ks++) for (is = output->islower; is <= output->isupper; is += output->isstep) for (js = output->jslower; js <= output->jsupper; js += output->jsstep) { output->rfldir3d[ks][is][js][0] = integrate_float (xint, output->rfldir3d[ks][is][js], output->wl.nlambda_h); output->rfldn3d[ks][is][js][0] = integrate_float (xint, output->rfldn3d[ks][is][js], output->wl.nlambda_h); output->flup3d[ks][is][js][0] = integrate_float (xint, output->flup3d[ks][is][js], output->wl.nlambda_h); 
output->uavgso3d[ks][is][js][0] = integrate_float (xint, output->uavgso3d[ks][is][js], output->wl.nlambda_h); output->uavgdn3d[ks][is][js][0] = integrate_float (xint, output->uavgdn3d[ks][is][js], output->wl.nlambda_h); output->uavgup3d[ks][is][js][0] = integrate_float (xint, output->uavgup3d[ks][is][js], output->wl.nlambda_h); if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) for (ic = 0; ic < output->mc.alis.Nc; ic++) output->fl3d_is[ks][is][js][ic][0] = integrate_float (xint, output->fl3d_is[ks][is][js][ic], output->wl.nlambda_h); for (ip = 0; ip < input.rte.mc.nstokes; ip++) for (ic = 0; ic < output->mc.alis.Nc; ic++) output->radiance3d[ks][is][js][ip][ic][0] = integrate_float (xint, output->radiance3d[ks][is][js][ip][ic], output->wl.nlambda_h); if (input.rte.mc.jacobian[DIM_1D]) for (isp = 0; isp < input.n_caoth + 2; isp++) for (i = 0; i < 2; i++) for (lc = 0; lc < output->atm.nlev - 1; lc++) output->jacobian[ks][is][js][isp][i][lc][0] = integrate_float (xint, output->jacobian[ks][is][js][isp][i][lc], output->wl.nlambda_h); if (input.rte.mc.backward.absorption) output->absback3d[ks][is][js][0] = integrate_float (xint, output->absback3d[ks][is][js], output->wl.nlambda_h); /* variances */ if (input.rte.mc.std) { output->rfldir3d_var[ks][is][js][0] = integrate_float (xint, output->rfldir3d_var[ks][is][js], output->wl.nlambda_h); output->rfldn3d_var[ks][is][js][0] = integrate_float (xint, output->rfldn3d_var[ks][is][js], output->wl.nlambda_h); output->flup3d_var[ks][is][js][0] = integrate_float (xint, output->flup3d_var[ks][is][js], output->wl.nlambda_h); output->uavgso3d_var[ks][is][js][0] = integrate_float (xint, output->uavgso3d_var[ks][is][js], output->wl.nlambda_h); output->uavgdn3d_var[ks][is][js][0] = integrate_float (xint, output->uavgdn3d_var[ks][is][js], output->wl.nlambda_h); output->uavgup3d_var[ks][is][js][0] = integrate_float (xint, output->uavgup3d_var[ks][is][js], output->wl.nlambda_h); for (ip = 0; ip < input.rte.mc.nstokes; ip++) 
output->radiance3d_var[ks][is][js][ip][0] = integrate_float (xint, output->radiance3d_var[ks][is][js][ip], output->wl.nlambda_h); if (input.rte.mc.backward.absorption) output->absback3d_var[ks][is][js][0] = integrate_float (xint, output->absback3d_var[ks][is][js], output->wl.nlambda_h); } /* calibrate */ if (output->wl.nlambda_h > 0) { output->rfldir3d[ks][is][js][0] *= ffactor; output->rfldn3d[ks][is][js][0] *= ffactor; output->flup3d[ks][is][js][0] *= ffactor; output->uavgso3d[ks][is][js][0] *= ffactor; output->uavgdn3d[ks][is][js][0] *= ffactor; output->uavgup3d[ks][is][js][0] *= ffactor; if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) for (ic = 0; ic < output->mc.alis.Nc; ic++) output->fl3d_is[ks][is][js][ic][0] *= ffactor; for (ip = 0; ip < input.rte.mc.nstokes; ip++) for (ic = 0; ic < output->mc.alis.Nc; ic++) output->radiance3d[ks][is][js][ip][ic][0] *= rfactor; if (input.rte.mc.jacobian[DIM_1D]) { for (isp = 0; isp < input.n_caoth + 2; isp++) for (i = 0; i < 2; i++) for (lc = 0; lc < output->atm.nlev - 1; lc++) if (input.rte.mc.backward.output == MCBACKWARD_EDN || input.rte.mc.backward.output == MCBACKWARD_EDNPV || input.rte.mc.backward.output == MCBACKWARD_EUP) output->jacobian[ks][is][js][isp][i][lc][0] *= ffactor; else output->jacobian[ks][is][js][isp][i][lc][0] *= rfactor; } if (input.rte.mc.backward.absorption) output->absback3d[ks][is][js][0] *= ffactor; /* variances */ if (input.rte.mc.std) { ffactor2 = ffactor * ffactor; rfactor2 = rfactor * rfactor; output->rfldir3d_var[ks][is][js][0] *= ffactor2; output->rfldn3d_var[ks][is][js][0] *= ffactor2; output->flup3d_var[ks][is][js][0] *= ffactor2; output->uavgso3d_var[ks][is][js][0] *= ffactor2; output->uavgdn3d_var[ks][is][js][0] *= ffactor2; output->uavgup3d_var[ks][is][js][0] *= ffactor2; for (ip = 0; ip < input.rte.mc.nstokes; ip++) output->radiance3d_var[ks][is][js][ip][0] *= rfactor2; if (input.rte.mc.backward.absorption) output->absback3d_var[ks][is][js][0] *= ffactor2; } } } if 
(output->mc.sample.passback3D && input.rte.mc.absorption != MCFORWARD_ABS_NONE) for (ks = 0; ks < output->atm.Nzcld; ks++) if (output->atm.threed[ks]) /* only for 3D layers, BM07122005 */ for (is = 0; is < output->atm.Nxcld; is++) for (js = 0; js < output->atm.Nycld; js++) { for (iv = 1; iv < output->wl.nlambda_h; iv++) { /* **CK added bracket and var-line for forward mc_std */ output->abs3d[ks][is][js][0] = integrate_float (xint, output->abs3d[ks][is][js], output->wl.nlambda_h); output->abs3d_var[ks][is][js][0] = integrate_float (xint, output->abs3d_var[ks][is][js], output->wl.nlambda_h); } if (output->wl.nlambda_h > 0) { /* **CK added bracket and var-line for forward mc_std */ output->abs3d[ks][is][js][0] *= ffactor; output->abs3d_var[ks][is][js][0] *= ffactor2; } } /* finally, convert to brightness temperature if requested */ if (input.source == SRC_THERMAL && input.calibration == OUTCAL_BRIGHTNESS) { if (!input.quiet) fprintf (stderr, " ... converting 3D radiances and fluxes to brightness temperatures\n"); /* convert irradiances / radiances to brightness temperatures */ status = output2bt (input, output, 0, 1); if (status != 0) { fprintf (stderr, "Error %d returned by output2bt()\n", status); return status; } } if (output->triangle_results_o) CHKERROUT (-1, "Integrate not yet implemented for triangles"); return status; } /*********************************/ /* used by integrate1D and sum1D */ /*********************************/ int calloc_int_values (input_struct input, output_struct* output) { int status = 0; int lev = 0, j = 0, iu = 0; /* callocate wavelength integrated fluxes and radiances */ output->rfldir_int = (double*)calloc (output->atm.nzout, sizeof (double)); if (output->rfldir_int == NULL) error_calloc ("output->rfldir_int", "calloc_int_values", &(status)); output->rfldn_int = (double*)calloc (output->atm.nzout, sizeof (double)); if (output->rfldn_int == NULL) error_calloc ("output->rfldn_int", "calloc_int_values", &(status)); output->flup_int = 
(double*)calloc (output->atm.nzout, sizeof (double)); if (output->flup_int == NULL) error_calloc ("output->flup_int", "calloc_int_values", &(status)); output->uavg_int = (double*)calloc (output->atm.nzout, sizeof (double)); if (output->uavg_int == NULL) error_calloc ("output->uavg_int", "calloc_int_values", &(status)); output->uavgso_int = (double*)calloc (output->atm.nzout, sizeof (double)); if (output->uavgso_int == NULL) error_calloc ("output->uavgso_int", "calloc_int_values", &(status)); output->uavgdn_int = (double*)calloc (output->atm.nzout, sizeof (double)); if (output->uavgdn_int == NULL) error_calloc ("output->uavgdn_int", "calloc_int_values", &(status)); output->uavgup_int = (double*)calloc (output->atm.nzout, sizeof (double)); if (output->uavgup_int == NULL) error_calloc ("output->uavgup_int", "calloc_int_values", &(status)); if (input.heating != HEAT_NONE) { output->heat_int = (double*)calloc (output->atm.nzout, sizeof (double)); if (output->heat_int == NULL) error_calloc ("output->heat_int", "calloc_int_values", &(status)); output->emis_int = (double*)calloc (output->atm.nzout, sizeof (double)); if (output->emis_int == NULL) error_calloc ("output->emis_int", "calloc_int_values", &(status)); output->w_zout_int = (double*)calloc (output->atm.nzout, sizeof (double)); if (output->w_zout_int == NULL) error_calloc ("output->w_zout_int", "calloc_int_values", &(status)); } output->u0u_int = (double**)calloc (output->atm.nzout, sizeof (double*)); if (output->u0u_int == NULL) error_calloc ("output->u0u_int", "calloc_int_values", &(status)); for (lev = 0; lev < output->atm.nzout; lev++) output->u0u_int[lev] = (double*)calloc (input.rte.numu, sizeof (double)); output->uu_int = (double***)calloc (output->atm.nzout, sizeof (double*)); if (output->uu_int == NULL) error_calloc ("output->uu_int", "calloc_int_values", &(status)); for (lev = 0; lev < output->atm.nzout; lev++) { output->uu_int[lev] = (double**)calloc (input.rte.nphi, sizeof (double*)); for (j = 0; j < 
input.rte.nphi; j++) output->uu_int[lev][j] = (double*)calloc (input.rte.numu, sizeof (double)); } output->albmed_int = (double*)calloc (input.rte.numu, sizeof (double)); output->trnmed_int = (double*)calloc (input.rte.numu, sizeof (double)); /* callocate wavelength integrated PolRadtran flux and intensities on the output grid */ if (input.rte.solver == SOLVER_POLRADTRAN) { output->down_flux_int = (double**)calloc (output->atm.nzout, sizeof (double*)); if (output->down_flux_int == NULL) error_calloc ("output->down_flux_int", "calloc_int_values", &(status)); for (lev = 0; lev < output->atm.nzout; lev++) output->down_flux_int[lev] = (double*)calloc (input.rte.polradtran[POLRADTRAN_NSTOKES], sizeof (double)); output->up_flux_int = (double**)calloc (output->atm.nzout, sizeof (double*)); if (output->up_flux_int == NULL) error_calloc ("output->up_flux_int", "calloc_int_values", &(status)); for (lev = 0; lev < output->atm.nzout; lev++) output->up_flux_int[lev] = (double*)calloc (input.rte.polradtran[POLRADTRAN_NSTOKES], sizeof (double)); output->down_rad_int = (double****)calloc (output->atm.nzout, sizeof (double*)); if (output->down_rad_int == NULL) error_calloc ("output->down_rad_int", "calloc_int_values", &(status)); for (lev = 0; lev < output->atm.nzout; lev++) { output->down_rad_int[lev] = (double***)calloc (input.rte.nphi, sizeof (double*)); for (j = 0; j < input.rte.nphi; j++) { output->down_rad_int[lev][j] = (double**)calloc (input.rte.numu, sizeof (double*)); for (iu = 0; iu < input.rte.numu; iu++) { output->down_rad_int[lev][j][iu] = (double*)calloc (input.rte.polradtran[POLRADTRAN_NSTOKES], sizeof (double)); } } } output->up_rad_int = (double****)calloc (output->atm.nzout, sizeof (double*)); if (output->up_rad_int == NULL) error_calloc ("output->up_rad_int", "calloc_int_values", &(status)); for (lev = 0; lev < output->atm.nzout; lev++) { output->up_rad_int[lev] = (double***)calloc (input.rte.nphi, sizeof (double*)); for (j = 0; j < input.rte.nphi; j++) { 
output->up_rad_int[lev][j] = (double**)calloc (input.rte.numu, sizeof (double*)); for (iu = 0; iu < input.rte.numu; iu++) { output->up_rad_int[lev][j][iu] = (double*)calloc (input.rte.polradtran[POLRADTRAN_NSTOKES], sizeof (double)); } } } } return status; } void error_calloc (char* variable, char* function, int* status) { fprintf (stderr, "Error allocating memory for (%s) in %s (ancillary.c)\n", variable, function); fflush (stderr); *status = *status - 1; } /***************************************************************************/ /* abuse first entry of the wavelength index to store the integrated value */ /* and to convert double (nessesary for heating rates) to floats */ /* entry (n+1) or entry (-1) would be nicer */ /***************************************************************************/ int double2float_integrated_values (input_struct input, output_struct* output) { int lev, is = 0, j = 0, iu = 0; int status = 0; if (input.rte.solver == SOLVER_POLRADTRAN) { /* polRadtran output */ for (lev = 0; lev < output->atm.nzout; lev++) for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) { /* irradiances */ output->down_flux[lev][is][0] = (float)output->down_flux_int[lev][is]; output->up_flux[lev][is][0] = (float)output->up_flux_int[lev][is]; /* radiances */ for (iu = 0; iu < input.rte.numu; iu++) for (j = 0; j < input.rte.nphi; j++) { output->down_rad[lev][j][iu][is][0] = (float)output->down_rad_int[lev][j][iu][is]; output->up_rad[lev][j][iu][is][0] = (float)output->up_rad_int[lev][j][iu][is]; } } } else { /* unpolarized output */ for (iu = 0; iu < input.rte.numu; iu++) { output->albmed[iu][0] = (float)output->albmed_int[iu]; output->trnmed[iu][0] = (float)output->trnmed_int[iu]; } for (lev = 0; lev < output->atm.nzout; lev++) { /* irradiance / actinic flux */ output->rfldir[lev][0] = (float)output->rfldir_int[lev]; output->rfldn[lev][0] = (float)output->rfldn_int[lev]; output->flup[lev][0] = (float)output->flup_int[lev]; output->uavg[lev][0] = 
(float)output->uavg_int[lev]; output->uavgso[lev][0] = (float)output->uavgso_int[lev]; output->uavgdn[lev][0] = (float)output->uavgdn_int[lev]; output->uavgup[lev][0] = (float)output->uavgup_int[lev]; if (input.heating != HEAT_NONE) { output->heat[lev][0] = (float)output->heat_int[lev]; output->emis[lev][0] = (float)output->emis_int[lev]; output->w_zout[lev][0] = (float)output->w_zout_int[lev]; } /* radiances */ for (iu = 0; iu < input.rte.numu; iu++) { output->u0u[lev][iu][0] = (float)output->u0u_int[lev][iu]; for (j = 0; j < input.rte.nphi; j++) output->uu[lev][j][iu][0] = (float)output->uu_int[lev][j][iu]; } } } return status; } /*********************************/ /* used by integrate1D and sum1D */ /*********************************/ int scaling_integrated_values (input_struct input, output_struct* output) { double ffactor = 0, rfactor = 0, hfactor = 1; int status = 0; /* if transmittance, then divide by the extraterrestrial flux */ if (input.source != SRC_THERMAL) { switch (input.calibration) { case OUTCAL_ABSOLUTE: ffactor = 1.0; /* irradiance multiplicator */ rfactor = 1.0; /* radiance multiplicator */ break; case OUTCAL_TRANSMITTANCE: case OUTCAL_BRIGHTNESS: ffactor = 1.0 / output->wl.fbeam[0]; /* irradiance multiplicator */ rfactor = 1.0 / output->wl.fbeam[0]; /* radiance multiplicator */ break; case OUTCAL_REFLECTIVITY: ffactor = 1.0 / output->incident; rfactor = PI / output->incident; break; default: fprintf (stderr, "Error, unknown output calibration %d\n", input.calibration); return -1; } /**************************************************************/ /* now scale irradiances with ffactor, radiances with rfactor */ /**************************************************************/ if (output->wl.nlambda_h > 0) { /* in iv==0 the integrated values are stored */ status = scale_output (input, &(output->rfldir), &(output->rfldn), &(output->flup), &(output->albmed), &(output->trnmed), &(output->uavgso), &(output->uavgdn), &(output->uavgup), &(output->uavg), 
&(output->u0u), &(output->uu), &(output->heat), &(output->emis), &(output->w_zout), &(output->down_flux), &(output->up_flux), &(output->down_rad), &(output->up_rad), &(output->rfldir3d), &(output->rfldn3d), &(output->flup3d), &(output->fl3d_is), &(output->uavgso3d), &(output->uavgdn3d), &(output->uavgup3d), &(output->radiance3d), &(output->jacobian), &(output->absback3d), &(output->rfldir3d_var), &(output->rfldn3d_var), &(output->flup3d_var), &(output->uavgso3d_var), &(output->uavgdn3d_var), &(output->uavgup3d_var), &(output->radiance3d_var), &(output->abs3d_var), &(output->absback3d_var), output->atm.nzout, output->atm.Nxcld, output->atm.Nycld, output->atm.Nzcld, output->mc.alis.Nc, output->atm.nlyr, output->atm.threed, output->mc.sample.passback3D, output->islower, output->isupper, output->jslower, output->jsupper, output->isstep, output->jsstep, &(output->abs3d), output->triangle_results_o, ffactor, rfactor, hfactor, 0); if (status != 0) { fprintf (stderr, "Error %d returned by scale_output()\n", status); return status; } } } else { if (input.calibration == OUTCAL_BRIGHTNESS) { if (!input.quiet) fprintf (stderr, " ... 
converting radiances to brightness temperatures\n");

      /* convert irradiances / radiances to brightness temperatures */
      /* the first element (iv==0) stores the spectrally integrated quantities; */
      status = output2bt (input, output, 0, 0);
      if (status != 0) {
        fprintf (stderr, "Error %d returned by output2bt()\n", status);
        return status;
      }
    }
  }

  /* abuse first entry to store the integrated value, this is not so nice programmed */
  if (output->wl.nlambda_h > 0)
    output->wl.nlambda_h = 1;

  return 0;
}

/***********************************************************************************/
/* Convert Raman spectrum to user spectrum:                                        */
/* drop the extra wavelength margin that was added for Raman scattering on both    */
/* ends of the internal grid and shift all per-level output arrays (irradiances,   */
/* mean intensities, radiances) down so that index 0 again corresponds to the      */
/* first user wavelength.  Shrinks output->wl.nlambda_h accordingly.               */
/***********************************************************************************/

static int raman_spec2spec (input_struct input, output_struct* output)
{
  int lev = 0, iu = 0, j = 0, status = 0, iv = 0;
  int start_id = 0, end_id = 0;
  float test_lower = 0, test_upper = 0;

  /* first/last wavelength the user actually asked for (margin removed) */
  test_lower = output->wl.lambda_h[0] + output->wl.delta_wvl_raman_lower + output->wl.delta_wvl_raman_extra;
  test_upper = output->wl.lambda_h[output->wl.nlambda_h - 1] - output->wl.delta_wvl_raman_upper - output->wl.delta_wvl_raman_extra;

  /* locate the first index >= test_lower and the last index <= test_upper */
  for (iv = 0; iv < output->wl.nlambda_h; iv++) {
    if (output->wl.lambda_h[iv] < test_lower)
      start_id = iv + 1;
    if (output->wl.lambda_h[output->wl.nlambda_h - 1 - iv] > test_upper)
      end_id = output->wl.nlambda_h - iv - 2;
  }

  /* The old nlambda_h is always larger than what we are going to output. Furthermore, */
  /* start_id is always > 0 and end_id always < nlambda_h, so we just shift the arrays */
  /* and decrease nlambda_h */
  output->wl.nlambda_h = end_id - start_id + 1;

  /* shift all wavelength-indexed output arrays left by start_id */
  for (iv = 0; iv < output->wl.nlambda_h; iv++) {
    output->wl.lambda_h[iv] = output->wl.lambda_h[iv + start_id];
    for (lev = 0; lev < output->atm.nzout; lev++) {
      output->rfldir[lev][iv] = output->rfldir[lev][iv + start_id];
      output->rfldn[lev][iv]  = output->rfldn[lev][iv + start_id];
      output->flup[lev][iv]   = output->flup[lev][iv + start_id];
      output->uavgso[lev][iv] = output->uavgso[lev][iv + start_id];
      output->uavgdn[lev][iv] = output->uavgdn[lev][iv + start_id];
      output->uavgup[lev][iv] = output->uavgup[lev][iv + start_id];
      for (iu = 0; iu < input.rte.numu; iu++) {
        output->u0u[lev][iu][iv] = output->u0u[lev][iu][iv + start_id];
        for (j = 0; j < input.rte.nphi; j++)
          output->uu[lev][j][iu][iv] = output->uu[lev][j][iu][iv + start_id];
      }
    }
  }

  return status;
}

/***********************************************************************************/
/* Convert spectra to RGB and write data to the first three array elements.       */
/* Each wavelength-resolved output array is collapsed in place to (R,G,B);        */
/* nlambda_h is set to 3 and lambda_h[0..2] to the nominal 600/500/400 nm.        */
/***********************************************************************************/

static int spec2rgb (input_struct input, output_struct* output)
{
  int lev = 0, iu = 0, j = 0, is = 0, status = 0, norm = 0;

  if (output->wl.nlambda_h < 3) {
    fprintf (stderr, "Fatal error, need at least 3 wavelengths to store RGB\n");
    return -1;
  }

  /* normalized or weighted with brightness */
  norm = 1;
  if (input.processing == PROCESS_RGB)
    norm = 0;

  for (lev = 0; lev < output->atm.nzout; lev++) {
    status += spectrum_to_rgb_overwrite (output->rfldir[lev], output->wl.nlambda_h, norm);
    status += spectrum_to_rgb_overwrite (output->rfldn[lev], output->wl.nlambda_h, norm);
    status += spectrum_to_rgb_overwrite (output->flup[lev], output->wl.nlambda_h, norm);
    status += spectrum_to_rgb_overwrite (output->uavgso[lev], output->wl.nlambda_h, norm);
    status += spectrum_to_rgb_overwrite (output->uavgdn[lev], output->wl.nlambda_h, norm);
    status += spectrum_to_rgb_overwrite (output->uavgup[lev], output->wl.nlambda_h, norm);

    for (iu = 0; iu < input.rte.numu; iu++) {
      status += spectrum_to_rgb_overwrite (output->u0u[lev][iu], output->wl.nlambda_h, norm);
      for (j = 0; j < input.rte.nphi; j++)
        status += spectrum_to_rgb_overwrite (output->uu[lev][j][iu], output->wl.nlambda_h, norm);
    }
  }

  /* polarized fluxes and radiances */
  if (input.rte.solver == SOLVER_POLRADTRAN)
    for (lev = 0; lev < output->atm.nzout; lev++)
      for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
        status += spectrum_to_rgb_overwrite (output->down_flux[lev][is], output->wl.nlambda_h, norm);
        status += spectrum_to_rgb_overwrite (output->up_flux[lev][is], output->wl.nlambda_h, norm);
        for (iu = 0; iu < input.rte.numu; iu++)
          for (j = 0; j < input.rte.nphi; j++) {
            status += spectrum_to_rgb_overwrite (output->down_rad[lev][j][iu][is], output->wl.nlambda_h, norm);
            status += spectrum_to_rgb_overwrite (output->up_rad[lev][j][iu][is], output->wl.nlambda_h, norm);
          }
      }

  /* NOTE(review): status accumulates errors from spectrum_to_rgb_overwrite() but is */
  /* never checked here and the function returns 0 unconditionally - confirm intent  */
  /* (the 3D sibling spec2rgb3D() does check its status).                            */

  if (input.verbose)
    fprintf (stderr, "LEAVING spec2rgb()\n");

  if (output->wl.nlambda_h > 0)
    output->wl.nlambda_h = 3;

  output->wl.lambda_h[0] = 600;
  output->wl.lambda_h[1] = 500;
  output->wl.lambda_h[2] = 400;

  return 0;
}

/***********************************************************************************/
/* Convert 3D spectra to RGB and write data to the first three array elements.    */
/* Same idea as spec2rgb() but for the MYSTIC 3D sampling fields; nlambda_h is    */
/* deliberately NOT adjusted here because processing1D() still needs it.          */
/***********************************************************************************/

static int spec2rgb3D (input_struct input, output_struct* output)
{
  int is = 0, js = 0, ks = 0, status = 0, norm = 0;

  if (output->wl.nlambda_h < 3) {
    fprintf (stderr, "Fatal error, need at least 3 wavelengths to store RGB\n");
    return -1;
  }

  /* normalized or weighted with brightness */
  norm = 1;
  if (input.processing == PROCESS_RGB)
    norm = 0;

  for (ks = 0; ks < output->atm.nzout; ks++)
    for (is = output->islower; is <= output->isupper; is += output->isstep)
      for (js = output->jslower; js <= output->jsupper; js += output->jsstep) {
        if (input.verbose)
          fprintf (stderr, "R3D = %f\n", output->radiance3d[ks][is][js][0][0][0]);

        status += spectrum_to_rgb_overwrite (output->rfldir3d[ks][is][js], output->wl.nlambda_h, norm);
        status += spectrum_to_rgb_overwrite (output->rfldn3d[ks][is][js], output->wl.nlambda_h, norm);
        status += spectrum_to_rgb_overwrite (output->flup3d[ks][is][js], output->wl.nlambda_h, norm);
        status += spectrum_to_rgb_overwrite (output->uavgso3d[ks][is][js], output->wl.nlambda_h, norm);
        status += spectrum_to_rgb_overwrite (output->uavgdn3d[ks][is][js], output->wl.nlambda_h, norm);
        status += spectrum_to_rgb_overwrite (output->uavgup3d[ks][is][js], output->wl.nlambda_h, norm);

        /* ignore polarization for rgb output */
        status += spectrum_to_rgb_overwrite (output->radiance3d[ks][is][js][0][0], output->wl.nlambda_h, norm);

        /* ignore variances for rgb output */

        if (input.verbose) {
          fprintf (stderr, "SPEC2RGB %d %d %d %d\n", ks, is, js, output->wl.nlambda_h);
          fprintf (stderr, "R3D = %f\n", output->radiance3d[ks][is][js][0][0][0]);
        }
      }

  if (status != 0) {
    fprintf (stderr, "Error converting 3D fields to colors with spec2rgb3D()\n");
    return status;
  }

  /* don't adjust lambda_h and nlambda_h here - we need the */
  /* original numbers for processing1D()!
*/

  /*
  if (output->wl.nlambda_h>0)
    output->wl.nlambda_h = 3;

  output->wl.lambda_h[0] = 600;
  output->wl.lambda_h[1] = 500;
  output->wl.lambda_h[2] = 400;
  */

  if (input.verbose)
    fprintf (stderr, "LEAVING spec2rgb3D()\n");

  if (output->triangle_results_o)
    CHKERROUT (-1, "Integrate not yet implemented for triangles");

  return 0;
}

/***********************************************************************************/
/* Scale uvspec output fields with a given factor; ffactor is the scaling factor  */
/* for irradiances/actinic fluxes and rfactor is the scaling factor for radiances */
/* (hfactor scales heating rates).  All scaling is done in place at spectral      */
/* index iv.  Variances are scaled with the squared factors.                      */
/***********************************************************************************/

int scale_output (input_struct input,
                  float*** p_rfldir, float*** p_rfldn, float*** p_flup,
                  float*** p_albmed, float*** p_trnmed,
                  float*** p_uavgso, float*** p_uavgdn, float*** p_uavgup, float*** p_uavg,
                  float**** p_u0u, float***** p_uu,
                  float*** p_heat, float*** p_emis, float*** p_w_zout,
                  float**** p_down_flux, float**** p_up_flux,
                  float****** p_down_rad, float****** p_up_rad,
                  float***** p_rfldir3d, float***** p_rfldn3d, float***** p_flup3d,
                  float****** p_fl3d_is,
                  float***** p_uavgso3d, float***** p_uavgdn3d, float***** p_uavgup3d,
                  float******* p_radiance3d, float******** p_jacobian,
                  float***** p_absback3d,
                  float***** p_rfldir3d_var, float***** p_rfldn3d_var, float***** p_flup3d_var,
                  float***** p_uavgso3d_var, float***** p_uavgdn3d_var, float***** p_uavgup3d_var,
                  float****** p_radiance3d_var, float***** p_abs3d_var, float***** p_absback3d_var,
                  int nzout, int Nx, int Ny, int Nz, int Nc, int Nlyr,
                  int* threed, int passback3D,
                  int islower, int isupper, int jslower, int jsupper, int isstep, int jsstep,
                  float***** p_abs3d, t_triangle_radiation_field** triangle_result,
                  double ffactor, double rfactor, double hfactor, int iv)
/* changed output_struc to a bunch of pointers, as this function is also used in solve_rte() with different arguements, UH, 2006-02 */
{
  double ffactor2 = 0, rfactor2 = 0, hfactor2 = 0; /*
27.02.2013 **CK **BM: add hfactor2 for thermal backward heating rate std */
  int lev = 0, iu = 0, j = 0, is = 0, js = 0, ks = 0, ic = 0, ip = 0, isp = 0, i = 0, lc = 0;

  if (input.rte.solver == SOLVER_POLRADTRAN) { /* polRadtran output */
    for (lev = 0; lev < nzout; lev++) {
      for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
        /* irradiances */
        (*p_down_flux)[lev][is][iv] *= ffactor;
        (*p_up_flux)[lev][is][iv] *= ffactor;

        /* radiances */
        for (iu = 0; iu < input.rte.numu; iu++)
          for (j = 0; j < input.rte.nphi; j++) {
            (*p_down_rad)[lev][j][iu][is][iv] *= rfactor;
            (*p_up_rad)[lev][j][iu][is][iv] *= rfactor;
          }
      }

      /* heating rate */
      if (input.heating != HEAT_NONE) {
        (*p_heat)[lev][iv] *= hfactor;
        (*p_emis)[lev][iv] *= hfactor;
        (*p_w_zout)[lev][iv] *= hfactor;
      }
    }
  }
  else { /* unpolarized output */

    /* Arve 20160121: spherical albedo and transmittance should not be scaled if disort_spherical_albedo is set. */
    if (!input.rte.ibcnd) {
      for (iu = 0; iu < input.rte.numu; iu++) {
        (*p_albmed)[iu][iv] *= ffactor;
        (*p_trnmed)[iu][iv] *= ffactor;
      }
    }

    for (lev = 0; lev < nzout; lev++) {

      /* irradiance / actinic flux / heating rate */
      (*p_rfldir)[lev][iv] *= ffactor;
      (*p_rfldn)[lev][iv] *= ffactor;
      (*p_flup)[lev][iv] *= ffactor;
      (*p_uavg)[lev][iv] *= ffactor;
      (*p_uavgso)[lev][iv] *= ffactor;
      (*p_uavgdn)[lev][iv] *= ffactor;
      (*p_uavgup)[lev][iv] *= ffactor;

      if (input.heating != HEAT_NONE) {
        (*p_heat)[lev][iv] *= hfactor;
        (*p_emis)[lev][iv] *= hfactor;
        (*p_w_zout)[lev][iv] *= hfactor;
      }

      /* radiances */
      for (iu = 0; iu < input.rte.numu; iu++) {
        (*p_u0u)[lev][iu][iv] *= rfactor;
        for (j = 0; j < input.rte.nphi; j++)
          (*p_uu)[lev][j][iu][iv] *= rfactor;
      }

      /* 3D irradiances and radiances */
      if (passback3D)
        for (is = islower; is <= isupper; is += isstep)
          for (js = jslower; js <= jsupper; js += jsstep) {
            (*p_rfldir3d)[lev][is][js][iv] *= ffactor;
            (*p_rfldn3d)[lev][is][js][iv] *= ffactor;
            (*p_flup3d)[lev][is][js][iv] *= ffactor;
            (*p_uavgso3d)[lev][is][js][iv] *= ffactor;
            (*p_uavgdn3d)[lev][is][js][iv] *= ffactor;
            (*p_uavgup3d)[lev][is][js][iv] *= ffactor;

            if (input.rte.mc.concentration_is || input.rte.mc.spectral_is) {
              for (ic = 0; ic < Nc; ic++)
                (*p_fl3d_is)[lev][is][js][ic][iv] *= ffactor;
            }

            for (ip = 0; ip < input.rte.mc.nstokes; ip++) {
              for (ic = 0; ic < Nc; ic++)
                (*p_radiance3d)[lev][is][js][ip][ic][iv] *= rfactor;
            }

            if (input.rte.mc.jacobian[DIM_1D]) {
              for (isp = 0; isp < input.n_caoth + 2; isp++)
                for (i = 0; i < 2; i++)
                  for (lc = 0; lc < Nlyr; lc++) {
                    /* irradiance-type backward quantities scale like fluxes, all others like radiances */
                    if (input.rte.mc.backward.output == MCBACKWARD_EDN || input.rte.mc.backward.output == MCBACKWARD_EDNPV ||
                        input.rte.mc.backward.output == MCBACKWARD_EUP)
                      (*p_jacobian)[lev][is][js][isp][i][lc][iv] *= ffactor;
                    else
                      (*p_jacobian)[lev][is][js][isp][i][lc][iv] *= rfactor;
                  }
            }

            if (input.rte.mc.backward.absorption && input.ipa3d != 1) /*ulrike: added && input.ipa3d!=1*/
              (*p_absback3d)[lev][is][js][iv] *= ffactor;

            /* ulrike 3.5.2010: if we have ipa3d then we use absback3d to save the heating rate; thus, p_absback3d should not be multiplied with the ffactor, but with the hfactor, which is the heating rate factor*/
            if (input.rte.mc.backward.absorption && input.ipa3d)
              (*p_absback3d)[lev][is][js][iv] *= hfactor;

            /* variances: scale with the squared factors */
            if (input.rte.mc.std) {
              ffactor2 = ffactor * ffactor;
              rfactor2 = rfactor * rfactor;
              hfactor2 = hfactor * hfactor; /* 27.02.2013 **CK **BM: add hfactor2 for thermal backward heating rate std */

              (*p_rfldir3d_var)[lev][is][js][iv] *= ffactor2;
              (*p_rfldn3d_var)[lev][is][js][iv] *= ffactor2;
              (*p_flup3d_var)[lev][is][js][iv] *= ffactor2;
              (*p_uavgso3d_var)[lev][is][js][iv] *= ffactor2;
              (*p_uavgdn3d_var)[lev][is][js][iv] *= ffactor2;
              (*p_uavgup3d_var)[lev][is][js][iv] *= ffactor2;

              for (ip = 0; ip < input.rte.mc.nstokes; ip++)
                (*p_radiance3d_var)[lev][is][js][ip][iv] *= rfactor2;

              if (input.rte.mc.backward.absorption && input.ipa3d != 1) /* 27.02.2013 **CK **BM: add "&&" for thermal backward heating rate std */
                (*p_absback3d_var)[lev][is][js][iv] *= ffactor2;

              /* 27.02.2013
**CK **BM: add hfactor2 for thermal backward heating rate std */
              if (input.rte.mc.backward.absorption && input.ipa3d)
                (*p_absback3d_var)[lev][is][js][iv] *= hfactor2;
            }
          }
    }

    /* 3D absorption fields; here the loop goes over all 3D boxes */
    if (passback3D && input.rte.mc.absorption != MCFORWARD_ABS_NONE)
      for (ks = 0; ks < Nz; ks++)
        if (threed[ks]) /* only for 3D layers, BM07122005 */
          for (is = 0; is < Nx; is++)
            for (js = 0; js < Ny; js++) { /* **CK added bracket */
              (*p_abs3d)[ks][is][js][iv] *= ffactor;
              if (input.rte.mc.std) /* **CK added for forward mc_std */
                /* ??????????????? **CK shouldn't we also switch between ffactor and hfactor for ipa3d/not ipa3d as in backward mode? */
                /* NOTE(review): ffactor2 is only set inside the passback3D variance loop above; */
                /* if that loop body never ran, ffactor2 is still 0 here - confirm.              */
                (*p_abs3d_var)[ks][is][js][iv] *= ffactor2;
            }

    if (triangle_result) {
      const int ierr = scale_output_triangle_surface (ffactor, triangle_result[iv]);
      CHKERR (ierr);
    }
  }

  return 0;
}

/* Scale the surface fields of a triangular sampling grid in place with ffactor. */
int scale_output_triangle_surface (const double ffactor, t_triangle_radiation_field* result)
{
  for (size_t i = 0; i < result->N_triangles; ++i) {
    result->edir[i] *= ffactor;
    result->edn[i] *= ffactor;
    result->eup[i] *= ffactor;
  }
  return 0;
}

/***********************************************************************************/
/* Convert radiance to brightness temperature for a given filter function.        */
/* Inverts the Planck function with a secant iteration (step dt) starting at      */
/* 273 K until the relative difference between the Planck radiance and rad is     */
/* below accur, or 1000 iterations were done.  Depending on `processing` the      */
/* comparison radiance is the filter-weighted integral (PROCESS_INT), the         */
/* filter-weighted sum (PROCESS_SUM), or the single band ivi (default).           */
/* Returns the brightness temperature [K]; NaN input is returned unchanged,       */
/* rad <= 0 yields 0.                                                             */
/***********************************************************************************/

float radiance2bt (float rad, float* wvnmlo, float* wvnmhi, float* filter, int n, int processing, int ivi)
{
  int iv = 0, it = 0;
  double t1 = 0, t2 = 0, plkrad1 = 0, plkrad2 = 0, r = 0, wvlmlo = 0, wvlmhi = 0;
  double *xint = NULL, *yint1 = NULL, *yint2 = NULL;

  /* careful: if accuracy is set too small, it may not be reachable because */
  /* the difference between two neighbouring floats may be larger! */
  /* Should better tie this to the actual numerical precision */
  double accur = 5e-5;
  double dt = 0.001;

  /* special treatment of NaN */
  if (rad != rad)
    return rad;

  t1 = 273;
  t2 = t1 + dt;

  if (rad <= 0)
    return 0;

  plkrad1 = 0;
  plkrad2 = 0;

  if (processing == PROCESS_INT) {
    xint  = (double*)calloc (n, sizeof (double));
    yint1 = (double*)calloc (n, sizeof (double));
    yint2 = (double*)calloc (n, sizeof (double));
    for (iv = 0; iv < n; iv++) {
      wvlmlo = wvnmlo[iv];
      wvlmhi = wvnmhi[iv];
      // wavenumber is multiplied by -1 since the integrate()-function assumes increasing x-values
      xint[iv] = -(wvnmlo[iv] + wvnmhi[iv]) / 2.0;
      // the difference between wvlmlo and wvlmhi is assumed to be 1 cm^-1, otherwise the integrated value is probably wrong
      r = c_planck_func1 (wvlmlo, wvlmhi, t1);
      yint1[iv] = filter[iv] * r;
      r = c_planck_func1 (wvlmlo, wvlmhi, t2);
      yint2[iv] = filter[iv] * r;
    }
    plkrad1 = integrate (xint, yint1, n);
    plkrad2 = integrate (xint, yint2, n);
  }
  else if (processing == PROCESS_SUM) {
    for (iv = 0; iv < n; iv++) {
      wvlmlo = wvnmlo[iv];
      wvlmhi = wvnmhi[iv];
      r = c_planck_func1 (wvlmlo, wvlmhi, t1);
      plkrad1 += (double)filter[iv] * r;
      r = c_planck_func1 (wvlmlo, wvlmhi, t2);
      plkrad2 += (double)filter[iv] * r;
    }
  }
  else {
    wvlmlo = wvnmlo[ivi];
    wvlmhi = wvnmhi[ivi];
    plkrad1 = c_planck_func1 (wvlmlo, wvlmhi, t1);
    plkrad2 = c_planck_func1 (wvlmlo, wvlmhi, t2);
  }

  /* secant iteration: move t1 along the local slope (plkrad2-plkrad1)/dt */
  it = 0;
  while (fabs ((plkrad1 - rad) / rad) > accur) {
    t1 = t1 + (rad - plkrad1) / (plkrad2 - plkrad1) * dt;
    t2 = t1 + dt;

    plkrad1 = 0;
    plkrad2 = 0;

    if (processing == PROCESS_INT) {
      for (iv = 0; iv < n; iv++) {
        wvlmlo = wvnmlo[iv];
        wvlmhi = wvnmhi[iv];
        r = c_planck_func1 (wvlmlo, wvlmhi, t1);
        yint1[iv] = filter[iv] * r;
        r = c_planck_func1 (wvlmlo, wvlmhi, t2);
        yint2[iv] = filter[iv] * r;
      }
      plkrad1 = integrate (xint, yint1, n);
      plkrad2 = integrate (xint, yint2, n);
    }
    else if (processing == PROCESS_SUM) {
      for (iv = 0; iv < n; iv++) {
        wvlmlo = wvnmlo[iv];
        wvlmhi = wvnmhi[iv];
        r = c_planck_func1 (wvlmlo, wvlmhi, t1);
        plkrad1 += (double)filter[iv] * r;
        r = c_planck_func1 (wvlmlo, wvlmhi, t2);
        plkrad2 += (double)filter[iv] * r;
      }
    }
    else {
      wvlmlo = wvnmlo[ivi];
      wvlmhi = wvnmhi[ivi];
      plkrad1 = c_planck_func1 (wvlmlo, wvlmhi, t1);
      plkrad2 = c_planck_func1 (wvlmlo, wvlmhi, t2);
    }

    /* emergency exit: warn and return the current estimate after 1000 iterations */
    if (it > 999) {
      fprintf (stderr, "While loop in function %s, file %s, did not converge.\n", __func__, __FILE__);
      fprintf (stderr, "Continuing anyway, but be careful with results.\n");
      fprintf (stderr, "The relevant variables in %s have the following values:\n", __func__);
      fprintf (stderr, "t1 = %12.6f\n", t1);
      fprintf (stderr, "t2 = %12.6f\n", t2);
      fprintf (stderr, "plkrad1 = %12.6e\n", plkrad1);
      fprintf (stderr, "plkrad2 = %12.6e\n", plkrad2);
      fprintf (stderr, "rad = %12.6e\n", rad);
      fprintf (stderr, "accur = %12.6e\n", accur);
      fprintf (stderr, "fabs((plkrad1 - rad)/rad) = %12.6e\n", fabs ((plkrad1 - rad) / rad));
      break;
    }
    it++;
  }

  if (processing == PROCESS_INT) {
    free (xint);
    free (yint1);
    free (yint2);
  }

  return t1;
}

/***********************************************************************************/
/* Convert uvspec output to brightness temperatures; currently it is assumed that */
/* the first element (iv==0) stores the spectrally integrated quantities; */
/* only these will be considered.
*/
/***********************************************************************************/

/* Convert all output fields at spectral index iv to brightness temperatures      */
/* (in place) via radiance2bt().  Irradiances are divided by PI to obtain an      */
/* equivalent radiance before conversion; radiances are converted directly.       */
/* is_3d selects the MYSTIC 3D fields instead of the 1D fields.  With REPTRAN     */
/* and PROCESS_NONE a temporary wavenumber/weight grid for the representative     */
/* band is built (and freed again at the end).  Returns 0.                        */
static int output2bt (input_struct input, output_struct* output, int iv, int is_3d)
{
  int lev = 0, iu = 0, j = 0, is = 0, ih = 0, js = 0, ic = 0, ip = 0;

  float *wvnmlo, *wvnmhi, *weight;
  int nlambda;
  int processing;

  processing = input.processing;

  /* Use the representative wavelengths (_r-grid) only when no integration or summation was requested by the user */
  if (output->wl.use_reptran && input.processing == PROCESS_NONE) {

    processing = PROCESS_SUM;

    nlambda = output->wl.nlambda_in_reptran_band[output->wl.reptran_band_t[output->wl.map_e2h[iv]]];

    wvnmlo = calloc (nlambda, sizeof (float));
    wvnmhi = calloc (nlambda, sizeof (float));
    weight = calloc (nlambda, sizeof (float));

    for (ih = 0; ih < nlambda; ih++) {
      /* representative wavelength [nm] -> band limits in wavenumber [cm-1] */
      wvnmlo[ih] = output->wl.lambda_r[output->wl.reptran_band[output->wl.reptran_band_t[output->wl.map_e2h[iv]]][ih]];
      wvnmlo[ih] = 1.0E7 / wvnmlo[ih] - input.bandwidth / 2.0;
      wvnmhi[ih] = wvnmlo[ih] + input.bandwidth;
      weight[ih] = output->wl.weight_reptran_band[output->wl.reptran_band_t[output->wl.map_e2h[iv]]][ih];
    }
  }
  else {
    wvnmlo = output->wl.wvnmlo_h;
    wvnmhi = output->wl.wvnmhi_h;
    weight = output->wl.filter;
    nlambda = output->wl.nlambda_h;
  }

  if (is_3d) { /* 3D processing */
    for (lev = 0; lev < output->atm.nzout; lev++) {
      /* 3D fields */
      if (output->mc.sample.passback3D)
        for (is = output->islower; is <= output->isupper; is += output->isstep)
          for (js = output->jslower; js <= output->jsupper; js += output->jsstep) {
            /* irradiances: divide by PI to get an equivalent radiance first */
            output->rfldir3d[lev][is][js][iv] = radiance2bt (output->rfldir3d[lev][is][js][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
            output->rfldn3d[lev][is][js][iv] = radiance2bt (output->rfldn3d[lev][is][js][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
            output->flup3d[lev][is][js][iv] = radiance2bt (output->flup3d[lev][is][js][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
            output->uavgso3d[lev][is][js][iv] = radiance2bt (output->uavgso3d[lev][is][js][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
            output->uavgdn3d[lev][is][js][iv] = radiance2bt (output->uavgdn3d[lev][is][js][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
            output->uavgup3d[lev][is][js][iv] = radiance2bt (output->uavgup3d[lev][is][js][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);

            if (input.rte.mc.concentration_is || input.rte.mc.spectral_is)
              for (ic = 0; ic < output->mc.alis.Nc; ic++)
                output->fl3d_is[lev][is][js][ic][iv] = radiance2bt (output->fl3d_is[lev][is][js][ic][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);

            for (ip = 0; ip < input.rte.mc.nstokes; ip++)
              for (ic = 0; ic < output->mc.alis.Nc; ic++)
                output->radiance3d[lev][is][js][ip][ic][iv] =
                  radiance2bt (output->radiance3d[lev][is][js][ip][ic][iv], wvnmlo, wvnmhi, weight, nlambda, processing, iv);

            /* it would certainly be nonsense to convert standard deviations to */
            /* brightness temperature; therefore set to NaN */

            /* variances */
            if (input.rte.mc.std) {
              output->rfldir3d_var[lev][is][js][iv] = 0.0 / 0.0;
              output->rfldn3d_var[lev][is][js][iv] = 0.0 / 0.0;
              output->flup3d_var[lev][is][js][iv] = 0.0 / 0.0;
              output->uavgso3d_var[lev][is][js][iv] = 0.0 / 0.0;
              output->uavgdn3d_var[lev][is][js][iv] = 0.0 / 0.0;
              output->uavgup3d_var[lev][is][js][iv] = 0.0 / 0.0;

              for (ip = 0; ip < input.rte.mc.nstokes; ip++)
                output->radiance3d_var[lev][is][js][ip][iv] = 0.0 / 0.0;

              if (input.rte.mc.backward.absorption)
                output->absback3d_var[lev][is][js][iv] = 0.0 / 0.0;
            }
          }
    }

    /* conversion of 3D absorption to BT is probably useless, hence we don't do it */
  }
  else { /* 1D processing */
    if (input.rte.solver == SOLVER_POLRADTRAN) { /* polRadtran output */
      for (lev = 0; lev < output->atm.nzout; lev++)
        for (is = 0; is < input.rte.polradtran[POLRADTRAN_NSTOKES]; is++) {
          output->down_flux[lev][is][iv] = radiance2bt (output->down_flux[lev][is][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
          output->up_flux[lev][is][iv] = radiance2bt (output->up_flux[lev][is][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);

          /* radiances */
          for (iu = 0; iu < input.rte.numu; iu++)
            for (j = 0; j < input.rte.nphi; j++) {
              output->down_rad[lev][j][iu][is][iv] = radiance2bt (output->down_rad[lev][j][iu][is][iv], wvnmlo, wvnmhi, weight, nlambda, processing, iv);
              output->up_rad[lev][j][iu][is][iv] = radiance2bt (output->up_rad[lev][j][iu][is][iv], wvnmlo, wvnmhi, weight, nlambda, processing, iv);
            }
        }
    }
    else { /* unpolarized output */
      for (lev = 0; lev < output->atm.nzout; lev++) {

        /* irradiance / actinic flux */
        output->rfldir[lev][iv] = radiance2bt (output->rfldir[lev][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
        output->rfldn[lev][iv] = radiance2bt (output->rfldn[lev][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
        output->flup[lev][iv] = radiance2bt (output->flup[lev][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
        output->uavg[lev][iv] = radiance2bt (output->uavg[lev][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
        output->uavgso[lev][iv] = radiance2bt (output->uavgso[lev][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
        output->uavgdn[lev][iv] = radiance2bt (output->uavgdn[lev][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);
        output->uavgup[lev][iv] = radiance2bt (output->uavgup[lev][iv] / PI, wvnmlo, wvnmhi, weight, nlambda, processing, iv);

        /* radiances */
        for (iu = 0; iu < input.rte.numu; iu++) {
          output->u0u[lev][iu][iv] = radiance2bt (output->u0u[lev][iu][iv], wvnmlo, wvnmhi, weight, nlambda, processing, iv);
          for (j = 0; j < input.rte.nphi; j++)
            output->uu[lev][j][iu][iv] = radiance2bt (output->uu[lev][j][iu][iv], wvnmlo, wvnmhi, weight, nlambda, processing, iv);
        }
      }
    }
  }

  /* free the temporary REPTRAN grid (same condition as the allocation above) */
  if (output->wl.use_reptran && input.processing == PROCESS_NONE) {
    free (wvnmlo);
    free (wvnmhi);
    free (weight);
  }

  return 0;
}

/* Read a two-column photon-fraction file and check that its wavelength grid     */
/* matches the internal grid lambda_r exactly.  fraction is allocated by the     */
/* reader and handed back to the caller.                                          */
static int read_photon_file (char* filename, float* lambda_r, int nlambda_r, float** fraction)
{
  float* wvl = NULL;
  int iv = 0, n = 0,
status = 0; status = read_2c_file_float (filename, &wvl, fraction, &n); if (status != 0) { fprintf (stderr, "Error %d reading %s\n", status, filename); return status; } /* compare wavelength grids */ if (nlambda_r != n) { fprintf (stderr, "Error, number of wavelengths in %s differing from\n", filename); fprintf (stderr, "internal wavelength grid\n"); return -1; } for (iv = 0; iv < n; iv++) if (lambda_r[iv] != wvl[iv]) { fprintf (stderr, "Error, wavelengths in %s differ from internal wavelength grid: %f vs. %f\n", filename, lambda_r[iv], wvl[iv]); return -1; } return 0; } /******************************************************************************/ /* select the required wavelength range from an array of wavelengths (lambda) */ /******************************************************************************/ static int select_wavelength_indices (float* lambda, int nlambda, float* lambda_lower, float* lambda_upper, int start_index, int end_index, int quiet, int raman, int* lower, int* upper) { int iv = 0, tmp = 0; if (start_index > 0 && end_index > 0) { /* if wavelength indices are defined */ /* check if start_index and end_index sorted; if not, sort */ if (start_index > end_index) { tmp = start_index; start_index = end_index; end_index = tmp; } /* check if we are out of range */ if (end_index > nlambda) { fprintf (stderr, "Error, selected wavelength index %d is out of range \n", end_index); return -1; } /* the selected range is valid */ *lower = start_index - 1; *upper = end_index - 1; *lambda_lower = lambda[*lower]; *lambda_upper = lambda[*upper]; } else { /* if wavelength is specified search for suitable indices */ while (lambda[iv] <= *lambda_lower) if (++iv == nlambda) break; if (iv > 0) iv -= 1; *lower = iv; iv = 0; while (lambda[iv] < *lambda_upper) { if (++iv == nlambda) break; } /* Need to include larger wavelength range if Raman scattering is included */ if (raman) { iv = 0; while (lambda[iv] <= *lambda_lower) if (++iv == nlambda) break; if (iv > 0) iv -= 
1; *lower = iv; iv = 0; while (lambda[iv] < *lambda_upper) { if (++iv == nlambda) break; } if (iv == nlambda) iv -= 1; *upper = iv; } if (iv == nlambda) iv -= 1; *upper = iv; } return 0; } /**************************************************************************/ /* average return the average density per layer in the array **y_average */ /* **y_average will be automatically callocated */ /* OUTPUT: dens_avg */ /**************************************************************************/ int average_dens (float* dens, float* dens_air, float* zd, int nlev, int interpol_method, float** dens_avg, int allocate) { int status = 0; int n_layer; float* mix; /* mixing ratio*/ int lc; n_layer = nlev - 1; if (allocate) if (((*dens_avg) = (float*)calloc (n_layer, sizeof (float))) == NULL) { fprintf (stderr, "Error allocating memory for (*dens_avg) in average (ancillary.c)\n"); return -1; } switch (interpol_method) { case INTERP_METHOD_SPLINE: status = spline_average (zd, dens, nlev, dens_avg); break; case INTERP_METHOD_LINEAR: for (lc = 0; lc < n_layer; lc++) (*dens_avg)[lc] = 0.5 * (dens[lc] + dens[lc + 1]); break; case INTERP_METHOD_LOG: for (lc = 0; lc < n_layer; lc++) { (*dens_avg)[lc] = log_average (dens[lc], dens[lc + 1]); } break; case INTERP_METHOD_LINMIX: /* linear mixing ratio integration */ case INTERP_METHOD_LOG_SPLINE: /* no log_spline intergration implemented jet !!! 
*/
    /* fall back to linear-mixing-ratio averaging */
    mix = (float*)calloc (nlev, sizeof (float));
    if (mix == NULL) {
      fprintf (stderr, "Error allocating memory for mixing_ratio in average (ancillary.c)\n");
      return -1;
    }
    for (lc = 0; lc < nlev; ++lc)
      mix[lc] = dens[lc] / dens_air[lc]; /* calculating to mixing ratio */
    for (lc = 0; lc < n_layer; lc++) {
      (*dens_avg)[lc] = linmix_average (mix[lc], mix[lc + 1], dens_air[lc], dens_air[lc + 1]);
    }
    free (mix);
    break;
  default:
    fprintf (stderr, "Error, unknown interpolation method input.");
    return -1;
  }

  if (status != 0) {
    fprintf (stderr, "Error %d calculating average concentration (average, in ancillary.c)\n", status);
    return status;
  }

  return status;
}

/****************************************************************/
/* log_average returns the logarithmic average density between  */
/* two layers with d[ens]1 und d[ens]2                          */
/* assuming logarithmic variation with hight                    */
/****************************************************************/

float log_average (float d1, float d2)
/* small function to calculate the average density */
/* assuming a function d=exp(-kx) */
{
  float avg = 0;
  float test1 = -666., test2 = -666.0;

  test1 = (d1 < d2 ? d1 : d2);
  test2 = fabs (d2 - d1);

  /* (d2-d1)/log(d2/d1) degenerates for d1<=0 or d1~=d2 */
  if (test1 <= 0 || test2 <= 0.001 * d1) /* in this case numerically unstable => */
    avg = 0.5 * (d1 + d2);               /* use linear interpolation instead */
  else
    avg = (d2 - d1) / log (d2 / d1);

  return avg;
}

/* double-precision twin of log_average(); keep both in sync */
double dlog_average (double d1, double d2)
/* small function to calculate the average density */
/* assuming a function d=exp(-kx) */
{
  double avg = 0;
  double test1 = -666., test2 = -666.0;

  test1 = (d1 < d2 ? d1 : d2);
  test2 = fabs (d2 - d1);

  if (test1 <= 0 || test2 <= 0.001 * d1) /* in this case numerically unstable => */
    avg = 0.5 * (d1 + d2);               /* use linear interpolation instead */
  else
    avg = (d2 - d1) / log (d2 / d1);

  return avg;
}

/*************************************************************************/
/* linmix_average returns the linear mixing ratio average density n_bar  */
/* between two layers with m[ixing ratio]1 and m2 and                    */
/* air number density n1 and n2                                          */
/* assuming linear variation of the mix ratio with height                */
/* and logarithmic variation of the air number dens with height          */
/*************************************************************************/

float linmix_average (float mmr1, float mmr2, float n1, float n2)
{
  float avg = 0;
  float test1 = -666., test2 = -666.0;
  float log_n = 0;

  test1 = (n1 < n2 ? n1 : n2);
  test2 = fabs (n1 - n2);

  if (test1 <= 0 || test2 <= 0.001 * n1)   /* in this case numerically unstable => */
    avg = 0.5 * (mmr1 * n1 + mmr2 * n2);   /* use linear interpolation instead */
  else {
    log_n = log (n2 / n1);
    avg = 1 / (log_n * log_n) * ((n2 * mmr2 - n1 * mmr1) * log_n - (mmr2 - mmr1) * (n2 - n1));
  }

  return avg;
}

/***********************************************************************/
/* mass_weighted_average returns the mass weighted average of x        */
/* between two layers with properties x1,n1 and x2,n2                  */
/* air number density n1 and n2                                        */
/* assuming linear variation of the x with height                      */
/* and logarithmic variation of the air number dens with height        */
/***********************************************************************/

float mass_weighted_average (float x1, float x2, float n1, float n2)
{
  float avg = 0;
  float test1 = -666., test2 = -666.0;

  test1 = (n1 < n2 ? n1 : n2);
  test2 = fabs (n1 - n2);

  if (test1 <= 0 || test2 <= 0.001 * n1)      /* in this case numerically unstable => */
    avg = (n1 * x1 + n2 * x2) / (n1 + n2);    /* use simple mass weighte average instead */
  else {
    avg = (n2 * x2 - n1 * x1) / (n2 - n1) - (x2 - x1) / log (n2 / n1);
  }

  return avg;
}

/************************************************************/
/* small function to calculate the average density          */
/* assuming cubic spline variation                          */
/* returns an array instead of number as previous functions */
/************************************************************/

int spline_average (float* x, float* y, int n, float** y_average)
{
  double *a0 = NULL, *a1 = NULL, *a2 = NULL, *a3 = NULL;
  int m;
  int i;
  double dx, dx2, dx3;
  int status;
  int descend = 0;
  double *x_sort = NULL, *y_sort = NULL;
  double* y_average_d;

  /* NOTE(review): x_sort/y_sort (and y_average_d below) are heap-allocated   */
  /* and leak on the early error return after spline_coeffc() fails.          */
  x_sort = (double*)calloc (n, sizeof (double));
  y_sort = (double*)calloc (n, sizeof (double));

  /* spline_coeffc() needs ascending x; remember if we had to reverse */
  if (x[1] <= x[0])
    descend = 1;

  /* number of layers = number of level - 1 */
  m = n - 1;

  if (!descend) {
    for (i = 0; i < n; i++) {
      x_sort[i] = (double)x[i];
      y_sort[i] = (double)y[i];
    }
  }
  else {
    for (i = 0; i < n; i++) {
      x_sort[i] = (double)x[n - 1 - i];
      y_sort[i] = (double)y[n - 1 - i];
    }
  }

  status = spline_coeffc (x_sort, y_sort, n, &a0, &a1, &a2, &a3);
  if (status != 0) {
    fprintf (stderr, "Error %d during execution of 'spline_coeffc'\n", status);
    fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__);
    return status;
  }

  y_average_d = (double*)calloc (m, sizeof (double));

  /* analytic layer mean of the cubic a0 + a1*dx + a2*dx^2 + a3*dx^3 */
  for (i = 0; i < m; i++) {
    dx = (double)(x_sort[i + 1] - x_sort[i]);
    dx2 = (double)(dx * dx);
    dx3 = (double)(dx * dx2);
    y_average_d[i] = ((double)0.25) * a3[i] * dx3 + ((double)1. / 3.)
* a2[i] * dx2 + ((double)0.5) * a1[i] * dx + a0[i]; } if (!descend) for (i = 0; i < m; i++) (*y_average)[i] = (float)y_average_d[i]; else for (i = 0; i < m; i++) (*y_average)[i] = (float)y_average_d[m - 1 - i]; return 0; } /****************************************************************/ /* alloc_and_read_netCDF_1D_double */ /* allocate and */ /* read an 1D array direct from netCDF file */ /* ncid input input id_number of the file */ /* dimension input name of dimension number in netCDF file */ /* dim output number of elements */ /* variable input name of data in netCDF file */ /* data output pointer to data (double) */ /* January 2007 Ulrich Hamann */ /****************************************************************/ int alloc_and_read_netCDF_1D_double (int ncid, char* dimension, size_t* dim, char* variable, double** data) { int status = 0; #if HAVE_NETCDF4 int id_dim = 0; int id_var = 0; /* get dimension id for "dimension" */ status = nc_inq_dimid (ncid, dimension, &id_dim); if (status != NC_NOERR) { fprintf (stderr, "Error '%s' locating '%s' from netCDF file\n", nc_strerror (status), dimension); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } /* get dimension length for "dimension" */ status = nc_inq_dimlen (ncid, id_dim, dim); if (status != NC_NOERR) { fprintf (stderr, "Error '%s' while reading '%s' from netCDF file\n", nc_strerror (status), dimension); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } /* fprintf (stderr, "reading %s, dim=%u \n", variable, *dim); */ /* allocate data */ if ((*(data) = calloc (*(dim), sizeof (double))) == NULL) { fprintf (stderr, "Error allocating memory for 'lat' (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return -1; } /* get id number for variable */ status = nc_inq_varid (ncid, variable, &id_var); if (status != NC_NOERR) { fprintf (stderr, "Error '%s' while getting id for '%s' (line %d, function %s in %s) 
\n", nc_strerror (status), variable, __LINE__, __func__, __FILE__); return status; } /* read variable (double) */ status = nc_get_var_double (ncid, id_var, *(data)); if (status != NC_NOERR) { fprintf (stderr, "Error '%s' while reading '%s' from netCDF file (line %d, function %s in %s) \n", nc_strerror (status), variable, __LINE__, __func__, __FILE__); return status; } #endif return status; } /**********************************************************/ /* get_grid_index */ /* */ /* small function that searches the index */ /* where "number" is closest to an element of "array" */ /* periodic takes care of 360 degree periodic boundaries */ /* */ /* Ulrich Hamann, Mai 2007 */ /**********************************************************/ int get_grid_index (float number, double* array, int n_array, int periodic) { int index = NOT_DEFINED_INTEGER; int i = 0; float min_dist = NOT_DEFINED_FLOAT; float dist = NOT_DEFINED_FLOAT; float grid_dist_1 = NOT_DEFINED_FLOAT; float grid_dist_2 = NOT_DEFINED_FLOAT; /* if we have only one entry than choose the first and only entry */ if (n_array == 1) { index = 0; return index; } if (periodic) { /* initialisation with periodic boundary considering periodicity */ min_dist = fabs (array[n_array - 1] - 360.0 - number); index = n_array - 1; if (fabs (array[n_array - 1] + 360.0 - number) < min_dist) min_dist = fabs (array[n_array - 1] + 360.0 - number); } else { /* initialisation with boundary */ min_dist = fabs (array[n_array - 1] - number); index = n_array - 1; } /* find minimum distance between number and grid elements */ for (i = 0; i < n_array; i++) { dist = fabs (array[i] - number); if (dist < min_dist) { min_dist = dist; index = i; } } /* check 1: index must be defined */ if (index == NOT_DEFINED_INTEGER) { fprintf (stderr, "Error, did NOT find pixel index for pixel %6.0f !!! 
\n", number); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return -2; } /* check 2: min_dist should be smaller than distance to next neighbour */ if (index != 0) { grid_dist_1 = fabs (array[index] - array[index - 1]); } if (index != n_array - 1) { grid_dist_2 = fabs (array[index] - array[index + 1]); /* choose the largest grid space distance (more relaxed constraint) */ if (grid_dist_2 > grid_dist_1) grid_dist_1 = grid_dist_2; } if (index == 0 && index == n_array - 1) grid_dist_1 = 0; if (min_dist > grid_dist_1) { fprintf (stderr, "Error while searching grid index, minimum distance (%f) between element %8.2f and grid \n", number, min_dist); fprintf (stderr, " is larger than one grid spacing! (grid spacing = %f) \n", grid_dist_1); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return -1; } /* correct periodic behaviour at the eastern boundary */ if (periodic) { if (fabs (array[0] + 360.0 - number) < min_dist || fabs (array[0] - 360.0 - number) < min_dist) { min_dist = fabs (array[n_array - 1] + 360.0 - number); index = 0; } } return index; } /****************************************************************/ /* get_all_netCDF_indices */ /* */ /* Purpose: */ /* get size and index for lat, lon, time */ /* */ /* input: */ /* ------ */ /* ncid id_number of the open netCDF file */ /* lat latitude */ /* lon longitude */ /* UTC universal time correlated */ /* time_interpolate switch for time interpolation */ /* verbose additional verbose output */ /* quiet no verbose output */ /* */ /* output: */ /* ------- */ /* ilat index for latitude */ /* nlat size of latitude in netCDF file */ /* ilon index for longitude */ /* nlon size of longitude in netCDF file */ /* nt number of time steps (2 if time interpolation) */ /* itime1 first index for time */ /* itime2 second index for time */ /* dt time step fraction where time */ /* is in between time1 and time2 */ /* */ /* September 2007 by Ulrich Hamann */ 
/****************************************************************/ int get_all_netCDF_indices (char* filename, float lat, float lon, int* ncid, long* ilat, size_t* nlat, long* ilon, size_t* nlon, double** lat_grid, double** lon_grid, struct tm UTC, int time_interpolate, int* nt, int* itime1, int* itime2, float* dt, int verbose, int quiet) { #if HAVE_NETCDF4 int status = 0; int i = 0, j = 0; float lon_tmp = NOT_DEFINED_FLOAT; float lat_min = NOT_DEFINED_FLOAT, lat_max = NOT_DEFINED_FLOAT; float lon_min = NOT_DEFINED_FLOAT, lon_max = NOT_DEFINED_FLOAT; char function_name[] = "get_all_netCDF_indices"; char file_name[] = "ancillary.c"; if (lat < -90.0 || 90.0 < lat) { fprintf (stderr, "Error, latitude %f outside range, maybe not specified (if == -999) in %s (%s)\n", lat, function_name, file_name); return -1; } if (lon < -360.0 || 360.0 < lon) { fprintf (stderr, "Error, longitude %f outside range, maybe not specified (if == -999) in %s (%s)\n", lon, function_name, file_name); return -1; } /* open netcdf file */ status = nc_open (filename, NC_NOWRITE, ncid); if (status != NC_NOERR) { fprintf (stderr, "Error %d opening netCDF file %s in %s (%s)\n", status, filename, function_name, file_name); return status; } /* read latitude array */ status = alloc_and_read_netCDF_1D_double ((*ncid), "lat", nlat, "lat", lat_grid); if (status != 0) { fprintf (stderr, "Error %d reading latitude in %s (%s)\n", status, function_name, file_name); return status; } /* search correct latitude index */ (*ilat) = get_grid_index (lat, *lat_grid, (*nlat), FALSE); if (ilat < 0) { fprintf (stderr, "Error -1 finding index for lat=%5.2f in %s, %s (%s)\n", lat, filename, function_name, file_name); return -1; } /* get range of latitude */ lat_min = (*lat_grid)[0]; lat_max = (*lat_grid)[0]; for (i = 1; i < (*nlat); i++) { if ((*lat_grid)[i] > lat_max) lat_max = (*lat_grid)[i]; if ((*lat_grid)[i] < lat_min) lat_min = (*lat_grid)[i]; } /* read longitude */ status = alloc_and_read_netCDF_1D_double ((*ncid), 
"lon", nlon, "lon", lon_grid); if (status != 0) { fprintf (stderr, "Error %d reading longitude in %s (%s)\n", status, function_name, file_name); return status; } /* get range of longitude */ lon_min = (*lon_grid)[0]; lon_max = (*lon_grid)[0]; for (j = 1; j < (*nlon); j++) { if ((*lon_grid)[j] > lon_max) lon_max = (*lon_grid)[j]; if ((*lon_grid)[j] < lon_min) lon_min = (*lon_grid)[j]; } lon_tmp = lon; /* longitude periodicity */ /* correct periodicity of latitude so, that longitude is inside the range of the map */ if (lon_tmp < lon_min) lon_tmp += 360.0; if (lon_tmp > lon_max) lon_tmp -= 360.0; /* search correct longitude index */ (*ilon) = get_grid_index (lon_tmp, *lon_grid, (*nlon), TRUE); if (ilon < 0) { fprintf (stderr, "Error -2 finding index for lon=%5.2f in %s, %s (%s)\n", lon, filename, function_name, file_name); return -2; } if (verbose) fprintf (stderr, " found %zd x %zd data points\n", (*nlat), (*nlon)); if (verbose) { fprintf (stderr, " map size = [%8.3f (South),%8.3f (North)] x [%8.3f (West),%8.3f (East)]\n", lat_min, lat_max, lon_min, lon_max); } /* get time index */ status = get_time_index ((*ncid), UTC, time_interpolate, nt, itime1, itime2, dt, verbose, quiet); if (status != 0) { fprintf (stderr, "Error %d, during get_time_index in %s (%s)\n", status, function_name, file_name); return status; } if (verbose) { if (UTC.tm_mday > 0 && (*nt) == 2) fprintf (stderr, " read data at: lat =%7.2f (%7.2f), lon =%7.2f (%7.2f), time = %s", (*lat_grid)[(*ilat)], lat, (*lon_grid)[(*ilon)], lon_tmp, asctime (&UTC)); else fprintf (stderr, " read data at: lat =%7.2f (%7.2f), lon =%7.2f (%7.2f), \n", (*lat_grid)[(*ilat)], lat, (*lon_grid)[(*ilon)], lon_tmp); if ((*nt) == 1) fprintf (stderr, " element: i=%6ld (lon), j=%6ld (lat), t=%6d (time)\n", (*ilon), (*ilat), (*itime1)); if ((*nt) == 2) fprintf (stderr, " element: i=%6ld (lon), j=%6ld (lat), (%4d/%4d) (time1/time2)\n", (*ilon), (*ilat), (*itime1), (*itime2)); } return status; #else fprintf (stderr, " 
***********************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use any netCDF option. Please get netcdf and rebuild. *\n"); fprintf (stderr, " ***********************************************************************\n"); return -1; #endif } /****************************************************************/ /* read_p_T_z_from_ECMWF_file */ /* */ /* Purpose: */ /* read pressure, temperature from ECMWF file */ /* interpolate data with respect to time if wanted */ /* calculate z-grid for the layers */ /* */ /* input: */ /* ------ */ /* ncid id_number of the open netCDF file */ /* ilat index for latitude */ /* nlat size of latitude in netCDF file */ /* ilon index for longitude */ /* nlon size of longitude in netCDF file */ /* nt number of time steps (2 if time interpolation) */ /* itime1 first index for time */ /* itime2 second index for time */ /* dt time step fraction where time */ /* is in between time1 and time2 */ /* time_interpolate switch for time interpolation */ /* verbose additional verbose output */ /* quiet no verbose output */ /* */ /* output: */ /* ------- */ /* nlay number of layers + 1 for surface */ /* p_layer layer averaged pressure */ /* T_layer layer averaged temperature */ /* z_layer z-levels for layer midpoints */ /* */ /* September 2007 by Ulrich Hamann */ /****************************************************************/ int read_p_T_z_from_ECMWF_file (int ncid, long ilat, size_t nlat, long ilon, size_t nlon, int nt, int itime1, int itime2, float dt, size_t* nlay, size_t* nlev, float altitude, float** p_layer, float** T_layer, float** z_layer, int verbose, int quiet) { int status = 0; float** tmp_p_level = NULL; float** tmp_p_layer = NULL; float** tmp_T_layer = NULL; int itime = 0; int t = NOT_DEFINED_INTEGER; float SP = -999.0; float dx = NOT_DEFINED_FLOAT; int lc = 0; char function_name[] = "read_p_T_z_from_ECMWF_file"; char 
file_name[] = "ancillary.c"; /* alloc nt timesteps for pressure */ if ((tmp_p_level = calloc (nt, sizeof (float*))) == NULL) { fprintf (stderr, "Error, allocating memory for pressure in %s (%s)\n", function_name, file_name); return -10; } /* alloc nt timesteps for pressure */ if ((tmp_p_layer = calloc (nt, sizeof (float*))) == NULL) { fprintf (stderr, "Error, allocating memory for pressure in %s (%s)\n", function_name, file_name); return -10; } /* alloc nt timesteps for temperature */ if ((tmp_T_layer = calloc (nt, sizeof (float*))) == NULL) { fprintf (stderr, "Error, allocating memory for temperature in %s (%s)\n", function_name, file_name); return -10; } /* read nt (= 1 or 2) time steps */ for (t = 0; t <= nt - 1; t++) { if (t == 0) itime = itime1; if (t == 1) itime = itime2; /* alloc and read pressure */ /* requires that SP/LNSP, hyai and hybi are in the ECMWF netCDF file */ /* in ancillary.c */ status = alloc_and_read_ECMWF_netCDF_pressure (ncid, &(tmp_p_level[t]), nlev, &(tmp_p_layer[t]), nlay, itime, ilat, ilon, FALSE); if (status != 0) { fprintf (stderr, "Error %d reading pressure from netCDF file\n", status); return status; } if ((tmp_p_layer[t] = realloc (tmp_p_layer[t], ((*nlay) + 1) * sizeof (float))) == NULL) { fprintf (stderr, "Error, reallocating memory for 'tmp_p_layer' in %s (%s)\n", function_name, file_name); return -1; } #if HAVE_NETCDF4 /* read surface pressure from ECMWF file */ status = read_ECMWF_surface_pressure (ncid, itime, ilat, ilon, &(SP), FALSE); /* FALSE == no verbose */ if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while read_ECMWF_surface_pressure in %s (%s)\n", nc_strerror (status), function_name, file_name); return status; } #else fprintf (stderr, " ***********************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use ECMWF input options. Please get netcdf and rebuild. 
*\n"); fprintf (stderr, " ***********************************************************************\n"); return -1; #endif tmp_p_layer[t][(*nlay)] = SP / 100.0; /* Pa -> hPa */ /* allocate and read temperature, defined on layers */ status = alloc_and_read_netCDF_column_float (ncid, "T", &tmp_T_layer[t], (*nlay), itime, ilat, ilon, FALSE); if (status != 0) { fprintf (stderr, "Error %d reading temperature 'T' from netCDF file\n", status); return status; } if ((tmp_T_layer[t] = realloc (tmp_T_layer[t], ((*nlay) + 1) * sizeof (float))) == NULL) { fprintf (stderr, "Error, reallocating memory for 'T_layer' in %s (%s)\n", function_name, file_name); return -1; } /* extrapolate temperature from last layer midpoint to surface */ dx = (log (tmp_p_level[0][(*nlev) - 1] / tmp_p_level[0][(*nlev) - 1]) - log (tmp_p_layer[0][(*nlay) - 1] / tmp_p_level[0][(*nlev) - 1])) / (log (tmp_p_layer[0][(*nlay) - 1] / tmp_p_level[0][(*nlev) - 1]) - log (tmp_p_layer[0][(*nlay) - 2] / tmp_p_level[0][(*nlev) - 1])); tmp_T_layer[t][(*nlay)] = (1.0 + dx) * tmp_T_layer[0][(*nlay) - 1] - dx * tmp_T_layer[0][(*nlay) - 2]; } /* number of final layers == number of layers plus one for surface */ (*nlay) = (*nlay) + 1; if (verbose) fprintf (stderr, " found %4zd levels, \n", (*nlev)); if (nt == 1) { /* no time interpolation needed, just copy data */ } else { /* time interpolation */ for (lc = 0; lc < (*nlay); lc++) { tmp_p_layer[0][lc] = (1.0 - dt) * tmp_p_layer[0][lc] + dt * tmp_p_layer[1][lc]; tmp_T_layer[0][lc] = (1.0 - dt) * tmp_T_layer[0][lc] + dt * tmp_T_layer[1][lc]; } } /* calculate z from T and p using hydrostatic equation (in ancillary.c) */ status = calculate_z_from_p_and_T (tmp_p_layer[0], tmp_T_layer[0], z_layer, (*nlay), altitude, FALSE); if (status != 0) { fprintf (stderr, "Error %d during calculate_z_from_p_and_T (line %d, function '%s' in '%s') \n", status, __LINE__, __func__, __FILE__); return status; } /* allocate space for final results */ if (((*p_layer) = calloc ((*nlay), sizeof 
(float))) == NULL) { fprintf (stderr, "Error, allocating memory for pressure (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return -10; } /* allocate space for final results */ if (((*T_layer) = calloc ((*nlay), sizeof (float))) == NULL) { fprintf (stderr, "Error, allocating memory for pressure (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return -10; } for (lc = 0; lc < (*nlay); lc++) { (*p_layer)[lc] = tmp_p_layer[0][lc]; (*T_layer)[lc] = tmp_T_layer[0][lc]; } return status; } /****************************************************************/ /* get_number_from_netCDF_map */ /* */ /* Purpose: */ /* search one entry (float) */ /* by latitude, longitude, time from one netCDF file */ /* */ /* input: */ /* ------ */ /* lat latitude */ /* lon longitude */ /* UTC universal time correlated */ /* filename name of the netCDF file including the map */ /* */ /* output: */ /* ------- */ /* result data read from the map */ /* February 2007 by Ulrich Hamann */ /****************************************************************/ int get_number_from_netCDF_map (float lat, float lon, struct tm UTC, int time_interpolate, char* filename, void* data, int external_type, char* variable_name, int verbose, int quiet) { int status = 0; #if HAVE_NETCDF4 int ncid = 0; int id_data = 0; nc_type netCDF_type; char lat_name[FILENAME_MAX] = ""; char lon_name[FILENAME_MAX] = ""; size_t nlat = 0; size_t nlon = 0; int ilat = NOT_DEFINED_INTEGER; int ilon = NOT_DEFINED_INTEGER; int dummy = NOT_DEFINED_INTEGER; double* lat_grid = NULL; double* lon_grid = NULL; int i = 0, j = 0, t = 0; int nt = -1; int itime1 = NOT_DEFINED_INTEGER, itime2 = NOT_DEFINED_INTEGER; float dt = NOT_DEFINED_FLOAT; int dimensions = NOT_DEFINED_INTEGER; size_t* index = NULL; float lon_tmp = NOT_DEFINED_FLOAT; float lat_min = NOT_DEFINED_FLOAT, lat_max = NOT_DEFINED_FLOAT; float lon_min = NOT_DEFINED_FLOAT, lon_max = NOT_DEFINED_FLOAT; unsigned char* data_uchar = NULL; short* data_short 
= NULL; int* data_int = NULL; float* data_float = NULL; double* data_double = NULL; /* int data_unit_len = NOT_DEFINED_INTEGER; */ /* nc_type unit_type; */ char data_unit[50] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"; float missing_value = NOT_DEFINED_FLOAT; float unit_factor = 1.0; float scale_factor = 1.0; float add_offset = 0.0; if (verbose) { fprintf (stderr, " ... read %s from map: \n", variable_name); fprintf (stderr, " %s \n", filename); } lon_tmp = lon; /* open netcdf file */ status = nc_open (filename, NC_NOWRITE, &ncid); if (status != NC_NOERR) { fprintf (stderr, "Error '%s' opening netCDF file %s\n", nc_strerror (status), filename); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } /* check format */ status = nc_inq_varid (ncid, "lat", &id_data); if (status == NC_NOERR) { strcpy (lat_name, "lat"); strcpy (lon_name, "lon"); } else { status = nc_inq_varid (ncid, "latitude", &id_data); if (status == NC_NOERR) { strcpy (lat_name, "latitude"); strcpy (lon_name, "longitude"); } else { fprintf (stderr, "Error, neither 'lat' nor 'latitude' in %s while reading '%s'\n", filename, variable_name); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } } /* get id for "data to read" (variable_name) */ status = nc_inq_varid (ncid, variable_name, &id_data); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while getting ncid for %s from %s\n", nc_strerror (status), variable_name, filename); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } /* get type of the variable */ status = nc_inq_vartype (ncid, id_data, &(netCDF_type)); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while getting type for %s from %s\n", nc_strerror (status), variable_name, filename); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return 
status; } if (verbose) fprintf (stderr, " data type = %d (NC_BYTE %d, NC_CHAR %d, NC_SHORT %d, NC_INT %d, NC_FLOAT %d, NC_DOUBLE %d)\n", netCDF_type, NC_BYTE, NC_CHAR, NC_SHORT, NC_INT, NC_FLOAT, NC_DOUBLE); /* read attribute unit and interprete it as much as possible */ status = nc_get_att_text (ncid, id_data, "units", data_unit); if (status != NC_NOERR) { /* no given units -> default unit_factor = 1.0 */ unit_factor = 1.0; /* that's OK, put status to 0 */ status = 0; } else { /* some information is given about the unit, try to interprete it */ if (strncasecmp ("per cent", data_unit, 8) == 0) unit_factor = 100.0; /* % -> 1 */ if (strcasecmp ("m", data_unit) == 0) unit_factor = 1000.0; /* m -> km */ if (strncasecmp ("meter", data_unit, 5) == 0) unit_factor = 1000.0; /* m -> km */ if (strcasecmp ("m**2 s**-2", data_unit) == 0) unit_factor = 9.80 * 1000.0; /* gpm -> km */ if (verbose) fprintf (stderr, " data unit = %s, scale data with %f\n", data_unit, unit_factor); } /* read attribute scale_factor */ status = nc_get_att_float (ncid, id_data, "scale_factor", &scale_factor); if (status != NC_NOERR) { /* no given scale_factor -> default scale_factor = 1.0 */ /* that's OK, put status to 0 */ status = 0; } else { /* if (verbose) */ /* fprintf (stderr," scale_factor = %f \n", scale_factor); */ } /* read attribute add_offset */ status = nc_get_att_float (ncid, id_data, "add_offset", &add_offset); if (status != NC_NOERR) { /* no given offset -> default offset = 0.0 */ /* that's OK, put status to 0 */ status = 0; } else { /* if (verbose) */ /* fprintf (stderr," add_offset = %f \n", add_offset); */ } /* read attribute _FillValue / missing_value */ status = nc_get_att_float (ncid, id_data, "missing_value", &missing_value); if (status != NC_NOERR) { /* no given missing_value -> try to read _FillValue */ status = nc_get_att_float (ncid, id_data, "_FillValue", &missing_value); if (status != NC_NOERR) { /* no given attribute, that's OK, we can' make checks, but we continue */ 
status = 0; } else { /* if (verbose) */ /* fprintf (stderr," missing_value = %f \n", missing_value); */ } } /* read latitude array */ status = alloc_and_read_netCDF_1D_double (ncid, lat_name, &nlat, lat_name, &(lat_grid)); if (status != 0) { fprintf (stderr, "Error %d reading '%s' from %s (line %d, function %s in %s)\n", status, lat_name, filename, __LINE__, __func__, __FILE__); return status; } /* read longitude */ status = alloc_and_read_netCDF_1D_double (ncid, lon_name, &nlon, lon_name, &(lon_grid)); if (status != 0) { fprintf (stderr, "Error %d reading '%s' from %s (line %d, function %s in %s)\n", status, lon_name, filename, __LINE__, __func__, __FILE__); return status; } if (verbose) fprintf (stderr, " found %zd (south-north) x %zd (west-east) data points\n", nlat, nlon); /* get range of latitude */ lat_min = lat_grid[0]; lat_max = lat_grid[0]; for (i = 1; i < nlat; i++) { if (lat_grid[i] > lat_max) lat_max = lat_grid[i]; if (lat_grid[i] < lat_min) lat_min = lat_grid[i]; } /* get range of longitude */ lon_min = lon_grid[0]; lon_max = lon_grid[0]; for (j = 1; j < nlon; j++) { if (lon_grid[j] > lon_max) lon_max = lon_grid[j]; if (lon_grid[j] < lon_min) lon_min = lon_grid[j]; } /* longitude periodicity */ /* correct periodicity of latitude so, that longitude is inside the range of the map */ if (lon_tmp < lon_min) lon_tmp += 360.0; if (lon_tmp > lon_max) lon_tmp -= 360.0; if (verbose) { fprintf (stderr, " map size = [%8.3f (South),%8.3f (North)] x [%8.3f (West),%8.3f (East)]\n", lat_min, lat_max, lon_min, lon_max); } /* read time in netCDF file (if present) */ status = nc_inq_dimid (ncid, "time", &dummy); if (status == NC_NOERR) { /* get time index */ status = get_time_index (ncid, UTC, time_interpolate, &(nt), &(itime1), &(itime2), &(dt), verbose, quiet); if (status != 0) { fprintf (stderr, "Error '%s', during get_time_index\n", nc_strerror (status)); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } /* data in 
the netCDF file is assumed to have 3 dimensions: lat/lon/time */ dimensions = 3; } else { /* no time variable in the netCDF file */ status = 0; /* it's OK, even if there is no time information */ nt = 1; /* take the first entry */ dimensions = 2; /* lon / lat */ } /* netCDF index might have 2 (lat/lon) or 3 (time/lat/lon) elements */ if ((index = calloc (dimensions, sizeof (size_t))) == NULL) { fprintf (stderr, "Error allocation of memory for 'index'\n"); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return -1; } /* start_2D[0] as type size_t can not be negative, that why first index is used here */ ilat = get_grid_index (lat, lat_grid, nlat, FALSE); if (ilat < 0) { fprintf (stderr, "Error %d, while searching index in lat_grid \n", ilat); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return ilat; } /* start_2D[1] as type size_t can not be negative, that why first index is used here */ ilon = get_grid_index (lon_tmp, lon_grid, nlon, TRUE); if (ilon < 0) { fprintf (stderr, "Error %d, while searching index in lon_grid \n", ilon); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return ilon; } /* alloc nt timesteps for data */ switch (netCDF_type) { case (NC_BYTE): case (NC_CHAR): if ((data_uchar = calloc (nt, sizeof (char*))) == NULL) { fprintf (stderr, "Error, allocating memory for data_intern\n"); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return -10; } break; case (NC_SHORT): if ((data_short = calloc (nt, sizeof (short*))) == NULL) { fprintf (stderr, "Error, allocating memory for data_intern\n"); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return -10; } break; case (NC_INT): if ((data_int = calloc (nt, sizeof (int*))) == NULL) { fprintf (stderr, "Error, allocating memory for data_intern\n"); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return 
-10; } break; case (NC_FLOAT): if ((data_float = calloc (nt, sizeof (float*))) == NULL) { fprintf (stderr, "Error, allocating memory for data_intern\n"); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return -10; } break; case (NC_DOUBLE): if ((data_double = calloc (nt, sizeof (double*))) == NULL) { fprintf (stderr, "Error, allocating memory for data_intern\n"); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return -10; } break; default: fprintf (stderr, "Error, unknown type (short, float, double ...) of variable %d\n", netCDF_type); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return -1; } if (dimensions == 2) { index[0] = ilat; index[1] = ilon; if (verbose) { fprintf (stderr, " read data at: lat =%7.2f (%7.2f), lon =%7.2f (%7.2f), ", lat_grid[ilat], lat, lon_grid[ilon], lon_tmp); fprintf (stderr, " element: (j=%6d) x (i=%6d) \n", ilat, ilon); } switch (netCDF_type) { case (NC_BYTE): case (NC_CHAR): status = nc_get_var1_uchar (ncid, id_data, index, &(data_uchar[0])); /* fprintf (stderr, " read data value is %d (unscaled)\n", (data_uchar[0])); */ if (data_uchar[0] == missing_value) status = ERROR_READ_MISSING_VALUE; break; case (NC_SHORT): status = nc_get_var1_short (ncid, id_data, index, &(data_short[0])); /* fprintf (stderr, " read data value is %d (unscaled)\n", (data_short[0])); */ if (data_short[0] == missing_value) status = ERROR_READ_MISSING_VALUE; break; case (NC_INT): status = nc_get_var1_int (ncid, id_data, index, &(data_int[0])); /* fprintf (stderr, " read data value is %d (unscaled)\n", (data_int[0])); */ if (data_int[0] == missing_value) status = ERROR_READ_MISSING_VALUE; break; case (NC_FLOAT): status = nc_get_var1_float (ncid, id_data, index, &(data_float[0])); /* fprintf (stderr, " read data value is %f (unscaled)\n", (data_float[0])); */ if (data_float[0] == missing_value) status = ERROR_READ_MISSING_VALUE; break; case (NC_DOUBLE): status = 
nc_get_var1_double (ncid, id_data, index, &(data_double[0])); /* fprintf (stderr, " read data value is %lf (unscaled)\n", (data_double[0])); */ if (data_double[0] == missing_value) status = ERROR_READ_MISSING_VALUE; break; default: fprintf (stderr, "Error, unknown variable type %d (short, float, ...) in data file %s (line %d, function %s in %s) \n", netCDF_type, filename, __LINE__, __func__, __FILE__); return -1; } if (status == ERROR_READ_MISSING_VALUE) { fprintf (stderr, " !!! Error 'Read missing_value' reading %s from %s\n", variable_name, filename); return status; } if (status != NC_NOERR) { fprintf (stderr, "Error '%s'\n", nc_strerror (status)); fprintf (stderr, " reading %s from %s\n", variable_name, filename); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } switch (netCDF_type) { case (NC_BYTE): case (NC_CHAR): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; } break; case (NC_SHORT): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_short[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(data_short[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_short[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(data_short[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_short[0] * 
scale_factor + add_offset) / unit_factor; break; } break; case (NC_INT): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_int[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(data_int[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_int[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(data_int[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_int[0] * scale_factor + add_offset) / unit_factor; break; } break; case (NC_FLOAT): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_float[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(data_float[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_float[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(data_float[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_float[0] * scale_factor + add_offset) / unit_factor; break; } break; case (NC_DOUBLE): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_double[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(data_double[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_double[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(data_double[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_double[0] * scale_factor + add_offset) / unit_factor; break; } break; default: fprintf (stderr, "Error, type of variable %d in get_float_from_netCDF_map (ancillary.c) \n", netCDF_type); return -1; } switch (external_type) { case 
(TYPE_CHAR): if (verbose) fprintf (stderr, " read data value is %d\n", (*(char*)data)); break; case (TYPE_SHORT): if (verbose) fprintf (stderr, " read data value is %d\n", (*(short*)data)); break; case (TYPE_INT): if (verbose) fprintf (stderr, " read data value is %d\n", (*(int*)data)); break; case (TYPE_FLOAT): if (verbose) fprintf (stderr, " read data value is %f\n", (*(float*)data)); break; case (TYPE_DOUBLE): if (verbose) fprintf (stderr, " read data value is %lf\n", (*(double*)data)); break; } } else if (dimensions == 3) { index[1] = ilat; index[2] = ilon; if (verbose) { if (UTC.tm_mday > 0) { fprintf (stderr, " read data at: lat =%7.2f (%7.2f), lon =%7.2f (%7.2f), time = %s", lat_grid[ilat], lat, lon_grid[ilon], lon_tmp, asctime (&UTC)); if (nt == 1) fprintf (stderr, " element: (j=%6d) x (i=%6d) x (t=%6d)\n", ilat, ilon, itime1); if (nt == 2) fprintf (stderr, " element: (j=%6d) x (i=%6d) x (t1/t2)=(%4d/%4d)\n", ilat, ilon, itime1, itime2); } else { fprintf (stderr, " read data at: lat =%7.2f (%7.2f), lon =%7.2f (%7.2f)\n", lat_grid[ilat], lat, lon_grid[ilon], lon_tmp); fprintf (stderr, " element: (j=%6d) x (i=%6d) x (t=%6d)\n", ilat, ilon, itime1); } } /* read nt (= 1 or 2) time steps */ for (t = 0; t <= nt - 1; t++) { if (t == 0) index[0] = itime1; if (t == 1) index[0] = itime2; switch (netCDF_type) { case (NC_BYTE): case (NC_CHAR): status = nc_get_var1_uchar (ncid, id_data, index, &(data_uchar[t])); /* fprintf (stderr, " read data value is %d (unscaled)\n", (data_uchar[t])); */ if (data_uchar[t] == missing_value) status = ERROR_READ_MISSING_VALUE; break; case (NC_SHORT): status = nc_get_var1_short (ncid, id_data, index, &(data_short[t])); /* fprintf (stderr, " read data value is %d (unscaled)\n", (data_short[t])); */ if (data_short[t] == missing_value) status = ERROR_READ_MISSING_VALUE; break; case (NC_INT): status = nc_get_var1_int (ncid, id_data, index, &(data_int[t])); /* fprintf (stderr, " read data value is %d (unscaled)\n", (data_int[t])); */ if 
(data_int[t] == missing_value) status = ERROR_READ_MISSING_VALUE; break; case (NC_FLOAT): status = nc_get_var1_float (ncid, id_data, index, &(data_float[t])); /* fprintf (stderr, " read data value is %f (unscaled)\n", (data_float[t])); */ if (data_float[t] == missing_value) status = ERROR_READ_MISSING_VALUE; break; case (NC_DOUBLE): status = nc_get_var1_double (ncid, id_data, index, &(data_double[t])); /* fprintf (stderr, " read data value is %lf (unscaled)\n", (data_double[t])); */ if (data_double[t] == missing_value) status = ERROR_READ_MISSING_VALUE; break; default: fprintf (stderr, "Error, unknown variable type %d (short, float, ...) in data file %s (line %d, function %s in %s) \n", netCDF_type, filename, __LINE__, __func__, __FILE__); return -1; } if (status == ERROR_READ_MISSING_VALUE) { fprintf (stderr, "Error 'Read missing_value' reading %s from %s\n", variable_name, filename); return status; } if (status != NC_NOERR) { fprintf (stderr, "Error '%s'\n", nc_strerror (status)); fprintf (stderr, " reading %s from %s\n", variable_name, filename); fprintf (stderr, " (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__); return status; } } if (nt == 1) { /* no time interpolation nessesary */ switch (netCDF_type) { case (NC_BYTE): case (NC_CHAR): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_uchar[0] * scale_factor + add_offset) / unit_factor; break; } break; case (NC_SHORT): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_short[0] * scale_factor + add_offset) / unit_factor; 
break; case (TYPE_SHORT): (*(short*)data) = (short)(data_short[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_short[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(data_short[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_short[0] * scale_factor + add_offset) / unit_factor; break; } break; case (NC_INT): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_int[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(data_int[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_int[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(data_int[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_int[0] * scale_factor + add_offset) / unit_factor; break; } break; case (NC_FLOAT): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_float[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(data_float[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_float[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(data_float[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_float[0] * scale_factor + add_offset) / unit_factor; break; } break; case (NC_DOUBLE): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(data_double[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(data_double[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(data_double[0] * scale_factor + add_offset) / unit_factor; break; 
case (TYPE_FLOAT): (*(float*)data) = (float)(data_double[0] * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(data_double[0] * scale_factor + add_offset) / unit_factor; break; } break; default: fprintf (stderr, "Error, type of variable %d in get_float_from_netCDF_map (ancillary.c) \n", netCDF_type); return -1; } if (verbose) fprintf (stderr, " read data value is"); } else { /* time interpolation */ switch (netCDF_type) { case (NC_BYTE): case (NC_CHAR): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(((1.0 - dt) * data_uchar[0] + dt * data_uchar[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(((1.0 - dt) * data_uchar[0] + dt * data_uchar[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(((1.0 - dt) * data_uchar[0] + dt * data_uchar[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(((1.0 - dt) * data_uchar[0] + dt * data_uchar[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(((1.0 - dt) * data_uchar[0] + dt * data_uchar[1]) * scale_factor + add_offset) / unit_factor; break; } break; case (NC_SHORT): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(((1.0 - dt) * data_short[0] + dt * data_short[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(((1.0 - dt) * data_short[0] + dt * data_short[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(((1.0 - dt) * data_short[0] + dt * data_short[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(((1.0 - dt) * data_short[0] + dt * data_short[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(((1.0 - dt) * data_short[0] + dt * data_short[1]) * scale_factor + 
add_offset) / unit_factor; break; } break; case (NC_INT): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(((1.0 - dt) * data_int[0] + dt * data_int[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(((1.0 - dt) * data_int[0] + dt * data_int[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(((1.0 - dt) * data_int[0] + dt * data_int[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(((1.0 - dt) * data_int[0] + dt * data_int[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(((1.0 - dt) * data_int[0] + dt * data_int[1]) * scale_factor + add_offset) / unit_factor; break; } break; case (NC_FLOAT): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(((1.0 - dt) * data_float[0] + dt * data_float[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(((1.0 - dt) * data_float[0] + dt * data_float[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(((1.0 - dt) * data_float[0] + dt * data_float[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(((1.0 - dt) * data_float[0] + dt * data_float[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(((1.0 - dt) * data_float[0] + dt * data_float[1]) * scale_factor + add_offset) / unit_factor; break; } break; case (NC_DOUBLE): switch (external_type) { case (TYPE_CHAR): (*(char*)data) = (char)(((1.0 - dt) * data_double[0] + dt * data_double[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_SHORT): (*(short*)data) = (short)(((1.0 - dt) * data_double[0] + dt * data_double[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_INT): (*(int*)data) = (int)(((1.0 - dt) * data_double[0] + dt * data_double[1]) * 
scale_factor + add_offset) / unit_factor; break; case (TYPE_FLOAT): (*(float*)data) = (float)(((1.0 - dt) * data_double[0] + dt * data_double[1]) * scale_factor + add_offset) / unit_factor; break; case (TYPE_DOUBLE): (*(double*)data) = (double)(((1.0 - dt) * data_double[0] + dt * data_double[1]) * scale_factor + add_offset) / unit_factor; break; } break; default: fprintf (stderr, "Error, type of variable %d in get_float_from_netCDF_map (ancillary.c) \n", netCDF_type); return -1; } if (verbose) fprintf (stderr, " interpolated data value is"); } if (verbose) { switch (external_type) { case (TYPE_CHAR): fprintf (stderr, " %d\n", (*(char*)data)); break; case (TYPE_SHORT): fprintf (stderr, " %d\n", (*(short*)data)); break; case (TYPE_INT): fprintf (stderr, " %d\n", (*(int*)data)); break; case (TYPE_FLOAT): fprintf (stderr, " %f\n", (*(float*)data)); break; case (TYPE_DOUBLE): fprintf (stderr, " %lf\n", (*(double*)data)); break; } } } nc_close (ncid); free (lat_grid); free (lon_grid); free (index); switch (netCDF_type) { case (NC_BYTE): case (NC_CHAR): free (data_uchar); break; case (NC_SHORT): free (data_short); break; case (NC_INT): free (data_int); break; case (NC_FLOAT): free (data_float); break; case (NC_DOUBLE): free (data_double); break; default: fprintf (stderr, "Error, unknown variable type %d (short, float, ...) in data file %s (line %d, function %s in %s) \n", netCDF_type, filename, __LINE__, __func__, __FILE__); return -1; } #else fprintf (stderr, " ******************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use the any map option. Please get netcdf and rebuild. 
*\n"); fprintf (stderr, " ******************************************************************\n"); return -1; #endif return status; } /*****************************************************************/ /* read satellite geometry */ /* */ /* Purpose: */ /* read satellite geometry from netCDF file */ /* */ /* input: */ /* ------ */ /* sat_pixel_x pixel index in x direction */ /* sat_pixel_y pixel index in y direction */ /* UTC simulated time (universal time correlated) */ /* filename name of netCDF file (contain sat geometry) */ /* */ /* output: */ /* ------- */ /* latitude latitude */ /* longitude longitude */ /* numu number of cos(theta_sat) angles */ /* maxumu number of cos(theta_sat) angles */ /* umu cos(theta_sat) angles */ /* nphi number of azimith_sat angles */ /* maxphi number of azimith_sat angles */ /* phi azimith_sat angles in degrees */ /* */ /* Mai 2007 by Ulrich Hamann */ /*****************************************************************/ int read_sat_geometry (int pixel_x, int pixel_y, struct tm UTC, char* filename, float* latitude, float* longitude, int* numu, int* maxumu, float** umu, int* nphi, int* maxphi, float** phi, int solver, int verbose, int quiet) { #if HAVE_NETCDF4 int status = 0; int ncid = NOT_DEFINED_INTEGER; int id_lat = NOT_DEFINED_INTEGER; int id_lon = NOT_DEFINED_INTEGER; int id_vza = NOT_DEFINED_INTEGER; int id_vaa = NOT_DEFINED_INTEGER; size_t n_pixel_x = NOT_DEFINED_INTEGER; size_t n_pixel_y = NOT_DEFINED_INTEGER; double* sat_pixel_x_grid = NULL; double* sat_pixel_y_grid = NULL; int ix = NOT_DEFINED_INTEGER; int iy = NOT_DEFINED_INTEGER; int dummy = NOT_DEFINED_INTEGER; size_t* index = NULL; int nt = -1; int itime1 = NOT_DEFINED_INTEGER, itime2 = NOT_DEFINED_INTEGER; float dt = NOT_DEFINED_FLOAT; int dimensions = NOT_DEFINED_INTEGER; /* open netcdf file */ status = nc_open (filename, NC_NOWRITE, &ncid); if (status != NC_NOERR) { fprintf (stderr, "Error '%s' opening netCDF file %s\n", nc_strerror (status), filename); return status; 
} /* get id for latitude */ status = nc_inq_varid (ncid, "lat", &id_lat); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while getting id for 'latitude' from %s\n", nc_strerror (status), filename); return status; } /* get id for longitude */ status = nc_inq_varid (ncid, "lon", &id_lon); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while getting id for 'longitude' from %s\n", nc_strerror (status), filename); return status; } /* get id for viewing zenith angle */ status = nc_inq_varid (ncid, "vza", &id_vza); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while getting id for 'viewing zenith angle' from %s\n", nc_strerror (status), filename); return status; } /* get id for viewing azimuth angle */ status = nc_inq_varid (ncid, "vaa", &id_vaa); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while getting id for 'viewing azimuth angle' from %s\n", nc_strerror (status), filename); return status; } /* read pixel_x array (must already be stored in the result file) */ status = alloc_and_read_netCDF_1D_double (ncid, "pixel_x", &n_pixel_x, "pixel_x", &(sat_pixel_x_grid)); if (status != 0) { fprintf (stderr, "Error %d reading sat_pixel_x-grid from %s\n", status, filename); return status; } /* read pixel_y array (must already be stored in the file) */ status = alloc_and_read_netCDF_1D_double (ncid, "pixel_y", &n_pixel_y, "pixel_y", &(sat_pixel_y_grid)); if (status != 0) { fprintf (stderr, "Error %d reading sat_pixel_y-grid from %s\n", status, filename); return status; } /* search index */ ix = get_grid_index (pixel_x, sat_pixel_x_grid, n_pixel_x, FALSE); if (ix < 0) { fprintf (stderr, "Error %d, while searching index for pixel %6d in pixel-x_grid \n", ix, pixel_x); return ix; } iy = get_grid_index (pixel_y, sat_pixel_y_grid, n_pixel_y, FALSE); if (iy < 0) { fprintf (stderr, "Error %d, while searching index for pixel %6d in pixel-y_grid \n", iy, pixel_y); return iy; } /* read time in netCDF file (if present) */ status = nc_inq_dimid (ncid, 
"time", &dummy); if (status == NC_NOERR) { /* get time index */ status = get_time_index (ncid, UTC, TIME_NEAREST_DATE, &(nt), &(itime1), &(itime2), &(dt), verbose, quiet); if (status != 0) { fprintf (stderr, "Error %d, during get_time_index in get_float_from_netCDF_map (ancillary.c)\n", status); return status; } /* data in the netCDF file is assumed to have 3 dimensions: lat/lon/time */ dimensions = 3; } else { /* no time variable in the netCDF file */ nt = 1; dimensions = 2; } status = NC_NOERR; /* netCDF index might have 2 (lat/lon) or 3 (time/lat/lon) elements */ if ((index = calloc (dimensions, sizeof (size_t))) == NULL) { fprintf (stderr, "Error: Allocation of index in get_float_from_netCDF_map (ancillary.c)\n"); return -1; } (*numu) = 1; (*maxumu) = 1; (*nphi) = 1; switch (solver) { case SOLVER_SDISORT: case SOLVER_SPSDISORT: case SOLVER_FDISORT1: case SOLVER_FDISORT2: (*maxphi) = 3; /* Minimum number required by disort. */ break; case SOLVER_DISORT: case SOLVER_FTWOSTR: case SOLVER_SOS: case SOLVER_MONTECARLO: case SOLVER_POLRADTRAN: case SOLVER_TZS: case SOLVER_SSS: case SOLVER_SSSI: case SOLVER_NULL: case SOLVER_RODENTS: case SOLVER_TWOSTREBE: case SOLVER_TWOMAXRND: case SOLVER_TWOMAXRND3C: case SOLVER_DYNAMIC_TWOSTREAM: case SOLVER_DYNAMIC_TENSTREAM: case SOLVER_TWOSTR: case SOLVER_SSLIDAR: (*maxphi) = 1; break; default: fprintf (stderr, "Error, unknown rte_solver %d in read_sat_geometry\n", solver); return -1; } if (((*umu) = calloc ((*numu), sizeof (float))) == NULL) { fprintf (stderr, "Error, allocating memory for umu in read_sat_geometry (ancillary.c)\n"); return -10; } if (((*phi) = calloc ((*nphi), sizeof (float))) == NULL) { fprintf (stderr, "Error, allocating memory for phi in read_sat_geometry (ancillary.c)\n"); return -10; } if (dimensions == 2) { index[0] = iy; index[1] = ix; if (verbose) { fprintf (stderr, "*** read satellite geometry for pixel_x =%5.0f (%5d), pixel_y =%7.2f (%5d), ", sat_pixel_x_grid[ix], pixel_x, sat_pixel_y_grid[iy], 
pixel_y); fprintf (stderr, " element: %6d x %6d \n", ix + 1, iy + 1); } } else if (dimensions == 3) { index[0] = itime1; index[1] = iy; index[2] = ix; if (verbose) { if (UTC.tm_mday > 0) { fprintf (stderr, " ... read satellite geometry for pixel_x =%6.0f (%5d), lon =%6.0f (%5d), time = %s", sat_pixel_x_grid[ix], pixel_x, sat_pixel_y_grid[iy], pixel_y, asctime (&UTC)); if (nt == 1) fprintf (stderr, " element: %6d x %6d x %6d\n", ix + 1, iy + 1, itime1 + 1); if (nt == 2) fprintf (stderr, " element: %6d x %6d x (%4d/%4d)\n", ix + 1, iy + 1, itime1 + 1, itime2 + 1); } else { fprintf (stderr, " ... read satellite geometry for pixel_x =%6.0f (%5d), lon =%6.0f (%5d)\n", sat_pixel_x_grid[ix], pixel_x, sat_pixel_y_grid[iy], pixel_y); fprintf (stderr, " element: %6d x %6d x %6d\n", ix + 1, iy + 1, itime1 + 1); } } } else { fprintf (stderr, "Error, wrong number for dimensions %d in read_sat_geometry (ancillary.c)\n", dimensions); return -1; } /* fprintf (stderr,"index= [%d, %d], status = %d/%d \n", index[0], index[1], status, NC_NOERR ); */ status = nc_get_var1_float (ncid, id_lat, index, latitude); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', reading 'lat' (latitude) from %s in read_sat_geometry (ancillary.c)\n", nc_strerror (status), filename); return -1; } status = nc_get_var1_float (ncid, id_lon, index, longitude); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', reading 'lon' (longitude) from %s in read_sat_geometry (ancillary.c)\n", nc_strerror (status), filename); return -1; } status = nc_get_var1_float (ncid, id_vza, index, &((*umu)[0])); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', reading 'vza' (viewing zenith angle) from %s in read_sat_geometry (ancillary.c)\n", nc_strerror (status), filename); return -1; } status = nc_get_var1_float (ncid, id_vaa, index, &((*phi)[0])); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', reading 'vaa' (viewing azimuth angle) from %s in read_sat_geometry (ancillary.c)\n", nc_strerror (status), 
filename); return -1; } if (verbose) fprintf (stderr, " latitude: %9.5f, longitude: %9.5f, theta_sat=%8.3f, phi_sat=%8.3f \n", (*latitude), (*longitude), (*umu)[0], (*phi)[0]); (*umu)[0] = cos (PI / 180 * (*umu)[0]); return status; #else fprintf (stderr, " ***********************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use the satellite_geometry option. Please get netcdf and rebuild. *\n"); fprintf (stderr, " ***********************************************************************\n"); return -1; #endif } /****************************************************************/ /* read_u10_from_map */ /* */ /* Purpose: */ /* read wind in 10m height from an ECMWF file */ /* */ /* input: */ /* ------ */ /* lat latitude */ /* lon longitude */ /* UTC universal time correlated */ /* filename name of the netCDF file including the wind map */ /* */ /* output: */ /* ------- */ /* u10 wind velocity (vector norm) in m/s */ /* */ /* September 2007 by Ulrich Hamann */ /****************************************************************/ int read_u10_from_map (float lat, float lon, struct tm UTC, int time_interpolate, char* filename, float* u10, int verbose, int quiet) { #if HAVE_NETCDF4 int status = 0; float u_10 = 0.0; float v_10 = 0.0; float* u = NULL; float* v = NULL; float* w = NULL; float* z_wind = NULL; size_t nlev = 0; char function_name[] = "read_u10_from_ECMWF_file"; char file_name[] = "ancillary.c"; /* read u in 10m height */ status = get_number_from_netCDF_map (lat, lon, UTC, time_interpolate, filename, &(u_10), TYPE_FLOAT, "U10", verbose, quiet); if (status != 0) { fprintf (stderr, " Error '%s' reading '%s' from %s in %s (%s) \n", nc_strerror (status), "10U", filename, function_name, file_name); } /* read v in 10m height */ status = get_number_from_netCDF_map (lat, lon, UTC, time_interpolate, filename, &(v_10), TYPE_FLOAT, "V10", verbose, quiet); if (status 
!= 0) { fprintf (stderr, " Error '%s' reading '%s' from %s in %s (%s) \n", nc_strerror (status), "10V", filename, function_name, file_name); } if (status == 0) { /* everything OK, calculate norm of the vector */ (*u10) = sqrt (u_10 * u_10 + v_10 * v_10); return status; } else { if (verbose) { fprintf (stderr, " ... didn't find u10 and v10 in netCDF file %s \n", filename); fprintf (stderr, " try to read profiles u(z) and v(z) \n"); } status = read_wind_from_ECMWF_file (lat, lon, UTC, time_interpolate, filename, 0.0, &(u), &(v), &(w), &(z_wind), &(nlev), verbose, quiet); if (status != 0) { fprintf (stderr, "Error '%s' reading '%s' from %s in %s (%s) \n", nc_strerror (status), "u(z) and v(z)", filename, function_name, file_name); return status; } if (verbose) fprintf (stderr, " take data from lowest level, lc=%3zd, z=%7.3f km, u=%7.2f m/s, v=%7.2f m/s \n", (nlev - 2), z_wind[nlev - 2], u[nlev - 2], v[nlev - 2]); /* (nlev-1) = surface, where u=0, v=0; (nlev-2)=last layer before surface, in ECMWF data approx 20m */ (*u10) = sqrt (u[nlev - 2] * u[nlev - 2] + v[nlev - 2] * v[nlev - 2]); free (u); free (v); free (z_wind); } return status; #else fprintf (stderr, " ***********************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use the cox_and_munk_u10_map option. Please get netcdf and rebuild. 
*\n"); fprintf (stderr, " ***********************************************************************\n"); return -1; #endif } /****************************************************************/ /* read_wind_from_ECMWF_file */ /* */ /* Purpose: */ /* read wind components u and v from an ECMWF file */ /* */ /* input: */ /* ------ */ /* lat latitude */ /* lon longitude */ /* UTC universal time correlated */ /* filename name of the netCDF file including the wind map */ /* */ /* output: */ /* ------- */ /* u-profil wind (west-east component) */ /* v-profil wind (south north component) */ /* z_wind height scale for u and v */ /* nlev_wind number of levels for u, v, and z */ /* */ /* September 2007 by Ulrich Hamann */ /****************************************************************/ int read_wind_from_ECMWF_file (float lat, float lon, struct tm UTC, int time_interpolate, char* filename, float altitude, float** u, float** v, float** w, float** z_wind, size_t* nlev, int verbose, int quiet) { int status = 0; #if HAVE_NETCDF4 int ncid = NOT_DEFINED_INTEGER; int lc = NOT_DEFINED_INTEGER; int t = NOT_DEFINED_INTEGER; int nt = -1; long ilat = NOT_DEFINED_INTEGER, ilon = NOT_DEFINED_INTEGER, itime = NOT_DEFINED_INTEGER; /* index for lat, lon, time in netCDF file */ size_t nlat = 0; size_t nlon = 0; double* ECMWF_lat = NULL; double* ECMWF_lon = NULL; int itime1 = -1, itime2 = -1; float dt = NOT_DEFINED_FLOAT; size_t tmp_nlev = 0; size_t nlay = 0; float** p_level = NULL; float** p_layer = NULL; float** T_layer = NULL; float** u_layer = NULL; float** v_layer = NULL; float** w_layer = NULL; int id_var_test = 0; char T_name[2] = ""; char U_name[2] = ""; char V_name[2] = ""; char W_name[2] = ""; float dx = NOT_DEFINED_FLOAT; float SP = -999.0; char function_name[] = "read_u10_from_ECMWF_file"; char file_name[] = "ancillary.c"; if (verbose) fprintf (stderr, " ... 
read wind profiles from netCDF file %s \n", filename); /* open netcdf file */ status = nc_open (filename, NC_NOWRITE, &ncid); if (status != NC_NOERR) { fprintf (stderr, "Error %d opening netCDF file %s in %s (%s)\n", status, filename, function_name, file_name); return status; } /* read latitude array */ status = alloc_and_read_netCDF_1D_double (ncid, "lat", &nlat, "lat", &(ECMWF_lat)); if (status != 0) { fprintf (stderr, "Error %d reading latitude in %s (%s)\n", status, function_name, file_name); return status; } /* search correct latitude index */ ilat = get_grid_index (lat, ECMWF_lat, nlat, FALSE); if (ilat < 0) { fprintf (stderr, "Error -1 finding index for lat=%5.2f in %s, %s (%s)\n", lat, filename, function_name, file_name); return -1; } /* read longitude */ status = alloc_and_read_netCDF_1D_double (ncid, "lon", &nlon, "lon", &(ECMWF_lon)); if (status != 0) { fprintf (stderr, "Error %d reading longitude in %s (%s)\n", status, function_name, file_name); return status; } /* search correct longitude index */ ilon = get_grid_index (lon, ECMWF_lon, nlon, TRUE); if (ilon < 0) { fprintf (stderr, "Error -2 finding index for lon=%5.2f in %s, %s (%s)\n", lon, filename, function_name, file_name); return -2; } /* get time index */ status = get_time_index (ncid, UTC, time_interpolate, &(nt), &(itime1), &(itime2), &(dt), verbose, quiet); if (status != 0) { fprintf (stderr, "Error %d, during get_time_index in %s (%s)\n", status, function_name, file_name); return status; } /* alloc nt timesteps for pressure */ if ((p_level = calloc (nt, sizeof (float*))) == NULL) { fprintf (stderr, "Error, allocating memory for pressure in %s (%s)\n", function_name, file_name); return -10; } /* alloc nt timesteps for pressure */ if ((p_layer = calloc (nt, sizeof (float*))) == NULL) { fprintf (stderr, "Error, allocating memory for pressure in %s (%s)\n", function_name, file_name); return -10; } /* alloc nt timesteps for temperature */ if ((T_layer = calloc (nt, sizeof (float*))) == NULL) { 
fprintf (stderr, "Error, allocating memory for temperature in %s (%s)\n", function_name, file_name); return -10; } /* alloc nt timesteps for west-east wind component u */ if ((u_layer = calloc (nt, sizeof (float*))) == NULL) { fprintf (stderr, "Error, allocating memory for water vapour in %s (%s)\n", function_name, file_name); return -10; } /* alloc nt timesteps for south north wind component v */ if ((v_layer = calloc (nt, sizeof (float*))) == NULL) { fprintf (stderr, "Error, allocating memory for water vapour in %s (%s)\n", function_name, file_name); return -10; } /* alloc nt timesteps for vertical wind component w */ if ((w_layer = calloc (nt, sizeof (float*))) == NULL) { fprintf (stderr, "Error, allocating memory for water vapour in %s (%s)\n", function_name, file_name); return -10; } /* read nt (= 1 or 2) time steps */ for (t = 0; t <= nt - 1; t++) { if (t == 0) itime = itime1; if (t == 1) itime = itime2; /* alloc and read pressure */ /* requires that SP/LNSP, hyai and hybi are in the ECMWF netCDF file */ /* in ancillary.c */ status = alloc_and_read_ECMWF_netCDF_pressure (ncid, &(p_level[t]), &(tmp_nlev), &(p_layer[t]), &(nlay), itime, ilat, ilon, verbose); if (status != 0) { fprintf (stderr, "Error %d reading pressure from netCDF file %s\n", status, filename); return status; } if ((p_layer[t] = realloc (p_layer[t], (nlay + 1) * sizeof (float))) == NULL) { fprintf (stderr, "Error, reallocating memory for 'p_layer' in %s (%s)\n", function_name, file_name); return -1; } /* read surface pressure from ECMWF file */ status = read_ECMWF_surface_pressure (ncid, itime, ilat, ilon, &(SP), verbose); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while read_ECMWF_surface_pressure in %s (%s)\n", nc_strerror (status), function_name, file_name); return status; } p_layer[t][nlay] = SP / 100.0; status = nc_inq_varid (ncid, "t", &id_var_test); if (status == NC_NOERR) { strcpy (T_name, "t"); strcpy (U_name, "u"); strcpy (V_name, "v"); strcpy (W_name, "w"); } else { 
status = nc_inq_varid (ncid, "T", &id_var_test); if (status == NC_NOERR) { strcpy (T_name, "T"); strcpy (U_name, "U"); strcpy (V_name, "V"); strcpy (W_name, "W"); } else { fprintf (stderr, "Error, unknown format of the ECMWF wind file %s\n", filename); return status; } } /* allocate and read temperature, defined on layers */ status = alloc_and_read_netCDF_column_float (ncid, T_name, &T_layer[t], nlay, itime, ilat, ilon, verbose); if (status != 0) { fprintf (stderr, "Error %d reading temperature '%s' from netCDF file %s\n", status, T_name, filename); return status; } if ((T_layer[t] = realloc (T_layer[t], (nlay + 1) * sizeof (float))) == NULL) { fprintf (stderr, "Error, reallocating memory for 'T_layer' in %s (%s)\n", function_name, file_name); return -1; } /* extrapolate temperature from last layer midpoint to surface */ dx = (log (p_level[0][tmp_nlev - 1] / p_level[0][tmp_nlev - 1]) - log (p_layer[0][nlay - 1] / p_level[0][tmp_nlev - 1])) / (log (p_layer[0][nlay - 1] / p_level[0][tmp_nlev - 1]) - log (p_layer[0][nlay - 2] / p_level[0][tmp_nlev - 1])); T_layer[t][nlay] = (1.0 + dx) * T_layer[0][nlay - 1] - dx * T_layer[0][nlay - 2]; /* allocate and read wind component u, defined on layers */ status = alloc_and_read_netCDF_column_float (ncid, U_name, &u_layer[t], nlay, itime, ilat, ilon, verbose); if (status != 0) { fprintf (stderr, "Error %d reading wind component '%s' from netCDF file %s\n", status, U_name, filename); return status; } if ((u_layer[t] = realloc (u_layer[t], (nlay + 1) * sizeof (float))) == NULL) { fprintf (stderr, "Error, reallocating memory for 'u_layer' in %s (%s)\n", function_name, file_name); return -1; } u_layer[t][nlay] = 0.0; /* allocate and read wind component v, defined on layers */ status = alloc_and_read_netCDF_column_float (ncid, V_name, &v_layer[t], nlay, itime, ilat, ilon, verbose); if (status != 0) { fprintf (stderr, "Error %d reading wind component '%s' from netCDF file %s\n", status, V_name, filename); return status; } if 
((v_layer[t] = realloc (v_layer[t], (nlay + 1) * sizeof (float))) == NULL) { fprintf (stderr, "Error, reallocating memory for 'v_layer' in %s (%s)\n", function_name, file_name); return -1; } v_layer[t][nlay] = 0.0; /* allocate and read wind component w, defined on layers */ status = alloc_and_read_netCDF_column_float (ncid, W_name, &w_layer[t], nlay, itime, ilat, ilon, verbose); if (status != 0) { fprintf (stderr, "Error %d reading wind component '%s' from netCDF file %s\n", status, W_name, filename); return status; } if ((w_layer[t] = realloc (w_layer[t], (nlay + 1) * sizeof (float))) == NULL) { fprintf (stderr, "Error, reallocating memory for 'w_layer' in %s (%s)\n", function_name, file_name); return -1; } w_layer[t][nlay] = 0.0; } if (verbose) { fprintf (stderr, " found %zd x %zd x %zd (lat x lon x lev) data points\n", nlat, nlon, (*nlev)); fprintf (stderr, " reading pixel lat = %5.2f (%4ld), lon = %5.2f (%4ld)\n", lat, ilat, lon, ilon); } /* number of final levels == number of layers plus one, which was additional realloced above */ (*nlev) = nlay + 1; if (nt == 1) { /* no time interpolation needed, do nothing */ } else { /* write time interpolated data into the zero'th entry */ for (lc = 0; lc < (*nlev); lc++) { p_layer[0][lc] = (1.0 - dt) * p_layer[0][lc] + dt * p_layer[1][lc]; T_layer[0][lc] = (1.0 - dt) * T_layer[0][lc] + dt * T_layer[1][lc]; u_layer[0][lc] = (1.0 - dt) * u_layer[0][lc] + dt * u_layer[1][lc]; v_layer[0][lc] = (1.0 - dt) * v_layer[0][lc] + dt * v_layer[1][lc]; w_layer[0][lc] = (1.0 - dt) * w_layer[0][lc] + dt * w_layer[1][lc]; } } /* calculate z from T and p using hydrostatic equation (in ancillary.c) */ status = calculate_z_from_p_and_T (p_layer[0], T_layer[0], z_wind, (*nlev), altitude, verbose); if (status != 0) { fprintf (stderr, "Error %d during calculate_z_from_p_and_T (line %d, function '%s' in '%s') \n", status, __LINE__, __func__, __FILE__); return status; } /* allocate space for final results */ if (((*u) = calloc ((*nlev), sizeof 
(float))) == NULL) { fprintf (stderr, "Error, allocating memory for pressure (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return -10; } /* allocate space for final results */ if (((*v) = calloc ((*nlev), sizeof (float))) == NULL) { fprintf (stderr, "Error, allocating memory for pressure (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return -10; } /* allocate space for final results */ if (((*w) = calloc ((*nlev), sizeof (float))) == NULL) { fprintf (stderr, "Error, allocating memory for pressure (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return -10; } /* copy data */ for (lc = 0; lc < (*nlev); lc++) { (*u)[lc] = u_layer[0][lc]; (*v)[lc] = v_layer[0][lc]; (*w)[lc] = w_layer[0][lc]; /* /\* additional verbose output at layer mid-points *\/ */ /* fprintf (stderr, " lc =%3d, z=%7.3f, u=%7.3f, v=%7.3f, w=%7.3f ( p_layer=%9.3f, T_layer=%7.3f ) \n", */ /* lc, (*z_wind)[lc], (*u)[lc], (*v)[lc], (*w)[lc], p_layer[0][lc], T_layer[0][lc]); */ } ASCII_free_float (p_layer, nt); ASCII_free_float (T_layer, nt); ASCII_free_float (u_layer, nt); ASCII_free_float (v_layer, nt); ASCII_free_float (w_layer, nt); #else fprintf (stderr, " ******************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use the ECMWF data file option. Please get netcdf and rebuild. 
*\n");
  fprintf (stderr, " ******************************************************************\n");
  return -1;
#endif
  return status;
}

/*****************************************************************/
/* get_time_index                                                */
/*                                                               */
/* Purpose:                                                      */
/*   read the time grid from a netCDF file and search the        */
/*   time index (or pair of indices) matching input.UTC;         */
/*   if only one time is specified in the netCDF file,           */
/*   take that field                                             */
/*                                                               */
/* input:                                                        */
/*   ncid              id number of the netCDF file              */
/*   UTC               universal time correlated                 */
/*   time_interpolate  TIME_NEAREST_DATE or TIME_INTERPOLATION   */
/*                                                               */
/* output:                                                       */
/*   nt   number of time points for further calculations         */
/*   t1   time index 1                                           */
/*   t2   time index 2 (only meaningful for nt == 2)             */
/*   dt   normalized fraction between time1 and time2            */
/*                                                               */
/* February 2007 by Ulrich Hamann                                */
/* changes: July 2007 by Ulrich Hamann,                          */
/*          interpretation of the attribute time:units           */
/*****************************************************************/

int get_time_index (int ncid, struct tm UTC, int time_interpolate, int* nt, int* t1, int* t2, float* dt, int verbose, int quiet)
{
#if HAVE_NETCDF4

  /* recognised encodings of the netCDF "time" variable */
#define FORMAT_UNKNOWN 0
#define YYYYMMDD_FF 1 /* day as %Y%m%d.%f */
#define MONTH_OF_YEAR 2
#define HOURS_SINCE 3 /* hours since 1900-01-01 00:00:0.0 */
#define SECONDS_SINCE 4

  int status = 0;
  time_t time_in_s = NOT_DEFINED_INTEGER;
  time_t min_delta_t = 999999999;
  int id_time = NOT_DEFINED_INTEGER;
  int time_format = NOT_DEFINED_INTEGER;
  double* time_grid = NULL;
  time_t* time_grid_in_s = NULL;
  struct tm* date_grid = NULL;
  char tmp_string[FILENAME_MAX] = "";
  int i = 0;
  int year = 0, month = 0, day = 0, hour = 0, min = 0;
  size_t ntime = NOT_DEFINED_INTEGER;
  int t = NOT_DEFINED_INTEGER;
  int t_min = NOT_DEFINED_INTEGER;
  double tmp_time = NOT_DEFINED_FLOAT;
  char* timestr = NULL;
  /* "" zero-fills the whole array, equivalent to the original explicit list of '\0' */
  char data_unit[50] = "";

  /* no time specified by the caller -> read the first field of the file */
  if (UTC.tm_mday < 0) {
    if (!quiet) {
      fprintf (stderr, " ******* WARNING >>>>>> no time specified\n");
      fprintf (stderr, " take first entry in the netCDF file\n");
    }
    (*t1) = 0; /* read first entry in the netCDF file */
    (*t2) = 0;
    (*dt) = 0.0;
    (*nt) = 1; /* read one entry in the netCDF file */
    return status;
  }

  /* read time in ECMWF file */
  status = alloc_and_read_netCDF_1D_double (ncid, "time", &(ntime), "time", &(time_grid));
  if (status != 0) {
    fprintf (stderr, "Error %d reading time in get_time_index (ancillary.c)\n", status);
    return status;
  }

  /* alloc ECMWF time in s - array */
  if ((time_grid_in_s = calloc (ntime, sizeof (time_t))) == NULL) {
    fprintf (stderr, "Error, allocating memory for time_grid_in_s (line %d, function %s in %s) \n", __LINE__, __func__, __FILE__);
    free (time_grid); /* bug fix: do not leak time_grid on the error path */
    return -1;
  }

  /* get id for time */
  status = nc_inq_varid (ncid, "time", &id_time);
  if (status == NC_NOERR) {
    /* time is specified in the netCDF file; */
    /* try to read the attribute 'units' and interprete it as much as possible */
    status = nc_get_att_text (ncid, id_time, "units", data_unit);
    if (status != NC_NOERR) {
      /* no given units -> assume that it is 'day as %Y%m%d.%f' */
      if (!quiet)
        fprintf (stderr, "*** Warning, no time unit specified in the netCDF file, assume that it is \'day as \%%Y\%%m\%%d.\%%f\'\n");
      time_format = YYYYMMDD_FF;
      /* we hope this is OK, so set status to OK again */
      status = 0;
    }
    else {
      /* some information is given about the unit, try to interprete it */
      if (strncasecmp ("day as %Y%m%d.%f", data_unit, 15) == 0)
        time_format = YYYYMMDD_FF;
      else if (strncasecmp ("hours since", data_unit, 10) == 0)
        time_format = HOURS_SINCE;
      else if (strncasecmp ("seconds since", data_unit, 12) == 0)
        time_format = SECONDS_SINCE;
      else if (strncasecmp ("month of year", data_unit, 12) == 0)
        time_format = MONTH_OF_YEAR;
      else {
        fprintf (stderr, " ... Error, unknown time format %s (line %d, function %s in %s)\n", data_unit, __LINE__, __func__, __FILE__);
        free (time_grid); /* bug fix: free allocations on the error path */
        free (time_grid_in_s);
        return -1;
      }
    }
  }
  else {
    /* no time specified in netCDF file -> take first entry in netCDF file */
    status = 0;
    time_format = FORMAT_UNKNOWN;
  }

  /* alloc date_grid ( structure - ) array */
  if ((date_grid = calloc (ntime, sizeof (struct tm))) == NULL) {
    fprintf (stderr, "Error, allocating memory for date_grid (line %d, function %s in %s)\n", __LINE__, __func__, __FILE__);
    free (time_grid); /* bug fix: free allocations on the error path */
    free (time_grid_in_s);
    return -10;
  }

  switch (time_format) {
  case FORMAT_UNKNOWN:
    /* do nothing, than automatically the first entry is specified (index=0) */
    break;

  case YYYYMMDD_FF:
  case MONTH_OF_YEAR:
    /* if 'month of year', then shift the month by 2 digits to the left, */
    /* and add 15 for the middle of the month                            */
    if (time_format == MONTH_OF_YEAR)
      for (t = 0; t < ntime; t++)
        time_grid[t] = time_grid[t] * 100.0 + 15.;

    if (ntime > 1) {
      /* for year 0 AD we assume climatologic data, add current year for time selection */
      if (0101.0 <= time_grid[0] && time_grid[ntime - 1] < 1232.0)
        for (t = 0; t < ntime; t++)
          time_grid[t] += (UTC.tm_year + 1900) * 10000.0;

      /* time range check: dates after the year 1600 AD and before 3000 AD */
      if (time_grid[0] < 16000101.00 || 30000101.00 < time_grid[0]) {
        fprintf (stderr, "Error, time [%f,%f] in netCDF file outside assumed range [1600AD, 3000AD] \n", time_grid[0], time_grid[ntime - 1]);
        free (date_grid); /* bug fix: free allocations on the error path */
        free (time_grid);
        free (time_grid_in_s);
        return -1;
      }

      /* convert format "YYYYMMDD.FF" into "C-standard format" */
      for (t = 0; t < ntime; t++) {
        date_grid[t].tm_year = floor (time_grid[t]) / 10000 - 1900;
        tmp_time = time_grid[t] - (date_grid[t].tm_year + 1900.0) * 10000.0;
        date_grid[t].tm_mon = floor (tmp_time) / 100 - 1;
        tmp_time = tmp_time - (date_grid[t].tm_mon + 1.0) * 100.0;
        date_grid[t].tm_mday = floor (tmp_time);
        tmp_time = tmp_time - date_grid[t].tm_mday;
        date_grid[t].tm_hour = floor (tmp_time * 24.0);
        tmp_time = tmp_time - date_grid[t].tm_hour / 24.0;
        date_grid[t].tm_min = floor (tmp_time * 24.0 * 60.0);
        tmp_time = tmp_time - date_grid[t].tm_min / (24.0 * 60.0);
        date_grid[t].tm_sec = floor (tmp_time * (24.0 * 60.0 * 60.0));
        date_grid[t].tm_wday = weekday (date_grid[t].tm_year + 1900, date_grid[t].tm_mon + 1, date_grid[t].tm_mday);
        time_grid_in_s[t] = my_timegm (&(date_grid[t]));
      }
    }
    /* else, do nothing, than automatically the first entry is specified (index=0) */
    break;

  case HOURS_SINCE:
  case SECONDS_SINCE:
    /* the date inside the units string starts 2 characters later for "seconds since" */
    if (time_format == HOURS_SINCE)
      i = 0;
    if (time_format == SECONDS_SINCE)
      i = 2;

    year = atoi (substr (tmp_string, data_unit, 12 + i, 4));
    month = atoi (substr (tmp_string, data_unit, 17 + i, 2));
    day = atoi (substr (tmp_string, data_unit, 20 + i, 2));
    hour = atoi (substr (tmp_string, data_unit, 23 + i, 2));
    min = atoi (substr (tmp_string, data_unit, 26 + i, 2));
    /* there are not always seconds in the format */

    /* convert format "hours since 1900-01-01 00:00:0.0" into "C-standard format" */
    for (t = 0; t < ntime; t++) {
      date_grid[t].tm_year = year - 1900;
      date_grid[t].tm_mon = month - 1;
      date_grid[t].tm_mday = day;
      if (time_format == HOURS_SINCE)
        date_grid[t].tm_hour = hour + time_grid[t];
      else
        date_grid[t].tm_hour = hour;
      date_grid[t].tm_min = min;
      if (time_format == SECONDS_SINCE)
        date_grid[t].tm_sec = 0.0 + time_grid[t];
      else
        date_grid[t].tm_sec = 0.0;
      date_grid[t].tm_wday = weekday (date_grid[t].tm_year + 1900, date_grid[t].tm_mon + 1, date_grid[t].tm_mday);
      time_grid_in_s[t] = my_timegm (&(date_grid[t]));
    }
    break;

  default:
    fprintf (stderr, "Error, unknown time_format = %d (line %d, function %s in %s)\n", time_format, __LINE__, __func__, __FILE__);
    free (date_grid); /* bug fix: free allocations on the error path */
    free (time_grid);
    free (time_grid_in_s);
    return -1;
  }

  /* convert time to search for into seconds scince 01.01.1970 00:00:00 UTC */
  time_in_s = my_timegm (&(UTC));

  /* search time index */
  switch (time_interpolate) {
  case TIME_NEAREST_DATE:
    min_delta_t = labs (time_grid_in_s[0] - time_in_s); /* first guess for time difference */
    t_min = 0;                                          /* first guess for index */
    for (t = 1; t < ntime; t++)
      if (labs (time_grid_in_s[t] - time_in_s) < min_delta_t) {
        min_delta_t = labs (time_grid_in_s[t] - time_in_s);
        t_min = t;
      }
    (*t1) = t_min;
    (*t2) = NOT_DEFINED_INTEGER;
    /* NOTE(review): dt is set to the INDEX here, not to a fraction; kept */
    /* exactly as in the original -- confirm against the callers          */
    (*dt) = (float)t_min;
    (*nt) = 1; /* read first and only time step from file */
    if (verbose) {
      timestr = asctime (&(UTC));
      fprintf (stderr, " specified time: %s", timestr);
      timestr = asctime (&(date_grid[(*t1)]));
      fprintf (stderr, " nearest time in netCDF file: %s", timestr);
    }
    break;

  case TIME_INTERPOLATION:
    if (ntime == 1) {
      if ((time_grid_in_s[0] != time_in_s) && !quiet) {
        timestr = asctime (&(UTC));
        fprintf (stderr, "\n ******* WARNING >>>>>> specified time %s", timestr);
        timestr = asctime (&(date_grid[0]));
        fprintf (stderr, " ******* WARNING >>>>>> is NOT the time found in the ECMWF atmosphere data file %s\n", timestr);
      }
      (*t1) = 0;
      (*t2) = NOT_DEFINED_INTEGER;
      (*dt) = 0.0;
      (*nt) = 1; /* read first and only time step from file */
    }
    else {
      /* bug fix: initialise the result with a negative sentinel so that the */
      /* "index not found" test below is meaningful even if the caller did   */
      /* not initialise *t1                                                  */
      (*t1) = -1;
      for (t = 1; t < ntime; t++) {
        if (time_grid_in_s[t - 1] <= time_in_s && time_in_s <= time_grid_in_s[t]) {
          (*t1) = t - 1;
          (*t2) = t;
          (*dt) = (float)(time_in_s - time_grid_in_s[t - 1]) / (float)(time_grid_in_s[t] - time_grid_in_s[t - 1]);
          (*nt) = 2; /* read two time steps from file */
          break;
        }
      }
      /* bug fix: the original tested "t1 < 0", i.e. the POINTER, which is */
      /* never true; dereference to test the found index itself            */
      if ((*t1) < 0) { fprintf
(stderr, "Error -1 finding index for time = %s in get_time_index (ancillary.c)\n", asctime (&(UTC)));
        return -1;
      }
      if (verbose) {
        fprintf (stderr, " time interpolation of data between \n");
        timestr = asctime (&(date_grid[(*t1)]));
        fprintf (stderr, " %s", timestr);
        timestr = asctime (&(date_grid[(*t2)]));
        fprintf (stderr, " %s", timestr);
      }
    }
    break;
  default:
    fprintf (stderr, "Error, unknown time_interpolate = %d in get_time_index (in ancillary.c)\n", time_interpolate);
    return -1;
  }

  free (date_grid);
  free (time_grid);
  free (time_grid_in_s);

  return status;
#else
  fprintf (stderr, " ***********************************************************************\n");
  fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n");
  fprintf (stderr, " * use any netCDF option. Please get netcdf and rebuild. *\n");
  fprintf (stderr, " ***********************************************************************\n");
  return -1;
#endif
}

/*****************************************************************/
/* get_local_apparent_time_index                                 */
/* (almost the same as get_time_index)                           */
/*                                                               */
/* Purpose:                                                      */
/*   read time grid from netCDF file and search the suitable     */
/*   time index to input.LAT; if only one time is specified      */
/*   in the netCDF file, take that field                         */
/*                                                               */
/* input:                                                        */
/*   ncid              id number of the netCDF file              */
/*   LAT               local apparent time                       */
/*   time_interpolate  TIME_NEAREST_DATE or TIME_INTERPOLATION   */
/*                                                               */
/* output:                                                       */
/*   nt   number of time points for further calculations         */
/*   t1   time index 1                                           */
/*   t2   time index 2 (only meaningful for nt == 2)             */
/*   dt   normalized fraction between time1 and time2            */
/*                                                               */
/* April 2007 by Ulrich Hamann                                   */
/*****************************************************************/

int get_local_apparent_time_index (int ncid, struct tm LAT, int time_interpolate, int* nt, int* t1, int* t2, float* dt, int verbose, int quiet)
{
  int status = 0;
  time_t time_in_s = NOT_DEFINED_INTEGER;
  time_t min_delta_t = 999999999;
  double* time_grid = NULL;
  time_t* time_grid_in_s = NULL;
  struct tm* date_grid = NULL;
  size_t ntime = NOT_DEFINED_INTEGER;
  int t = NOT_DEFINED_INTEGER;
  int t_min = NOT_DEFINED_INTEGER;
  double tmp_time = NOT_DEFINED_FLOAT;
  char* timestr = NULL;
  char function_name[] = "get_local_apparent_time_index";
  char file_name[] = "ancillary.c";

  /* no time specified by the caller -> read the first field of the file */
  if (LAT.tm_mday < 0) {
    fprintf (stderr, " ******* WARNING >>>>>> no time specified\n");
    fprintf (stderr, " take first entry in the netCDF file\n");
    (*t1) = 0; /* read first entry in the netCDF file */
    (*t2) = 0;
    (*dt) = 0.0;
    (*nt) = 1; /* read one entry in the netCDF file */
    return status;
  }

  /* time in seconds scince 01.01.1970 00:00:00 LAT */
  time_in_s = my_timegm (&(LAT));

  /* read time in netCDF file */
  status = alloc_and_read_netCDF_1D_double (ncid, "local_apparent_time", &(ntime), "local_apparent_time", &(time_grid));
  if (status != 0) {
    fprintf (stderr, "Error %d reading time in %s (%s)\n", status, function_name, file_name);
    return status;
  }

  /* alloc netCDF date structure */
  if ((date_grid = calloc (ntime, sizeof (struct tm))) == NULL) {
    fprintf (stderr, "Error, allocating memory for date_grid in %s (%s)\n", function_name, file_name);
    free (time_grid); /* bug fix: do not leak on the error path */
    return -10;
  }

  /* alloc netCDF time in s - array */
  if ((time_grid_in_s = calloc (ntime, sizeof (time_t))) == NULL) {
    fprintf (stderr, "Error, allocating memory for date_grid time_grid_in_s in get_local_apparent_time_index (ancillary.c) \n");
    free (time_grid); /* bug fix: do not leak on the error path */
    free (date_grid);
    return -1;
  }

  /* convert format "YYYYMMDD.day_fraction" into "C-standard format" */
  for (t = 0; t < ntime; t++) {
    date_grid[t].tm_year = floor (time_grid[t]) / 10000 - 1900;
    tmp_time = time_grid[t] - (date_grid[t].tm_year + 1900.0) * 10000.0;
    date_grid[t].tm_mon = floor (tmp_time) / 100 - 1;
    tmp_time = tmp_time - (date_grid[t].tm_mon + 1.0) * 100.0;
    date_grid[t].tm_mday = floor (tmp_time);
    tmp_time = tmp_time - date_grid[t].tm_mday;
    date_grid[t].tm_hour = floor (tmp_time * 24.0);
    tmp_time = tmp_time - date_grid[t].tm_hour / 24.0;
    date_grid[t].tm_min = floor (tmp_time * 24.0 * 60.0);
    tmp_time = tmp_time - date_grid[t].tm_min / (24.0 * 60.0);
    /* bug fix: the remaining seconds were assigned to tm_min a second time, */
    /* overwriting the minutes and leaving tm_sec always 0; assign tm_sec    */
    /* as in the analogous loop of get_time_index                            */
    date_grid[t].tm_sec = floor (tmp_time * (24.0 * 60.0 * 60.0));
    date_grid[t].tm_wday = weekday (date_grid[t].tm_year + 1900, date_grid[t].tm_mon + 1, date_grid[t].tm_mday);
    time_grid_in_s[t] = my_timegm (&(date_grid[t]));
  }

  /* search time index */
  switch (time_interpolate) {
  case TIME_NEAREST_DATE:
    min_delta_t = labs (time_grid_in_s[0] - time_in_s); /* first guess for time difference */
    t_min = 0;                                          /* first guess for index */
    for (t = 1; t < ntime; t++)
      if (labs (time_grid_in_s[t] - time_in_s) < min_delta_t) {
        min_delta_t = labs (time_grid_in_s[t] - time_in_s);
        t_min = t;
      }
    (*t1) = t_min;
    (*t2) = NOT_DEFINED_INTEGER;
    /* NOTE(review): dt is set to the INDEX here, not to a fraction; kept */
    /* exactly as in the original -- confirm against the callers          */
    (*dt) = (float)t_min;
    (*nt) = 1; /* read first and only time step from file */
    if (verbose) {
      timestr = asctime (&(LAT));
      fprintf (stderr, " specified time: %s", timestr);
      timestr = asctime (&(date_grid[(*t1)]));
      fprintf (stderr, " nearest time in netCDF file: %s", timestr);
    }
    break;

  case TIME_INTERPOLATION:
    if (ntime == 1) {
      if ((time_grid_in_s[0] != time_in_s) && !quiet) {
        timestr = asctime (&(LAT));
        fprintf (stderr, "\n ******* WARNING >>>>>> specified time %s", timestr);
        timestr = asctime (&(date_grid[0]));
        fprintf (stderr, " ******* WARNING >>>>>> is NOT the time found in the netCDF atmosphere data file %s\n", timestr);
      }
      (*t1) = 0;
      (*t2) = NOT_DEFINED_INTEGER;
      (*dt) = 0.0;
      (*nt) = 1; /* read first and only time step from file */
    }
    else {
      /* bug fix: negative sentinel makes the "index not found" test below */
      /* meaningful even if the caller did not initialise *t1              */
      (*t1) = -1;
      for (t = 1; t < ntime; t++) {
        if (time_grid_in_s[t - 1] <= time_in_s && time_in_s <= time_grid_in_s[t]) {
          (*t1) = t - 1;
          (*t2) = t;
          (*dt) = (float)(time_in_s - time_grid_in_s[t - 1]) / (float)(time_grid_in_s[t] - time_grid_in_s[t - 1]);
          (*nt) = 2; /* read two time steps from file */
          break;
        }
      }
      /* bug fix: the original tested "t1 < 0", i.e. the POINTER, which is */
      /* never true; dereference to test the found index itself            */
      if ((*t1) < 0) {
        fprintf (stderr, "Error -1 finding index for time = %s in %s (%s)\n", asctime (&(LAT)), function_name, file_name);
        free (date_grid); /* bug fix: free allocations on the error path */
        free (time_grid);
        free (time_grid_in_s);
        return -1;
      }
      if (verbose) {
        fprintf (stderr, " time interpolation of data between \n");
        timestr = asctime (&(date_grid[(*t1)]));
        fprintf (stderr, " %s", timestr);
        timestr = asctime (&(date_grid[(*t2)]));
        fprintf (stderr, " %s", timestr);
      }
    }
    break;

  default:
    fprintf (stderr, "Error, unknown time_interpolate = %d in %s (%s)\n", time_interpolate, function_name, file_name);
    free (date_grid); /* bug fix: free allocations on the error path */
    free (time_grid);
    free (time_grid_in_s);
    return -1;
  }

  /* bug fix: the original never freed these arrays -> leak on every call */
  free (date_grid);
  free (time_grid);
  free (time_grid_in_s);

  return status;
}

/******************************************************************************/
/* alloc_and_read_ECMWF_netCDF_pressure */
/* small function to read hybrid coefficients hyai, hybi and surface pressure */
/* from ECMWF netCDF file and convert those to a pressure array */
/* requires that SP, hyai and hybi are in the ECMWF netCDF file */
/******************************************************************************/
int alloc_and_read_ECMWF_netCDF_pressure (int ncid, float** p_level, size_t* nlev, float** p_layer, size_t* nlay, size_t itime, size_t ilat, size_t ilon, int verbose)
{
#if HAVE_NETCDF4
  int status = 0;
  float SP = -999.0;
  double* hyai = NULL;
  double* hybi = NULL;
  double* hyam = NULL;
  double* hybm = NULL;
  int lc = 0;
  /* read surface pressure from ECMWF file */
  status = read_ECMWF_surface_pressure (ncid, itime, ilat, ilon, &(SP), verbose);
  if (status != NC_NOERR) {
    fprintf (stderr, "Error '%s' returned by read_ECMWF_surface_pressure (line %d, function '%s' in '%s')\n", nc_strerror (status), __LINE__, __func__, __FILE__);
    return status;
  }
  /* read hybrid pressure coefficients on levels (layer boundaries) */
  status = alloc_and_read_netCDF_1D_double (ncid, "ilev", nlev, "hyai", &(hyai));
  if (status != 0) {
    fprintf (stderr, "Error %d reading hyai (line %d, function '%s' in '%s') \n", status, __LINE__, __func__, __FILE__);
    return status;
  }
  status = alloc_and_read_netCDF_1D_double (ncid, "ilev", nlev, "hybi", &(hybi));
  if (status != 0) {
fprintf (stderr, "Error %d reading hybi (line %d, function '%s' in '%s')\n", status, __LINE__, __func__, __FILE__); return status; } /* read hybrid pressure coefficients at layers (layer midpoints) */ status = alloc_and_read_netCDF_1D_double (ncid, "mlev", nlay, "hyam", &(hyam)); if (status != 0) { fprintf (stderr, "Error %d reading hyam (line %d, function '%s' in '%s')\n", status, __LINE__, __func__, __FILE__); return status; } status = alloc_and_read_netCDF_1D_double (ncid, "mlev", nlay, "hybm", &(hybm)); if (status != 0) { fprintf (stderr, "Error %d reading hybm (line %d, function '%s' in '%s')\n", status, __LINE__, __func__, __FILE__); return status; } /* allocate pressure */ if (((*p_level) = calloc ((*nlev), sizeof (float))) == NULL) { fprintf (stderr, "Error allocating memory for 'p_level' (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return -1; } /* /\***************************************************************\/ */ /* /\* calculate level pressure (layer boundaries) *\/ */ /* /\* starting from 1, because we DO NOT like the uppermost p=0.0 *\/ */ /* /\* it is bad for merging with background atmosphere *\/ */ /* /\***************************************************************\/ */ /* /\* allocate pressure *\/ */ /* if (((*p_level) = calloc((*nlev)-1, sizeof(float)))==NULL) { */ /* fprintf (stderr, "Error allocating memory for 'p_level' (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); */ /* return -1; */ /* } */ /* /\* fprintf (stderr,"level pressures\n"); *\/ */ /* for (lc=1; lc<(*nlev); lc++) { */ /* (*p_level)[lc-1]= 0.01*(hyai[lc]+hybi[lc]*SP); /\* 0.01 == Pa -> hPa *\/ */ /* /\* fprintf (stderr,"lc=%3d a=%10.2f, b=%10.6f, p=%10.4f, SP=%10.4f, SP-p=%10.4f\n", *\/ */ /* /\* lc-1,hyai[lc],hybi[lc],(*p_level)[lc-1],SP/100.,SP/100.-(*p_level)[lc-1]); *\/ */ /* } */ /* (*nlev)=(*nlev)-1; */ /* fprintf (stderr,"level pressures\n"); */ for (lc = 0; lc < (*nlev); lc++) { (*p_level)[lc] = 0.01 * (hyai[lc] + hybi[lc] * 
SP); /* 0.01 == Pa -> hPa */ /* fprintf (stderr,"lc=%3d a=%10.2f, b=%10.6f, p=%10.4f, SP=%10.4f, SP-p=%10.4f\n", */ /* lc-1,hyai[lc],hybi[lc],(*p_level)[lc],SP/100.,SP/100.-(*p_level)[lc]); */ } /* allocate pressure */ if (((*p_layer) = calloc ((*nlay), sizeof (float))) == NULL) { fprintf (stderr, "Error allocating memory for 'p_layer' (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return -1; } /* calculate layer pressure (layer midpoints) */ /* fprintf (stderr,"layer pressures\n"); */ for (lc = 0; lc < (*nlay); lc++) { (*p_layer)[lc] = 0.01 * (hyam[lc] + hybm[lc] * SP); /* 0.01 == Pa -> hPa */ /* fprintf (stderr,"lc=%3d a=%10.2f, b=%10.6f, p=%10.4f, SP=%10.4f, SP-p=%10.4f\n", */ /* lc,hyam[lc],hybm[lc],(*p_layer)[lc],SP/100.,SP/100.-(*p_layer)[lc]); */ } return status; #else fprintf (stderr, " ***********************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use any ECMWF input option. Please get netcdf and rebuild. 
*\n"); fprintf (stderr, " ***********************************************************************\n"); return -1; #endif } int read_ECMWF_surface_pressure (int ncid, size_t itime, size_t ilat, size_t ilon, float* SP, int verbose) { #if HAVE_NETCDF4 int status = 0; int id_data = 0; int log_SP = FALSE; nc_type netCDF_type; float scale_factor = 1.0; float add_offset = 0.0; char variable_name[FILENAME_MAX] = ""; size_t* index = NULL; /* get variable id for "SP" (surface pressure) */ if ((status = nc_inq_varid (ncid, "SP", &id_data)) == NC_NOERR) strcpy (variable_name, "SP"); else if ((status = nc_inq_varid (ncid, "sp", &id_data)) == NC_NOERR) strcpy (variable_name, "sp"); else if ((status = nc_inq_varid (ncid, "LNSP", &id_data)) == NC_NOERR) { strcpy (variable_name, "LNSP"); log_SP = TRUE; } else if ((status = nc_inq_varid (ncid, "lnsp", &id_data)) == NC_NOERR) { strcpy (variable_name, "lnsp"); log_SP = TRUE; } else { fprintf (stderr, "Error '%s', while getting id for surface pressure\n", nc_strerror (status)); fprintf (stderr, " (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return status; } /* get type of the variable */ status = nc_inq_vartype (ncid, id_data, &(netCDF_type)); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while getting type (float, double ...) 
of '%s'\n", nc_strerror (status), variable_name); fprintf (stderr, " (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return status; } /* read attribute scale_factor */ status = nc_get_att_float (ncid, id_data, "scale_factor", &scale_factor); if (status != NC_NOERR) { /* no given scale_factor -> default scale_factor = 1.0 */ /* that's OK, put status to 0 */ status = 0; } else { /* if (verbose) */ /* fprintf (stderr," scale_factor = %f \n", scale_factor); */ } /* read attribute add_offset */ status = nc_get_att_float (ncid, id_data, "add_offset", &add_offset); if (status != NC_NOERR) { /* no given offset -> default offset = 0.0 */ /* that's OK, put status to 0 */ status = 0; } else { /* if (verbose) */ /* fprintf (stderr," add_offset = %f \n", add_offset); */ } if (log_SP == FALSE) { if (((index) = calloc (3, sizeof (size_t))) == NULL) { fprintf (stderr, "Error, allocting memory for index\n"); fprintf (stderr, " (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return -1; } /* index for surface pressure */ index[0] = itime; index[1] = ilat; index[2] = ilon; } else { /* that means -> if (log_SP == TRUE) */ if (((index) = calloc (4, sizeof (size_t))) == NULL) { fprintf (stderr, "Error, allocting memory for index\n"); fprintf (stderr, " (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return -1; } /* index for log surface pressure */ index[0] = itime; index[1] = 0; /* level == 0, there is only one level! 
*/ index[2] = ilat; index[3] = ilon; } /* read surface pressure */ status = nc_get_var1_float (ncid, id_data, index, SP); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while reading surface pressure\n", nc_strerror (status)); fprintf (stderr, " (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__); return status; } (*SP) = (*SP) * scale_factor + add_offset; /* if (verbose) */ /* fprintf (stderr," %s = %f \n", variable_name, (*SP)); */ if (log_SP == TRUE) (*SP) = exp ((*SP)); free (index); return status; #else fprintf (stderr, " ***********************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use any ECMWF input option. Please get netcdf and rebuild. *\n"); fprintf (stderr, " ***********************************************************************\n"); return -1; #endif } /**********************************************************************/ /* alloc_and_read_netCDF_column_float */ /* function extracts a column of size nlev of data from a netCDF file */ /* at the indeces itime, ilat, ilon */ /* variable is the name (string), which is stored in the netCDF file */ /**********************************************************************/ int alloc_and_read_netCDF_column_float (int ncid, char* variable_name, float** data, size_t nlev, size_t itime, size_t ilat, size_t ilon, int verbose) { #if HAVE_NETCDF4 int status = 0; int id_data = 0; nc_type netCDF_type; double scale_factor = 1.0; double add_offset = 0.0; unsigned char data_uchar = 0; short data_short = -999; int data_int = -999; float data_float = -999.0; double data_double = -999.0; size_t index4D[4] = {0, 0, 0, 0}; /* time, lev, lat, lon */ int lc = 0; /* get variable id for data */ status = nc_inq_varid (ncid, variable_name, &id_data); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while getting id for '%s' (line %d, function '%s' in '%s')\n", nc_strerror (status), 
variable_name, __LINE__, __func__, __FILE__); return status; } /* get type of the variable */ status = nc_inq_vartype (ncid, id_data, &(netCDF_type)); if (status != NC_NOERR) { fprintf (stderr, "Error '%s', while getting type for %s (line %d, function '%s' in '%s')\n", nc_strerror (status), variable_name, __LINE__, __func__, __FILE__); return status; } if (verbose) fprintf (stderr, " reading '%s' type %d (NC_BYTE %d, NC_CHAR %d, NC_SHORT %d, NC_INT %d, NC_FLOAT %d, NC_DOUBLE %d)\n", variable_name, netCDF_type, NC_BYTE, NC_CHAR, NC_SHORT, NC_INT, NC_FLOAT, NC_DOUBLE); /* read attribute scale_factor */ status = nc_get_att_double (ncid, id_data, "scale_factor", &scale_factor); if (status != NC_NOERR) { /* no given scale_factor -> default scale_factor = 1.0 */ /* that's OK, put status to 0 */ status = 0; } else { /* if (verbose) */ /* fprintf (stderr," scale_factor = %f \n", scale_factor); */ } /* read attribute add_offset */ status = nc_get_att_double (ncid, id_data, "add_offset", &add_offset); if (status != NC_NOERR) { /* no given offset -> default offset = 0.0 */ /* that's OK, put status to 0 */ status = 0; } else { /* if (verbose) */ /* fprintf (stderr," add_offset = %f \n", add_offset); */ } /* allocate temporary data array */ if (((*data) = calloc (nlev, sizeof (float))) == NULL) { fprintf (stderr, "Error allocating memory for 'data' \n (line %d, function '%s' in '%s') \n", __LINE__, __func__, __FILE__); return -1; } /* read temperature (in netCDF = float *T(time, mlev, lat, lon);)*/ index4D[0] = itime; /* index4D[1] see loops */ index4D[2] = ilat; index4D[3] = ilon; switch (netCDF_type) { case (NC_BYTE): case (NC_CHAR): for (lc = 0; lc < nlev; lc++) { index4D[1] = lc; status = nc_get_var1_uchar (ncid, id_data, index4D, &(data_uchar)); (*data)[lc] = (float)(((double)data_uchar) * scale_factor + add_offset); /* fprintf (stderr," lc = %3d, %s = %e \n", lc, variable_name, (*data)[lc]); */ } if (status != NC_NOERR) { fprintf (stderr, "Error '%s', reading %s (uchar) 
\n (line %d, function '%s' in '%s')\n", nc_strerror (status), variable_name, __LINE__, __func__, __FILE__); return status; } break; case (NC_SHORT): for (lc = 0; lc < nlev; lc++) { index4D[1] = lc; status = nc_get_var1_short (ncid, id_data, index4D, &(data_short)); (*data)[lc] = (float)(((double)data_short) * scale_factor + add_offset); if (fabs ((*data)[lc]) < scale_factor * 10E-6 && scale_factor != 1.0) /* THIS IS REALLY NOT NICE !!!! */ (*data)[lc] = 0.0; /* THIS IS REALLY NOT NICE !!!! */ /* fprintf (stderr," lc = %3d, %s = %e \n", lc, variable_name, (*data)[lc]); */ } if (status != NC_NOERR) { fprintf (stderr, "Error '%s', reading %s (short) \n (line %d, function '%s' in '%s')\n", nc_strerror (status), variable_name, __LINE__, __func__, __FILE__); return status; } break; case (NC_INT): for (lc = 0; lc < nlev; lc++) { index4D[1] = lc; status = nc_get_var1_int (ncid, id_data, index4D, &(data_int)); (*data)[lc] = (float)(((double)data_int) * scale_factor + add_offset); /* fprintf (stderr," lc = %3d, %s = %e \n", lc, variable_name, (*data)[lc]); */ } if (status != NC_NOERR) { fprintf (stderr, "Error '%s', reading %s (integer) \n (line %d, function '%s' in '%s')\n", nc_strerror (status), variable_name, __LINE__, __func__, __FILE__); return status; } break; case (NC_FLOAT): for (lc = 0; lc < nlev; lc++) { index4D[1] = lc; status = nc_get_var1_float (ncid, id_data, index4D, &(data_float)); (*data)[lc] = (float)(((double)data_float) * scale_factor + add_offset); /* if (status != NC_NOERR) fprintf (stderr,"Error '%s' reading '%s', index lc = %3d \n", nc_strerror(status), variable_name, lc); */ } if (status != NC_NOERR) { fprintf (stderr, "Error '%s', reading %s (float) \n (line %d, function '%s' in '%s')\n", nc_strerror (status), variable_name, __LINE__, __func__, __FILE__); return status; } break; case (NC_DOUBLE): for (lc = 0; lc < nlev; lc++) { index4D[1] = lc; status = nc_get_var1_double (ncid, id_data, index4D, &(data_double)); (*data)[lc] = 
(float)((data_double)*scale_factor + add_offset); /* fprintf (stderr," lc = %3d, %s = %e \n", lc, variable_name, (*data)[lc]); */ } if (status != NC_NOERR) { fprintf (stderr, "Error '%s', reading %s (double)\n (line %d, function '%s' in '%s')\n", nc_strerror (status), variable_name, __LINE__, __func__, __FILE__); return status; } break; default: fprintf (stderr, "Error, unknown type of variable %d \n (line %d, function '%s' in '%s') \n", netCDF_type, __LINE__, __func__, __FILE__); return -1; } return status; #else fprintf (stderr, " ******************************************************************\n"); fprintf (stderr, " * You have built uvspec without libnetcdf and hence cannot *\n"); fprintf (stderr, " * use any netCDF option. Please get netcdf and rebuild. *\n"); fprintf (stderr, " ******************************************************************\n"); return -1; #endif } /***********************************************************************************/ /* Function: calculate_z_from_p_and_T */ /* Description: */ /* calculates a z-grid using the hydrostatic equation */ /* 2 possible equation: */ /* first: z_{i-1} - z_{i} = R / g *0.5* bar{T} * log(p_{i}/p_{i-1} */ /* second: (longer derivation takes */ /* */ /* Parameters: */ /* p pressure in hPa (input) */ /* T temperature in K (input) */ /* z height above sea level in km (output) */ /* n number of height levels */ /* altitude altitude of the ground in km (input) */ /* */ /* Return value: */ /* int status == 0, if everthing is OK */ /* < 0, if there was an error */ /* Example: */ /* Files: ancillary.c */ /* Known bugs: - */ /* Author: */ /* xx 200? U. 
/* Hamann   Created */
/***********************************************************************************/

int calculate_z_from_p_and_T (float* p, float* T, float** z, int n, float altitude, int verbose)
{
  /* Convert a pressure/temperature profile into a height grid using the  */
  /* hydrostatic equation, accounting for the variation of g with height. */
  /* p in hPa, T in K; z is allocated here and returned in km.            */
  /* Levels are ordered top-down: index n-1 is the surface level.         */
  /* Returns 0 on success, -2 if the allocation of z fails.               */
  int level;

  /* allocate the output height grid */
  (*z) = (float*)calloc (n, sizeof (float));
  if ((*z) == NULL) {
    fprintf (stderr, "Error, allocating memory for z\n");
    fprintf (stderr, " (line %d, function '%s' in '%s')\n", __LINE__, __func__, __FILE__);
    return -2;
  }

  if (verbose) {
    fprintf (stderr, " ... converting p and T to a z-grid\n");
    fprintf (stderr, " assuming: R = %12.6f, g(z=0km) = %12.6f\n", R_AIR, G_SURFACE);
  }

  /* lowest level starts at the surface altitude (the original's two       */
  /* branches both assign altitude, since 0.0 and altitude coincide there) */
  (*z)[n - 1] = altitude;

  /* integrate the hydrostatic equation upwards, with g = g(z) */
  /* (this is equation (2) of the original comments; equation (1), which  */
  /* assumed constant g within one layer, is no longer carried along)     */
  for (level = n - 1; level >= 1; level--) {
    (*z)[level - 1] =
      -R_EARTH + 1 / (1 / (R_EARTH + 1000.0 * (*z)[level]) - R_AIR / (G_SURFACE * R_EARTH * R_EARTH) * 0.5 * (T[level] + T[level - 1]) * log (p[level] / p[level - 1]));
    (*z)[level - 1] *= 0.001; /* m -> km */
  }

  return 0;
}

/* given month, day, year, returns day of week, eg.
/* Monday = 0 etc. */
/* tested for 1901 to 2099 (seems to work from 1800 on too) */
/* NOTE(review): spot checks (e.g. 2000-01-01, a Saturday, yields 6) */
/* suggest the convention is actually Sunday = 0 -- confirm callers  */
int weekday (int year, int month, int day)
{
  /* per-month additive constants replacing the original switch; index 1..12 */
  static const int month_term[13] = {0, 20, 0, 16, 24, 20, 0, 24, 4, 12, 8, 16, 12};
  int ix, tx;

  if (month < 1 || month > 12) {
    fprintf (stderr, "Error, determening day of week in function weekday (in ancillary.c) (month = %d)\n", month);
    return -1000;
  }

  if (year > 1900) /* 1900 was not a leap year */
    year -= 1900;

  ix = ((year - 21) % 28) + month_term[month] + (month > 2); /* take care of February */
  tx = (ix + (ix / 4)) % 7 + day;                            /* take care of leap year */

  return ((tx + 1) % 7);
}

/* forward declarations (strtrim is defined before its helpers) */
char* strrtrim (char* str, const char* trim);
char* strltrim (char* str, const char* trim);

/* strip the characters of 'trim' (default: whitespace) from both ends of 'str' */
char* strtrim (char* str, const char* trim)
{
  return strltrim (strrtrim (str, trim), trim);
}

/* strip trailing 'trim' characters (default: whitespace) in place */
char* strrtrim (char* str, const char* trim)
{
  char* tail;

  if (str == NULL)
    return NULL;
  if (trim == NULL)
    trim = " \t\n\r";

  tail = str + strlen (str);
  while (tail-- > str) {
    if (strchr (trim, *tail) == NULL)
      break; /* first non-trim character from the right -> done */
    *tail = 0;
  }
  return str;
}

/* return a pointer to the first character of 'str' not contained in 'trim' */
char* strltrim (char* str, const char* trim)
{
  if (str == NULL)
    return NULL;
  if (trim == NULL)
    trim = " \t\r\n";

  while (*str != 0 && strchr (trim, *str) != NULL)
    ++str;
  return str;
}

/***********************************************************************************/
/* Function: specific_heat_capacity_moist_air                                      */
/* Description:                                                                    */
/*  calculated the specific heat capacity of dry air and water vapour and          */
/*  returns a mass weighted average according to the specific humidity             */
/*                                                                                 */
/* Parameters:                                                                     */
/*  float T_in_K    temperature in Kelvin                                          */
/*  float dens_air  number density of air moleculs in cm-3                         */
/*  float dens_wv   number density of water vapour moleculs in cm-3                */
/*  float mol_mass_air  weight of one 'air' molecul in u                           */
/*  float mol_mass_wv   weight of one 'water vapour' molecul in u                  */
/*  float *c_p      returned result, specific heat capacity in J/(kg K)            */
/* Return value:
*/ /* int status == 0, if everthing is OK */ /* < 0, if there was an error */ /* Example: */ /* Files: ancillary.c */ /* Known bugs: - */ /* Author: */ /* Feb 2008 U. Hamann Created */ /* */ /***********************************************************************************/ int specific_heat_capacity_moist_air (float T_in_K, float dens_air, float dens_wv, float* c_p, int quiet) { int status = 0; float mmr_h2o = NOT_DEFINED_FLOAT; float c_p_dry_air = NOT_DEFINED_FLOAT; float c_p_wv = NOT_DEFINED_FLOAT; char function_name[] = "specific_heat_capacity_moist_air"; char file_name[] = "ancillary.c"; mmr_h2o = (dens_wv * MOL_MASS_WV) / (dens_air * MOL_MASS_AIR); /* temperature dependent heat capacity of dry air */ c_p_dry_air = specific_heat_capacity_dry_air (T_in_K, quiet); if (c_p_dry_air < 0.0) { fprintf (stderr, "Error during specific_heat_capacity_dry_air in %s (%s)\n", function_name, file_name); return -1; } /* temperature dependent heat capacity of water vapour */ c_p_wv = specific_heat_capacity_water_vapour (T_in_K, quiet); if (c_p_wv < 0.0) { fprintf (stderr, "Error during specific_heat_capacity_water_vapour in %s (%s)\n", function_name, file_name); return -1; } /* mass weighted mean of specific heat capacity (moist air), e.g. 
Etling page 34 */ *c_p = (1 - mmr_h2o) * c_p_dry_air + mmr_h2o * c_p_wv; return status; } /***********************************************************************************************/ /* function: specific_heat_capacity_dry_air */ /* interpolate a table to get the temperature dependent specific heat capacity of dry air */ /* input float T in K */ /* output float specific heat capacity of dry air in J/(kg K) */ /* ulrich hamann 2007-03-31 */ /***********************************************************************************************/ float specific_heat_capacity_dry_air (float T_in_K, int quiet) { const int n = 14; int i = NOT_DEFINED_INTEGER; float c_p_dry_air = NOT_DEFINED_FLOAT; float c_p_grid[14] = {1002.3, 1002.5, 1002.7, 1003.1, 1003.8, 1004.9, 1006.3, 1008.2, 1010.6, 1013.5, 1020.6, 1029.5, 1039.8, 1051.1}; float T_grid[14] = {175.0, 200.0, 225.0, 250.0, 275.0, 300.0, 325.0, 350.0, 375.0, 400.0, 450.0, 500.0, 550.0, 600.0}; if (T_in_K < T_grid[0]) { if (!quiet) { fprintf (stderr, " *** Warning, getting specific heat capacity of dry air,\n"); fprintf (stderr, " temperature %7.2f is below of the tabled temperature region.\n", T_in_K); } return c_p_grid[0]; } else if (T_in_K > T_grid[n - 1]) { if (!quiet) { fprintf (stderr, " *** Warning, getting specific heat capacity of dry air,\n"); fprintf (stderr, " temperature %7.2f is above of the tabled temperature region.\n", T_in_K); } return c_p_grid[n - 1]; } else { for (i = 1; i < n; i++) { if (T_grid[i - 1] <= T_in_K && T_in_K <= T_grid[i]) { c_p_dry_air = c_p_grid[i - 1] + (c_p_grid[i] - c_p_grid[i - 1]) / (T_grid[i] - T_grid[i - 1]) * (T_in_K - T_grid[i - 1]); /* fprintf (stderr,"T_(i-1)=%5.2f, T=%5.2f, T_(i)=%5.2f, c_(i-1)=%6.2f, c=%6.2f, c_(i)=%6.2f (dry air) \n", */ /* T_grid[i-1],T_in_K,T_grid[i],c_p_grid[i-1],c_p_dry_air,c_p_grid[i]); */ break; } } if (c_p_dry_air < 0.0) { fprintf (stderr, "Error, determening specific heat of dry air, T = %6.2f K (in ancillary.c)\n", T_in_K); return -1.0; } return 
c_p_dry_air; } } /***********************************************************************************************/ /* function: specific_heat_capacity_water_vapour */ /* interpolate a table to get the temperature dependent specific heat capacity of water vapour */ /* input float T in K */ /* output float specific heat capacity of water vapour in J/(kg K) */ /* ulrich hamann: 2007-03-31 */ /***********************************************************************************************/ float specific_heat_capacity_water_vapour (float T_in_K, int quiet) { const int n = 14; int i = NOT_DEFINED_INTEGER; float c_p_wv = NOT_DEFINED_FLOAT; float c_p_wv_grid[14] = {1850.0, 1851.0, 1852.0, 1855.0, 1859.0, 1864.0, 1871.0, 1880.0, 1890.0, 1901.0, 1926.0, 1954.0, 1984.0, 2015.0}; float T_grid[14] = {175.0, 200.0, 225.0, 250.0, 275.0, 300.0, 325.0, 350.0, 375.0, 400.0, 450.0, 500.0, 550.0, 600.0}; if (T_in_K < T_grid[0]) { if (!quiet) { fprintf (stderr, " *** Warning, getting specific heat capacity of water vapour,\n"); fprintf (stderr, " temperature %7.2f is below of the tabled temperature region.\n", T_in_K); } return c_p_wv_grid[0]; } else if (T_in_K > T_grid[n - 1]) { if (!quiet) { fprintf (stderr, " *** Warning, getting specific heat capacity of water vapour,\n"); fprintf (stderr, " temperature %7.2f is above of the tabled temperature region.\n", T_in_K); } return c_p_wv_grid[n - 1]; } else { for (i = 1; i < n; i++) { if (T_grid[i - 1] <= T_in_K && T_in_K <= T_grid[i]) { c_p_wv = c_p_wv_grid[i - 1] + (c_p_wv_grid[i] - c_p_wv_grid[i - 1]) / (T_grid[i] - T_grid[i - 1]) * (T_in_K - T_grid[i - 1]); /* fprintf (stderr,"T_(i-1)=%5.2f, T=%5.2f, T_(i)=%5.2f, c_(i-1)=%6.2f, c=%6.2f, c_(i)=%6.2f (water vapour) \n", */ /* T_grid[i-1],T_in_K,T_grid[i],c_p_wv_grid[i-1],c_p_wv,c_p_wv_grid[i]); */ break; } } if (c_p_wv < 0.0) { fprintf (stderr, "Error, determening specific heat of water vapour, T = %6.2f K (in ancillary.c)\n", T_in_K); return -1.0; } return c_p_wv; } } 
/***********************************************************************************/ /* Function: my_timegm */ /* Description: */ /* converts a tm-struct (date as year, month, day, hour, min, ...) */ /* into time in s since 1.1.1970 */ /* (reverse function to gmtime) */ /* (essentially the same as timegm, which is part of many GNU C libraries, */ /* but not all, and therefor not portable) */ /* */ /* Parameters: */ /* struct tm *tm input date */ /* */ /* Return value: */ /* time_t time */ /* == -1 if there was some error */ /* */ /* Example: */ /* Files: ancillary.c */ /* Known bugs: - */ /* Author: */ /* Oct 2003 Roger Dingledine */ /* Delivered-to the SEUL-project under the Gnu Public Licence */ /* http://archives.seul.org/or/cvs/Oct-2003/msg00123.html */ /* Jan 2007 Ulrich Hamann */ /* implemented into libRadtran */ /* */ /***********************************************************************************/ time_t my_timegm (struct tm* tm) { time_t ret; char* tz; tz = getenv ("TZ"); setenv ("TZ", "", 1); tzset(); ret = mktime (tm); if (tz) setenv ("TZ", tz, 1); else unsetenv ("TZ"); tzset(); return ret; } /* check if two floats are equal - still not optimal, but how to improve? 
*/
/* THIS CODE IS IDENTICALLY IN CLOUD3D.C */

/* Approximate float comparison: absolute tolerance MC_EPSILON when one of    */
/* the operands is exactly zero (a relative test is meaningless there),       */
/* otherwise a tolerance relative to a.  Returns 1 for "equal", 0 otherwise.  */
static inline int float_equal (float a, float b)
{
  const double diff = fabs (a - b);

  if (a == 0 || b == 0)
    return diff < MC_EPSILON;

  /* relative difference smaller than MC_EPSILON */
  return diff < MC_EPSILON * fabs (a);
}

/* Convert a wavelength in nm to a wavenumber in cm-1 (1 nm = 1e-7 cm). */
double nm_to_inv_cm (double wavelength_nm)
{
  return 1 / (wavelength_nm * 1E-07);
}

/* Convert a wavenumber in cm-1 to a wavelength in nm. */
double inv_cm_to_nm (double wavenumber_inv_cm)
{
  return 1E+07 / wavenumber_inv_cm;
}

/* Polarizability anisotropy of N2 as function of wavenumber nu (cm-1). */
double polarizability_anisotropy_N2 (double nu)
{
  return -6.01466e-25 + 2.38557e-14 / (1.86099e+10 - nu * nu);
}

/* Polarizability anisotropy of O2 as function of wavenumber nu (cm-1). */
double polarizability_anisotropy_O2 (double nu)
{
  return 7.149e-26 + 4.59364e-15 / (4.82716e+9 - nu * nu);
}

struct E_rot_N2 {
  int gJ;
  int J;
  double E;
};

struct E_rot_O2 {
  int N;
  int J;
  double E;
};

struct E_rot_trans_N2 {
  int J;
  int Jp;        /* J prime */
  int gJ;        /* Statistical weight */
  double E;
  double delta_E;
  double cpt;    /* Plazcek-Teller coefficient */
};

struct E_rot_trans_O2 {
  int N;
  int J;
  int Np;        /* N prime */
  int Jp;        /* J prime */
  double E;
  double delta_E;
  double cpt;    /* Plazcek-Teller coefficient */
};

int crs_raman_N2 (double lambda, int n_transitions, float temper, double*** crs, int* iv, int verbose)
{
  /* Calculate Raman shifted wavelengths for input wavelength lambda in nm and */
  /* temperature temper. n_transitions are considered. Wavelength are returned */
  /* in first column of crs and cross section in the second column. Cross */
  /* section is weighted by the volume mixing ratio.
*/

  /* physical constants */
  double c = 299792458;      /* Speed of light, m/s, from wikipedia */
  double h = 6.62606896e-34; /* Planck constant, Js, from wikipedia */
  double hc = h * c * 100;   /* Converted to J*cm */
  double nm_to_cm = 1e-07;   /* Convert from nm to cm */
  double raman_const = 256 * pow (M_PI, 5) / 27;  /* constant prefactor of crs_i/crs_j below */

  /* All the numbers below for the Rotational energy states of N2 are */
  /* (comment fixed: said O2, but these are the N2 tables) */
  /* from the report: Accounting for Raman Scattering in DOAS, */
  /* J.F. de Haan, SN-OMIE-KNMI-409, Version 1.0, May 18, 2003 */

  /* J:  rotational angular momentum quantum number
     gJ: statistical weight factor
     E:  Rotational energy of state in cm-1
     From Table A-1 */
  int N_E = 31;
  struct E_rot_N2 E_rot[N_E];

  E_rot[0].J  = 0;  E_rot[0].gJ  = 6; E_rot[0].E  = 0.0000;
  E_rot[1].J  = 1;  E_rot[1].gJ  = 3; E_rot[1].E  = 3.9791;
  E_rot[2].J  = 2;  E_rot[2].gJ  = 6; E_rot[2].E  = 11.9373;
  E_rot[3].J  = 3;  E_rot[3].gJ  = 3; E_rot[3].E  = 23.8741;
  E_rot[4].J  = 4;  E_rot[4].gJ  = 6; E_rot[4].E  = 39.7892;
  E_rot[5].J  = 5;  E_rot[5].gJ  = 3; E_rot[5].E  = 59.6821;
  E_rot[6].J  = 6;  E_rot[6].gJ  = 6; E_rot[6].E  = 83.5521;
  E_rot[7].J  = 7;  E_rot[7].gJ  = 3; E_rot[7].E  = 111.3983;
  E_rot[8].J  = 8;  E_rot[8].gJ  = 6; E_rot[8].E  = 143.2197;
  E_rot[9].J  = 9;  E_rot[9].gJ  = 3; E_rot[9].E  = 179.0154;
  E_rot[10].J = 10; E_rot[10].gJ = 6; E_rot[10].E = 218.7839;
  E_rot[11].J = 11; E_rot[11].gJ = 3; E_rot[11].E = 262.5240;
  E_rot[12].J = 12; E_rot[12].gJ = 6; E_rot[12].E = 310.2341;
  E_rot[13].J = 13; E_rot[13].gJ = 3; E_rot[13].E = 361.9126;
  E_rot[14].J = 14; E_rot[14].gJ = 6; E_rot[14].E = 417.5576;
  E_rot[15].J = 15; E_rot[15].gJ = 3; E_rot[15].E = 477.1673;
  E_rot[16].J = 16; E_rot[16].gJ = 6; E_rot[16].E = 540.7395;
  E_rot[17].J = 17; E_rot[17].gJ = 3; E_rot[17].E = 608.2722;
  E_rot[18].J = 18; E_rot[18].gJ = 6; E_rot[18].E = 679.7628;
  E_rot[19].J = 19; E_rot[19].gJ = 3; E_rot[19].E = 755.2090;
  E_rot[20].J = 20; E_rot[20].gJ = 6; E_rot[20].E = 834.6081;
  E_rot[21].J = 21; E_rot[21].gJ = 3; E_rot[21].E = 917.9574;
  E_rot[22].J = 22; E_rot[22].gJ = 6; E_rot[22].E = 1005.2540;
  E_rot[23].J = 23; E_rot[23].gJ = 3; E_rot[23].E = 1096.4948;
  E_rot[24].J = 24; E_rot[24].gJ = 6; E_rot[24].E = 1191.6766;
  E_rot[25].J = 25; E_rot[25].gJ = 3; E_rot[25].E = 1290.7963;
  E_rot[26].J = 26; E_rot[26].gJ = 6; E_rot[26].E = 1393.8503;
  E_rot[27].J = 27; E_rot[27].gJ = 3; E_rot[27].E = 1500.8350;
  E_rot[28].J = 28; E_rot[28].gJ = 6; E_rot[28].E = 1611.7467;
  E_rot[29].J = 29; E_rot[29].gJ = 3; E_rot[29].E = 1726.5816;
  E_rot[30].J = 30; E_rot[30].gJ = 6; E_rot[30].E = 1845.3358;

  /* Allowed rotational Raman transitions J -> Jp of N2:
     J:  rotational angular momentum quantum number (initial state)
     Jp: J prime (final state)
     gJ: statistical weight, E: energy of the initial state in cm-1,
     delta_E: energy change in cm-1, cpt: Placzek-Teller coefficient
     From Table A-2 */
  int N_E_trans = 48;
  struct E_rot_trans_N2 Etr[N_E_trans];

  Etr[0].J  = 25; Etr[0].Jp  = 23; Etr[0].gJ  = 3; Etr[0].E  = 1290.7963; Etr[0].delta_E  = -194.3015; Etr[0].cpt  = 0.3601;
  Etr[1].J  = 24; Etr[1].Jp  = 22; Etr[1].gJ  = 6; Etr[1].E  = 1191.6766; Etr[1].delta_E  = -186.4226; Etr[1].cpt  = 0.3595;
  Etr[2].J  = 23; Etr[2].Jp  = 21; Etr[2].gJ  = 3; Etr[2].E  = 1096.4948; Etr[2].delta_E  = -178.5374; Etr[2].cpt  = 0.3589;
  Etr[3].J  = 22; Etr[3].Jp  = 20; Etr[3].gJ  = 6; Etr[3].E  = 1005.2540; Etr[3].delta_E  = -170.6459; Etr[3].cpt  = 0.3581;
  Etr[4].J  = 21; Etr[4].Jp  = 19; Etr[4].gJ  = 3; Etr[4].E  = 917.9574;  Etr[4].delta_E  = -162.7484; Etr[4].cpt  = 0.3573;
  Etr[5].J  = 20; Etr[5].Jp  = 18; Etr[5].gJ  = 6; Etr[5].E  = 834.6081;  Etr[5].delta_E  = -154.8453; Etr[5].cpt  = 0.3565;
  Etr[6].J  = 19; Etr[6].Jp  = 17; Etr[6].gJ  = 3; Etr[6].E  = 755.2090;  Etr[6].delta_E  = -146.9368; Etr[6].cpt  = 0.3555;
  Etr[7].J  = 18; Etr[7].Jp  = 16; Etr[7].gJ  = 6; Etr[7].E  = 679.7628;  Etr[7].delta_E  = -139.0233; Etr[7].cpt  = 0.3544;
  Etr[8].J  = 17; Etr[8].Jp  = 15; Etr[8].gJ  = 3; Etr[8].E  = 608.2722;  Etr[8].delta_E  = -131.1049; Etr[8].cpt  = 0.3532;
  Etr[9].J  = 16; Etr[9].Jp  = 14; Etr[9].gJ  = 6; Etr[9].E  = 540.7395;  Etr[9].delta_E  = -123.1819; Etr[9].cpt  = 0.3519;
  Etr[10].J = 15; Etr[10].Jp = 13; Etr[10].gJ = 3; Etr[10].E = 477.1673;  Etr[10].delta_E = -115.2547; Etr[10].cpt = 0.3504;
  Etr[11].J = 14; Etr[11].Jp = 12; Etr[11].gJ = 6; Etr[11].E = 417.5576;  Etr[11].delta_E = -107.3235; Etr[11].cpt = 0.3487;
  Etr[12].J = 13; Etr[12].Jp = 11; Etr[12].gJ = 3; Etr[12].E = 361.9126;  Etr[12].delta_E = -99.3886;  Etr[12].cpt = 0.3467;
  Etr[13].J = 12; Etr[13].Jp = 10; Etr[13].gJ = 6; Etr[13].E = 310.2341;  Etr[13].delta_E = -91.4502;  Etr[13].cpt = 0.3443;
  Etr[14].J = 11; Etr[14].Jp = 9;  Etr[14].gJ = 3; Etr[14].E = 262.5240;  Etr[14].delta_E = -83.5086;  Etr[14].cpt = 0.3416;
  Etr[15].J = 10; Etr[15].Jp = 8;  Etr[15].gJ = 6; Etr[15].E = 218.7839;  Etr[15].delta_E = -75.5642;  Etr[15].cpt = 0.3383;
  Etr[16].J = 9;  Etr[16].Jp = 7;  Etr[16].gJ = 3; Etr[16].E = 179.0154;  Etr[16].delta_E = -67.6171;  Etr[16].cpt = 0.3344;
  Etr[17].J = 8;  Etr[17].Jp = 6;  Etr[17].gJ = 6; Etr[17].E = 143.2197;  Etr[17].delta_E = -59.6676;  Etr[17].cpt = 0.3294;
  Etr[18].J = 7;  Etr[18].Jp = 5;  Etr[18].gJ = 3; Etr[18].E = 111.3983;  Etr[18].delta_E = -51.7162;  Etr[18].cpt = 0.3231;
  Etr[19].J = 6;  Etr[19].Jp = 4;  Etr[19].gJ = 6; Etr[19].E = 83.5521;   Etr[19].delta_E = -43.7629;  Etr[19].cpt = 0.3147;
  Etr[20].J = 5;  Etr[20].Jp = 3;  Etr[20].gJ = 3; Etr[20].E = 59.6821;   Etr[20].delta_E = -35.8080;  Etr[20].cpt = 0.3030;
  Etr[21].J = 4;  Etr[21].Jp = 2;  Etr[21].gJ = 6; Etr[21].E = 39.7892;   Etr[21].delta_E = -27.8519;  Etr[21].cpt = 0.2857;
  Etr[22].J = 3;  Etr[22].Jp = 1;  Etr[22].gJ = 3; Etr[22].E = 23.8741;   Etr[22].delta_E = -19.8950;  Etr[22].cpt = 0.2571;
  Etr[23].J = 2;  Etr[23].Jp = 0;  Etr[23].gJ = 6; Etr[23].E = 11.9373;   Etr[23].delta_E = -11.9373;  Etr[23].cpt = 0.2000;
  Etr[24].J = 0;  Etr[24].Jp = 2;  Etr[24].gJ = 6; Etr[24].E = 0.0000;    Etr[24].delta_E = 11.9373;   Etr[24].cpt = 1.0000;
  Etr[25].J = 1;  Etr[25].Jp = 3;  Etr[25].gJ = 3; Etr[25].E = 3.9791;    Etr[25].delta_E = 19.8950;   Etr[25].cpt = 0.6000;
  Etr[26].J = 2;  Etr[26].Jp = 4;  Etr[26].gJ = 6; Etr[26].E = 11.9373;   Etr[26].delta_E = 27.8519;   Etr[26].cpt = 0.5143;
  Etr[27].J = 3;  Etr[27].Jp = 5;  Etr[27].gJ = 3; Etr[27].E = 23.8741;   Etr[27].delta_E = 35.8080;   Etr[27].cpt = 0.4762;
  Etr[28].J = 4;  Etr[28].Jp = 6;  Etr[28].gJ = 6; Etr[28].E = 39.7892;   Etr[28].delta_E = 43.7629;   Etr[28].cpt = 0.4545;
  Etr[29].J = 5;  Etr[29].Jp = 7;  Etr[29].gJ = 3; Etr[29].E = 59.6821;   Etr[29].delta_E = 51.7162;   Etr[29].cpt = 0.4406;
  Etr[30].J = 6;  Etr[30].Jp = 8;  Etr[30].gJ = 6; Etr[30].E = 83.5521;   Etr[30].delta_E = 59.6676;   Etr[30].cpt = 0.4308;
  Etr[31].J = 7;  Etr[31].Jp = 9;  Etr[31].gJ = 3; Etr[31].E = 111.3983;  Etr[31].delta_E = 67.6171;   Etr[31].cpt = 0.4235;
  Etr[32].J = 8;  Etr[32].Jp = 10; Etr[32].gJ = 6; Etr[32].E = 143.2197;  Etr[32].delta_E = 75.5642;   Etr[32].cpt = 0.4180;
  Etr[33].J = 9;  Etr[33].Jp = 11; Etr[33].gJ = 3; Etr[33].E = 179.0154;  Etr[33].delta_E = 83.5086;   Etr[33].cpt = 0.4135;
  Etr[34].J = 10; Etr[34].Jp = 12; Etr[34].gJ = 6; Etr[34].E = 218.7839;  Etr[34].delta_E = 91.4502;   Etr[34].cpt = 0.4099;
  Etr[35].J = 11; Etr[35].Jp = 13; Etr[35].gJ = 3; Etr[35].E = 262.5240;  Etr[35].delta_E = 99.3886;   Etr[35].cpt = 0.4070;
  Etr[36].J = 12; Etr[36].Jp = 14; Etr[36].gJ = 6; Etr[36].E = 310.2341;  Etr[36].delta_E = 107.3235;  Etr[36].cpt = 0.4044;
  Etr[37].J = 13; Etr[37].Jp = 15; Etr[37].gJ = 3; Etr[37].E = 361.9126;  Etr[37].delta_E = 115.2547;  Etr[37].cpt = 0.4023;
  Etr[38].J = 14; Etr[38].Jp = 16; Etr[38].gJ = 6; Etr[38].E = 417.5576;  Etr[38].delta_E = 123.1819;  Etr[38].cpt = 0.4004;
  Etr[39].J = 15; Etr[39].Jp = 17; Etr[39].gJ = 3; Etr[39].E = 477.1673;  Etr[39].delta_E = 131.1049;  Etr[39].cpt = 0.3988;
  Etr[40].J = 16; Etr[40].Jp = 18; Etr[40].gJ = 6; Etr[40].E = 540.7395;  Etr[40].delta_E = 139.0233;  Etr[40].cpt = 0.3974;
  Etr[41].J = 17; Etr[41].Jp = 19; Etr[41].gJ = 3; Etr[41].E = 608.2722;  Etr[41].delta_E = 146.9368;  Etr[41].cpt = 0.3961;
  Etr[42].J = 18; Etr[42].Jp = 20; Etr[42].gJ = 6; Etr[42].E = 679.7628;  Etr[42].delta_E = 154.8453;  Etr[42].cpt = 0.3950;
  Etr[43].J = 19; Etr[43].Jp = 21; Etr[43].gJ = 3; Etr[43].E = 755.2090;  Etr[43].delta_E = 162.7484;  Etr[43].cpt = 0.3940;
  Etr[44].J = 20; Etr[44].Jp = 22; Etr[44].gJ = 6; Etr[44].E = 834.6081;  Etr[44].delta_E = 170.6459;  Etr[44].cpt = 0.3931;
  Etr[45].J = 21; Etr[45].Jp = 23; Etr[45].gJ = 3; Etr[45].E = 917.9574;  Etr[45].delta_E = 178.5374;  Etr[45].cpt = 0.3922;
  Etr[46].J = 22; Etr[46].Jp = 24; Etr[46].gJ = 6; Etr[46].E = 1005.2540; Etr[46].delta_E = 186.4226;  Etr[46].cpt = 0.3915;
  Etr[47].J = 23; Etr[47].Jp = 25; Etr[47].gJ = 3; Etr[47].E = 1096.4948; Etr[47].delta_E = 194.3015;  Etr[47].cpt = 0.3908;

  double E_J = 0; /* Rotational energy of state J */
  double W_J = 0; /* Fraction of molecules in the rotational state J at temperature T */
  double b_J = 0; /* Placzek-Teller coefficient */
  double lambda_inv_cm = 0, lambda_cm = 0, lambda_shifted_inv_cm = 0, lambda_shifted_nm = 0, lambda_shifted_cm = 0;
  double delta_nu = 0;
  double crs_i = 0, crs_j = 0;
  double fNJ = 0, Z = 0; /* Eq 5 and 10 */
  double gam_i = 0, gam_j = 0;
  double volume_mixing_ratio = 0.7905; /* volume mixing ratio used to weight the cross sections */
  double sum = 0;
  int J = 0; /* Rotational state J */
  int g_J = 0;
  int status = 0;
  int ivi = 0, is = 0;

  if (verbose) {
    fprintf (stderr, "Calculating Raman scattering wavelength shifts and cross sections ");
    fprintf (stderr, "for N2, wavelength %5.1f nm, number of transitions %d, T=%6.2f.\n", lambda, n_transitions, temper);
    fprintf (stderr, " %3s %1s %7s %8s %7s %9s %10s %9s %17s %9s %12s %7s %12s %12s %11s %14s %12s\n", "ivi", "J", "E_J", "g_J",
             "W_J", "b_J", "W_J*b_J", "wvl", "wvl_shift_cm-1", "delta_nu", "wvl_shift_nm", "gam_i", "gam_j", "crs_i", "crs_j",
             "crs_i*vmr", "crs_j*vmr");
  }

  /* continue writing crs at the index handed in by the caller */
  ivi = *iv;

  /* partition sum Z: Boltzmann-weighted sum over all tabulated levels */
  sum = 0;
  for (is = 0; is < N_E; is++) {
    g_J = E_rot[is].gJ;
    J = E_rot[is].J;
    E_J = E_rot[is].E * hc; /* level energy converted from cm-1 to J */
    sum += g_J * (2 * J + 1) * exp (-E_J / (BOLTZMANN * temper));
  }
  Z = sum;

  /* one output row (shifted wavelength plus two cross sections) per transition */
  for (is = 0; is < n_transitions; is++) {
    g_J = Etr[is].gJ;
    J = Etr[is].J;
    E_J = Etr[is].E * hc;
    b_J = Etr[is].cpt;
    delta_nu = -Etr[is].delta_E; /* Negative delta_E corresponds to a photon with a larger */
                                 /* energy, shorter wavelength than the incident photon */
                                 /* delta_E is the change in rotational energy of the */
                                 /* molecule. The minus puts the photon in the right */
                                 /* wavelength. */

    W_J = g_J * (2 * J + 1) * exp (-E_J / (BOLTZMANN * temper));
    fNJ = W_J / Z; /* fraction of molecules in state J */

    lambda_inv_cm = nm_to_inv_cm (lambda);
    lambda_shifted_inv_cm = lambda_inv_cm + delta_nu;
    lambda_shifted_nm = inv_cm_to_nm (lambda_shifted_inv_cm);
    lambda_shifted_cm = lambda_shifted_nm * nm_to_cm;
    lambda_cm = lambda * nm_to_cm;

    gam_i = polarizability_anisotropy_N2 (lambda_shifted_inv_cm);
    gam_j = polarizability_anisotropy_N2 (lambda_inv_cm);

    /* cross sections at the shifted (crs_i) and unshifted (crs_j) wavelength */
    crs_i = fNJ * gam_i * gam_i * raman_const * b_J / pow (lambda_shifted_cm, 4);
    crs_j = fNJ * gam_j * gam_j * raman_const * b_J / pow (lambda_cm, 4);

    /* NOTE(review): row (*crs)[ivi] is assumed to be allocated by the caller */
    (*crs)[ivi][0] = lambda_shifted_nm;
    (*crs)[ivi][1] = crs_i * volume_mixing_ratio;
    (*crs)[ivi][2] = crs_j * volume_mixing_ratio;

    if (verbose)
      fprintf (stderr,
               "Raman_crs_N2 %2d %2d %12.6e %2d %12.6e %6.3f %12.6e %10.6f %10.6f %10.4f %10.6f %12.6e %12.6e %12.6e %12.6e %12.6e "
               "%12.6e\n",
               ivi, J, E_J, g_J, W_J, b_J, W_J * b_J, lambda, lambda_shifted_inv_cm, delta_nu, lambda_shifted_nm, gam_i, gam_j,
               crs_i, crs_j, crs_i * volume_mixing_ratio, crs_j * volume_mixing_ratio);

    ivi++;
  }

  /* hand the advanced output index back to the caller */
  *iv = ivi;

  return status;
}

int crs_raman_O2 (double lambda, int n_transitions, float temper, double*** crs, int* iv, int verbose)
{
  /* Calculate Raman shifted wavelengths for input wavelength lambda in nm and */
  /* temperature temper. n_transitions are considered. Wavelength are returned */
  /* in first column of crs and cross section in the second column. Cross */
  /* section is weighted by the volume mixing ratio. */

  double c = 299792458;      /* Speed of light, m/s, from wikipedia */
  double h = 6.62606896e-34; /* Planck constant, Js, from wikipedia */
  double hc = h * c * 100;   /* Converted to J*cm */
  double nm_to_cm = 1e-07;   /* Convert from nm to cm */
  double raman_const = 256 * pow (M_PI, 5) / 27;

  /* All the numbers below for the Rotational energy states of O2 are */
  /* from the report: Accounting for Raman Scattering in DOAS, */
  /* J.F.
de Haan, SN-OMIE-KNMI-409, Version 1.0, May 18, 2003 */ /* N: Nuclear rotational angular momentum quantum number J: Total angular momentum quantum number E: Rotational energy of state in cm-1 From Table A-1 */ int N_E = 54; struct E_rot_O2 E_rot[N_E]; E_rot[0].N = 1; E_rot[0].J = 0; E_rot[0].E = 0.0000; E_rot[1].N = 1; E_rot[1].J = 2; E_rot[1].E = 2.0843; E_rot[2].N = 1; E_rot[2].J = 1; E_rot[2].E = 3.9611; E_rot[3].N = 3; E_rot[3].J = 2; E_rot[3].E = 16.2529; E_rot[4].N = 3; E_rot[4].J = 4; E_rot[4].E = 16.3876; E_rot[5].N = 3; E_rot[5].J = 3; E_rot[5].E = 18.3372; E_rot[6].N = 5; E_rot[6].J = 4; E_rot[6].E = 42.2001; E_rot[7].N = 5; E_rot[7].J = 6; E_rot[7].E = 42.2240; E_rot[8].N = 5; E_rot[8].J = 5; E_rot[8].E = 44.2117; E_rot[9].N = 7; E_rot[9].J = 8; E_rot[9].E = 79.5646; E_rot[10].N = 7; E_rot[10].J = 6; E_rot[10].E = 79.6070; E_rot[11].N = 7; E_rot[11].J = 7; E_rot[11].E = 81.5805; E_rot[12].N = 9; E_rot[12].J = 10; E_rot[12].E = 128.3978; E_rot[13].N = 9; E_rot[13].J = 8; E_rot[13].E = 128.4921; E_rot[14].N = 9; E_rot[14].J = 9; E_rot[14].E = 130.4376; E_rot[15].N = 11; E_rot[15].J = 12; E_rot[15].E = 188.7135; E_rot[16].N = 11; E_rot[16].J = 10; E_rot[16].E = 188.8532; E_rot[17].N = 11; E_rot[17].J = 11; E_rot[17].E = 190.7749; E_rot[18].N = 13; E_rot[18].J = 14; E_rot[18].E = 260.5011; E_rot[19].N = 13; E_rot[19].J = 12; E_rot[19].E = 260.6826; E_rot[20].N = 13; E_rot[20].J = 13; E_rot[20].E = 262.5829; E_rot[21].N = 15; E_rot[21].J = 16; E_rot[21].E = 343.7484; E_rot[22].N = 15; E_rot[22].J = 14; E_rot[22].E = 343.9697; E_rot[23].N = 15; E_rot[23].J = 15; E_rot[23].E = 345.8500; E_rot[24].N = 17; E_rot[24].J = 18; E_rot[24].E = 438.4418; E_rot[25].N = 17; E_rot[25].J = 16; E_rot[25].E = 438.7015; E_rot[26].N = 17; E_rot[26].J = 17; E_rot[26].E = 440.5620; E_rot[27].N = 19; E_rot[27].J = 20; E_rot[27].E = 544.5658; E_rot[28].N = 19; E_rot[28].J = 18; E_rot[28].E = 544.8628; E_rot[29].N = 19; E_rot[29].J = 19; E_rot[29].E = 546.7050; E_rot[30].N = 21; 
E_rot[30].J = 22; E_rot[30].E = 662.1030; E_rot[31].N = 21; E_rot[31].J = 20; E_rot[31].E = 662.4368; E_rot[32].N = 21; E_rot[32].J = 21; E_rot[32].E = 664.2610; E_rot[33].N = 23; E_rot[33].J = 24; E_rot[33].E = 791.0344; E_rot[34].N = 23; E_rot[34].J = 22; E_rot[34].E = 791.4045; E_rot[35].N = 23; E_rot[35].J = 23; E_rot[35].E = 793.2100; E_rot[36].N = 25; E_rot[36].J = 26; E_rot[36].E = 931.3390; E_rot[37].N = 25; E_rot[37].J = 24; E_rot[37].E = 931.7450; E_rot[38].N = 25; E_rot[38].J = 25; E_rot[38].E = 933.5330; E_rot[39].N = 27; E_rot[39].J = 28; E_rot[39].E = 1082.9941; E_rot[40].N = 27; E_rot[40].J = 26; E_rot[40].E = 1083.4356; E_rot[41].N = 27; E_rot[41].J = 27; E_rot[41].E = 1085.2060; E_rot[42].N = 29; E_rot[42].J = 30; E_rot[42].E = 1245.9750; E_rot[43].N = 29; E_rot[43].J = 28; E_rot[43].E = 1246.4518; E_rot[44].N = 29; E_rot[44].J = 29; E_rot[44].E = 1248.2040; E_rot[45].N = 31; E_rot[45].J = 32; E_rot[45].E = 1420.2552; E_rot[46].N = 31; E_rot[46].J = 30; E_rot[46].E = 1420.7672; E_rot[47].N = 31; E_rot[47].J = 31; E_rot[47].E = 1422.5020; E_rot[48].N = 33; E_rot[48].J = 34; E_rot[48].E = 1605.8064; E_rot[49].N = 33; E_rot[49].J = 32; E_rot[49].E = 1606.3533; E_rot[50].N = 33; E_rot[50].J = 33; E_rot[50].E = 1608.0710; E_rot[51].N = 35; E_rot[51].J = 36; E_rot[51].E = 1802.5983; E_rot[52].N = 35; E_rot[52].J = 34; E_rot[52].E = 1803.1802; E_rot[53].N = 35; E_rot[53].J = 35; E_rot[53].E = 1804.8810; /* N: Nuclear rotational angular momentum quantum number J: Total angular momentum quantum number E: Rotational energy of state in cm-1 From Table A-3 */ int N_E_trans = 185; struct E_rot_trans_O2 Etr[N_E_trans]; Etr[0].N = 33; Etr[0].J = 32; Etr[0].Np = 31; Etr[0].Jp = 30; Etr[0].E = 1606.3533; Etr[0].delta_E = -185.5861; Etr[0].cpt = 0.3630; Etr[1].N = 33; Etr[1].J = 33; Etr[1].Np = 31; Etr[1].Jp = 31; Etr[1].E = 1608.0710; Etr[1].delta_E = -185.5690; Etr[1].cpt = 0.3630; Etr[2].N = 33; Etr[2].J = 34; Etr[2].Np = 31; Etr[2].Jp = 32; Etr[2].E = 1605.8064; 
Etr[2].delta_E = -185.5512; Etr[2].cpt = 0.3637; Etr[3].N = 31; Etr[3].J = 30; Etr[3].Np = 29; Etr[3].Jp = 28; Etr[3].E = 1420.7672; Etr[3].delta_E = -174.3154; Etr[3].cpt = 0.3622; Etr[4].N = 31; Etr[4].J = 31; Etr[4].Np = 29; Etr[4].Jp = 29; Etr[4].E = 1422.5020; Etr[4].delta_E = -174.2980; Etr[4].cpt = 0.3622; Etr[5].N = 31; Etr[5].J = 32; Etr[5].Np = 29; Etr[5].Jp = 30; Etr[5].E = 1420.2552; Etr[5].delta_E = -174.2802; Etr[5].cpt = 0.3630; Etr[6].N = 29; Etr[6].J = 28; Etr[6].Np = 27; Etr[6].Jp = 26; Etr[6].E = 1246.4518; Etr[6].delta_E = -163.0162; Etr[6].cpt = 0.3613; Etr[7].N = 29; Etr[7].J = 29; Etr[7].Np = 27; Etr[7].Jp = 27; Etr[7].E = 1248.2040; Etr[7].delta_E = -162.9980; Etr[7].cpt = 0.3613; Etr[8].N = 29; Etr[8].J = 30; Etr[8].Np = 27; Etr[8].Jp = 28; Etr[8].E = 1245.9750; Etr[8].delta_E = -162.9809; Etr[8].cpt = 0.3622; Etr[9].N = 27; Etr[9].J = 26; Etr[9].Np = 25; Etr[9].Jp = 24; Etr[9].E = 1083.4355; Etr[9].delta_E = -151.6906; Etr[9].cpt = 0.3602; Etr[10].N = 27; Etr[10].J = 27; Etr[10].Np = 25; Etr[10].Jp = 25; Etr[10].E = 1085.2061; Etr[10].delta_E = -151.6730; Etr[10].cpt = 0.3602; Etr[11].N = 27; Etr[11].J = 28; Etr[11].Np = 25; Etr[11].Jp = 26; Etr[11].E = 1082.9941; Etr[11].delta_E = -151.6551; Etr[11].cpt = 0.3612; Etr[12].N = 25; Etr[12].J = 24; Etr[12].Np = 23; Etr[12].Jp = 22; Etr[12].E = 931.7450; Etr[12].delta_E = -140.3405; Etr[12].cpt = 0.3589; Etr[13].N = 25; Etr[13].J = 25; Etr[13].Np = 23; Etr[13].Jp = 23; Etr[13].E = 933.5330; Etr[13].delta_E = -140.3230; Etr[13].cpt = 0.3589; Etr[14].N = 25; Etr[14].J = 26; Etr[14].Np = 23; Etr[14].Jp = 24; Etr[14].E = 931.3390; Etr[14].delta_E = -140.3046; Etr[14].cpt = 0.3601; Etr[15].N = 23; Etr[15].J = 22; Etr[15].Np = 21; Etr[15].Jp = 20; Etr[15].E = 791.4045; Etr[15].delta_E = -128.9677; Etr[15].cpt = 0.3574; Etr[16].N = 23; Etr[16].J = 23; Etr[16].Np = 21; Etr[16].Jp = 21; Etr[16].E = 793.2100; Etr[16].delta_E = -128.9490; Etr[16].cpt = 0.3574; Etr[17].N = 23; Etr[17].J = 24; Etr[17].Np = 
21; Etr[17].Jp = 22; Etr[17].E = 791.0344; Etr[17].delta_E = -128.9314; Etr[17].cpt = 0.3589; Etr[18].N = 21; Etr[18].J = 20; Etr[18].Np = 19; Etr[18].Jp = 18; Etr[18].E = 662.4368; Etr[18].delta_E = -117.5740; Etr[18].cpt = 0.3556; Etr[19].N = 21; Etr[19].J = 21; Etr[19].Np = 19; Etr[19].Jp = 19; Etr[19].E = 664.2610; Etr[19].delta_E = -117.5560; Etr[19].cpt = 0.3556; Etr[20].N = 21; Etr[20].J = 22; Etr[20].Np = 19; Etr[20].Jp = 20; Etr[20].E = 662.1030; Etr[20].delta_E = -117.5372; Etr[20].cpt = 0.3573; Etr[21].N = 19; Etr[21].J = 19; Etr[21].Np = 17; Etr[21].Jp = 18; Etr[21].E = 546.7050; Etr[21].delta_E = -108.2632; Etr[21].cpt = 0.0021; Etr[22].N = 19; Etr[22].J = 18; Etr[22].Np = 17; Etr[22].Jp = 16; Etr[22].E = 544.8628; Etr[22].delta_E = -106.1613; Etr[22].cpt = 0.3533; Etr[23].N = 19; Etr[23].J = 19; Etr[23].Np = 17; Etr[23].Jp = 17; Etr[23].E = 546.7050; Etr[23].delta_E = -106.1430; Etr[23].cpt = 0.3534; Etr[24].N = 19; Etr[24].J = 20; Etr[24].Np = 17; Etr[24].Jp = 18; Etr[24].E = 544.5658; Etr[24].delta_E = -106.1240; Etr[24].cpt = 0.3555; Etr[25].N = 19; Etr[25].J = 18; Etr[25].Np = 17; Etr[25].Jp = 17; Etr[25].E = 544.8628; Etr[25].delta_E = -104.3008; Etr[25].cpt = 0.0022; Etr[26].N = 17; Etr[26].J = 17; Etr[26].Np = 15; Etr[26].Jp = 16; Etr[26].E = 440.5620; Etr[26].delta_E = -96.8136; Etr[26].cpt = 0.0026; Etr[27].N = 17; Etr[27].J = 16; Etr[27].Np = 15; Etr[27].Jp = 14; Etr[27].E = 438.7015; Etr[27].delta_E = -94.7318; Etr[27].cpt = 0.3505; Etr[28].N = 17; Etr[28].J = 17; Etr[28].Np = 15; Etr[28].Jp = 15; Etr[28].E = 440.5620; Etr[28].delta_E = -94.7120; Etr[28].cpt = 0.3506; Etr[29].N = 17; Etr[29].J = 18; Etr[29].Np = 15; Etr[29].Jp = 16; Etr[29].E = 438.4418; Etr[29].delta_E = -94.6934; Etr[29].cpt = 0.3532; Etr[30].N = 17; Etr[30].J = 16; Etr[30].Np = 15; Etr[30].Jp = 15; Etr[30].E = 438.7015; Etr[30].delta_E = -92.8515; Etr[30].cpt = 0.0028; Etr[31].N = 15; Etr[31].J = 15; Etr[31].Np = 13; Etr[31].Jp = 14; Etr[31].E = 345.8500; Etr[31].delta_E 
= -85.3489; Etr[31].cpt = 0.0033; Etr[32].N = 15; Etr[32].J = 14; Etr[32].Np = 13; Etr[32].Jp = 12; Etr[32].E = 343.9697; Etr[32].delta_E = -83.2871; Etr[32].cpt = 0.3468; Etr[33].N = 15; Etr[33].J = 15; Etr[33].Np = 13; Etr[33].Jp = 13; Etr[33].E = 345.8500; Etr[33].delta_E = -83.2671; Etr[33].cpt = 0.3471; Etr[34].N = 15; Etr[34].J = 16; Etr[34].Np = 13; Etr[34].Jp = 14; Etr[34].E = 343.7484; Etr[34].delta_E = -83.2473; Etr[34].cpt = 0.3504; Etr[35].N = 15; Etr[35].J = 14; Etr[35].Np = 13; Etr[35].Jp = 13; Etr[35].E = 343.9697; Etr[35].delta_E = -81.3868; Etr[35].cpt = 0.0036; Etr[36].N = 13; Etr[36].J = 13; Etr[36].Np = 11; Etr[36].Jp = 12; Etr[36].E = 262.5829; Etr[36].delta_E = -73.8694; Etr[36].cpt = 0.0044; Etr[37].N = 13; Etr[37].J = 12; Etr[37].Np = 11; Etr[37].Jp = 10; Etr[37].E = 260.6826; Etr[37].delta_E = -71.8294; Etr[37].cpt = 0.3418; Etr[38].N = 13; Etr[38].J = 13; Etr[38].Np = 11; Etr[38].Jp = 11; Etr[38].E = 262.5829; Etr[38].delta_E = -71.8080; Etr[38].cpt = 0.3422; Etr[39].N = 13; Etr[39].J = 14; Etr[39].Np = 11; Etr[39].Jp = 12; Etr[39].E = 260.5011; Etr[39].delta_E = -71.7876; Etr[39].cpt = 0.3467; Etr[40].N = 13; Etr[40].J = 12; Etr[40].Np = 11; Etr[40].Jp = 11; Etr[40].E = 260.6826; Etr[40].delta_E = -69.9077; Etr[40].cpt = 0.0048; Etr[41].N = 11; Etr[41].J = 11; Etr[41].Np = 9; Etr[41].Jp = 10; Etr[41].E = 190.7749; Etr[41].delta_E = -62.3771; Etr[41].cpt = 0.0062; Etr[42].N = 11; Etr[42].J = 10; Etr[42].Np = 9; Etr[42].Jp = 9; Etr[42].E = 188.8532; Etr[42].delta_E = -60.3611; Etr[42].cpt = 0.3348; Etr[43].N = 11; Etr[43].J = 11; Etr[43].Np = 9; Etr[43].Jp = 9; Etr[43].E = 190.7749; Etr[43].delta_E = -60.3373; Etr[43].cpt = 0.3354; Etr[44].N = 11; Etr[44].J = 12; Etr[44].Np = 9; Etr[44].Jp = 10; Etr[44].E = 188.7135; Etr[44].delta_E = -60.3157; Etr[44].cpt = 0.3416; Etr[45].N = 11; Etr[45].J = 10; Etr[45].Np = 9; Etr[45].Jp = 9; Etr[45].E = 188.8532; Etr[45].delta_E = -58.4156; Etr[45].cpt = 0.0068; Etr[46].N = 9; Etr[46].J = 9; Etr[46].Np 
= 7; Etr[46].Jp = 8; Etr[46].E = 130.4376; Etr[46].delta_E = -50.8730; Etr[46].cpt = 0.0093; Etr[47].N = 9; Etr[47].J = 8; Etr[47].Np = 7; Etr[47].Jp = 6; Etr[47].E = 128.4921; Etr[47].delta_E = -48.8851; Etr[47].cpt = 0.3220; Etr[48].N = 9; Etr[48].J = 9; Etr[48].Np = 7; Etr[48].Jp = 7; Etr[48].E = 130.4376; Etr[48].delta_E = -48.8571; Etr[48].cpt = 0.3251; Etr[49].N = 9; Etr[49].J = 10; Etr[49].Np = 7; Etr[49].Jp = 8; Etr[49].E = 128.3978; Etr[49].delta_E = -48.8332; Etr[49].cpt = 0.3344; Etr[50].N = 9; Etr[50].J = 8; Etr[50].Np = 7; Etr[50].Jp = 7; Etr[50].E = 128.4921; Etr[50].delta_E = -46.9116; Etr[50].cpt = 0.0113; Etr[51].N = 5; Etr[51].J = 4; Etr[51].Np = 1; Etr[51].Jp = 2; Etr[51].E = 42.2001; Etr[51].delta_E = -40.1158; Etr[51].cpt = 0.0011; Etr[52].N = 7; Etr[52].J = 7; Etr[52].Np = 5; Etr[52].Jp = 6; Etr[52].E = 81.5805; Etr[52].delta_E = -39.3565; Etr[52].cpt = 0.0139; Etr[53].N = 7; Etr[53].J = 6; Etr[53].Np = 5; Etr[53].Jp = 4; Etr[53].E = 79.6070; Etr[53].delta_E = -37.4069; Etr[53].cpt = 0.3013; Etr[54].N = 7; Etr[54].J = 7; Etr[54].Np = 5; Etr[54].Jp = 5; Etr[54].E = 81.5805; Etr[54].delta_E = -37.3688; Etr[54].cpt = 0.3077; Etr[55].N = 7; Etr[55].J = 8; Etr[55].Np = 5; Etr[55].Jp = 6; Etr[55].E = 79.5646; Etr[55].delta_E = -37.3406; Etr[55].cpt = 0.3223; Etr[56].N = 7; Etr[56].J = 6; Etr[56].Np = 5; Etr[56].Jp = 5; Etr[56].E = 79.6070; Etr[56].delta_E = -35.3953; Etr[56].cpt = 0.0198; Etr[57].N = 5; Etr[57].J = 5; Etr[57].Np = 3; Etr[57].Jp = 4; Etr[57].E = 44.2117; Etr[57].delta_E = -27.8241; Etr[57].cpt = 0.0261; Etr[58].N = 5; Etr[58].J = 4; Etr[58].Np = 3; Etr[58].Jp = 2; Etr[58].E = 42.2001; Etr[58].delta_E = -25.9472; Etr[58].cpt = 0.2544; Etr[59].N = 5; Etr[59].J = 5; Etr[59].Np = 3; Etr[59].Jp = 3; Etr[59].E = 44.2117; Etr[59].delta_E = -25.8745; Etr[59].cpt = 0.2727; Etr[60].N = 5; Etr[60].J = 6; Etr[60].Np = 3; Etr[60].Jp = 4; Etr[60].E = 42.2240; Etr[60].delta_E = -25.8364; Etr[60].cpt = 0.3020; Etr[61].N = 5; Etr[61].J = 4; 
Etr[61].Np = 3; Etr[61].Jp = 4; Etr[61].E = 42.2001; Etr[61].delta_E = -25.8125; Etr[61].cpt = 0.0015; Etr[62].N = 5; Etr[62].J = 4; Etr[62].Np = 3; Etr[62].Jp = 3; Etr[62].E = 42.2001; Etr[62].delta_E = -23.8629; Etr[62].cpt = 0.0434; Etr[63].N = 3; Etr[63].J = 2; Etr[63].Np = 1; Etr[63].Jp = 0; Etr[63].E = 16.2529; Etr[63].delta_E = -16.2529; Etr[63].cpt = 0.0923; Etr[64].N = 3; Etr[64].J = 3; Etr[64].Np = 1; Etr[64].Jp = 2; Etr[64].E = 18.3372; Etr[64].delta_E = -16.2529; Etr[64].cpt = 0.0660; Etr[65].N = 3; Etr[65].J = 3; Etr[65].Np = 1; Etr[65].Jp = 1; Etr[65].E = 18.3372; Etr[65].delta_E = -14.3761; Etr[65].cpt = 0.1714; Etr[66].N = 3; Etr[66].J = 4; Etr[66].Np = 1; Etr[66].Jp = 2; Etr[66].E = 16.3876; Etr[66].delta_E = -14.3033; Etr[66].cpt = 0.2571; Etr[67].N = 3; Etr[67].J = 2; Etr[67].Np = 1; Etr[67].Jp = 2; Etr[67].E = 16.2529; Etr[67].delta_E = -14.1686; Etr[67].cpt = 0.0184; Etr[68].N = 3; Etr[68].J = 2; Etr[68].Np = 1; Etr[68].Jp = 1; Etr[68].E = 16.2529; Etr[68].delta_E = -12.2918; Etr[68].cpt = 0.1615; Etr[69].N = 19; Etr[69].J = 19; Etr[69].Np = 19; Etr[69].Jp = 20; Etr[69].E = 546.7050; Etr[69].delta_E = -2.1392; Etr[69].cpt = 0.0020; Etr[70].N = 17; Etr[70].J = 17; Etr[70].Np = 17; Etr[70].Jp = 18; Etr[70].E = 440.5620; Etr[70].delta_E = -2.1202; Etr[70].cpt = 0.0024; Etr[71].N = 15; Etr[71].J = 15; Etr[71].Np = 15; Etr[71].Jp = 16; Etr[71].E = 345.8500; Etr[71].delta_E = -2.1016; Etr[71].cpt = 0.0031; Etr[72].N = 1; Etr[72].J = 2; Etr[72].Np = 1; Etr[72].Jp = 0; Etr[72].E = 2.0843; Etr[72].delta_E = -2.0843; Etr[72].cpt = 0.1077; Etr[73].N = 3; Etr[73].J = 3; Etr[73].Np = 3; Etr[73].Jp = 2; Etr[73].E = 18.3372; Etr[73].delta_E = -2.0843; Etr[73].cpt = 0.0769; Etr[74].N = 13; Etr[74].J = 13; Etr[74].Np = 13; Etr[74].Jp = 14; Etr[74].E = 262.5829; Etr[74].delta_E = -2.0818; Etr[74].cpt = 0.0041; Etr[75].N = 11; Etr[75].J = 11; Etr[75].Np = 11; Etr[75].Jp = 12; Etr[75].E = 190.7749; Etr[75].delta_E = -2.0614; Etr[75].cpt = 0.0057; Etr[76].N = 9; 
Etr[76].J = 9; Etr[76].Np = 9; Etr[76].Jp = 10; Etr[76].E = 130.4376; Etr[76].delta_E = -2.0398; Etr[76].cpt = 0.0083; Etr[77].N = 7; Etr[77].J = 7; Etr[77].Np = 7; Etr[77].Jp = 8; Etr[77].E = 81.5805; Etr[77].delta_E = -2.0159; Etr[77].cpt = 0.0122; Etr[78].N = 5; Etr[78].J = 5; Etr[78].Np = 5; Etr[78].Jp = 4; Etr[78].E = 44.2117; Etr[78].delta_E = -2.0116; Etr[78].cpt = 0.0284; Etr[79].N = 5; Etr[79].J = 5; Etr[79].Np = 5; Etr[79].Jp = 6; Etr[79].E = 44.2117; Etr[79].delta_E = -1.9877; Etr[79].cpt = 0.0221; Etr[80].N = 7; Etr[80].J = 7; Etr[80].Np = 7; Etr[80].Jp = 6; Etr[80].E = 81.5805; Etr[80].delta_E = -1.9735; Etr[80].cpt = 0.0147; Etr[81].N = 3; Etr[81].J = 3; Etr[81].Np = 3; Etr[81].Jp = 4; Etr[81].E = 18.3372; Etr[81].delta_E = -1.9496; Etr[81].cpt = 0.0513; Etr[82].N = 9; Etr[82].J = 9; Etr[82].Np = 9; Etr[82].Jp = 8; Etr[82].E = 130.4376; Etr[82].delta_E = -1.9455; Etr[82].cpt = 0.0083; Etr[83].N = 11; Etr[83].J = 11; Etr[83].Np = 11; Etr[83].Jp = 10; Etr[83].E = 190.7749; Etr[83].delta_E = -1.9217; Etr[83].cpt = 0.0056; Etr[84].N = 13; Etr[84].J = 13; Etr[84].Np = 13; Etr[84].Jp = 12; Etr[84].E = 262.5829; Etr[84].delta_E = -1.9003; Etr[84].cpt = 0.0041; Etr[85].N = 15; Etr[85].J = 15; Etr[85].Np = 15; Etr[85].Jp = 14; Etr[85].E = 345.8500; Etr[85].delta_E = -1.8803; Etr[85].cpt = 0.0031; Etr[86].N = 1; Etr[86].J = 1; Etr[86].Np = 1; Etr[86].Jp = 2; Etr[86].E = 3.9611; Etr[86].delta_E = -1.8768; Etr[86].cpt = 0.2308; Etr[87].N = 17; Etr[87].J = 17; Etr[87].Np = 17; Etr[87].Jp = 16; Etr[87].E = 440.5620; Etr[87].delta_E = -1.8605; Etr[87].cpt = 0.0024; Etr[88].N = 19; Etr[88].J = 19; Etr[88].Np = 19; Etr[88].Jp = 18; Etr[88].E = 546.7050; Etr[88].delta_E = -1.8422; Etr[88].cpt = 0.0020; Etr[89].N = 3; Etr[89].J = 4; Etr[89].Np = 3; Etr[89].Jp = 2; Etr[89].E = 16.3876; Etr[89].delta_E = -0.1347; Etr[89].cpt = 0.0021; Etr[90].N = 3; Etr[90].J = 2; Etr[90].Np = 3; Etr[90].Jp = 4; Etr[90].E = 16.2529; Etr[90].delta_E = 0.1347; Etr[90].cpt = 0.0038; 
Etr[91].N = 19; Etr[91].J = 18; Etr[91].Np = 19; Etr[91].Jp = 19; Etr[91].E = 544.8628; Etr[91].delta_E = 1.8422; Etr[91].cpt = 0.0021; Etr[92].N = 17; Etr[92].J = 16; Etr[92].Np = 17; Etr[92].Jp = 17; Etr[92].E = 438.7015; Etr[92].delta_E = 1.8605; Etr[92].cpt = 0.0026; Etr[93].N = 1; Etr[93].J = 2; Etr[93].Np = 1; Etr[93].Jp = 1; Etr[93].E = 2.0843; Etr[93].delta_E = 1.8768; Etr[93].cpt = 0.1385; Etr[94].N = 15; Etr[94].J = 14; Etr[94].Np = 15; Etr[94].Jp = 15; Etr[94].E = 343.9697; Etr[94].delta_E = 1.8803; Etr[94].cpt = 0.0033; Etr[95].N = 13; Etr[95].J = 12; Etr[95].Np = 13; Etr[95].Jp = 13; Etr[95].E = 260.6826; Etr[95].delta_E = 1.9003; Etr[95].cpt = 0.0044; Etr[96].N = 11; Etr[96].J = 10; Etr[96].Np = 11; Etr[96].Jp = 11; Etr[96].E = 188.8532; Etr[96].delta_E = 1.9217; Etr[96].cpt = 0.0062; Etr[97].N = 9; Etr[97].J = 8; Etr[97].Np = 9; Etr[97].Jp = 9; Etr[97].E = 128.4921; Etr[97].delta_E = 1.9455; Etr[97].cpt = 0.0092; Etr[98].N = 3; Etr[98].J = 4; Etr[98].Np = 3; Etr[98].Jp = 3; Etr[98].E = 16.3876; Etr[98].delta_E = 1.9496; Etr[98].cpt = 0.0399; Etr[99].N = 7; Etr[99].J = 6; Etr[99].Np = 7; Etr[99].Jp = 7; Etr[99].E = 79.6070; Etr[99].delta_E = 1.9735; Etr[99].cpt = 0.0170; Etr[100].N = 5; Etr[100].J = 6; Etr[100].Np = 5; Etr[100].Jp = 5; Etr[100].E = 42.2240; Etr[100].delta_E = 1.9877; Etr[100].cpt = 0.0187; Etr[101].N = 5; Etr[101].J = 4; Etr[101].Np = 5; Etr[101].Jp = 5; Etr[101].E = 42.2001; Etr[101].delta_E = 2.0116; Etr[101].cpt = 0.0347; Etr[102].N = 7; Etr[102].J = 8; Etr[102].Np = 7; Etr[102].Jp = 7; Etr[102].E = 79.5646; Etr[102].delta_E = 2.0159; Etr[102].cpt = 0.0108; Etr[103].N = 9; Etr[103].J = 10; Etr[103].Np = 9; Etr[103].Jp = 9; Etr[103].E = 128.3978; Etr[103].delta_E = 2.0398; Etr[103].cpt = 0.0075; Etr[104].N = 11; Etr[104].J = 12; Etr[104].Np = 11; Etr[104].Jp = 11; Etr[104].E = 188.7135; Etr[104].delta_E = 2.0614; Etr[104].cpt = 0.0052; Etr[105].N = 13; Etr[105].J = 14; Etr[105].Np = 13; Etr[105].Jp = 13; Etr[105].E = 260.5011; 
Etr[105].delta_E = 2.0818; Etr[105].cpt = 0.0038; Etr[106].N = 1; Etr[106].J = 0; Etr[106].Np = 1; Etr[106].Jp = 2; Etr[106].E = 0.0000; Etr[106].delta_E = 2.0843; Etr[106].cpt = 0.5383; Etr[107].N = 3; Etr[107].J = 2; Etr[107].Np = 3; Etr[107].Jp = 3; Etr[107].E = 16.2529; Etr[107].delta_E = 2.0843; Etr[107].cpt = 0.1077; Etr[108].N = 15; Etr[108].J = 16; Etr[108].Np = 15; Etr[108].Jp = 15; Etr[108].E = 343.7484; Etr[108].delta_E = 2.1016; Etr[108].cpt = 0.0029; Etr[109].N = 17; Etr[109].J = 18; Etr[109].Np = 17; Etr[109].Jp = 17; Etr[109].E = 438.4418; Etr[109].delta_E = 2.1202; Etr[109].cpt = 0.0023; Etr[110].N = 19; Etr[110].J = 20; Etr[110].Np = 19; Etr[110].Jp = 19; Etr[110].E = 544.5658; Etr[110].delta_E = 2.1392; Etr[110].cpt = 0.0019; Etr[111].N = 1; Etr[111].J = 1; Etr[111].Np = 3; Etr[111].Jp = 2; Etr[111].E = 3.9611; Etr[111].delta_E = 12.2918; Etr[111].cpt = 0.2692; Etr[112].N = 1; Etr[112].J = 2; Etr[112].Np = 3; Etr[112].Jp = 2; Etr[112].E = 2.0843; Etr[112].delta_E = 14.1686; Etr[112].cpt = 0.0184; Etr[113].N = 1; Etr[113].J = 2; Etr[113].Np = 3; Etr[113].Jp = 4; Etr[113].E = 2.0843; Etr[113].delta_E = 14.3033; Etr[113].cpt = 0.4628; Etr[114].N = 1; Etr[114].J = 1; Etr[114].Np = 3; Etr[114].Jp = 3; Etr[114].E = 3.9611; Etr[114].delta_E = 14.3761; Etr[114].cpt = 0.4000; Etr[115].N = 1; Etr[115].J = 0; Etr[115].Np = 3; Etr[115].Jp = 2; Etr[115].E = 0.0000; Etr[115].delta_E = 16.2529; Etr[115].cpt = 0.4617; Etr[116].N = 1; Etr[116].J = 2; Etr[116].Np = 3; Etr[116].Jp = 3; Etr[116].E = 2.0843; Etr[116].delta_E = 16.2529; Etr[116].cpt = 0.0923; Etr[117].N = 3; Etr[117].J = 3; Etr[117].Np = 5; Etr[117].Jp = 4; Etr[117].E = 18.3372; Etr[117].delta_E = 23.8629; Etr[117].cpt = 0.0558; Etr[118].N = 3; Etr[118].J = 4; Etr[118].Np = 5; Etr[118].Jp = 4; Etr[118].E = 16.3876; Etr[118].delta_E = 25.8125; Etr[118].cpt = 0.0015; Etr[119].N = 3; Etr[119].J = 4; Etr[119].Np = 5; Etr[119].Jp = 6; Etr[119].E = 16.3876; Etr[119].delta_E = 25.8364; Etr[119].cpt = 0.4362; 
Etr[120].N = 3; Etr[120].J = 3; Etr[120].Np = 5; Etr[120].Jp = 5; Etr[120].E = 18.3372; Etr[120].delta_E = 25.8745; Etr[120].cpt = 0.4286; Etr[121].N = 3; Etr[121].J = 2; Etr[121].Np = 5; Etr[121].Jp = 4; Etr[121].E = 16.2529; Etr[121].delta_E = 25.9472; Etr[121].cpt = 0.4579; Etr[122].N = 3; Etr[122].J = 4; Etr[122].Np = 5; Etr[122].Jp = 5; Etr[122].E = 16.3876; Etr[122].delta_E = 27.8241; Etr[122].cpt = 0.0319; Etr[123].N = 5; Etr[123].J = 5; Etr[123].Np = 7; Etr[123].Jp = 6; Etr[123].E = 44.2117; Etr[123].delta_E = 35.3953; Etr[123].cpt = 0.0234; Etr[124].N = 5; Etr[124].J = 6; Etr[124].Np = 7; Etr[124].Jp = 8; Etr[124].E = 42.2240; Etr[124].delta_E = 37.3406; Etr[124].cpt = 0.4214; Etr[125].N = 5; Etr[125].J = 5; Etr[125].Np = 7; Etr[125].Jp = 7; Etr[125].E = 44.2117; Etr[125].delta_E = 37.3688; Etr[125].cpt = 0.4196; Etr[126].N = 5; Etr[126].J = 4; Etr[126].Np = 7; Etr[126].Jp = 6; Etr[126].E = 42.2001; Etr[126].delta_E = 37.4069; Etr[126].cpt = 0.4352; Etr[127].N = 5; Etr[127].J = 6; Etr[127].Np = 7; Etr[127].Jp = 7; Etr[127].E = 42.2240; Etr[127].delta_E = 39.3565; Etr[127].cpt = 0.0160; Etr[128].N = 1; Etr[128].J = 2; Etr[128].Np = 5; Etr[128].Jp = 4; Etr[128].E = 2.0843; Etr[128].delta_E = 40.1158; Etr[128].cpt = 0.0019; Etr[129].N = 7; Etr[129].J = 7; Etr[129].Np = 9; Etr[129].Jp = 8; Etr[129].E = 81.5805; Etr[129].delta_E = 46.9116; Etr[129].cpt = 0.0128; Etr[130].N = 7; Etr[130].J = 8; Etr[130].Np = 9; Etr[130].Jp = 10; Etr[130].E = 79.5646; Etr[130].delta_E = 48.8332; Etr[130].cpt = 0.4130; Etr[131].N = 7; Etr[131].J = 7; Etr[131].Np = 9; Etr[131].Jp = 9; Etr[131].E = 81.5805; Etr[131].delta_E = 48.8571; Etr[131].cpt = 0.4118; Etr[132].N = 7; Etr[132].J = 6; Etr[132].Np = 9; Etr[132].Jp = 8; Etr[132].E = 79.6070; Etr[132].delta_E = 48.8851; Etr[132].cpt = 0.4210; Etr[133].N = 7; Etr[133].J = 8; Etr[133].Np = 9; Etr[133].Jp = 9; Etr[133].E = 79.5646; Etr[133].delta_E = 50.8730; Etr[133].cpt = 0.0104; Etr[134].N = 9; Etr[134].J = 9; Etr[134].Np = 11; 
Etr[134].Jp = 10; Etr[134].E = 130.4376; Etr[134].delta_E = 58.4156; Etr[134].cpt = 0.0075; Etr[135].N = 9; Etr[135].J = 10; Etr[135].Np = 11; Etr[135].Jp = 12; Etr[135].E = 128.3978; Etr[135].delta_E = 60.3157; Etr[135].cpt = 0.4067; Etr[136].N = 9; Etr[136].J = 9; Etr[136].Np = 11; Etr[136].Jp = 11; Etr[136].E = 130.4376; Etr[136].delta_E = 60.3373; Etr[136].cpt = 0.4060; Etr[137].N = 9; Etr[137].J = 8; Etr[137].Np = 11; Etr[137].Jp = 10; Etr[137].E = 128.4921; Etr[137].delta_E = 60.3611; Etr[137].cpt = 0.4135; Etr[138].N = 9; Etr[138].J = 10; Etr[138].Np = 11; Etr[138].Jp = 11; Etr[138].E = 128.3978; Etr[138].delta_E = 62.3771; Etr[138].cpt = 0.0068; Etr[139].N = 11; Etr[139].J = 11; Etr[139].Np = 13; Etr[139].Jp = 12; Etr[139].E = 190.7749; Etr[139].delta_E = 69.9077; Etr[139].cpt = 0.0052; Etr[140].N = 11; Etr[140].J = 12; Etr[140].Np = 13; Etr[140].Jp = 14; Etr[140].E = 188.7135; Etr[140].delta_E = 71.7876; Etr[140].cpt = 0.4021; Etr[141].N = 11; Etr[141].J = 11; Etr[141].Np = 13; Etr[141].Jp = 13; Etr[141].E = 190.7749; Etr[141].delta_E = 71.8080; Etr[141].cpt = 0.4017; Etr[142].N = 11; Etr[142].J = 10; Etr[142].Np = 13; Etr[142].Jp = 12; Etr[142].E = 188.8532; Etr[142].delta_E = 71.8294; Etr[142].cpt = 0.4070; Etr[143].N = 11; Etr[143].J = 12; Etr[143].Np = 13; Etr[143].Jp = 13; Etr[143].E = 188.7135; Etr[143].delta_E = 73.8694; Etr[143].cpt = 0.0048; Etr[144].N = 13; Etr[144].J = 13; Etr[144].Np = 15; Etr[144].Jp = 14; Etr[144].E = 262.5829; Etr[144].delta_E = 81.3868; Etr[144].cpt = 0.0038; Etr[145].N = 13; Etr[145].J = 14; Etr[145].Np = 15; Etr[145].Jp = 16; Etr[145].E = 260.5011; Etr[145].delta_E = 83.2473; Etr[145].cpt = 0.3987; Etr[146].N = 13; Etr[146].J = 13; Etr[146].Np = 15; Etr[146].Jp = 15; Etr[146].E = 262.5829; Etr[146].delta_E = 83.2671; Etr[146].cpt = 0.3985; Etr[147].N = 13; Etr[147].J = 12; Etr[147].Np = 15; Etr[147].Jp = 14; Etr[147].E = 260.6826; Etr[147].delta_E = 83.2871; Etr[147].cpt = 0.4023; Etr[148].N = 13; Etr[148].J = 14; 
Etr[148].Np = 15; Etr[148].Jp = 15; Etr[148].E = 260.5011; Etr[148].delta_E = 85.3489; Etr[148].cpt = 0.0036; Etr[149].N = 15; Etr[149].J = 15; Etr[149].Np = 17; Etr[149].Jp = 16; Etr[149].E = 345.8500; Etr[149].delta_E = 92.8515; Etr[149].cpt = 0.0029; Etr[150].N = 15; Etr[150].J = 16; Etr[150].Np = 17; Etr[150].Jp = 18; Etr[150].E = 343.7484; Etr[150].delta_E = 94.6934; Etr[150].cpt = 0.3961; Etr[151].N = 15; Etr[151].J = 15; Etr[151].Np = 17; Etr[151].Jp = 17; Etr[151].E = 345.8500; Etr[151].delta_E = 94.7120; Etr[151].cpt = 0.3959; Etr[152].N = 15; Etr[152].J = 14; Etr[152].Np = 17; Etr[152].Jp = 16; Etr[152].E = 343.9697; Etr[152].delta_E = 94.7318; Etr[152].cpt = 0.3988; Etr[153].N = 15; Etr[153].J = 16; Etr[153].Np = 17; Etr[153].Jp = 17; Etr[153].E = 343.7484; Etr[153].delta_E = 96.8136; Etr[153].cpt = 0.0028; Etr[154].N = 17; Etr[154].J = 17; Etr[154].Np = 19; Etr[154].Jp = 18; Etr[154].E = 440.5620; Etr[154].delta_E = 104.3008; Etr[154].cpt = 0.0023; Etr[155].N = 17; Etr[155].J = 18; Etr[155].Np = 19; Etr[155].Jp = 20; Etr[155].E = 438.4418; Etr[155].delta_E = 106.1240; Etr[155].cpt = 0.3939; Etr[156].N = 17; Etr[156].J = 17; Etr[156].Np = 19; Etr[156].Jp = 19; Etr[156].E = 440.5620; Etr[156].delta_E = 106.1430; Etr[156].cpt = 0.3938; Etr[157].N = 17; Etr[157].J = 16; Etr[157].Np = 19; Etr[157].Jp = 18; Etr[157].E = 438.7015; Etr[157].delta_E = 106.1613; Etr[157].cpt = 0.3961; Etr[158].N = 17; Etr[158].J = 18; Etr[158].Np = 19; Etr[158].Jp = 19; Etr[158].E = 438.4418; Etr[158].delta_E = 108.2632; Etr[158].cpt = 0.0022; Etr[159].N = 19; Etr[159].J = 19; Etr[159].Np = 21; Etr[159].Jp = 20; Etr[159].E = 546.7050; Etr[159].delta_E = 115.7318; Etr[159].cpt = 0.0019; Etr[160].N = 19; Etr[160].J = 20; Etr[160].Np = 21; Etr[160].Jp = 22; Etr[160].E = 544.5658; Etr[160].delta_E = 117.5372; Etr[160].cpt = 0.3922; Etr[161].N = 19; Etr[161].J = 19; Etr[161].Np = 21; Etr[161].Jp = 21; Etr[161].E = 546.7050; Etr[161].delta_E = 117.5560; Etr[161].cpt = 0.3921; 
Etr[162].N = 19; Etr[162].J = 18; Etr[162].Np = 21; Etr[162].Jp = 20; Etr[162].E = 544.8628; Etr[162].delta_E = 117.5740; Etr[162].cpt = 0.3940; Etr[163].N = 19; Etr[163].J = 20; Etr[163].Np = 21; Etr[163].Jp = 21; Etr[163].E = 544.5658; Etr[163].delta_E = 119.6952; Etr[163].cpt = 0.0018; Etr[164].N = 21; Etr[164].J = 22; Etr[164].Np = 23; Etr[164].Jp = 24; Etr[164].E = 662.1030; Etr[164].delta_E = 128.9314; Etr[164].cpt = 0.3908; Etr[165].N = 21; Etr[165].J = 21; Etr[165].Np = 23; Etr[165].Jp = 23; Etr[165].E = 664.2610; Etr[165].delta_E = 128.9490; Etr[165].cpt = 0.3907; Etr[166].N = 21; Etr[166].J = 20; Etr[166].Np = 23; Etr[166].Jp = 22; Etr[166].E = 662.4368; Etr[166].delta_E = 128.9677; Etr[166].cpt = 0.3922; Etr[167].N = 23; Etr[167].J = 24; Etr[167].Np = 25; Etr[167].Jp = 26; Etr[167].E = 791.0344; Etr[167].delta_E = 140.3046; Etr[167].cpt = 0.3895; Etr[168].N = 23; Etr[168].J = 23; Etr[168].Np = 25; Etr[168].Jp = 25; Etr[168].E = 793.2100; Etr[168].delta_E = 140.3230; Etr[168].cpt = 0.3895; Etr[169].N = 23; Etr[169].J = 22; Etr[169].Np = 25; Etr[169].Jp = 24; Etr[169].E = 791.4045; Etr[169].delta_E = 140.3405; Etr[169].cpt = 0.3908; Etr[170].N = 25; Etr[170].J = 26; Etr[170].Np = 27; Etr[170].Jp = 28; Etr[170].E = 931.3390; Etr[170].delta_E = 151.6551; Etr[170].cpt = 0.3885; Etr[171].N = 25; Etr[171].J = 25; Etr[171].Np = 27; Etr[171].Jp = 27; Etr[171].E = 933.5330; Etr[171].delta_E = 151.6730; Etr[171].cpt = 0.3885; Etr[172].N = 25; Etr[172].J = 24; Etr[172].Np = 27; Etr[172].Jp = 26; Etr[172].E = 931.7450; Etr[172].delta_E = 151.6906; Etr[172].cpt = 0.3896; Etr[173].N = 27; Etr[173].J = 28; Etr[173].Np = 29; Etr[173].Jp = 30; Etr[173].E = 1082.9941; Etr[173].delta_E = 162.9809; Etr[173].cpt = 0.3876; Etr[174].N = 27; Etr[174].J = 27; Etr[174].Np = 29; Etr[174].Jp = 29; Etr[174].E = 1085.2061; Etr[174].delta_E = 162.9980; Etr[174].cpt = 0.3876; Etr[175].N = 27; Etr[175].J = 26; Etr[175].Np = 29; Etr[175].Jp = 28; Etr[175].E = 1083.4355; Etr[175].delta_E = 
163.0162; Etr[175].cpt = 0.3885; Etr[176].N = 29; Etr[176].J = 30; Etr[176].Np = 31; Etr[176].Jp = 32; Etr[176].E = 1245.9750; Etr[176].delta_E = 174.2802; Etr[176].cpt = 0.3868; Etr[177].N = 29; Etr[177].J = 29; Etr[177].Np = 31; Etr[177].Jp = 31; Etr[177].E = 1248.2040; Etr[177].delta_E = 174.2980; Etr[177].cpt = 0.3868; Etr[178].N = 29; Etr[178].J = 28; Etr[178].Np = 31; Etr[178].Jp = 30; Etr[178].E = 1246.4518; Etr[178].delta_E = 174.3154; Etr[178].cpt = 0.3876; Etr[179].N = 31; Etr[179].J = 32; Etr[179].Np = 33; Etr[179].Jp = 34; Etr[179].E = 1420.2552; Etr[179].delta_E = 185.5512; Etr[179].cpt = 0.3861; Etr[180].N = 31; Etr[180].J = 31; Etr[180].Np = 33; Etr[180].Jp = 33; Etr[180].E = 1422.5020; Etr[180].delta_E = 185.5690; Etr[180].cpt = 0.3861; Etr[181].N = 31; Etr[181].J = 30; Etr[181].Np = 33; Etr[181].Jp = 32; Etr[181].E = 1420.7672; Etr[181].delta_E = 185.5861; Etr[181].cpt = 0.3868; Etr[182].N = 33; Etr[182].J = 34; Etr[182].Np = 35; Etr[182].Jp = 36; Etr[182].E = 1605.8064; Etr[182].delta_E = 196.7919; Etr[182].cpt = 0.3855; Etr[183].N = 33; Etr[183].J = 33; Etr[183].Np = 35; Etr[183].Jp = 35; Etr[183].E = 1608.0710; Etr[183].delta_E = 196.8100; Etr[183].cpt = 0.3855; Etr[184].N = 33; Etr[184].J = 32; Etr[184].Np = 35; Etr[184].Jp = 34; Etr[184].E = 1606.3533; Etr[184].delta_E = 196.8269; Etr[184].cpt = 0.3861; double E_J = 0; /* Rotational energy of state J */ double W_J = 0; /* Fraction of molecules in the rotational state J at temperature T */ double b_J = 0; /* Placzek-Teller coefficient */ double lambda_inv_cm = 0, lambda_cm = 0, lambda_shifted_inv_cm = 0, lambda_shifted_nm = 0, lambda_shifted_cm = 0; double delta_nu = 0; double crs_i = 0, crs_j = 0; double fNJ = 0, Z = 0; /* Eq 5 and 10 */ double gam_i = 0, gam_j = 0; double volume_mixing_ratio = 0.2095; double sum = 0; int J = 0; /* Rotational state J */ int g_J = 0; int is = 0; int status = 0; int ivi = 0; if (verbose) { fprintf (stderr, "Calculating Raman scattering wavelength shifts and 
cross sections "); fprintf (stderr, "for O2, wavelength %5.1f nm, number of transitions %d, T=%6.2f, iv=%d.\n", lambda, n_transitions, temper, *iv); fprintf (stderr, " %3s %1s %7s %8s %7s %9s %10s %9s %17s %9s %12s %7s %12s %12s %11s %14s %12s\n", "ivi", "J", "E_J", "g_J", "W_J", "b_J", "W_J*b_J", "wvl", "wvl_shift_cm-1", "delta_nu", "wvl_shift_nm", "gam_i", "gam_j", "crs_i", "crs_j", "crs_i*vmr", "crs_j*vmr"); } ivi = *iv; sum = 0; for (is = 0; is < N_E; is++) { g_J = 1; /* Only states for odd J are included in the summation */ J = E_rot[is].J; E_J = E_rot[is].E * hc; sum += g_J * (2 * J + 1) * exp (-E_J / (BOLTZMANN * temper)); } Z = sum; /* fprintf(stderr," %13.6e", Z); */ for (is = 0; is < n_transitions; is++) { g_J = 1; /* Only states for odd J are included in the summation */ J = Etr[is].J; E_J = Etr[is].E * hc; W_J = g_J * (2 * J + 1) * exp (-E_J / (BOLTZMANN * temper)); fNJ = W_J / Z; b_J = Etr[is].cpt; delta_nu = -Etr[is].delta_E; /* Negative delta_E corresponds to a photon with a larger */ /* energy, shorter wavelength than the incident photon */ /* delta_E is the change in rotational energy of the */ /* molecule. The minus puts the photon in the right */ /* wavelength. 
*/ lambda_inv_cm = nm_to_inv_cm (lambda); lambda_shifted_inv_cm = lambda_inv_cm + delta_nu; lambda_shifted_nm = inv_cm_to_nm (lambda_shifted_inv_cm); lambda_shifted_cm = lambda_shifted_nm * nm_to_cm; lambda_cm = lambda * nm_to_cm; gam_i = polarizability_anisotropy_O2 (lambda_shifted_inv_cm); gam_j = polarizability_anisotropy_O2 (lambda_inv_cm); crs_i = fNJ * gam_i * gam_i * raman_const * b_J / pow (lambda_shifted_cm, 4); crs_j = fNJ * gam_j * gam_j * raman_const * b_J / pow (lambda_cm, 4); (*crs)[ivi][0] = lambda_shifted_nm; (*crs)[ivi][1] = crs_i * volume_mixing_ratio; (*crs)[ivi][2] = crs_j * volume_mixing_ratio; if (verbose) fprintf (stderr, "Raman_crs_O2 %2d %2d %12.6e %2d %12.6e %6.3f %12.6e %10.6f %10.6f %10.4f %10.6f %12.6e %12.6e %12.6e %12.6e %12.6e " "%12.6e\n", ivi, J, E_J, g_J, W_J, b_J, W_J * b_J, lambda, lambda_shifted_inv_cm, delta_nu, lambda_shifted_nm, gam_i, gam_j, crs_i, crs_j, crs_i * volume_mixing_ratio, crs_j * volume_mixing_ratio); ivi++; } *iv = ivi; return status; } /***********************************************************************************/ /* Function: dewpoint */ /* Description: */ /* Calculates the dewpoint temperature in K */ /* */ /* Parameters: */ /* float press_h2o partial pressure of water vapour in hPa */ /* Return value: */ /* float dewpoint dewpoint temperature in K */ /* */ /* Example: */ /* Files: ancillary.c */ /* Known bugs: - */ /* Author: */ /* Jan 2009 U. 
Hamann converted from Fortran to C (see wvapour.f) */ /* */ /***********************************************************************************/ float dewpoint (float press_h2o) { float dewpoint = NOT_DEFINED_FLOAT; if (press_h2o != 0.0) { press_h2o = log (press_h2o); dewpoint = 273.15 + (243.5 * press_h2o - 440.8) / (19.48 - press_h2o); } else { dewpoint = 0.0; /* 0 Kelvin */ } return dewpoint; } /***********************************************************************************/ /* Function: Tcon */ /* Description: */ /* THIS FUNCTION RETURNS THE TEMPERATURE TCON (CELSIUS) AT */ /* THE LIFTING CONDENSATION LEVEL, GIVEN THE TEMPERATURE T (CELSIUS) */ /* AND THE DEW POINT D (CELSIUS). */ /* */ /* Parameters: */ /* T - REAL TEMPERATURE (K) */ /* TD - REAL DEWPOINT TEMPERATURE (K) */ /* Return value: */ /* float Tcon temperature at the lifting condensation level (CELSIUS) */ /* */ /* Example: */ /* Files: ancillary.c */ /* Known bugs: - */ /* Author: */ /* May 1982 D. Baker, T. Schlatter original version */ /* Jan 2009 U. Hamann converted from Fortran to C (see wvapour.f) */ /* change Celsius to Kelvin */ /* */ /***********************************************************************************/ float Tcon (float T, float TD) { float S = NOT_DEFINED_FLOAT; float dT = NOT_DEFINED_FLOAT; float Tcon = NOT_DEFINED_FLOAT; /* compute the dew point depression S. */ S = T - TD; /* the approximation below, a third order polynomial in S and T, */ /* is due to herman wobus. the source of data for fitting the */ /* polynomial is unknown. 
*/ dT = S * (1.2185 + 1.278e-3 * (T - 273.15) + S * (-2.19e-3 + 1.173e-5 * S - 5.2e-6 * (T - 273.15))); Tcon = T - dT; return Tcon; } /***********************************************************************************/ /* Function: EPT */ /* Description: */ /* This function returns the equivalent potential temperature EPT */ /* (Celsius) for a parcel of air initially at temperature t (celsius), */ /* dew point Td (celsius) and pressure p (millibars). The formula used */ /* is eq.(43) in Bolton, David, 1980: "the computation of equivalent */ /* potential temperature," Monthly Weather Review, vol. 108, no. 7 */ /* (july), pp. 1046-1053. the maximum error in ept in 0.3c. in most */ /* cases the error is less than 0.1c. */ /* */ /* Parameters: */ /* T - REAL TEMPERATURE (K) */ /* TD - REAL DEWPOINT TEMPERATURE (K) */ /* p - REAL PRESSURE (hPa) */ /* Return value: */ /* float EPT equivalent potential temperature (CELSIUS) */ /* */ /* Example: */ /* Files: ancillary.c */ /* Known bugs: - */ /* Author: */ /* May 1982 T. Schlatter original version */ /* Jan 2009 U. Hamann converted from Fortran to C (see wvapour.f) */ /* replaced function WMR by real calculaton */ /* change Celsius to Kelvin */ /* */ /***********************************************************************************/ float EPT (float T, float TD, float p, float N_AIR, float N_H2O) { float kappa = (C_P_DRY_STD - C_V_DRY_STD) / C_P_DRY_STD; float W = NOT_DEFINED_FLOAT; float TL = NOT_DEFINED_FLOAT; float PT = NOT_DEFINED_FLOAT; float EPT = NOT_DEFINED_FLOAT; /* replaced the wmr (compute the water mixing ratio) function as */ /* it is not valid for all wanted temperatures and pressures !!! */ /* 1000 == kg(water)/kg(dry air) -> g(water)/kg(dry air) */ W = N_H2O * MOL_MASS_WV / (N_AIR * MOL_MASS_AIR) * 1000.; /* compute the temperature (celsius) at the lifting condensation level. */ TL = Tcon (T, TD); PT = T * pow (1000. / p, kappa * (1. - 0.00028 * W)); EPT = PT * exp ((3.376 / TL - 0.00254) * W * (1. 
+ 0.00081 * W)); return EPT; }
{ "alphanum_fraction": 0.4928968355, "avg_line_length": 38.287175553, "ext": "c", "hexsha": "c128ed21b5192897ba52c0c7c4c9738fd2781e5e", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0182f991db6a13e0cacb3bf9f43809e6850593e4", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "AmberCrafter/docker-compose_libRadtran", "max_forks_repo_path": "ubuntu20/projects/libRadtran-2.0.4/src/ancillary.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "0182f991db6a13e0cacb3bf9f43809e6850593e4", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "AmberCrafter/docker-compose_libRadtran", "max_issues_repo_path": "ubuntu20/projects/libRadtran-2.0.4/src/ancillary.c", "max_line_length": 180, "max_stars_count": null, "max_stars_repo_head_hexsha": "0182f991db6a13e0cacb3bf9f43809e6850593e4", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "AmberCrafter/docker-compose_libRadtran", "max_stars_repo_path": "ubuntu20/projects/libRadtran-2.0.4/src/ancillary.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 175964, "size": 598888 }
/*---
Flow*: A Verification Tool for Cyber-Physical Systems.
Authors: Xin Chen, Sriram Sankaranarayanan, and Erika Abraham.
Email: Xin Chen <chenxin415@gmail.com> if you have questions or comments.

The code is released as is under the GNU General Public License (GPL).
---*/

#ifndef MATRIX_H_
#define MATRIX_H_

#include "include.h"
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_linalg.h>

namespace flowstar
{

/* Forward declarations: these types are defined elsewhere in the library;
 * only references, pointers, or friend relations to them appear below. */
class Real;
class Interval;
class UnivariatePolynomial;
class Polynomial;
class RowVector;
class ColVector;
class rMatrix;
class iMatrix;
class upMatrix;
class mpMatrix;
class iMatrix2;
class HybridSystem;
class TaylorModelVec;

// The matrix class is implemented based on the data structure of gsl matrix.
// It wraps a gsl_matrix of doubles and offers construction, element access,
// arithmetic operators, and several decompositions.
class Matrix
{
private:
	gsl_matrix *data;	// underlying GSL matrix storage

public:
	Matrix();
	Matrix(const int m, const int n);	// Create an m x n matrix, all of the entries are 0.
	Matrix(const int n);			// Create an n x n matrix, all of the entries are 0.
	Matrix(const Matrix & A);
	~Matrix();

	double get(const int i, const int j) const;		// Get the entry at position [i,j].
	void set(const double v, const int i, const int j);	// Set A[i,j] = v.

	int rows() const;
	int cols() const;

	void row(RowVector & result, const int i) const;	// Return the (i+1)-st row.

	void sortColumns();	// Sort the columns by size in descending order.
	int rank() const;

	void neg(Matrix & result) const;	// result = -(*this)
	void neg_assign();			// negate in place
	void inverse(Matrix & result) const;
	void inverse_assign();
	void transpose(Matrix & result) const;
	void svd(Matrix & U) const;	// singular value decomposition; NOTE(review): role of U to be confirmed in Matrix.cpp
	void QR(Matrix & D);		// QR-related factorization; NOTE(review): semantics of D to be confirmed in Matrix.cpp
	void QRfactor(Matrix & Q);

	void output(FILE *fp) const;	// write a textual dump of the matrix to fp

	Matrix & operator += (const Matrix & A);
	Matrix & operator -= (const Matrix & A);
	Matrix & operator *= (const Matrix & A);
	Matrix operator + (const Matrix & A) const;
	Matrix operator - (const Matrix & A) const;
	Matrix operator * (const Matrix & A) const;
	Matrix & operator = (const Matrix & A);
};

// A row vector backed by the Matrix class above.
class RowVector
{
private:
	Matrix vec;	// storage for the vector entries

public:
	RowVector();
	RowVector(const int n);
	RowVector(const RowVector & v);
	~RowVector();

	void set(const double v, const int pos);	// vec[pos] = v
	double get(const int pos) const;
	int size() const;
	void transpose(ColVector & result) const;	// result is the corresponding column vector
	void neg(RowVector & result) const;
	void neg_assign();
	void dump(FILE *fp) const;
	double innerProd(const RowVector & v) const;	// inner product with v
	double EuclideanNorm() const;
	void normalize();

	bool operator == (const RowVector & v) const;

	RowVector & operator += (const RowVector & v);
	RowVector & operator -= (const RowVector & v);
	RowVector operator + (const RowVector & v) const;
	RowVector operator - (const RowVector & v) const;
	RowVector & operator = (const RowVector & v);

	friend class ColVector;
};

// A column vector backed by the Matrix class above.
class ColVector
{
private:
	Matrix vec;	// storage for the vector entries

public:
	ColVector();
	ColVector(const int n);
	ColVector(const ColVector & v);
	~ColVector();

	void set(const double v, const int pos);	// vec[pos] = v
	double get(const int pos) const;
	int size() const;
	void transpose(RowVector & result) const;	// result is the corresponding row vector
	void neg(ColVector & result) const;
	void neg_assign();
	void mul(ColVector & result, const Matrix & m) const;	// matrix-vector product; NOTE(review): operand order to be confirmed in Matrix.cpp
	void mul_assign(const Matrix & m);

	ColVector & operator += (const ColVector & v);
	ColVector & operator -= (const ColVector & v);
	ColVector operator + (const ColVector & v) const;
	ColVector operator - (const ColVector & v) const;
	ColVector & operator = (const ColVector & v);

	friend class RowVector;
};
// Matrix of boolean entries; the dimensions are kept in size1 and size2.
class bMatrix
{
protected:
	bool *data;	// entry storage
	int size1;	// number of rows
	int size2;	// number of columns

public:
	bMatrix();
	bMatrix(const int m, const int n);
	bMatrix(const bMatrix & B);
	~bMatrix();

	int rows() const;
	int cols() const;

	void output(FILE *fp) const;

	bMatrix & operator += (const bMatrix & B);

	bool * operator [] (const int i);	// access to row i
	bMatrix & operator = (const bMatrix & B);
};

// Matrix of Real entries. The _RNDD/_RNDU method pairs presumably perform the
// operation with downward/upward rounding respectively -- confirm in the
// implementation file.
class rMatrix
{
protected:
	Real *data;	// entry storage
	int size1;	// number of rows
	int size2;	// number of columns

public:
	rMatrix();
	rMatrix(const int m, const int n);	// zero matrix
	rMatrix(const int n);			// identity matrix
	rMatrix(const rMatrix & rmatrix);
	~rMatrix();

	int rows() const;
	int cols() const;

	void abs(rMatrix & result) const;		// entry-wise absolute value
	void transpose(rMatrix & result) const;

	void add_RNDD(rMatrix & result, const rMatrix & rmatrix) const;
	void add_assign_RNDD(const rMatrix & rmatrix);
	void add_RNDU(rMatrix & result, const rMatrix & rmatrix) const;
	void add_assign_RNDU(const rMatrix & rmatrix);

	void mul_RNDD(rMatrix & result, const rMatrix & rmatrix) const;
	void mul_RNDU(rMatrix & result, const rMatrix & rmatrix) const;

	void output(FILE *fp) const;

	rMatrix & operator += (const rMatrix & B);
	rMatrix & operator -= (const rMatrix & B);
	rMatrix & operator *= (const rMatrix & B);
	rMatrix operator + (const rMatrix & B) const;
	rMatrix operator - (const rMatrix & B) const;
	rMatrix operator * (const rMatrix & B) const;

	rMatrix & operator *= (const Real & r);
	rMatrix & operator *= (const double d);

	rMatrix & operator = (const rMatrix & rmatrix);	// should always be the same precision

	Real * operator [] (const int i);	// access to row i

	friend void to_iMatrix2(iMatrix2 & result, const rMatrix & lo, const rMatrix & up);

	friend class iMatrix;
	friend class iMatrix2;
	friend class upMatrix;
};

// matrix for intervals
class iMatrix
{
protected:
	Interval *data;	// entry storage
	int size1;	// number of rows
	int size2;	// number of columns

public:
	iMatrix();
	iMatrix(const int m, const int n);	// zero matrix
	iMatrix(const int n);			// identity matrix
	iMatrix(const iMatrix & A);
	iMatrix(const rMatrix & A);
	iMatrix(const iMatrix2 & A);
	iMatrix(const std::vector<Interval> & box);
	iMatrix(const std::string & matlab_format);	// parse a matrix from a MATLAB-style string
	~iMatrix();

	void clear();

	void mul(iMatrix & result, iMatrix & A);

	int rows() const;
	int cols() const;
	bool isZero() const;

	void pow(iMatrix & result, const int order) const;	// matrix power
	void pow_assign(const int order);

	double max_norm() const;
	void max_norm(Real & norm) const;

	void transpose(iMatrix & result) const;
	void times_pars(mpMatrix & result) const;

	void center();

	void linearTrans(std::vector<Polynomial> & result, const std::vector<Polynomial> & polyVec) const;

	void right_scale_assign(const std::vector<Interval> & scalars);

	void to_iMatrix2(iMatrix2 & A) const;	// convert to the center-radius representation

	void output(FILE *fp) const;

	iMatrix & operator += (const iMatrix & A);
	iMatrix & operator += (const Real & rad);
	iMatrix & operator -= (const iMatrix & A);
	iMatrix & operator *= (const iMatrix & A);
	iMatrix & operator *= (const Interval & I);
	iMatrix & operator *= (const double c);
	iMatrix & operator /= (const Interval & I);
	iMatrix & operator /= (const double c);

	iMatrix operator + (const iMatrix & A) const;
	iMatrix operator - (const iMatrix & A) const;
	iMatrix operator * (const iMatrix & A) const;
	iMatrix operator * (const Interval & I) const;
	iMatrix operator * (const double c) const;
	upMatrix operator * (const upMatrix & upm) const;
	mpMatrix operator * (const mpMatrix & mpm) const;
	TaylorModelVec operator * (const TaylorModelVec & tmv) const;

	Interval * operator [] (const int i);	// access to row i
	iMatrix & operator = (const iMatrix & A);
	iMatrix & operator = (const rMatrix & A);
	iMatrix & operator = (const iMatrix2 & A);
	iMatrix & operator = (const std::vector<Interval> & box);

	friend class upMatrix;
	friend class mpMatrix;
	friend class rMatrix;
	friend class iMatrix2;
	friend class Zonotope;
};

// Interval matrix kept in center-radius form: the matrix is represented by
// the two real matrices 'center' and 'radius'.
class iMatrix2
{
public:
	rMatrix center;	// center part of the interval matrix
	rMatrix radius;	// radius part of the interval matrix

public:
	iMatrix2();
	iMatrix2(const int m, const int n);	// zero matrix
	iMatrix2(const int n);			// identity matrix
	iMatrix2(const iMatrix2 & A);
	iMatrix2(const iMatrix & A);
	iMatrix2(const std::vector<Interval> & box);
	~iMatrix2();

	int rows() const;
	int cols() const;

	void to_iMatrix(iMatrix & A) const;	// convert back to the plain interval representation
	void transpose(iMatrix2 & result) const;

	void mag(Interval & I, const int i, const int j);	// magnitude of entry (i,j)
	void mag(Real & r, const int i, const int j);

	void add_assign(const Interval & I, const int i, const int j);	// add I to entry (i,j)

	iMatrix2 & operator += (const iMatrix2 & A);
	iMatrix2 & operator += (const iMatrix & A);
	iMatrix2 & operator += (const Real & rad);
	iMatrix2 & operator *= (const iMatrix2 & A);
	iMatrix2 & operator *= (const Interval & I);

	iMatrix2 operator + (const iMatrix2 & A) const;
	iMatrix2 operator * (const iMatrix2 & A) const;
	iMatrix operator * (const iMatrix & A) const;
	upMatrix operator * (const upMatrix & upm) const;
	TaylorModelVec operator * (const TaylorModelVec & tmv) const;

	iMatrix2 & operator = (const iMatrix2 & A);

	void output(FILE *fp) const;

	friend void to_iMatrix2(iMatrix2 & result, const rMatrix & lo, const rMatrix & up);

	friend class upMatrix;
	friend class mpMatrix;
	friend class rMatrix;
	friend class iMatrix;
	friend class Zonotope;
	friend class Polynomial;
	friend class HybridSystem;
};

// matrix for univariate polynomials
class upMatrix
{
protected:
	UnivariatePolynomial *data;	// entry storage
	int size1;	// number of rows
	int size2;	// number of columns

public:
	upMatrix();
	upMatrix(const int m, const int n);
	upMatrix(const int n);
	upMatrix(const iMatrix & A);
	upMatrix(const iMatrix2 & A);
	upMatrix(const upMatrix & upm);
	~upMatrix();

	int rows() const;
	int cols() const;
	int degree() const;
	bool isZero() const;

	void intEval(iMatrix & result, const std::vector<Interval> & val_exp_table) const;	// interval evaluation based on the monomial form
	void intEval(iMatrix & result, const Interval & val) const;	// interval evaluation based on the Horner form
	void intEval(iMatrix2 & result, const std::vector<Interval> & val_exp_table) const;
	void intEval(iMatrix2 & result, const Interval & val) const;

	void integral();
	void times_x(const int order);

	void transpose(upMatrix & result) const;
	void times_pars(mpMatrix & result) const;

	/* ctrunc: truncation at the given order; NOTE(review): presumably the
	 * truncated part is evaluated into the interval remainder(s) rem --
	 * confirm in the implementation. nctrunc drops terms without remainder. */
	void ctrunc(iMatrix & rem, const int order, const std::vector<Interval> & val_exp_table);
	void ctrunc(iMatrix & rem, const int order, const Interval & val);
	void ctrunc(iMatrix & rem1, iMatrix & rem2, const int order, const std::vector<Interval> & val1_exp_table, const std::vector<Interval> & val2_exp_table);
	void ctrunc(iMatrix & rem1, iMatrix & rem2, const int order, const Interval & val1, const Interval & val2);
	void ctrunc(const int order, const std::vector<Interval> & val_exp_table);
	void ctrunc(const int order, const Interval & val);
	void nctrunc(const int order);

	void substitute(upMatrix & result, const std::vector<UnivariatePolynomial> & t_exp_table) const;
	void substitute(upMatrix & result, const UnivariatePolynomial & t) const;

	void output(FILE *fp) const;

	void decompose(upMatrix & positive, upMatrix & negative, iMatrix2 & im2_rem) const;

	upMatrix & operator += (const upMatrix & upm);
	upMatrix & operator += (const iMatrix & A);
	upMatrix & operator += (const Real & rad);
	upMatrix & operator -= (const upMatrix & upm);
	upMatrix & operator -= (const iMatrix & A);
	upMatrix & operator *= (const upMatrix & upm);
	upMatrix & operator *= (const iMatrix & A);
	upMatrix & operator *= (const Interval & I);

	upMatrix operator + (const upMatrix & upm) const;
	upMatrix operator + (const iMatrix & A) const;
	upMatrix operator - (const upMatrix & upm) const;
	upMatrix operator - (const iMatrix & A) const;
	upMatrix operator * (const upMatrix & upm) const;
	upMatrix operator * (const iMatrix & A) const;
	upMatrix operator * (const iMatrix2 & A) const;
	upMatrix operator * (const Interval & I) const;
	mpMatrix operator * (const mpMatrix & mpm) const;

	UnivariatePolynomial * operator [] (const int i);	// access to row i
	upMatrix & operator = (const upMatrix & upm);

	friend class iMatrix;
	friend class iMatrix2;
};

// matrix for multivariate polynomials
class mpMatrix
{
protected:
	Polynomial *data;	// entry storage
	int size1;	// number of rows
	int size2;	// number of columns

public:
	mpMatrix();
	mpMatrix(const int m, const int n);
	mpMatrix(const int n);
	mpMatrix(const mpMatrix & mpm);
	~mpMatrix();

	int rows() const;
	int cols() const;

	void intEval(mpMatrix & result, const std::vector<Interval> & val_exp_table) const;
	void intEval(iMatrix & result, const std::vector<Interval> & domain) const;

	void output(FILE *fp, const std::vector<std::string> & varNames) const;	// print using the given variable names

	mpMatrix & operator += (const mpMatrix & mpm);
	mpMatrix operator + (const mpMatrix & mpm) const;
	Polynomial * operator [] (const int i);	// access to row i
	mpMatrix & operator = (const mpMatrix & mpm);

	friend class iMatrix;
	friend class upMatrix;
};

// State shared with the matrix parser: the expression text to parse and the
// interval matrix it produces (see parse_Matrix and matrixParseSetting).
class MatrixParseSetting
{
public:
	std::string strExpression;	// textual matrix expression to be parsed
	iMatrix result;			// matrix obtained from parsing

public:
	MatrixParseSetting();
	MatrixParseSetting(const MatrixParseSetting & setting);
	~MatrixParseSetting();

	MatrixParseSetting & operator = (const MatrixParseSetting & setting);
};

// Build an iMatrix2 from a pair of lower/upper bound matrices.
void to_iMatrix2(iMatrix2 & result, const rMatrix & lo, const rMatrix & up);

extern MatrixParseSetting matrixParseSetting;	// global parser state

}	// namespace flowstar

// Parser entry point, declared outside the namespace.
void parse_Matrix();

#endif /* MATRIX_H_ */
{ "alphanum_fraction": 0.7051029261, "avg_line_length": 25.2067594433, "ext": "h", "hexsha": "107ce6bbbcf1cb1aa105c514dc35afe939295749", "lang": "C", "max_forks_count": 12, "max_forks_repo_forks_event_max_datetime": "2021-10-05T04:16:44.000Z", "max_forks_repo_forks_event_min_datetime": "2018-02-05T15:13:05.000Z", "max_forks_repo_head_hexsha": "763e5817cca2b69f0e96560835a442434980b3a8", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "souradeep-111/sherlock_2", "max_forks_repo_path": "flowstar-release/Matrix.h", "max_issues_count": 4, "max_issues_repo_head_hexsha": "763e5817cca2b69f0e96560835a442434980b3a8", "max_issues_repo_issues_event_max_datetime": "2021-01-15T14:32:02.000Z", "max_issues_repo_issues_event_min_datetime": "2018-02-09T07:58:44.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "souradeep-111/sherlock_2", "max_issues_repo_path": "flowstar-release/Matrix.h", "max_line_length": 154, "max_stars_count": 34, "max_stars_repo_head_hexsha": "bf34fb4713e5140b893c98382055fb963230d69d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "souradeep-111/sherlock", "max_stars_repo_path": "flowstar-release/Matrix.h", "max_stars_repo_stars_event_max_datetime": "2022-03-08T19:21:00.000Z", "max_stars_repo_stars_event_min_datetime": "2018-02-17T14:18:57.000Z", "num_tokens": 3291, "size": 12679 }
#ifndef basis_dcab07d2_8e6a_4150_b6fb_1eb09f015f76_h
#define basis_dcab07d2_8e6a_4150_b6fb_1eb09f015f76_h

#include <gslib\error.h>
//#include <gslib\string.h>
#include <gslib\std.h>
#include <rathen\config.h>
#include <rathen\buildin.h>

__rathen_begin__

// Abstract interface describing a rathen language type (name, storage size,
// operator result typing, and operator attributes).
class __gs_novtable type abstract
{
public:
    virtual ~type() {}
    virtual const gchar* get_name() const = 0;              // canonical type name, e.g. "int"
    virtual int get_size() const = 0;                       // storage size in bytes
    virtual type* get_oprret(const oprinfo& opr) = 0;       // result type of applying operator opr
    /*
     * Commutative attribute, if an operator was commutative, then a series of transformation could be done
     * in the expression optimization, for example,
     * it would always prefer to put the variables to the left side of the operator, and the constants to the
     * right side, so that we could generate the code try to put the constants within the instructions.
     */
    // [sic] "is_comutative" — the misspelling is part of the public interface.
    // Default: every operator except assignment is treated as commutative.
    virtual bool is_comutative(const oprinfo& opr) const
    {
        if (opr.opr == _t("="))
            return false;
        return true;
    }
    /* Complex attribute, a complex type always has a construction/destruction operation. */
    virtual bool is_complex() const { return false; }
};

// Base for "complex" types: those that need construction/destruction code.
class __gs_novtable com_type abstract:
    public type
{
public:
    virtual bool is_complex() const { return true; }
};

// Singleton registry mapping type names to owned type instances.
class type_manager
{
public:
    typedef std::pair<string, type*> type_pair;
    typedef unordered_map<string, type*> type_map;
    typedef type_map::iterator iterator;
    typedef type_map::const_iterator const_iterator;

public:
    static type_manager* get_singleton_ptr()
    {
        static type_manager inst;
        return &inst;
    }
    ~type_manager()
    {
        // NOTE(review): entries registered via regtype() are allocated with
        // gs_new and released on failure with gs_del, but destroyed here with
        // plain `delete` — confirm gs_new/gs_del wrap plain new/delete.
        std::for_each(_tpmap.begin(), _tpmap.end(), [](const type_pair& tp) { delete tp.second; });
        _tpmap.clear();
    }
    // Lookup by C string / counted string / string object; 0 when absent.
    type* find_type(const gchar* name) { return find_type(string(name)); }
    type* find_type(const gchar* name, int len) { return find_type(string(name, len)); }
    type* find_type(const string& name)
    {
        auto i = _tpmap.find(name);
        return i == _tpmap.end() ? 0 : i->second;
    }

private:
    type_map            _tpmap;     // owns the registered type instances

public:
    // Instantiate and register a concrete type class; returns false (and
    // frees the instance) if a type with the same name is already registered.
    template<class _cst>
    bool regtype()
    {
        type* t = gs_new(_cst);
        if(!_reginner(t)) {
            gs_del(type, t);
            return false;
        }
        return true;
    }

private:
    type_manager();
    bool _reginner(type* t)
    {
        assert(t);
        if(find_type(t->get_name()))
            return false;
        _tpmap.insert(std::make_pair(string(t->get_name()), t));
        return true;
    }
};

#define _type_manager type_manager::get_singleton_ptr()

// Built-in primitive types. get_oprret is still a stub for most of them.
class int_type:
    public type
{
public:
    virtual const gchar* get_name() const { return _t("int"); }
    virtual int get_size() const { return 4; }
    virtual type* get_oprret(const oprinfo& opr) { return this; }       // TODO
};

class bool_type:
    public type
{
public:
    virtual const gchar* get_name() const { return _t("bool"); }
    virtual int get_size() const { return 4; }
    virtual type* get_oprret(const oprinfo& opr) { return this; }       // TODO
};

class void_type:
    public type
{
public:
    virtual const gchar* get_name() const { return _t("void"); }
    virtual int get_size() const { return 0; }
    virtual type* get_oprret(const oprinfo& opr) { return 0; }          // void supports no operators
};

class string_type:
    public com_type
{
public:
    virtual const gchar* get_name() const { return _t("string"); }
    virtual int get_size() const { return (int)sizeof(gs::string); }
    virtual type* get_oprret(const oprinfo& opr) { return this; }       // TODO
};

// Base of every named entity the front end tracks (data, constants, blocks,
// nodes, scopes). Concrete subclasses report their category via get_tag().
class __gs_novtable object abstract
{
public:
    enum
    {
        tag_data,
        tag_const,
        tag_block,
        tag_node,
        tag_scope,
        //tag_
        //...
}; protected: string _name; public: object() {} virtual ~object() {} virtual void set_name(const gchar* name) { _name = name; } virtual uint get_tag() const = 0; virtual bool is_holder() const = 0; virtual const string& get_name() const { return _name; } }; struct unikey { public: typedef gchar* vckey; typedef const gchar* cckey; protected: cckey _key; public: unikey(): _key(0) {} unikey(const gchar* k): _key(k) {} unikey(const string& k): _key(k.c_str()) {} unikey(const object* obj): _key(obj->get_name().c_str()) {} const gchar* get_key() const { return _key; } }; struct indexing { public: struct hash { size_t operator()(const unikey& k) const { return string_hash(k.get_key()); } }; struct equal { bool operator()(const unikey& k1, const unikey& k2) const { return string_hash(k1.get_key()) == string_hash(k2.get_key()); } }; typedef unordered_map<unikey, object*, hash, equal> unimap; typedef unimap::iterator iterator; typedef unimap::const_iterator const_iterator; protected: unimap _pairs; public: indexing() {} ~indexing() {} bool add_value(object* ptr) { assert(ptr); if(_pairs.insert(std::make_pair(unikey(ptr), ptr)).second) { set_error(_t("udt insert dp failed, maybe an error in name mangling.")); return false; } return true; } object* find_value(const gchar* name) { assert(name); iterator i = _pairs.find(unikey(name)); return i == _pairs.end() ? 0 :i->second; } const object* find_value(const gchar* name) const { assert(name); const_iterator i = _pairs.find(unikey(name)); return i == _pairs.end() ? 
0 : i->second; } }; // class __gnvt pass_object: // virtual public object // { // public: // virtual bool is_holder() const { return false; } // }; // // class __gnvt hold_object: // virtual public object // { // public: // virtual bool is_holder() const { return true; } // virtual void add_indexing(object* obj) { _indexing.add_value(obj); } // virtual indexing& get_indexing() { return _indexing; } // // protected: // indexing _indexing; // // public: // object* find_object(const gchar* name) { return _indexing.find_value(name); } // const object* find_object(const gchar* name) const { return _indexing.find_value(name); } // }; // class block: // public object // { // protected: // indexing _indexing; // // public: // virtual void add_indexing(object* obj) { _indexing.add_value(obj); } // virtual indexing& get_indexing() { return _indexing; } // // public: // object* find_object(const gchar* name) { return _indexing.find_value(name); } // const object* find_object(const gchar* name) const { return _indexing.find_value(name); } // }; // // class data: // public object // { // protected: // type* _type; // // public: // virtual tag get_tag() const { return tag_data; } // virtual void set_type(type* t) { _type = t; } // virtual type* get_type() const { return _type; } // }; // // class reference: // public object // { // protected: // string _origin; // // public: // virtual tag get_tag() const { return tag_const; } // virtual void set_origin(const gchar* str, int len) { _origin.assign(str, len); } // virtual const gchar* get_origin() const { return _origin.c_str(); } // // public: // void set_origin(const gchar* str) { set_origin(str, strtool::length(str)); } // }; __rathen_end__ #endif
{ "alphanum_fraction": 0.6018975825, "avg_line_length": 26.7152777778, "ext": "h", "hexsha": "cc998e860bfb91c1dc9adc42c5529815e467f224", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2016-10-19T15:20:58.000Z", "max_forks_repo_forks_event_min_datetime": "2016-10-19T15:20:58.000Z", "max_forks_repo_head_hexsha": "1b165b7a812526c4b2a3179588df9a7c2ff602a6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "lymastee/gslib", "max_forks_repo_path": "include/rathen/basis.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "1b165b7a812526c4b2a3179588df9a7c2ff602a6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "lymastee/gslib", "max_issues_repo_path": "include/rathen/basis.h", "max_line_length": 110, "max_stars_count": 9, "max_stars_repo_head_hexsha": "1b165b7a812526c4b2a3179588df9a7c2ff602a6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "lymastee/gslib", "max_stars_repo_path": "include/rathen/basis.h", "max_stars_repo_stars_event_max_datetime": "2022-02-11T09:44:51.000Z", "max_stars_repo_stars_event_min_datetime": "2016-10-18T09:40:09.000Z", "num_tokens": 1952, "size": 7694 }
#include <gsl/gsl_multifit.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#include "2d_array.h"
#include "const.h"
#include "input.h"
#include "utilities.h"
#include "misc.h"

/******************************************************************************
MODULE:  band_robust_outlier_test

PURPOSE: Shared worker for greenband_test/nirband_test. Robustly fits a
         linear trend (intercept + slope over the observation date) to one
         spectral band of the clear observations in [start, end], then flags
         each observation whose residual from the fit exceeds the threshold
         (bl_ids[i] = 1; 0 otherwise).

RETURN VALUE:
Type = int
ERROR           error out due to memory allocation
SUCCESS         no error encountered

NOTES: flag_upper != 0 flags residuals ABOVE +n_t*rmse (cloud test, green
       band); flag_upper == 0 flags residuals BELOW -n_t*rmse (shadow test,
       NIR band).
******************************************************************************/
static int band_robust_outlier_test
(
    int *clrx,            /* I: dates of clear observations                  */
    float **clry,         /* I: per-band reflectances of clear observations  */
    int start,            /* I: first index (inclusive) of the test window   */
    int end,              /* I: last index (inclusive) of the test window    */
    float rmse,           /* I: adjusted RMSE of the tested band             */
    float n_t,            /* I: threshold multiplier                         */
    int *bl_ids,          /* O: per-observation flags (1 = bad)              */
    float *C0,            /* O: fitted intercept                             */
    float *C1,            /* O: fitted slope                                 */
    int band,             /* I: band index into clry to fit and test         */
    int flag_upper,       /* I: non-zero = flag positive residuals           */
    const char *func_name /* I: caller name for error reporting              */
)
{
    int i;
    float **x;            /* predictor column (dates) for the robust fit     */
    float pred;           /* fitted value at date clrx[i+start]              */
    float resid;          /* observed minus fitted                           */
    int nums;             /* number of observations in the window            */
    float coefs[ROBUST_COEFFS];

    nums = end - start + 1;

    /* Allocate memory */
    x = (float **)allocate_2d_array(nums, ROBUST_COEFFS - 1, sizeof(float));
    if (x == NULL)
    {
        RETURN_ERROR("ERROR allocating x memory", func_name, ERROR);
    }

    for (i = 0; i < nums; i++)
    {
        x[i][0] = (float)clrx[i + start];
    }

    /* robust (outlier-resistant) linear fit of the selected band */
    auto_robust_fit(x, clry, nums, start, band, coefs);
    *C0 = coefs[0];
    *C1 = coefs[1];

    /* flag observations whose residual from the fitted line is too large */
    for (i = 0; i < nums; i++)
    {
        pred = coefs[0] + coefs[1] * (float)clrx[i + start];
        resid = clry[band][i + start] - pred;
        if ((flag_upper && resid > (n_t * rmse)) ||
            (!flag_upper && resid < -(n_t * rmse)))
        {
            bl_ids[i] = 1;
        }
        else
        {
            bl_ids[i] = 0;
        }
    }

    /* Free allocated memory */
    if (free_2d_array((void **)x) != SUCCESS)
    {
        RETURN_ERROR("Freeing memory: x\n", func_name, ERROR);
    }
    x = NULL;

    return (SUCCESS);
}

/******************************************************************************
MODULE:  greenband_test

PURPOSE: Multitemporal cloud test: flags observations whose green-band
         (index 1) reflectance rises more than n_t*rmse above the robust
         trend (clouds brighten the green band).

RETURN VALUE:
Type = int
ERROR           error out due to memory allocation
SUCCESS         no error encountered

HISTORY:
Date        Programmer       Reason
--------    ---------------  -------------------------------------
05282019    Su Ye

******************************************************************************/
int greenband_test
(
    int *clrx,
    float **clry,
    int start,
    int end,
    float rmse,
    float n_t,
    int *bl_ids,
    float *C0,
    float *C1
)
{
    return band_robust_outlier_test(clrx, clry, start, end, rmse, n_t,
                                    bl_ids, C0, C1, 1, 1, "greenband_test");
}

/******************************************************************************
MODULE:  nirband_test

PURPOSE: nirband_test used to filter out shadow: flags observations whose
         NIR-band (index 3) reflectance drops more than n_t*rmse below the
         robust trend (shadows darken the NIR band).

RETURN VALUE:
Type = int
ERROR           error out due to memory allocation
SUCCESS         no error encountered

HISTORY:
Date        Programmer       Reason
--------    ---------------  -------------------------------------
05282019    Su Ye

******************************************************************************/
int nirband_test
(
    int *clrx,
    float **clry,
    int start,
    int end,
    float rmse,
    float n_t,
    int *bl_ids,
    float *C0,
    float *C1
)
{
    return band_robust_outlier_test(clrx, clry, start, end, rmse, n_t,
                                    bl_ids, C0, C1, 3, 0, "nirband_test");
}

/******************************************************************************
MODULE:  average_compositing

PURPOSE: Running average compositing for pixel-based time series

RETURN VALUE:
Type = int (SUCCESS, ERROR or FAILURE) HISTORY: Date Programmer Reason -------- --------------- ------------------------------------- 06/02/2019 Su Ye Original Development ******************************************************************************/ int average_compositing ( short int **buf, /* I: pixel-based time series */ int *valid_date_array, /* I: valid date time series */ int valid_date_count, /* I: the number of valid dates */ int lower_ordinal, int upper_ordinal, int i_col, short int **out_compositing /* O: outputted compositing results for four bands */ ) { int i, j; double index_sum[TOTAL_IMAGE_BANDS]; int valid_count_window= 0; for(i = 0; i < TOTAL_IMAGE_BANDS; i++) index_sum[i] = 0; for(i = 0; i < valid_date_count; i++) { if((valid_date_array[i] > lower_ordinal - 1) && (valid_date_array[i] < upper_ordinal + 1)) { for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { index_sum[j] = index_sum[j] + buf[j][i]; } valid_count_window++; } } if(valid_count_window==0) { for(i = 0; i < TOTAL_IMAGE_BANDS; i++) { out_compositing[i][i_col] = -9999; } return SUCCESS; } for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { out_compositing[j][i_col] = (short int)(index_sum[j] / valid_count_window); } } /****************************************************************************** MODULE: average_compositing PURPOSE: Running average compositing for pixel-based time series RETURN VALUE: Type = int (SUCCESS, ERROR or FAILURE) HISTORY: Date Programmer Reason -------- --------------- ------------------------------------- 06/03/2019 Su Ye Original Development ******************************************************************************/ int median_compositing ( short int **buf, /* I: pixel-based time series */ int *valid_date_array, /* I: valid date time series */ int valid_date_count, /* I: the number of valid dates */ int i_col, short int **out_compositing /* O: outputted compositing results for four bands */ ) { char FUNC_NAME[] = "median_compositing"; short int *var; /* pointer for allocation 
variable memory */ int i, j, m; if (valid_date_count == 1) { for (i = 0; i < TOTAL_IMAGE_BANDS; i++) out_compositing[i][i_col] = buf[i][0]; return SUCCESS; } var = malloc(valid_date_count * sizeof(short int)); if (var == NULL) { RETURN_ERROR ("Allocating var memory", FUNC_NAME, ERROR); } for (i = 0; i < TOTAL_IMAGE_BANDS; i++) { for (j = 0; j < valid_date_count; j++) { var[j] = buf[i][j]; } quick_sort_float(var, 0, valid_date_count-1); // for (j = 0; j < dim2_end; j++) // { // printf("%f\n", var[j]); // } m = (valid_date_count) / 2; if (valid_date_count % 2 == 0) { //printf("%f\n", var[m-1]); //printf("%f\n", var[m]); out_compositing[i][i_col] = (short int)(var[m-1] + var[m]) / 2.0; } else out_compositing[i][i_col] = var[m]; } free(var); return SUCCESS; } /****************************************************************************** MODULE: hot_compositing PURPOSE: Running compositing for pixel-based time series RETURN VALUE: Type = int (SUCCESS, ERROR or FAILURE) HISTORY: Date Programmer Reason -------- --------------- ------------------------------------- 05/02/2019 Su Ye Original Development ******************************************************************************/ int hot_compositing ( short int **buf, /* I: pixel-based time series */ int *valid_date_array, /* I: valid date time series */ int valid_date_count, /* I: the number of valid dates */ int lower_ordinal, int upper_ordinal, int i_col, short int **out_compositing /* O: outputted compositing results for four bands */ ) { int i, j; double wt; double index_sum[TOTAL_IMAGE_BANDS]; for(i = 0; i < TOTAL_IMAGE_BANDS; i++) index_sum[i] = 0; double wt_sum = 0; int valid_count_window = 0; for(i = 0; i < valid_date_count; i++) { if((valid_date_array[i] > lower_ordinal - 1) && (valid_date_array[i] < upper_ordinal + 1)) { wt = (double)1.0/((buf[BLUE_INDEX][i] - 0.5 * buf[RED_INDEX][i]) * (buf[BLUE_INDEX][i] - 0.5 * buf[RED_INDEX][i])); //wt = (float)1/(abs(buf[BLUE_INDEX][i] - 0.5 * buf[RED_INDEX][i])); for(j = 
0; j < TOTAL_IMAGE_BANDS; j++) { index_sum[j] = index_sum[j] + buf[j][i] * wt; } wt_sum = wt_sum + wt; valid_count_window++; } } /********************************************/ /* condition 1: zero valid observation */ /********************************************/ if(valid_count_window==0) { for(i = 0; i < TOTAL_IMAGE_BANDS; i++) { out_compositing[i][i_col] = -9999; } return SUCCESS; } /********************************************/ /* condition 2: standard procedures */ /********************************************/ for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { out_compositing[j][i_col] = (short int)(index_sum[j] / wt_sum); //out_compositing[j][i_col] = (short int)valid_count_window; } return SUCCESS; } /****************************************************************************** MODULE: modified_hot_compositing PURPOSE: Running compositing for pixel-based time series by adding shadow consideration RETURN VALUE: Type = int (SUCCESS, ERROR or FAILURE) HISTORY: Date Programmer Reason -------- --------------- ------------------------------------- 06/22/2019 Su Ye Original Development ******************************************************************************/ int modified_hot_compositing ( short int **buf, /* I: pixel-based time series */ int *valid_date_array, /* I: valid date time series */ int valid_date_count, /* I: the number of valid dates */ int lower_ordinal, int upper_ordinal, int i_col, short int **out_compositing /* O: outputted compositing results for four bands */ ) { int i, j; int status; double wt; double wt_shadow; double wt_cloud; double index_sum[TOTAL_IMAGE_BANDS]; for(i = 0; i < TOTAL_IMAGE_BANDS; i++) index_sum[i] = 0; double wt_sum = 0; int valid_count_window = 0; short int** ts_subset; short int* ts_subset_selected_shadow; short int* ts_subset_selected_blue; short int variogram_shadow; short int medium_shadow; char FUNC_NAME[] = "modified_hot_compositing"; ts_subset = (short int**)allocate_2d_array(TOTAL_IMAGE_BANDS, valid_date_count, 
sizeof(short int)); if(ts_subset == NULL) { RETURN_ERROR ("Allocating ts_subset memory", FUNC_NAME, ERROR); } ts_subset_selected_shadow = (short int*)malloc(valid_date_count*sizeof(short int)); if(ts_subset_selected_shadow == NULL) { RETURN_ERROR ("Allocating ts_subset_selected_shadow memory", FUNC_NAME, ERROR); } ts_subset_selected_blue = (short int*)malloc(valid_date_count*sizeof(short int)); if(ts_subset_selected_blue == NULL) { RETURN_ERROR ("Allocating ts_subset_selected_blue memory", FUNC_NAME, ERROR); } for(i = 0; i < valid_date_count; i++) { if((valid_date_array[i] > lower_ordinal - 1) && (valid_date_array[i] < upper_ordinal + 1)) { for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { ts_subset[j][valid_count_window] = buf[j][i]; if(j == NIR_INDEX) { //ts_subset_selected_shadow[valid_count_window] = (buf[BLUE_INDEX][i] + buf[RED_INDEX][i] + buf[GREEN_INDEX][i])/3; ts_subset_selected_shadow[valid_count_window] = buf[NIR_INDEX][i]; //printf("%i\n", ts_subset_selected[valid_count_window]); } if(j == BLUE_INDEX) { ts_subset_selected_blue[valid_count_window] = buf[BLUE_INDEX][i]; //printf("%i\n", ts_subset_selected[valid_count_window]); } } valid_count_window++; } } /********************************************/ /* condition 1: zero valid observation */ /********************************************/ if(valid_count_window < 3) { for(i = 0; i < TOTAL_IMAGE_BANDS; i++) { out_compositing[i][i_col] = -9999; } return SUCCESS; } //single_mean_rmse(ts_subset_selected, 0, valid_count_window - 1, &rmse, &mean); //quick_sort_shortint(ts_subset_selected, 0, valid_count_window - 1); //m = valid_count_window / 2; //medium_shadow = (ts_subset_selected[m] + ts_subset_selected[m - 1] + ts_subset_selected[m + 1])/3; single_median_variogram(ts_subset_selected_shadow, 0, valid_count_window - 1, &variogram_shadow, &medium_shadow); //single_median_quantile(ts_subset_selected_blue, 0, valid_count_window - 1, &quantile_blue, &medium_blue); //single_median_variogram(ts_subset_selected_hot, 0, 
valid_count_window - 1, &variogram_hot, &medium_hot); //penalty_slope = - 9.0 / (1000.0 * variogram); // penalty_slope = - 33.0 / (50.0 * variogram); // penalty_intercept = PENALTY_INTERCEPT; // for(i = 0; i < valid_count_window; i++) // { // if (ts_subset_selected[i] - medium < - 1.5 * variogram) // { // ts_subset_id[i] = 1; // } // else if (ts_subset_selected[i] - medium < - 2 * variogram) // { // ts_subset_id[i] = 2; // } // else if (ts_subset_selected[i] - medium < - 3 * variogram) // { // ts_subset_id[i] = 3; // } // else // { // ts_subset_id[i] = 0; // } // } // for(i = 0; i < valid_count_window; i++) // { // if (ts_subset_selected[i] - medium < - 1.5 * variogram) // { // ts_subset_id[i] = 1; // } // else // { // ts_subset_id[i] = 0; // } // } /********************************************/ /* condition 2: standard procedures */ /********************************************/ double ratio; for(i = 0; i < valid_count_window; i++) { wt_cloud = (double) 1.0 / (ts_subset_selected_blue[i] * ts_subset_selected_blue[i]); if (ts_subset_selected_shadow[i] < medium_shadow) { ratio = (double)ts_subset_selected_shadow[i] / medium_shadow; wt_shadow = ratio * ratio * ratio * ratio; } else wt_shadow = 1.0; wt = wt_cloud * wt_shadow; //wt = (float)1/(abs(buf[BLUE_INDEX][i] - 0.5 * buf[RED_INDEX][i])); for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { index_sum[j] = index_sum[j] + ts_subset[j][i] * wt; } wt_sum = wt_sum + wt; } for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { out_compositing[j][i_col] = (short int)(index_sum[j] / wt_sum); //out_compositing[j][i_col] = (short int)valid_count_window; } /* free memory*/ status = free_2d_array(ts_subset); if (status != SUCCESS) { RETURN_ERROR ("Freeing memory: ts_subset\n", FUNC_NAME, FAILURE); } free(ts_subset_selected_shadow); free(ts_subset_selected_blue); return SUCCESS; } /****************************************************************************** MODULE: valid_obs_count PURPOSE: count valid observation for each pixels RETURN VALUE: Type = 
int (SUCCESS, ERROR or FAILURE) HISTORY: Date Programmer Reason -------- --------------- ------------------------------------- 06/22/2019 Su Ye Original Development ******************************************************************************/ int valid_obs_count ( short int **buf, /* I: pixel-based time series */ int *valid_date_array, /* I: valid date time series */ int valid_date_count, /* I: the number of valid dates */ int lower_ordinal, int upper_ordinal, int i_col, short int **out_compositing /* O: outputted compositing results for four bands */ ) { int index_sum[TOTAL_IMAGE_BANDS]; int i, j, status; for(i = 0; i < TOTAL_IMAGE_BANDS; i++) index_sum[i] = 0; int valid_count_window = 0; short int** ts_subset; short int* ts_subset_selected; char FUNC_NAME[] = "modified_hot_compositing"; ts_subset = (short int**)allocate_2d_array(TOTAL_IMAGE_BANDS, valid_date_count, sizeof(short int)); if(ts_subset == NULL) { RETURN_ERROR ("Allocating ts_subset memory", FUNC_NAME, ERROR); } ts_subset_selected = (short int*)malloc(valid_date_count*sizeof(short int)); if(ts_subset_selected == NULL) { RETURN_ERROR ("Allocating ts_subset_selected memory", FUNC_NAME, ERROR); } for(i = 0; i < valid_date_count; i++) { if((valid_date_array[i] > lower_ordinal - 1) && (valid_date_array[i] < upper_ordinal + 1)) { for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { ts_subset[j][valid_count_window] = buf[j][i]; if(j == NIR_INDEX) { ts_subset_selected[valid_count_window] = buf[NIR_INDEX][i]; //printf("%i\n", ts_subset_selected[valid_count_window]); } } valid_count_window++; } } for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { out_compositing[j][i_col] = (short int)valid_count_window; } status = free_2d_array((void **)ts_subset); if (status != SUCCESS) { RETURN_ERROR ("Freeing memory: ts_subset\n", FUNC_NAME, FAILURE); } free(ts_subset_selected); return SUCCESS; } /****************************************************************************** MODULE: medium_compositing PURPOSE: Running compositing for 
pixel-based time series by adding shadow consideration RETURN VALUE: Type = int (SUCCESS, ERROR or FAILURE) HISTORY: Date Programmer Reason -------- --------------- ------------------------------------- 06/22/2019 Su Ye Original Development ******************************************************************************/ int medium_compositing ( short int **buf, /* I: pixel-based time series */ int *valid_date_array, /* I: valid date time series */ int valid_date_count, /* I: the number of valid dates */ int lower_ordinal, int upper_ordinal, int i_col, short int **out_compositing /* O: outputted compositing results for four bands */ ) { int i, j, m; int status; double wt; double index_sum[TOTAL_IMAGE_BANDS]; for(i = 0; i < TOTAL_IMAGE_BANDS; i++) index_sum[i] = 0; double wt_sum = 0; int valid_count_window = 0; short int** ts_subset; short int* ts_subset_selected; int* ts_subset_selected_index; char FUNC_NAME[] = "medium_compositing"; ts_subset = (short int*)allocate_2d_array(TOTAL_IMAGE_BANDS, valid_date_count, sizeof(short int)); if(ts_subset == NULL) { RETURN_ERROR ("Allocating ts_subset memory", FUNC_NAME, ERROR); } for(i = 0; i < valid_date_count; i++) { if((valid_date_array[i] > lower_ordinal - 1) && (valid_date_array[i] < upper_ordinal + 1)) { for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { ts_subset[j][valid_count_window] = buf[j][i]; } valid_count_window++; } } ts_subset_selected = (short int*)malloc(valid_count_window*sizeof(short int)); if(ts_subset_selected == NULL) { RETURN_ERROR ("Allocating ts_subset_selected memory", FUNC_NAME, ERROR); } ts_subset_selected_index = (int*)malloc(valid_count_window*sizeof(int)); if(ts_subset_selected == NULL) { RETURN_ERROR ("Allocating ts_subset_selected_index memory", FUNC_NAME, ERROR); } for(i = 0; i < valid_count_window; i++) { ts_subset_selected[i] = ts_subset[NIR_INDEX][i]; ts_subset_selected_index[i] = i; //printf("%i\n", ts_subset_selected[valid_count_window]); } /********************************************/ /* 
condition 1: zero valid observation */ /********************************************/ if(valid_count_window == 0) { for(i = 0; i < TOTAL_IMAGE_BANDS; i++) { out_compositing[i][i_col] = -9999; } return SUCCESS; } quick_sort_shortint_index(ts_subset_selected, ts_subset_selected_index, 0, valid_count_window - 1); // for (j = 0; j < valid_count_window; j++) // { // printf("%i %i\n", ts_subset_selected[j], ts_subset_selected_index[j]); // } m = valid_count_window / 2; if (valid_count_window % 2 == 0) { //printf("%f\n", var[m-1]); //printf("%f\n", var[m]); for(i = 0; i < TOTAL_IMAGE_BANDS; i++) { out_compositing[i][i_col] = (short int)((ts_subset[i][ts_subset_selected_index[m-1]] + ts_subset[i][ts_subset_selected_index[m]]) / 2); } } else { for(i = 0; i < TOTAL_IMAGE_BANDS; i++) { out_compositing[i][i_col] = (short int)(ts_subset[i][ts_subset_selected_index[m]]); } //printf("%i\n", out_compositing[i][i_col]); } status = free_2d_array((void**)ts_subset); if (status != SUCCESS) { RETURN_ERROR ("Freeing memory: ts_subset\n", FUNC_NAME, FAILURE); } free(ts_subset_selected); return SUCCESS; } /****************************************************************************** MODULE: compositing_scanline PURPOSE: Running compositing for scanline-based time series RETURN VALUE: Type = int (SUCCESS, ERROR or FAILURE) HISTORY: Date Programmer Reason -------- --------------- ------------------------------------- 05/02/2019 Su Ye Original Development ******************************************************************************/ int compositing_scanline ( short int **buf, /* I: scanline-based time series */ int **valid_datearray_scanline, /* I: valid date time series */ int *valid_datecount_scanline, /* I: the number of valid dates */ int lower_ordinal, /* I: lower ordinal date */ int upper_ordinal, /* I: upper_ordinal for temporal range of composition */ int num_samples, /* I: the pixel number in a row */ int num_scenes, /* I: the number of scenes */ short int **out_compositing, /* O: 
outputted compositing results for four bands */
    int method                        /* I: the compositing method{1 - fitting-weighted;
                                            2 - fitting-normal; 3 - hot; 4 - average} */
)
{
    int j;
    int i_col;
    short int **tmp_buf;              /* This is the image bands buffer, valid pixel only*/
    char FUNC_NAME[] = "compositing_scanline";
    int b_diagnosis = FALSE;          /* diagnosis recording disabled for scanline runs */
    Output_t* rec_c;                  /* per-pixel diagnosis record handed to fitting_compositing */

    tmp_buf = (short int **) allocate_2d_array (TOTAL_IMAGE_BANDS, num_scenes, sizeof (short int));
    if(tmp_buf == NULL)
    {
        RETURN_ERROR("ERROR allocating tmp_buf memory", FUNC_NAME, FAILURE);
    }

    rec_c = malloc(sizeof(Output_t));
    if(rec_c == NULL)
    {
        RETURN_ERROR("ERROR allocating rec_c memory", FUNC_NAME, FAILURE);
    }

    for(i_col = 0; i_col < num_samples; i_col++)
    {
        /* point tmp_buf at pixel i_col's slice of each band (no copy is made) */
        for(j = 0; j < TOTAL_IMAGE_BANDS; j++)
        {
            tmp_buf[j] = buf[j] + i_col * num_scenes;
        }

        /*weighted fitting*/
        if (1==method)
        {
            fitting_compositing(tmp_buf, valid_datearray_scanline[i_col], valid_datecount_scanline[i_col],
                                lower_ordinal, upper_ordinal, i_col, out_compositing, TRUE, TRUE, b_diagnosis, rec_c);
        }
        /*normal fitting*/
        else if (2==method)
        {
            fitting_compositing(tmp_buf, valid_datearray_scanline[i_col], valid_datecount_scanline[i_col],
                                lower_ordinal, upper_ordinal, i_col, out_compositing, TRUE, FALSE, b_diagnosis, rec_c);
        }
        else if (3==method)
        {
            hot_compositing(tmp_buf, valid_datearray_scanline[i_col], valid_datecount_scanline[i_col],
                            lower_ordinal, upper_ordinal, i_col, out_compositing);
        }
        else if (4==method)
        {
            average_compositing(tmp_buf, valid_datearray_scanline[i_col], valid_datecount_scanline[i_col],
                                lower_ordinal, upper_ordinal, i_col, out_compositing);
        }
        else if (5==method)
        {
            fitting_compositing(tmp_buf, valid_datearray_scanline[i_col], valid_datecount_scanline[i_col],
                                lower_ordinal, upper_ordinal, i_col, out_compositing, FALSE, FALSE, b_diagnosis, rec_c);
        }
        else if (6==method)
        {
            modified_hot_compositing(tmp_buf, valid_datearray_scanline[i_col], valid_datecount_scanline[i_col],
                                     lower_ordinal, upper_ordinal, i_col, out_compositing);
        }
        else if (7==method)
        {
            medium_compositing(tmp_buf, valid_datearray_scanline[i_col], valid_datecount_scanline[i_col],
                               lower_ordinal, upper_ordinal, i_col, out_compositing);
        }
        else if (8==method)
        {
            valid_obs_count(tmp_buf, valid_datearray_scanline[i_col], valid_datecount_scanline[i_col],
                            lower_ordinal, upper_ordinal, i_col, out_compositing);
        }
        /* NOTE(review): an unknown method value silently leaves the output
           column untouched — confirm this is intentional */
    }

    free(rec_c);
    free_2d_array((void **)tmp_buf);  /* NOTE(review): return status ignored here — confirm intentional */

    return SUCCESS;
}

/******************************************************************************
MODULE:  fitting_compositing

PURPOSE: Running compositing for pixel-based time series

RETURN VALUE:
Type = int (SUCCESS, ERROR or FAILURE)

HISTORY:
Date        Programmer       Reason
--------    ---------------  -------------------------------------
05/02/2019  Su Ye            Original Development

******************************************************************************/
int fitting_compositing
(
    short int **buf,             /* I: pixel-based time series */
    int *valid_date_array,       /* I: valid date time series */
    int valid_date_count,        /* I: the number of valid dates */
    int lower_ordinal,
    int upper_ordinal,
    int i_col,
    short int **out_compositing, /* O: outputted compositing results for four bands */
    int bfit,
    int bweighted,
    int b_diagnosis,
    Output_t* rec_c
)
{
    char FUNC_NAME[] = "fitting_compositing";
    int status;
    float date_vario;            /* I: median date */
    float max_date_difference;   /* I: maximum difference between two neighbor dates */
    float adj_rmse[TOTAL_IMAGE_BANDS];  /* Adjusted RMSE for all bands */
    int *bl_ids;                 /* per-observation bad flags from the band tests */
    int n_clr;                   /* observations inside the compositing window */
    int n_clr_1;
    int n_outlier_1;
    int n_clr_2;
    int n_outlier_2;
    int k, b;
    int* clrx;                   /* dates of in-window observations */
    float **clry;                /* per-band values of in-window observations */
    int* clrx_1;
    float **clry_1;
    int* clrx_2;
    float **clry_2;
    int i;
    float C0; // intercept from each test output
    float C1; // slope from each test output

    clrx = (int*)calloc(valid_date_count, sizeof(int));
    clry = (float **) allocate_2d_array (TOTAL_IMAGE_BANDS, valid_date_count, sizeof (float));
    if (clry == NULL)
    {
        RETURN_ERROR ("Allocating clry memory", FUNC_NAME, FAILURE);
    }

    /**************************************************************/
    /*                                                            */
    /* select observations in the observation window              */
    /*                                                            */
    /**************************************************************/
    n_clr = 0;
    for(i = 0; i < valid_date_count; i++)
    {
        if((valid_date_array[i] > lower_ordinal - 1) && (valid_date_array[i] < upper_ordinal + 1))
        {
            clrx[n_clr] = valid_date_array[i];
            for(b = 0; b < TOTAL_IMAGE_BANDS; b++)
            {
                clry[b][n_clr] = (float)buf[b][i];
            }
            n_clr++;
        }
    }

    /********************************************/
    /*    condition 1: zero valid observation   */
    /********************************************/
    if(n_clr==0)
    {
        for(i = 0; i < TOTAL_IMAGE_BANDS; i++)
        {
            out_compositing[i][i_col] = -9999;
        }
        if(TRUE == b_diagnosis)
        {
            rec_c->condition = NOOBS_CONDITION;
        }
        free(clrx);
        status = free_2d_array((void **)clry);
        if (status != SUCCESS)
        {
            RETURN_ERROR ("Freeing memory: clry\n", FUNC_NAME, FAILURE);
        }
        return SUCCESS;
    }
    /**********************************************/
    /*   condition 2: inefficient observations    */
    /**********************************************/
    else if (n_clr < MIN_SAMPLE)
    {
        /* too few samples for a robust fit: fall back to the plain median */
        median_compositing(buf, valid_date_array, valid_date_count, i_col, out_compositing);
        if(TRUE == b_diagnosis)
        {
            rec_c->condition = INEFFICIENT_CONDITION;
        }
        free(clrx);
        status = free_2d_array((void **)clry);
        if (status != SUCCESS)
        {
            RETURN_ERROR ("Freeing memory: clry\n", FUNC_NAME, FAILURE);
        }
        return SUCCESS;
    }
    else
    {
        if(TRUE == b_diagnosis)
        {
            rec_c->condition = NORMAL_CONDITION;
        }
    }

    /**********************************************/
    /*      condition 3: standard procedure       */
    /**********************************************/
    bl_ids = (int *)calloc(n_clr, sizeof(int));
    if (bl_ids == NULL)
    {
        RETURN_ERROR("ERROR allocating bl_ids memory", FUNC_NAME, FAILURE);
    }
    clrx_1 = (int *)calloc(n_clr, sizeof(int));
    clry_1 = (float **) allocate_2d_array (TOTAL_IMAGE_BANDS, n_clr, sizeof (float));
    if (clry_1 == NULL)
    {
        RETURN_ERROR ("Allocating clry_1 memory", FUNC_NAME, FAILURE);
    }
    clrx_2 = (int *)calloc(n_clr,
sizeof(int)); clry_2 = (float **) allocate_2d_array (TOTAL_IMAGE_BANDS, n_clr, sizeof (float)); if (clry_2 == NULL) { RETURN_ERROR ("Allocating clry_2 memory", FUNC_NAME, FAILURE); } /**************************************************************/ /* */ /* calculate variogram for each band and dates. */ /* */ /**************************************************************/ status = adjust_median_variogram(clrx, clry, TOTAL_IMAGE_BANDS, 0, n_clr-1, &date_vario, &max_date_difference, adj_rmse); if (status != SUCCESS) { RETURN_ERROR("ERROR calling median_variogram routine", FUNC_NAME, FAILURE); } status = greenband_test(clrx, clry, 0, n_clr-1, adj_rmse[1], T_CONST_SINGLETAIL_9999, bl_ids, &C0, &C1); if (status != SUCCESS) { RETURN_ERROR("ERROR calling greenband_test", FUNC_NAME, FAILURE); } if(TRUE == b_diagnosis) { rec_c->C0_green = C0; rec_c->C1_green = C1; } /**************************************************/ /* */ /* remove outliers. */ /* */ /**************************************************/ n_clr_1 = 0; n_outlier_1 = 0; for(i = 0; i < n_clr; i++) { if(bl_ids[i] == 0) { clrx_1[n_clr_1] = clrx[i]; for (b = 0; b < TOTAL_IMAGE_BANDS; b++) { clry_1[b][n_clr_1] = clry[b][i]; } n_clr_1 = n_clr_1 + 1; } else { if(TRUE == b_diagnosis) { rec_c->outlier_dates_green[n_outlier_1] = clrx[i]; n_outlier_1 = n_outlier_1 + 1; } } } rec_c->n_outlier_green = n_outlier_1; /* if n_clr_1 < MIN_SAMPLE, means that green test failed, need to reset*/ if (n_clr_1 < MIN_SAMPLE) { n_clr_1 = 0; for(i = 0; i < n_clr; i++) { clrx_1[n_clr_1] = clrx[i]; for (b = 0; b < TOTAL_IMAGE_BANDS; b++) { clry_1[b][n_clr_1] = (float)clry[b][i]; } n_clr_1 = n_clr_1 + 1; } if(TRUE == b_diagnosis) rec_c->b_success_green = FAILURE; } else{ if(TRUE == b_diagnosis) rec_c->b_success_green = SUCCESS; } /**************************************************/ /* */ /* nir band test */ /* */ /**************************************************/ for (k = 0; k < n_clr_1; k++) bl_ids[k] = 0; status = nirband_test(clrx_1, 
clry_1, 0, n_clr_1-1, adj_rmse[3], T_CONST_SINGLETAIL_9999, bl_ids, &C0, &C1); if (status != SUCCESS) { RETURN_ERROR("ERROR calling nirband_test", FUNC_NAME, FAILURE); } if(TRUE == b_diagnosis) { rec_c->C0_nir = C0; rec_c->C1_nir = C1; } /**************************************************/ /* */ /* remove outliers. */ /* */ /**************************************************/ n_clr_2 = 0; n_outlier_2 = 0; for(i = 0; i < n_clr_1; i++) { if(bl_ids[i] == 0) { clrx_2[n_clr_2] = clrx_1[i]; for (b = 0; b < TOTAL_IMAGE_BANDS; b++) { clry_2[b][n_clr_2] = clry_1[b][i]; } n_clr_2 = n_clr_2 + 1; } else { if(TRUE == b_diagnosis) { rec_c->outlier_dates_nir[n_outlier_2] = clrx_1[i]; n_outlier_2 = n_outlier_2 + 1; } } } rec_c->n_outlier_nir = n_outlier_2; /* if n_clr_1 < MIN_SAMPLE, means that nir test failed, reset*/ if (n_clr_2 < MIN_SAMPLE) { for(i = 0; i < n_clr_1; i++) { n_clr_2 = 0; clrx_2[n_clr_2] = clrx_1[i]; for (b = 0; b < TOTAL_IMAGE_BANDS; b++) { clry_2[b][n_clr_2] = clry_1[b][i]; } n_clr_2 = n_clr_2 + 1; } if(TRUE == b_diagnosis) rec_c->b_success_nir = FAILURE; } else{ if(TRUE == b_diagnosis) rec_c->b_success_nir = SUCCESS; } if(bfit == TRUE) linear_fit_centerdate(clrx_2, clry_2, n_clr_2, 0, (lower_ordinal + upper_ordinal)/2,i_col, out_compositing, bweighted, rec_c->C0_final, rec_c->C1_final); else { float wt; int j; double index_sum[TOTAL_IMAGE_BANDS]; for(i = 0; i < TOTAL_IMAGE_BANDS; i++) index_sum[i] = 0; float wt_sum = 0; for(i = 0; i < n_clr_2; i++) { wt = (float)1/((clry_2[BLUE_INDEX][i] - 0.5 * clry_2[RED_INDEX][i]) * (clry_2[BLUE_INDEX][i] - 0.5 * clry_2[RED_INDEX][i])); for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { index_sum[j] = index_sum[j] + clry_2[j][i] * wt; } wt_sum = wt_sum + wt; } /********************************************/ /* condition 2: standard procedures */ /********************************************/ for(j = 0; j < TOTAL_IMAGE_BANDS; j++) { out_compositing[j][i_col] = (short int)(index_sum[j] / wt_sum); } } free(bl_ids); free(clrx); status = 
free_2d_array((void **)clry); if (status != SUCCESS) { RETURN_ERROR ("Freeing memory: clry\n", FUNC_NAME, FAILURE); } free(clrx_1); status = free_2d_array((void **)clry_1); if (status != SUCCESS) { RETURN_ERROR ("Freeing memory: clry_1\n", FUNC_NAME, FAILURE); } free(clrx_2); status = free_2d_array((void **)clry_2); if (status != SUCCESS) { RETURN_ERROR ("Freeing memory: clry_2\n", FUNC_NAME, FAILURE); } return SUCCESS; } //int fitting_compositing_scanline //( // short int **buf, /* I: scanline-based time series */ // int **valid_datearray_scanline, /* I: valid date time series */ // int *valid_datecount_scanline, /* I: the number of valid dates */ // int lower_ordinal, /* I: lower ordinal dates */ // int upper_ordinal, /* I: upper_ordinal for temporal range of composition */ // int num_samples, /* I: the pixel number in a row */ // int num_scenes, /* I: the number of scenes */ // short int **out_compositing /* O: outputted compositing results for four bands */ //) //{ // int i, j; // int i_col; // short int **tmp_buf; /* This is the image bands buffer, valid pixel only*/ // int result; // char FUNC_NAME[] = "compositing_scanline"; // tmp_buf = (short int **) allocate_2d_array (TOTAL_IMAGE_BANDS, num_scenes, sizeof (short int)); // if(tmp_buf == NULL) // { // RETURN_ERROR("ERROR allocating tmp_buf memory", FUNC_NAME, FAILURE); // } // for(i_col = 0; i_col < num_samples; i_col++) // { // for(j = 0; j < TOTAL_IMAGE_BANDS; j++) // { // tmp_buf[j] = buf[j] + i_col * num_scenes; // } // fitting_compositing(tmp_buf, valid_datearray_scanline[i_col], // valid_datecount_scanline[i_col], lower_ordinal, // upper_ordinal, i_col, out_compositing); // } // free_2d_array((void **)tmp_buf); // return SUCCESS; //}
{ "alphanum_fraction": 0.468115942, "avg_line_length": 30.7121661721, "ext": "c", "hexsha": "5ff1972e57c0244546f56bc21c3958c9c103783b", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0fe8819a51e069c1e010cea0975c51a2a8794c42", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "agroimpacts/imager", "max_forks_repo_path": "C/AFMapTSComposite/compositing.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "0fe8819a51e069c1e010cea0975c51a2a8794c42", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "agroimpacts/imager", "max_issues_repo_path": "C/AFMapTSComposite/compositing.c", "max_line_length": 136, "max_stars_count": 1, "max_stars_repo_head_hexsha": "0fe8819a51e069c1e010cea0975c51a2a8794c42", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "agroimpacts/imager", "max_stars_repo_path": "C/AFMapTSComposite/compositing.c", "max_stars_repo_stars_event_max_datetime": "2021-09-01T18:48:12.000Z", "max_stars_repo_stars_event_min_datetime": "2021-09-01T18:48:12.000Z", "num_tokens": 9795, "size": 41400 }
//////////////////////////////////////////////////////////// // // Copyright (c) 2018 Jan Filipowicz, Filip Turobos // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
// //////////////////////////////////////////////////////////// #ifndef SALESMAN_EXAMPLE_PATH_EVALUATOR_H #define SALESMAN_EXAMPLE_PATH_EVALUATOR_H #include <functional> #include <numeric> #include <gsl/gsl_assert> #include "permutation.h" template<class Matrix> class path_evaluator { public: using matrix_type = Matrix; using row_type = typename matrix_type::value_type; using value_type = typename row_type::value_type; explicit path_evaluator(const matrix_type& matrix); value_type operator()(const permutation& perm) const; private: matrix_type matrix; }; template<class Matrix> inline path_evaluator<Matrix>::path_evaluator(const matrix_type& matrix) : matrix(matrix) {} template<class Matrix> inline auto path_evaluator<Matrix>::operator()(const permutation& perm) const -> value_type { Expects(perm.size() == matrix.size()); Expects(perm.size() > 0); const auto distance = [this](unsigned dest, unsigned src) { return gsl::at(gsl::at(matrix, src), dest); }; return std::inner_product(std::next(perm.begin()), perm.end(), perm.begin(), distance(perm.front(), perm.back()), std::plus<>(), distance); } #endif
{ "alphanum_fraction": 0.7184006879, "avg_line_length": 38.7666666667, "ext": "h", "hexsha": "e74e1e8ab63d4ce44cbcc9a1a632446ea8912adb", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8a093d42ca935556e7d6ccaee5a57b8dd0c6b3e2", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "SirEmentaler/Eugenics-Wars", "max_forks_repo_path": "SalesmanExample/path_evaluator.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "8a093d42ca935556e7d6ccaee5a57b8dd0c6b3e2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "SirEmentaler/Eugenics-Wars", "max_issues_repo_path": "SalesmanExample/path_evaluator.h", "max_line_length": 140, "max_stars_count": null, "max_stars_repo_head_hexsha": "8a093d42ca935556e7d6ccaee5a57b8dd0c6b3e2", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "SirEmentaler/Eugenics-Wars", "max_stars_repo_path": "SalesmanExample/path_evaluator.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 496, "size": 2326 }
/* usleep, inet_aton */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_blas.h>
#include "agent.h"

/*
 * Graph with 6 nodes -> dmax = 5.
 * The sampling frequency must satisfy fs > 2*dmax/pi,
 * hence ts < pi/(2*dmax) = 0.314 sec;
 * the Runge-Kutta step h is taken equal to ts.
 */
int _agents_number = 6;   /* number of agents in the network */
float h = 0.05;           /* Runge-Kutta integration step (see note above) */
int N = 32768;            /* number of integration steps of the main loop */
double x0[6] = {0.00, 0.00, 0.00, 0.00, 0.00, 0.00};  /* initial x state, one entry per agent */
double z0[6] = {1.00, 0.00, 0.00, 0.00, 0.00, 0.00};  /* initial z state, one entry per agent */

/* Log verbosity flags; log_level is a bit mask of these values. */
typedef enum {
    log_debug = 1,
    log_normal = 2,
    log_details = 4
} log_t;

int log_level = log_normal;
/*int log_level = log_debug | log_normal | log_details;*/

/* Message exchanged between an agent and the router. */
struct msg_t {
    int msg_id;     /* sender identity; -1 marks the killer (shutdown) message */
    int msg_time;   /* integration stage index (0..3) the payload belongs to */
    float msg_x;    /* x component of the stage value */
    float msg_z;    /* z component of the stage value */
};

/* Agent object state (mostly unused by the current implementation). */
struct agent_t {
    int agent_id;
    int agent_x;
    int agent_z;
    pthread_mutex_t agent_mutex;
};

typedef struct msg_t *msg;

static pthread_mutex_t agent_class_mutex;   /* class-wide mutex shared by all agent objects */
static int agentObjectsNumber = 0;          /* count of currently allocated agent objects */

/* Initialize the class-wide mutex; called when the first agent is allocated. */
static void initClassAgent() {
    pthread_mutex_init(&(agent_class_mutex), NULL);
}

/* Destroy the class-wide mutex; called when the last agent is freed. */
static void cleanClassAgent() {
    pthread_mutex_destroy(&(agent_class_mutex));
}

/*
 * Allocate and initialize a new agent object.
 * NOTE(review): the malloc result is not checked for NULL.
 */
agent allocAgent() {
    agent agent_M_;
    if (agentObjectsNumber == 0)
        initClassAgent();
    agentObjectsNumber++;
    agent_M_ = (agent) malloc(sizeof (struct agent_t));
    agent_M_->agent_id = 0;
    pthread_mutex_init(&(agent_M_->agent_mutex), NULL);
    return agent_M_;
}

/* Release an agent object and, if it was the last one, the class mutex. */
void freeAgent(agent _F_agent) {
    pthread_mutex_destroy(&(_F_agent->agent_mutex));
    free(_F_agent);
    _F_agent = NULL;   /* only clears the local copy of the pointer */
    agentObjectsNumber--;
    if (agentObjectsNumber == 0)
        cleanClassAgent();
}

/* Accessor stubs: not implemented, they only discard the argument. */
void setIdAgent(agent agent_p) {
    agent_p = NULL;
}

void getIdAgent(agent agent_p) {
    agent_p = NULL;
}

void setAgentState(agent agent_p) {
    agent_p = NULL;
}

void getAgentState(agent agent_p) {
    agent_p = NULL;
}

/*
 * Open a TCP connection to _ip_address_remote:_ip_port_remote and return the
 * connected socket. On any failure the process is terminated with exit(1)
 * (the while around connect() can therefore run at most once).
 */
static int connectTo(char *_ip_address_remote, int _ip_port_remote) {
    int socket_M_;
    struct sockaddr_in sockaddr_in_remote_;
    memset(&sockaddr_in_remote_, 0, sizeof (sockaddr_in_remote_));
    sockaddr_in_remote_.sin_family = AF_INET;
    sockaddr_in_remote_.sin_port = htons(_ip_port_remote);
    inet_aton(_ip_address_remote, &(sockaddr_in_remote_.sin_addr));
    if ((socket_M_ = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
        perror("connectTo: socket() Error\n");
        exit(1);
    }
    while (connect(socket_M_, (struct sockaddr *) & sockaddr_in_remote_, sizeof (struct sockaddr_in)) == -1) {
        perror("connectTo: connect() Error\n");
        exit(1);
    }
    return socket_M_;
}

/*
 * Send one msg_t on the socket; exits on error.
 * NOTE(review): a short (partial) send is not handled.
 */
static void sendMsg(int _socket, msg _msg) {
    if (send(_socket, _msg, sizeof (struct msg_t), 0) == -1) {
        perror("sendMsg: send() Error\n");
        exit(1);
    };
}

/*
 * Receive one msg_t from the socket into a freshly malloc'd buffer; exits on
 * error. The caller owns the returned buffer.
 * NOTE(review): callers in runAgent never free it — one msg_t leaks per
 * received message; a short (partial) recv is also not handled.
 */
static msg recvMsg(int _socket) {
    msg _M_msg = (struct msg_t *) malloc(sizeof (struct msg_t));
    if (recv(_socket, _M_msg, sizeof (struct msg_t), 0) == -1) {
        perror("recvMsg: recv() Error\n");
        exit(1);
    }
    return _M_msg;
}

/* Allocate and fill the parameter struct handed to runAgent. */
parametri_agent allocParamAgent(int _identity, char *_agent_router_ip, int _port_agent_to_router, int _port_router_to_agent) {
    parametri_agent _M_parametri_agent = (parametri_agent) malloc(sizeof (struct parametri_agent_t));
    _M_parametri_agent->identity = _identity;
    _M_parametri_agent->agent_router_ip = _agent_router_ip;
    _M_parametri_agent->port_router_to_agent = _port_router_to_agent;
    _M_parametri_agent->port_agent_to_router = _port_agent_to_router;
    return _M_parametri_agent;
}

/*
 * Thread body of one agent. It connects to the router on two sockets
 * (agent->router and router->agent), then runs N integration steps of a
 * 4-stage Runge-Kutta-like scheme. At each stage it broadcasts its own (x,z)
 * stage value and waits for one message per neighbour, accumulating the
 * neighbours' contributions into sx[]/sz[] indexed by the stage carried in
 * msg_time, with r[] counting messages received per stage. The per-stage
 * derivatives (x[k] = deg*z - sum(z_j), z[k] = sum(x_j) - deg*x) correspond to
 * a Laplacian-coupled oscillator network.
 * NOTE(review): the stage combination (e.g. x[3] = x[1] + ...) differs from
 * textbook RK4 coefficients — presumably intentional, TODO confirm.
 */
void *runAgent(void *_parametri_agent) {
    parametri_agent _parametri = (parametri_agent) _parametri_agent;
    int _identity = _parametri->identity;
    char *_agent_router_ip;
    int _port_router_to_agent;
    int _port_agent_to_router;
    int agent_to_router_socket_;
    int router_to_agent_socket_;
    int _agent_neighborhood;   /* number of neighbours of this agent */
    int i, c;
    msg _M_msg_in, _M_msg_out;
    double x[4] = {0.00, 0.00, 0.00, 0.00};    /* x[0] state, x[1..3] stage values */
    double z[4] = {0.00, 0.00, 0.00, 0.00};    /* z[0] state, z[1..3] stage values */
    double r[4] = {0.00, 0.00, 0.00, 0.00};    /* per-stage count of received messages */
    double sx[4] = {0.00, 0.00, 0.00, 0.00};   /* per-stage sum of neighbours' x */
    double sz[4] = {0.00, 0.00, 0.00, 0.00};   /* per-stage sum of neighbours' z */
    FILE *agent_log;
    char agent_log_file_name[FILENAME_MAX];
    gsl_matrix *_adjacency_matrix;
    FILE *agent_adjacency_matrix_file;
    /* Load the network topology from disk.
       NOTE(review): fopen result is not checked before gsl_matrix_fscanf. */
    _adjacency_matrix = gsl_matrix_alloc(_agents_number, _agents_number);
    agent_adjacency_matrix_file = fopen("adjacency_matrix.txt", "r");
    gsl_matrix_fscanf(agent_adjacency_matrix_file, _adjacency_matrix);
    fclose(agent_adjacency_matrix_file);
    /*
     * Open the per-agent log file (unbuffered, so lines appear immediately).
     */
    sprintf(agent_log_file_name, "agent_%d.log", _identity);
    agent_log = fopen(agent_log_file_name, "w");
    if (agent_log == NULL) {
        perror("Errore apertura file: \"agent.log\"\n");
        exit(1);
    };
    setbuf(agent_log, NULL);
    if (log_level & log_debug) fprintf(agent_log, "A%d: Open log\n", _identity);
    if (log_level & log_debug) fprintf(agent_log, "A%d: Parametri di invocazione:\n", _identity);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > _identity = %d\n", _identity, _identity);
    _agent_router_ip = _parametri->agent_router_ip;
    if (log_level & log_debug) fprintf(agent_log, "A%d: > _agent_router_ip = %s\n", _identity, _agent_router_ip);
    _port_agent_to_router = _parametri->port_agent_to_router;
    if (log_level & log_debug) fprintf(agent_log, "A%d: > _port_agent_to_router = %d\n", _identity, _port_agent_to_router);
    _port_router_to_agent = _parametri->port_router_to_agent;
    if (log_level & log_debug) fprintf(agent_log, "A%d: > _port_router_to_agent = %d\n", _identity, _port_router_to_agent);
    if (log_level & log_debug) fprintf(agent_log, "A%d: Memoria:\n", _identity);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > h = %2.8g\n", _identity, h);
    /*
     * Count this agent's neighbours by summing its column of the
     * adjacency matrix (agent identities are 1-based, columns 0-based).
     */
    _agent_neighborhood = 0;
    for (i = 0; i < _agents_number; i++) {
        _agent_neighborhood = _agent_neighborhood + (int) gsl_matrix_get(_adjacency_matrix, i, _identity - 1);
        if (log_level & log_debug) fprintf(agent_log, "A%d: > gsl_matrix_get(%d,%d) = %d\n", _identity, i, _identity - 1, (int) gsl_matrix_get(_adjacency_matrix, i, _identity - 1));
    };
    if (log_level & log_debug) fprintf(agent_log, "A%d: > _agent_neighborhood = %d\n", _identity, _agent_neighborhood);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > _agents_number = %d\n", _identity, _agents_number);
    _M_msg_out = (struct msg_t *) malloc(sizeof (struct msg_t));
    if (log_level & log_debug) fprintf(agent_log, "A%d: > alloc(msg_out)\n", _identity);
    if (log_level & log_debug) fprintf(agent_log, "A%d: Socket:\n", _identity);
    agent_to_router_socket_ = connectTo(_agent_router_ip, _port_agent_to_router);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > connectTo(%s,%d)\n", _identity, _agent_router_ip, _port_agent_to_router);
    router_to_agent_socket_ = connectTo(_agent_router_ip, _port_router_to_agent);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > connectTo(%s,%d)\n", _identity, _agent_router_ip, _port_router_to_agent);
    if (log_level & log_debug) fprintf(agent_log, "A%d: Signal:\n", _identity);
    /*
     * Set the identity carried by every outgoing message.
     */
    _M_msg_out->msg_id = _identity;
    /*
     * Initialize the internal stage counter.
     */
    _M_msg_out->msg_time = 0;
    /*
     * Send a message carrying the agent's identity on both in/out channels,
     * so the router can associate each socket with this agent.
     */
    _M_msg_out->msg_x = 0.00;
    _M_msg_out->msg_z = 0.00;
    sendMsg(router_to_agent_socket_, _M_msg_out);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > snd (%d,%d,%2.8g,%2.8g) > %d\n", _identity, _M_msg_out->msg_id, _M_msg_out->msg_time, _M_msg_out->msg_x, _M_msg_out->msg_z, _port_router_to_agent);
    sendMsg(agent_to_router_socket_, _M_msg_out);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > snd (%d,%d,%2.8g,%2.8g) > %d\n", _identity, _M_msg_out->msg_id, _M_msg_out->msg_time, _M_msg_out->msg_x, _M_msg_out->msg_z, _port_agent_to_router);
    /*
     * Main integration loop.
     */
    if (log_level & log_debug) fprintf(agent_log, "A%d: Loop:\n", _identity);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > for i=[0,%d]\n", _identity, N);
    if (log_level & log_debug) fprintf(agent_log, "A%d: ----- Loop Start -----\n", _identity);
    x[0] = x0[_identity - 1];
    z[0] = z0[_identity - 1];
    for (c = 0; c < N; c++) {
        if (log_level & log_debug) fprintf(agent_log, "A%d: > i = %d\n", _identity, c);
        /*
         * Broadcast the current state (stage 0).
         */
        _M_msg_out->msg_time = 0;
        _M_msg_out->msg_x = x[0];
        _M_msg_out->msg_z = z[0];
        sendMsg(agent_to_router_socket_, _M_msg_out);
        if (log_level & log_normal) fprintf(agent_log, "%2.8g %2.8g\n", _M_msg_out->msg_x, _M_msg_out->msg_z);
        if (log_level & log_details) fprintf(agent_log, "A%d: > snd (%d,%d,%2.8g,%2.8g)\n", _identity, _M_msg_out->msg_id, _M_msg_out->msg_time, _M_msg_out->msg_x, _M_msg_out->msg_z);
        /*
         * Stage k1: gather one stage-0 message per neighbour, then compute
         * the first stage derivative.
         * NOTE(review): all four rcv logs print _M_msg_out->msg_time;
         * _M_msg_in->msg_time was presumably intended.
         */
        while (r[0] < _agent_neighborhood) {
            _M_msg_in = recvMsg(router_to_agent_socket_);
            if (log_level & log_details) fprintf(agent_log, "A%d: > rcv (%d,%d,%2.8g,%2.8g)\n", _identity, _M_msg_in->msg_id, _M_msg_out->msg_time, _M_msg_in->msg_x, _M_msg_in->msg_z);
            sx[_M_msg_in->msg_time] = sx[_M_msg_in->msg_time] + _M_msg_in->msg_x;
            sz[_M_msg_in->msg_time] = sz[_M_msg_in->msg_time] + _M_msg_in->msg_z;
            r[_M_msg_in->msg_time]++;
        }
        x[1] = r[0] * z[0] - sz[0];
        z[1] = sx[0] - r[0] * x[0];
        sx[0] = 0;
        sz[0] = 0;
        r[0] = 0;
        _M_msg_out->msg_time = 1;
        _M_msg_out->msg_x = x[1];
        _M_msg_out->msg_z = z[1];
        sendMsg(agent_to_router_socket_, _M_msg_out);
        if (log_level & log_details) fprintf(agent_log, "A%d: > snd (%d,%d,%2.8g,%2.8g)\n", _identity, _M_msg_out->msg_id, _M_msg_out->msg_time, _M_msg_out->msg_x, _M_msg_out->msg_z);
        /*
         * Stage k2: gather stage-1 messages, compute the second stage.
         */
        while (r[1] < _agent_neighborhood) {
            _M_msg_in = recvMsg(router_to_agent_socket_);
            if (log_level & log_details) fprintf(agent_log, "A%d: > rcv (%d,%d,%2.8g,%2.8g)\n", _identity, _M_msg_in->msg_id, _M_msg_out->msg_time, _M_msg_in->msg_x, _M_msg_in->msg_z);
            sx[_M_msg_in->msg_time] = sx[_M_msg_in->msg_time] + _M_msg_in->msg_x;
            sz[_M_msg_in->msg_time] = sz[_M_msg_in->msg_time] + _M_msg_in->msg_z;
            r[_M_msg_in->msg_time]++;
        }
        x[2] = x[1] + h * (r[1] * z[1] - sz[1]) / 2;
        z[2] = z[1] + h * (sx[1] - r[1] * x[1]) / 2;
        sx[1] = 0;
        sz[1] = 0;
        r[1] = 0;
        _M_msg_out->msg_time = 2;
        _M_msg_out->msg_x = x[2];
        _M_msg_out->msg_z = z[2];
        sendMsg(agent_to_router_socket_, _M_msg_out);
        if (log_level & log_details) fprintf(agent_log, "A%d: > snd (%d,%d,%2.8g,%2.8g)\n", _identity, _M_msg_out->msg_id, _M_msg_out->msg_time, _M_msg_out->msg_x, _M_msg_out->msg_z);
        /*
         * Stage k3: gather stage-2 messages, compute the third stage.
         */
        while (r[2] < _agent_neighborhood) {
            _M_msg_in = recvMsg(router_to_agent_socket_);
            if (log_level & log_details) fprintf(agent_log, "A%d: > rcv (%d,%d,%2.8g,%2.8g)\n", _identity, _M_msg_in->msg_id, _M_msg_out->msg_time, _M_msg_in->msg_x, _M_msg_in->msg_z);
            sx[_M_msg_in->msg_time] = sx[_M_msg_in->msg_time] + _M_msg_in->msg_x;
            sz[_M_msg_in->msg_time] = sz[_M_msg_in->msg_time] + _M_msg_in->msg_z;
            r[_M_msg_in->msg_time]++;
        }
        x[3] = x[1] + h * (r[2] * z[2] - sz[2]) / 2;
        z[3] = z[1] + h * (sx[2] - r[2] * x[2]) / 2;
        sx[2] = 0;
        sz[2] = 0;
        r[2] = 0;
        _M_msg_out->msg_time = 3;
        _M_msg_out->msg_x = x[3];
        _M_msg_out->msg_z = z[3];
        sendMsg(agent_to_router_socket_, _M_msg_out);
        if (log_level & log_details) fprintf(agent_log, "A%d: > snd (%d,%d,%2.8g,%2.8g)\n", _identity, _M_msg_out->msg_id, _M_msg_out->msg_time, _M_msg_out->msg_x, _M_msg_out->msg_z);
        /*
         * Stage k4 and state update: gather stage-3 messages, then combine
         * the stages into the next (x[0], z[0]).
         */
        while (r[3] < _agent_neighborhood) {
            _M_msg_in = recvMsg(router_to_agent_socket_);
            if (log_level & log_details) fprintf(agent_log, "A%d: > rcv (%d,%d,%2.8g,%2.8g)\n", _identity, _M_msg_in->msg_id, _M_msg_out->msg_time, _M_msg_in->msg_x, _M_msg_in->msg_z);
            sx[_M_msg_in->msg_time] = sx[_M_msg_in->msg_time] + _M_msg_in->msg_x;
            sz[_M_msg_in->msg_time] = sz[_M_msg_in->msg_time] + _M_msg_in->msg_z;
            r[_M_msg_in->msg_time]++;
        }
        x[0] = x[0] + h * (x[1] + x[2] + x[3] + h * (r[3] * z[3] - sz[3]) / 2) / 3;
        z[0] = z[0] + h * (z[1] + z[2] + z[3] + h * (sx[3] - r[3] * x[3]) / 2) / 3;
        sx[3] = 0;
        sz[3] = 0;
        r[3] = 0;
    }
    if (log_level & log_debug) fprintf(agent_log, "A%d: ----- Loop End ------\n", _identity);
    if (log_level & log_debug) fprintf(agent_log, "A%d: Signal:\n", _identity);
    /*
     * Send the killer message (msg_id == -1) to tell the router to shut down
     * this agent's channel.
     */
    _M_msg_out->msg_id = -1;
    _M_msg_out->msg_x = 0;
    _M_msg_out->msg_z = 0;
    sendMsg(agent_to_router_socket_, _M_msg_out);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > snd (%d,%d,%2.8g,%2.8g) > %d\n", _identity, _M_msg_out->msg_id, _M_msg_out->msg_time, _M_msg_out->msg_x, _M_msg_out->msg_z, _port_agent_to_router);
    if (log_level & log_debug) fprintf(agent_log, "A%d: Socket:\n", _identity);
    /*
     * Close the sockets.
     */
    close(router_to_agent_socket_);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > close(%d)\n", _identity, _port_router_to_agent);
    close(agent_to_router_socket_);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > close(%d)\n", _identity, _port_agent_to_router);
    if (log_level & log_debug) fprintf(agent_log, "A%d: Memoria:\n", _identity);
    /*
     * Free the variables used.
     * NOTE(review): _adjacency_matrix is never freed (gsl_matrix_free),
     * and the messages returned by recvMsg leak — see recvMsg.
     */
    free(_M_msg_out);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > free(msg_out)\n", _identity);
    free(_parametri_agent);
    if (log_level & log_debug) fprintf(agent_log, "A%d: > parametri\n", _identity);
    /*
     * Flush and close the log file.
     */
    if (log_level & log_debug) fprintf(agent_log, "A%d: Close log\n", _identity);
    fflush(agent_log);
    fclose(agent_log);
    return NULL;
}
{ "alphanum_fraction": 0.6313547864, "avg_line_length": 30.2494887526, "ext": "c", "hexsha": "cd307fc7e4a06419fa2c7be2d50231e73d73e10b", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0455a86a79ef2c5a817b9a1ec26d9025703d22ab", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "vittorioc/VirtualAgent", "max_forks_repo_path": "agent.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "0455a86a79ef2c5a817b9a1ec26d9025703d22ab", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "vittorioc/VirtualAgent", "max_issues_repo_path": "agent.c", "max_line_length": 205, "max_stars_count": 1, "max_stars_repo_head_hexsha": "0455a86a79ef2c5a817b9a1ec26d9025703d22ab", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "vittorioc/VirtualAgent", "max_stars_repo_path": "agent.c", "max_stars_repo_stars_event_max_datetime": "2017-01-29T01:05:11.000Z", "max_stars_repo_stars_event_min_datetime": "2017-01-29T01:05:11.000Z", "num_tokens": 4762, "size": 14792 }
/*
Ballistic: a software to benchmark ballistic models.

AUTHORS: Javier Burguete Tolosa.

Copyright 2018, AUTHORS.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

	1. Redistributions of source code must retain the above copyright notice,
		this list of conditions and the following disclaimer.

	2. Redistributions in binary form must reproduce the above copyright notice,
		this list of conditions and the following disclaimer in the
		documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY AUTHORS ``AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
 * \file method.c
 * \brief Source file to define the numerical method data and functions.
 * \author Javier Burguete Tolosa.
 * \copyright Copyright 2018.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <gsl/gsl_rng.h>
#include <libxml/parser.h>
#include <glib.h>
#include "config.h"
#include "utils.h"
#include "equation.h"
#include "method.h"

#define DEBUG_METHOD 0          ///< macro to debug the numerical method functions.

/**
 * Function to init the numerical method.
 */
void
method_init (Method * m,        ///< Method struct.
             unsigned int nsteps,       ///< number of steps.
             unsigned int order)        ///< order of accuracy.
{
#if DEBUG_METHOD
  fprintf (stderr, "method_init: start\n");
#endif
  m->nsteps = nsteps;
  m->order = order;
#if DEBUG_METHOD
  fprintf (stderr, "method_init: end\n");
#endif
}

/**
 * Function to init the variables of the numerical method.
 *
 * Allocates nsteps+1 rows of 3 long doubles each for the r0, r1 and r2
 * arrays (presumably one triple of spatial components per step — TODO
 * confirm against method.h) and zeroes the et0/et1 error accumulators.
 */
void
method_init_variables (Method * m)      ///< Method struct.
{
  unsigned int i, n;
#if DEBUG_METHOD
  fprintf (stderr, "method_init_variables: start\n");
  fprintf (stderr, "method_init_variables: nsteps=%u\n", m->nsteps);
#endif
  n = m->nsteps + 1;
  m->r0 = (long double **) g_slice_alloc (n * sizeof (long double *));
  m->r1 = (long double **) g_slice_alloc (n * sizeof (long double *));
  m->r2 = (long double **) g_slice_alloc (n * sizeof (long double *));
  for (i = 0; i < n; ++i)
    {
      m->r0[i] = (long double *) g_slice_alloc (3 * sizeof (long double));
      m->r1[i] = (long double *) g_slice_alloc (3 * sizeof (long double));
      m->r2[i] = (long double *) g_slice_alloc (3 * sizeof (long double));
    }
  m->et0 = m->et1 = 0.L;
#if DEBUG_METHOD
  fprintf (stderr, "method_init_variables: end\n");
#endif
}

/**
 * Function to calculate the following numerical step size based on error
 * control.
 *
 * The new step is dt scaled by min(alpha, (emt*dt/e0)^(1/(order-1))), i.e.
 * the growth is capped by alpha and otherwise driven by the ratio of the
 * error tolerance per time (emt) to the current error estimate e0
 * (presumably set elsewhere before this is called — TODO confirm).
 *
 * \return next time step size.
 */
long double
method_dt (Method * m,          ///< Method struct.
           long double dt)      ///< actual time step size.
{
  long double dt2;
#if DEBUG_METHOD
  fprintf (stderr, "method_dt: start\n");
#endif
  dt2 = dt * fminl (m->alpha,
                    powl (m->emt * dt / m->e0, 1.L / (m->order - 1.L)));
#if DEBUG_METHOD
  fprintf (stderr, "method_dt: dt=%Lg\n", dt2);
  fprintf (stderr, "method_dt: end\n");
#endif
  return dt2;
}

/**
 * Function to free the memory used by the Method struct.
 *
 * Releases, in reverse order of allocation, every buffer allocated by
 * method_init_variables.
 */
void
method_delete (Method * m)      ///< Method struct.
{
  unsigned int i, n;
#if DEBUG_METHOD
  fprintf (stderr, "method_delete: start\n");
  fprintf (stderr, "method_delete: nsteps=%u\n", m->nsteps);
#endif
  /* count down from nsteps+1-1 to 0, freeing each row triple */
  n = i = m->nsteps + 1;
  do
    {
      --i;
      g_slice_free1 (3 * sizeof (long double), m->r2[i]);
      g_slice_free1 (3 * sizeof (long double), m->r1[i]);
      g_slice_free1 (3 * sizeof (long double), m->r0[i]);
    }
  while (i);
  g_slice_free1 (n * sizeof (long double *), m->r2);
  g_slice_free1 (n * sizeof (long double *), m->r1);
  g_slice_free1 (n * sizeof (long double *), m->r0);
#if DEBUG_METHOD
  fprintf (stderr, "method_delete: end\n");
#endif
}

/**
 * Function to read the numerical method data on a XML node.
 *
 * Reads the error control type into m->error_dt: type 0 disables error
 * control (emt = 0), type 1 additionally reads the alpha, beta and
 * error-per-time parameters; any other value is rejected. On failure the
 * corresponding message is registered through error_add().
 *
 * \return 1 on success, 0 on error.
 */
int
method_read_xml (Method * m,    ///< Method struct.
                 xmlNode * node)        ///< XML node.
{
  /* indexed by the error code e selected on each failure path */
  const char *message[] = {
    "Bad dt",
    "Bad alpha",
    "Bad beta",
    "Bad error per time",
    "Unknown error control type"
  };
  int e, error_code;
#if DEBUG_METHOD
  fprintf (stderr, "method_read_xml: start\n");
#endif
  m->error_dt = xml_node_get_uint (node, XML_TIME_STEP, &error_code);
  if (error_code)
    {
      e = 0;
      goto fail;
    }
  switch (m->error_dt)
    {
    case 0:
      m->emt = 0.L;
      break;
    case 1:
      m->alpha = xml_node_get_float (node, XML_ALPHA, &error_code);
      if (error_code)
        {
          e = 1;
          goto fail;
        }
      m->beta = xml_node_get_float (node, XML_BETA, &error_code);
      if (error_code)
        {
          e = 2;
          goto fail;
        }
      m->emt = xml_node_get_float (node, XML_ERROR_TIME, &error_code);
      if (error_code)
        {
          e = 3;
          goto fail;
        }
      break;
    default:
      e = 4;
      goto fail;
    }
#if DEBUG_METHOD
  fprintf (stderr, "method_read_xml: success\n");
  fprintf (stderr, "method_read_xml: end\n");
#endif
  return 1;

fail:
  error_add (message[e]);
#if DEBUG_METHOD
  fprintf (stderr, "method_read_xml: error\n");
  fprintf (stderr, "method_read_xml: end\n");
#endif
  return 0;
}
{ "alphanum_fraction": 0.6454330974, "avg_line_length": 27.3456221198, "ext": "c", "hexsha": "816e5e026cab0d5cc6bad18e759d4a895aa24494", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-06-24T07:19:47.000Z", "max_forks_repo_forks_event_min_datetime": "2020-06-24T07:19:47.000Z", "max_forks_repo_head_hexsha": "e557bce6e63bb667f1e698cff6e68013bb4e5e6f", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "jburguete/ballistic", "max_forks_repo_path": "1.1.0/method.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "e557bce6e63bb667f1e698cff6e68013bb4e5e6f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "jburguete/ballistic", "max_issues_repo_path": "1.1.0/method.c", "max_line_length": 80, "max_stars_count": 1, "max_stars_repo_head_hexsha": "e557bce6e63bb667f1e698cff6e68013bb4e5e6f", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "jburguete/ballistic", "max_stars_repo_path": "1.1.0/method.c", "max_stars_repo_stars_event_max_datetime": "2020-08-02T14:03:09.000Z", "max_stars_repo_stars_event_min_datetime": "2020-08-02T14:03:09.000Z", "num_tokens": 1586, "size": 5934 }
/*
 * File: spce_sect.h
 * External definitions for spce_sect.c
 *
 *
 */
#ifndef spce_sect_H
#define spce_sect_H

#include <math.h>
#include <gsl/gsl_roots.h>
#include "aXe_grism.h"
#include "spc_trace_functions.h"

/**
    @package spce_sect
*/

/** A structure to support old gsl 0.9 gsl_interval.
    Holds the bracketing interval [lower, upper] handed to the root solver. */
typedef struct
{
  double lower;   /* lower bound of the bracketing interval */
  double upper;   /* upper bound of the bracketing interval */
}
gsl_interval;

/**
   A structure describing the function that has a zero at the
   section point, including the measuring point and the slope of the line
   through it, the function describing the spectrum trace, functions
   related to gsl and a flag to initiate special handling of vertically
   oriented objects.

   @see fill_in_sectionfun
   @see free_sectionfun
*/
typedef struct
{
  int vertical;                 /* Special handling if orientation is close to
                                   vertical */
  double m;                     /* slope of line through x0, y0 that is to */
  double x0, y0;                /* intersect the trace */
  trace_func *func;             /* Parametrization of the trace */
  /* the GSL stuff has to be kept in here to avoid excessive re-allocing
     of the solver for each pixel */
  gsl_interval *interv;         /* bracketing interval reused across pixels */
  gsl_function *gslfun;         /* GSL wrapper around the section function */
  gsl_root_fsolver *solver;     /* root solver reused across pixels */
}
sectionfun;

/* public */

/* Prepares sf for root finding from the beam and the line inclination.
   NOTE(review): return-value convention is defined in spce_sect.c — confirm
   there before relying on it. */
int fill_in_sectionfun (sectionfun * const sf, const double inclination,
                        const beam * const b);

/* Finds the section point of the line through (x, y) with the trace,
   writing the result through res. */
int find_section_point (sectionfun *sf, const double x, const double y,
                        double *const res);

/* Releases the GSL resources held inside sf. */
void free_sectionfun (sectionfun * const sf);

#endif
{ "alphanum_fraction": 0.7103825137, "avg_line_length": 22.875, "ext": "h", "hexsha": "73d7b0bd412f50f1f8e7248ad5d6dccaac3e5887", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f57de55daf77de21d5868ace08b69090778d5975", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "sosey/pyaxe", "max_forks_repo_path": "cextern/src/spce_sect.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "f57de55daf77de21d5868ace08b69090778d5975", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "sosey/pyaxe", "max_issues_repo_path": "cextern/src/spce_sect.h", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "f57de55daf77de21d5868ace08b69090778d5975", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "sosey/pyaxe", "max_stars_repo_path": "cextern/src/spce_sect.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 373, "size": 1464 }
/* specfunc/bessel_Inu.c
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000 Gerard Jungman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/* Author:  G. Jungman */

#include <config.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_sf_exp.h>
#include <gsl/gsl_sf_gamma.h>
#include <gsl/gsl_sf_bessel.h>

#include "error.h"

#include "bessel.h"
#include "bessel_temme.h"

/*-*-*-*-*-*-*-*-*-*-*-* Functions with Error Codes *-*-*-*-*-*-*-*-*-*-*-*/

/* Scaled modified Bessel function exp(-x) I_nu(x) for x >= 0, nu >= 0.
 * Three regimes are used:
 *   - small x (x^2 < 10(nu+1)): Taylor series via gsl_sf_bessel_IJ_taylor_e,
 *     then scaled by exp(-x);
 *   - large nu^2 + x^2: uniform asymptotic expansion;
 *   - otherwise: compute K_mu, K_{mu+1} (Temme series for x < 2, Steed/Temme
 *     CF2 for x >= 2), recurse K upward to order nu, get the ratio
 *     I_{nu+1}/I_nu from the CF1 continued fraction, and solve for I_nu. */
int
gsl_sf_bessel_Inu_scaled_e(double nu, double x, gsl_sf_result * result)
{
  /* CHECK_POINTER(result) */

  if(x < 0.0 || nu < 0.0) {
    DOMAIN_ERROR(result);
  }
  else if(x*x < 10.0*(nu+1.0)) {
    /* small-argument regime: Taylor series, rescaled by exp(-x) */
    gsl_sf_result b;
    double ex = exp(-x);
    int stat = gsl_sf_bessel_IJ_taylor_e(nu, x, 1, 100, GSL_DBL_EPSILON, &b);
    result->val  = b.val * ex;
    result->err  = b.err * ex;
    result->err += 2.0 * GSL_DBL_EPSILON * fabs(result->val);
    return stat;
  }
  else if(0.5/(nu*nu + x*x) < GSL_ROOT3_DBL_EPSILON) {
    /* large nu^2 + x^2: uniform asymptotic expansion is accurate enough */
    return gsl_sf_bessel_Inu_scaled_asymp_unif_e(nu, x, result);
  }
  else {
    /* intermediate regime: K-recursion plus CF1 ratio */
    int N = (int)(nu + 0.5);
    double mu = nu - N;       /* -1/2 <= mu <= 1/2 */
    double K_mu, K_mup1, Kp_mu;
    double K_nu, K_nup1, K_num1;
    double I_nu_ratio;
    int stat_Irat;
    int stat_Kmu;
    int n;

    /* obtain K_mu, K_mup1 */
    if(x < 2.0) {
      stat_Kmu = gsl_sf_bessel_K_scaled_temme(mu, x, &K_mu, &K_mup1, &Kp_mu);
    }
    else {
      stat_Kmu = gsl_sf_bessel_K_scaled_steed_temme_CF2(mu, x, &K_mu, &K_mup1, &Kp_mu);
    }

    /* recurse forward to obtain K_num1, K_nu
     * (upward recurrence in the order is stable for K) */
    K_nu   = K_mu;
    K_nup1 = K_mup1;

    for(n=0; n<N; n++) {
      K_num1 = K_nu;
      K_nu   = K_nup1;
      K_nup1 = 2.0*(mu+n+1)/x * K_nu + K_num1;
    }

    /* calculate I_{nu+1}/I_nu */
    stat_Irat = gsl_sf_bessel_I_CF1_ser(nu, x, &I_nu_ratio);

    /* solve for I_nu */
    result->val = 1.0/(x * (K_nup1 + I_nu_ratio * K_nu));
    result->err = GSL_DBL_EPSILON * (0.5*N + 2.0) * fabs(result->val);

    return GSL_ERROR_SELECT_2(stat_Kmu, stat_Irat);
  }
}

/* Unscaled I_nu(x): evaluate the scaled function and multiply by exp(x)
 * with error propagation (overflow is reported by the exp_mult step). */
int
gsl_sf_bessel_Inu_e(double nu, double x, gsl_sf_result * result)
{
  gsl_sf_result b;
  int stat_I = gsl_sf_bessel_Inu_scaled_e(nu, x, &b);
  int stat_e = gsl_sf_exp_mult_err_e(x, fabs(x*GSL_DBL_EPSILON),
                                        b.val, b.err,
                                        result);
  return GSL_ERROR_SELECT_2(stat_e, stat_I);
}

/*-*-*-*-*-*-*-*-*-* Functions w/ Natural Prototypes *-*-*-*-*-*-*-*-*-*-*/

#include "eval.h"

double gsl_sf_bessel_Inu_scaled(double nu, double x)
{
  EVAL_RESULT(gsl_sf_bessel_Inu_scaled_e(nu, x, &result));
}

double gsl_sf_bessel_Inu(double nu, double x)
{
  EVAL_RESULT(gsl_sf_bessel_Inu_e(nu, x, &result));
}
{ "alphanum_fraction": 0.6299765808, "avg_line_length": 27.7723577236, "ext": "c", "hexsha": "aa6d06af3b878846d7f989b5056985451df9380c", "lang": "C", "max_forks_count": 40, "max_forks_repo_forks_event_max_datetime": "2022-03-03T23:23:37.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-26T15:31:16.000Z", "max_forks_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "manggoguy/parsec-modified", "max_forks_repo_path": "pkgs/libs/gsl/src/specfunc/bessel_Inu.c", "max_issues_count": 12, "max_issues_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_issues_repo_issues_event_max_datetime": "2022-03-13T03:54:24.000Z", "max_issues_repo_issues_event_min_datetime": "2020-12-15T08:30:19.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "manggoguy/parsec-modified", "max_issues_repo_path": "pkgs/libs/gsl/src/specfunc/bessel_Inu.c", "max_line_length": 87, "max_stars_count": 64, "max_stars_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "manggoguy/parsec-modified", "max_stars_repo_path": "pkgs/libs/gsl/src/specfunc/bessel_Inu.c", "max_stars_repo_stars_event_max_datetime": "2022-03-24T13:26:53.000Z", "max_stars_repo_stars_event_min_datetime": "2015-03-06T00:30:56.000Z", "num_tokens": 1133, "size": 3416 }
/** Function for dealiasing weather radar radial velocities.
 * @file libdealias.c
 * @author Adriaan Dokter, adapted from dealias.c RAVE functions by Gunther Haase
 * @date 2016-09-22
 */

#include "libdealias.h"
#include <stdio.h>
#include <gsl/gsl_multimin.h>
#include <gsl/gsl_vector.h>

/** Prints a diagnostic table of observed vs dealiased radial velocities
 *  to stderr, one row per point. */
void printDealias(const float *points, const int nDims, const float nyquist[],
        const float vradObs[], float vradDealias[], const int nPoints,
        const int iProfileType, const int iLayer, const int iPass){
    fprintf(stderr,"#iProfile iLayer iPass azim elev nyquist vrad vradd\n");
    for(int i=0; i<nPoints;i++){
        fprintf(stderr,"%i %i %i %3.1f %3.1f %3.1f %3.1f %3.1f\n",
            iProfileType,iLayer,iPass,points[i*nDims],points[i*nDims+1],
            nyquist[i],vradObs[i],vradDealias[i]);
    }
}

/** Cost of a uniform test wind (u,v): summed absolute distance, on the
 *  torus-projected (x,y) plane, between the observed radial velocities and
 *  those predicted by the test wind (Haase et al. 2004, JAOT). NaN
 *  contributions are skipped.
 *  @return sum of absolute differences (lower is a better fit). */
double test_field(float u, float v, const float *points, const float *pointsTrigon,
        const int nPoints, const int nDims, double x[], double y[],
        const float nyquist[]){
    double xt, yt, e, vm;
    double esum = 0;
    for (int iPoint=0; iPoint<nPoints; iPoint++) {
        // calculate the radial velocities for the test wind fields, eq 4 in Haase et al. 2004 jaot
        vm = (u*pointsTrigon[3*iPoint] + v*pointsTrigon[3*iPoint+1])*pointsTrigon[3*iPoint+2];
        // Eq. 7 for the test radial wind:
        xt = nyquist[iPoint]/M_PI * cos(vm*M_PI/nyquist[iPoint]);
        // Eq. 6 for the test radial wind:
        yt = nyquist[iPoint]/M_PI * sin(vm*M_PI/nyquist[iPoint]);
        // summed absolute differences between observed velocity field and test fields
        e = fabs(xt-x[iPoint]) + fabs(yt-y[iPoint]);
        if (!isnan(e)) {
            esum = esum + e;
        }
    }
    return esum;
}

/** GSL adapter for test_field: unpacks (u,v) from the vector and the
 *  remaining arguments from the params pointer array.
 *  The expected params layout is the 7-element void* array built in
 *  dealias_points. */
double test_field_gsl(const gsl_vector *uv, void* params){
    double u,v;
    float *points = ((void **) params)[0];
    float *pointsTrigon = ((void **) params)[1];
    int nPoints = *(int *) ((void **) params)[2];
    int nDims = *(int *) ((void **) params)[3];
    double *x = ((void **) params)[4];
    double *y = ((void **) params)[5];
    float *nyquist = ((void **) params)[6];
    u = gsl_vector_get(uv, 0);
    v = gsl_vector_get(uv, 1);
    return test_field(u, v, points, pointsTrigon, nPoints, nDims, x, y, nyquist);
}

/** Minimizes test_field_gsl with the Nelder-Mead simplex (nmsimplex2),
 *  starting from uv; on convergence the fitted (u,v) are written back
 *  into uv.
 *  @return 1 on convergence, 0 if the minimizer did not reach the
 *  tolerance within 100 iterations. */
int fit_field_gsl(gsl_vector *uv, void *params){
    gsl_vector *ss;
    // define which minimizer to use
    const gsl_multimin_fminimizer_type *T = gsl_multimin_fminimizer_nmsimplex2;
    gsl_multimin_fminimizer *s = NULL;
    int iter = 0;
    int status;
    double size;
    double u1 = 0;
    double v1 = 0;

    // Set initial step sizes to 1
    ss = gsl_vector_alloc(2);
    gsl_vector_set_all(ss, 1);

    // Initialize method
    gsl_multimin_function minex_func;
    minex_func.n = 2;
    minex_func.f = &test_field_gsl;
    minex_func.params = params;

    s = gsl_multimin_fminimizer_alloc (T, 2);
    gsl_multimin_fminimizer_set (s, &minex_func, uv, ss);

    // minimize by iteration
    do {
        iter++;
        status = gsl_multimin_fminimizer_iterate(s);
        if (status) break;
        size = gsl_multimin_fminimizer_size (s);
        status = gsl_multimin_test_size (size, 1e-2);
        if (status == GSL_SUCCESS) {
            #ifdef FPRINTFON
            printf ("converged to minimum on iteration ");
            printf ("%d at %10.3e %10.3e f() = %7.3f size = %.3f\n",
                    iter, gsl_vector_get (s->x, 0), gsl_vector_get (s->x, 1),
                    s->fval, size);
            #endif
            // write the converged solution back to the caller's vector
            u1=gsl_vector_get(s->x, 0);
            v1=gsl_vector_get(s->x, 1);
            gsl_vector_set(uv, 0, u1);
            gsl_vector_set(uv, 1, v1);
        }
    } while (status == GSL_CONTINUE && iter < 100);

    #ifdef FPRINTFON
    fprintf(stdout,"Finished dealias at (x,y)=%f,%f at f()=%f ...\n",u1,v1,s->fval);
    #endif

    // clean up
    gsl_vector_free(ss);
    gsl_multimin_fminimizer_free (s);

    if (status != GSL_SUCCESS) return 0;
    return 1;
}

/** Dealiases the observed radial velocities vo into vradDealias by fitting a
 *  uniform test wind (Haase et al. 2004, JAOT): a coarse grid of candidate
 *  (u,v) winds is scanned, the best one seeds a simplex fit, and each
 *  observation is then unfolded to the Nyquist multiple closest to the
 *  fitted wind's radial projection.
 *  @return 1 on success, 0 if the minimizer failed. */
int dealias_points(const float *points, const int nDims, const float nyquist[],
        const double NI_MIN, const float vo[], float vradDealias[],
        const int nPoints){
    int i, j, n, m, eind, fitOk;
    double min1, esum, u1, v1, min2, dmy;
    // number of rows
    m = DEALIAS_VAF;
    // number of columns
    n = DEALIAS_NF;
    // max number of folds of nyquist interval to test for
    double MVA=2*ceil(DEALIAS_VMAX/(2*NI_MIN));
    // polarscan matrix, torus projected x coordinate, eq. 6 Haase et al. 2004 jaot
    double *x = RAVE_CALLOC ((size_t)nPoints, sizeof(double));
    // polarscan matrix, torus projected y coordinate, eq. 7 Haase et al. 2004 jaot
    double *y = RAVE_CALLOC ((size_t)nPoints, sizeof(double));
    // U-components of test velocity fields
    double *uh = RAVE_CALLOC ((size_t)(m*n), sizeof(double));
    // V-components of test velocity fields
    double *vh = RAVE_CALLOC ((size_t)(m*n), sizeof(double));
    // radial velocities of the best fitting test field
    double *vt1 = RAVE_CALLOC ((size_t)nPoints, sizeof(double));
    // array with trigonometric conversions of the points array
    // FIX: element type is float, so allocate with sizeof(float) (was sizeof(double))
    float *pointsTrigon = RAVE_CALLOC ((size_t)(3*nPoints), sizeof(float));

    // map measured data to 3D
    for (i=0; i<nPoints; i++) {
        x[i] = nyquist[i]/M_PI * cos(vo[i]*M_PI/nyquist[i]);
        y[i] = nyquist[i]/M_PI * sin(vo[i]*M_PI/nyquist[i]);
    }

    // trigonometric conversion of points array (compute once to speed up code)
    for (int iPoint=0; iPoint<nPoints; iPoint++) {
        pointsTrigon[3*iPoint+0] = sin(points[nDims*iPoint]*DEG2RAD);
        pointsTrigon[3*iPoint+1] = cos(points[nDims*iPoint]*DEG2RAD);
        pointsTrigon[3*iPoint+2] = cos(points[nDims*iPoint+1]*DEG2RAD);
    }

    // Setting up the u and v component of the test velocity fields:
    // index n=DEALIAS_NF gives number of azimuthal directions (default n=40, i.e. steps of 360/40=9 degrees)
    // index m=DEALIAS_VAF/NI_MIN*DEALIAS_VMAX gives number of speeds (maximum speed is DEALIAS_VMAX, steps of NI_MIN/DEALIAS_VAF)
    for (i=0; i<n; i++) {
        for (j=0; j<m; j++) {
            *(uh+i*m+j) = DEALIAS_VMAX/DEALIAS_VAF*(j+1) * sin(2*M_PI/DEALIAS_NF*i);
            *(vh+i*m+j) = DEALIAS_VMAX/DEALIAS_VAF*(j+1) * cos(2*M_PI/DEALIAS_NF*i);
        }
    }

    min1 = 1e32;
    eind = 0;
    gsl_vector *uv;
    uv = gsl_vector_alloc(2);
    void *params[7] = {(void *) points, (void *) pointsTrigon, (void *) &nPoints,
                       (void *) &nDims, (void *) x, (void *) y, (void *) nyquist};

    // try several test velocity fields for use as starting point in GSL fit
    for (i=0; i<m*n; i++) {
        gsl_vector_set(uv, 0, *(uh+i));
        gsl_vector_set(uv, 1, *(vh+i));
        esum = test_field_gsl(uv, &params);
        if (esum<min1) {
            min1 = esum;
            eind = i;
        }
    }
    // FIX: select the best candidate once, after the scan (was re-assigned
    // on every iteration inside the loop)
    u1 = *(uh+eind);
    v1 = *(vh+eind);
    gsl_vector_set(uv, 0, u1);
    gsl_vector_set(uv, 1, v1);

    #ifdef FPRINTFON
    // FIX: report the cost of the chosen start (min1), not of the last candidate tried
    fprintf(stdout,"Start dealiasing at (x,y)=%f,%f at f()=%f ...\n",u1,v1,min1);
    #endif

    fitOk = fit_field_gsl(uv, &params);

    if(!fitOk) goto cleanup;

    // FIX: read the fitted wind components back from uv; previously u1,v1
    // still held the pre-fit starting guess, so the simplex fit result was
    // never used below
    u1 = gsl_vector_get(uv, 0);
    v1 = gsl_vector_get(uv, 1);

    // the radial velocity of the best fitting test velocity field:
    for (int iPoint=0; iPoint<nPoints; iPoint++) {
        *(vt1+iPoint) = (u1*sin(points[nDims*iPoint]*DEG2RAD)
                         + v1*cos(points[nDims*iPoint]*DEG2RAD))
                        *cos(points[nDims*iPoint+1]*DEG2RAD);
    }

    // dealias the observed velocities using the best test velocity field
    for (int iPoint=0; iPoint<nPoints; iPoint++) {
        min2 = 1e32;
        dmy = 0;
        float diffVTest = (*(vt1+iPoint)-*(vo+iPoint));
        float dv = 0;
        // NOTE(review): if every candidate below evaluates to NaN,
        // vradDealias[iPoint] is left unchanged — confirm callers
        // pre-initialize the output array.
        for (i=0; i<MVA+1; i++) {
            // get a candidate velocity fold, i.e. integer multiple of nyquist velocity
            dv = nyquist[iPoint]*(2*i-MVA);
            // checking how many folds we have, vt1-vo is the residual between the real velocity
            // and the folded velocity; which equals a multiple of the nyquist interval
            dmy = fabs(dv-diffVTest);
            if ((dmy<min2) && (!isnan(dmy))) {
                // add the aliased interval to the observed velocity field, and obtain dealiased velocity
                *(vradDealias+iPoint) = *(vo+iPoint) + dv;
                min2 = dmy;
            }
        } // loop MVA
    } // loop over points

    cleanup:
        RAVE_FREE(x);
        RAVE_FREE(y);
        RAVE_FREE(uh);
        RAVE_FREE(vh);
        RAVE_FREE(vt1);
        RAVE_FREE(pointsTrigon);
        gsl_vector_free(uv);

    if(fitOk) return 1;
    else return 0;
}
{ "alphanum_fraction": 0.5928937403, "avg_line_length": 35.4861660079, "ext": "c", "hexsha": "bfaff6526ac4f40eef362ed7537310b8d6d9e998", "lang": "C", "max_forks_count": 17, "max_forks_repo_forks_event_max_datetime": "2022-02-11T14:38:24.000Z", "max_forks_repo_forks_event_min_datetime": "2016-04-22T09:15:57.000Z", "max_forks_repo_head_hexsha": "61ad71ae02bbd1ad1a8bc75792f757ea9d281ce0", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adokter/radar", "max_forks_repo_path": "lib/libdealias.c", "max_issues_count": 143, "max_issues_repo_head_hexsha": "61ad71ae02bbd1ad1a8bc75792f757ea9d281ce0", "max_issues_repo_issues_event_max_datetime": "2022-03-15T14:54:43.000Z", "max_issues_repo_issues_event_min_datetime": "2015-11-07T12:41:26.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adokter/radar", "max_issues_repo_path": "lib/libdealias.c", "max_line_length": 196, "max_stars_count": 17, "max_stars_repo_head_hexsha": "61ad71ae02bbd1ad1a8bc75792f757ea9d281ce0", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adokter/radar", "max_stars_repo_path": "lib/libdealias.c", "max_stars_repo_stars_event_max_datetime": "2022-02-14T11:32:32.000Z", "max_stars_repo_stars_event_min_datetime": "2017-11-20T04:22:05.000Z", "num_tokens": 2752, "size": 8978 }
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> //#include<ctype.h> #include<string.h> #include <gsl/gsl_fit.h> #include "system_evol.h" #include "initial_conditions.h" #include "solver.h" #include "chaotic_variables.h" #define Distance(x, y) (sqrt((x) * (x) + (y) * (y))) #define M 1 //masa de la particula #define R sqrt(9) // define una region efectiva del potencial de radio R. #define T 0 #define X 1 #define Y 2 #define VX 3 #define VY 4 #define En 5 extern double dt, t, phi; //declaradas en system_evol.c
{ "alphanum_fraction": 0.6930147059, "avg_line_length": 20.9230769231, "ext": "h", "hexsha": "b8b3171ecbd9a9043b5bd5ccc3b83d2f200abc58", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9d97128fdf990fae28143ef1c2450aff2ddef66f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "DanielEstrada971102/Proyecto", "max_forks_repo_path": "headers/allvars.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "9d97128fdf990fae28143ef1c2450aff2ddef66f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "DanielEstrada971102/Proyecto", "max_issues_repo_path": "headers/allvars.h", "max_line_length": 73, "max_stars_count": null, "max_stars_repo_head_hexsha": "9d97128fdf990fae28143ef1c2450aff2ddef66f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "DanielEstrada971102/Proyecto", "max_stars_repo_path": "headers/allvars.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 166, "size": 544 }
#ifndef BASIS_PURSUIT_E_H #define BASIS_PURSUIT_E_H #include "Matrix.h" #include "LADMM.h" #include <string> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <stdio.h> /* printf */ #include <time.h> #include <fstream> #include <algorithm> #include <iomanip> #include <ctime> #include <math.h> //This class solves problem of the form f(x)+g(x) under the constraint Mx=c by IPALM_APPROX; // where f(x)=0 //and g(x)=\frac{lambda2}{2}\|x\|_2+lambda1\|x\|_1. template<typename L, typename D> class Basis_pursuit_e: public LADMM<L, D> { private: D lambda1; D lambda2; Matrix<L,D> my_M; Matrix<L,D> my_A; D val_lambda_f; protected: public: Basis_pursuit_e(const char* Matrix_file,D val_lambda1, D val_lambda2) :LADMM<L,D>(), my_M(Matrix_file) { lambda1=val_lambda1; lambda2=val_lambda2; this->lambda_f= 0; } L get_n(){return my_M.nfeatures;} L get_m(){return my_M.nsamples;} inline D value_of_g_j(D x, L i){ return lambda2*x*x/2+lambda1*fabs(x); } inline D value_of_f_j(D x, L i){return 0;} inline D value_of_h_j(D x, L i){ return 0; } inline D gradient_of_f_j(D x, L i){return 0;} inline D prox_of_h_j(D x1,D x2, L i){ return my_M.b[i]; } inline D prox_of_g_j(D x1,D x2, L i){ D new_x; if(x1*x2> lambda1) new_x=(x1*x2- lambda1)/(x2+lambda2); else if(x1*x2< -lambda1) new_x=(x1*x2+ lambda1)/(x2+ lambda2); else new_x=0; return new_x; } inline void set_matrix_M(){ this->data_M=my_M; } inline void set_matrix_A(){ this->data_A.nsamples=0; this->data_A.nfeatures=this->data_M.nfeatures; this->data_A.nnz=0; this->data_A.ptr.resize(1,0); this->data_A.ptr_t.resize(this->data_M.nfeatures+1,0); } void LADMM_solver(D beta_0, D rho,vector<D> & x0,vector<D> & y0, vector<D> & lambda0, L max_nb_outer, L p_N_1, string filename1, D time){ this->ADMM_solve_with_Linear(beta_0, rho,x0,y0, lambda0, max_nb_outer, p_N_1, filename1, time); } }; #endif
{ "alphanum_fraction": 0.6403846154, "avg_line_length": 18.0869565217, "ext": "h", "hexsha": "9942c104f68ce4fb16317efb6cf20c0cf8697573", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-01-15T04:23:24.000Z", "max_forks_repo_forks_event_min_datetime": "2021-01-15T04:23:24.000Z", "max_forks_repo_head_hexsha": "3d5d8c281411fdfd6379480429a1fbb9b21464ff", "max_forks_repo_licenses": [ "BSD-Source-Code" ], "max_forks_repo_name": "lifei16/supplementary_code", "max_forks_repo_path": "IPALM_OPENMP/Basis_pursuit_e.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "3d5d8c281411fdfd6379480429a1fbb9b21464ff", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-Source-Code" ], "max_issues_repo_name": "lifei16/supplementary_code", "max_issues_repo_path": "IPALM_OPENMP/Basis_pursuit_e.h", "max_line_length": 140, "max_stars_count": null, "max_stars_repo_head_hexsha": "3d5d8c281411fdfd6379480429a1fbb9b21464ff", "max_stars_repo_licenses": [ "BSD-Source-Code" ], "max_stars_repo_name": "lifei16/supplementary_code", "max_stars_repo_path": "IPALM_OPENMP/Basis_pursuit_e.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 687, "size": 2080 }
/* randist/gauss.c
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 2006 James Theiler, Brian Gough
 * Copyright (C) 2006 Charles Karney
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <config.h>
#include <math.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>

/* Two samplers are provided. The Polar (Box-Muller) method naturally yields
 * two deviates, but only one is returned so the functions stay re-entrant
 * (no static state). The Ratio method, in Leva's formulation, avoids most
 * log() calls and is the faster of the two when producing a single deviate
 * per call (Leva-Ratio ~368 ns vs Polar ~660 ns per call, gcc -O2 on an
 * 866MHz Pentium, averaged over 10^8 calls). */

/* Polar (Box-Mueller) method; See Knuth v2, 3rd ed, p122 */
double
gsl_ran_gaussian (const gsl_rng * r, const double sigma)
{
  double u, v, rsq;

  do
    {
      /* draw a point uniformly from the square [-1,1] x [-1,1] */
      u = 2 * gsl_rng_uniform_pos (r) - 1;
      v = 2 * gsl_rng_uniform_pos (r) - 1;

      rsq = u * u + v * v;
    }
  while (rsq > 1.0 || rsq == 0);        /* accept only the open unit disc */

  /* Box-Muller transform; one of the two deviates is discarded */
  return sigma * v * sqrt (-2.0 * log (rsq) / rsq);
}

/* Ratio method (Kinderman-Monahan); see Knuth v2, 3rd ed, p130.
 * K+M, ACM Trans Math Software 3 (1977) 257-260.
 * Implemented with Leva's quadratic-form bounds, which make the exact
 * (log-based) accept/reject test rare:
 * J. L. Leva, ACM Trans Math Software 18 (1992) 449-453 and 454-455. */
double
gsl_ran_gaussian_ratio_method (const gsl_rng * r, const double sigma)
{
  double u, v, dx, dy, Q;
  const double s = 0.449871;    /* Constants from Leva */
  const double t = -0.386595;
  const double a = 0.19600;
  const double b = 0.25472;
  const double r1 = 0.27597;
  const double r2 = 0.27846;

  do                            /* executed 1.369 times on average */
    {
      /* Draw P = (u, v) uniform in a rectangle enclosing the K+M region
         v^2 <= -4 u^2 log(u).  u lies in (0, 1] to avoid the singularity
         at u = 0; v lies in [-0.5, 0.5), with v = -0.5 rejected by the
         final clause below so the deviate is strictly symmetric about 0. */
      u = 1 - gsl_rng_uniform (r);
      v = gsl_rng_uniform (r) - 0.5;

      /* Scale factor 1.7156 > sqrt(8/e) for accuracy, but not by much,
         for efficiency. */
      v *= 1.7156;

      /* Leva's quadratic form Q */
      dx = u - s;
      dy = fabs (v) - t;
      Q = dx * dx + dy * (a * dy - b * dx);

      /* Accept when Q < r1; reject when Q > r2; otherwise fall back to the
         exact K+M test v^2 <= -4 u^2 log(u), which runs only 0.012 times
         per call on average. */
    }
  while (Q >= r1 && (Q > r2 || v * v > -4 * u * u * log (u)));

  return sigma * (v / u);       /* Return slope */
}

double
gsl_ran_gaussian_pdf (const double x, const double sigma)
{
  /* density of N(0, sigma^2) at x */
  double z = x / fabs (sigma);
  double p = (1 / (sqrt (2 * M_PI) * fabs (sigma))) * exp (-z * z / 2);
  return p;
}

/* Unit-variance conveniences */

double
gsl_ran_ugaussian (const gsl_rng * r)
{
  return gsl_ran_gaussian (r, 1.0);
}

double
gsl_ran_ugaussian_ratio_method (const gsl_rng * r)
{
  return gsl_ran_gaussian_ratio_method (r, 1.0);
}

double
gsl_ran_ugaussian_pdf (const double x)
{
  return gsl_ran_gaussian_pdf (x, 1.0);
}
{ "alphanum_fraction": 0.6357380688, "avg_line_length": 31.2847222222, "ext": "c", "hexsha": "2ceb80487501a49b406d6c75046fc963d0573484", "lang": "C", "max_forks_count": 40, "max_forks_repo_forks_event_max_datetime": "2022-03-03T23:23:37.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-26T15:31:16.000Z", "max_forks_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "manggoguy/parsec-modified", "max_forks_repo_path": "pkgs/libs/gsl/src/randist/gauss.c", "max_issues_count": 12, "max_issues_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_issues_repo_issues_event_max_datetime": "2022-03-13T03:54:24.000Z", "max_issues_repo_issues_event_min_datetime": "2020-12-15T08:30:19.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "manggoguy/parsec-modified", "max_issues_repo_path": "pkgs/libs/gsl/src/randist/gauss.c", "max_line_length": 83, "max_stars_count": 64, "max_stars_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "manggoguy/parsec-modified", "max_stars_repo_path": "pkgs/libs/gsl/src/randist/gauss.c", "max_stars_repo_stars_event_max_datetime": "2022-03-24T13:26:53.000Z", "max_stars_repo_stars_event_min_datetime": "2015-03-06T00:30:56.000Z", "num_tokens": 1386, "size": 4505 }
#pragma once

#include <stdlib.h>
#include <stdio.h>
#include <MathLib/MathLibDll.h>
#include <MathLib/MathLib.h>
#include <MathLib/ThreeTuple.h>
#include <gsl/matrix/gsl_matrix.h>

// Fast unchecked element access into a gsl_matrix (row i, column j); tda is
// the row stride ("trailing dimension") of the underlying storage.
#define MATRIX_AT(m, i, j) (*((m->data + ((i) * m->tda + (j)))))

class Vector3d;

/*====================================================================================================================================================================*
 | This class will be used to represent matrices of arbitrary sizes (m rows by n columns) that have elements of type double. The underlying data structure used by    |
 | this class is gsl's (Gnu Scientific Library) matrix class. This class also makes use of the ATLAS implementation of BLAS for some operations such as matrix-matrix |
 | multiplication. This class is meant to improve performance, not necessarily ease of use.                                                                           |
 *====================================================================================================================================================================*/
class MATHLIB_DECLSPEC Matrix {
	friend class Vector;
	friend class ParticleEngine;
protected:
	// this data structure holds all the matrix data (owned by this object)
	gsl_matrix *matrix;
public:
	/** constructor - creates an m rows by n columns matrix that is not initialized to any particular values */
	Matrix(int m, int n);
	/** default constructor */
	Matrix();
	/** copy constructor - performs a deep copy of the matrix passed in as a parameter. */
	Matrix(const Matrix& other);
	/** destructor. */
	~Matrix();

	/** loads the matrix with all zero values. */
	void loadZero();
	/** loads the matrix with 1's on the diagonal, 0's everywhere else - note: the matrix doesn't have to be square. */
	void loadIdentity();
	/** this method sets the current matrix to a 3x3 matrix that is equal to the outer product of the vectors a and b */
	void setToOuterproduct(const Vector3d& a, const Vector3d& b);
	/** this method resizes the current matrix to have m rows and n cols. If not enough space is allocated for it, then
	    a new matrix of correct dimensions is allocated. There is no guarantee with regards to the data that is contained
	    in the matrix after a resize operation. */
	void resizeTo(int m, int n);
	/** copy operator - performs a deep copy of the matrix passed in as a parameter. */
	Matrix& operator=(const Matrix &other);
	/** this method performs a shallow copy of the matrix that is passed in as a parameter.
	    endRow/endCol == -1 presumably means "to the last row/col" - confirm against the implementation. */
	void shallowCopy(const Matrix& other, int startRow = 0, int startCol = 0, int endRow = -1, int endCol = -1);
	/** this method performs a deep copy of the matrix that is passed in as a parameter. */
	void deepCopy(const Matrix& other);
	/** Returns the number of columns */
	int getColumnCount() const;
	/** Returns the number of rows */
	int getRowCount()const;
	/** Multiplies each element in the current matrix by a constant */
	void multiplyBy(const double val);
	/** This method sets the current matrix to be equal to one of the products: a * b, a'*b, a*b' or a'*b'.
	    The values of transA and transB indicate which of the matrices are transposed and which ones are not. */
	void setToProductOf(const Matrix& a, const Matrix& b, bool transA = false, bool transB = false);
	/** This method computes the inverse of the matrix a and writes it over the current matrix. The implementation
	    for the inverse of a matrix was obtained from Graphite. The parameter t is used as a threshold value for
	    determinants, etc so that we still get a result for the inverse of our matrix even if it is very poorly conditioned. */
	void setToInverseOf(const Matrix &a, double t = 0);
	/** This method prints the contents of the matrix - testing purpose only. */
	void printMatrix() const;
	/** This method sets the current matrix to be a sub-matrix (starting at (i,j) and ending at (i+rows, j+cols)
	    of the one that is passed in as a parameter - shallow copy only. */
	void setToSubmatrix(const Matrix &a, int i, int j, int rows, int cols);
	/** This method returns a copy of the value of the matrix at (i,j) */
	double get(int i, int j) const;
	/** This method sets the value of the matrix at (i,j) to newVal. */
	void set(int i, int j, double newVal);
	/** This method is used to set the values in the matrix to the ones that are passed in the array of doubles.
	    It is assumed that the array contains the right number of elements and that there is no space between
	    consecutive rows (tda == nrCols). */
	void setValues(double* vals);
	/** this method returns a pointer to its internal matrix data structure */
	gsl_matrix* getMatrixPointer() const;
	/** Implement this operator to have a quick way of multiplying 3x3 matrices by vectors - used for dynamics for instance */
	Vector3d operator * (const Vector3d &other);
	/** Implement a potentially faster function for 3x3 matrix - vector3d multiplication. Result and v need to be different!! */
	void postMultiplyVector(const Vector3d& v, Vector3d& result);

	/** Still need to do (if need be): get row/col vector. Initialize a matrix from a vector so that we can create
	    a 1xn matrix easily. make sure dgemv works properly, implement vector class, etc, swap, add, scale matrices, etc */

	/** this method adds the matrix that is passed in as a parameter to the current matrix. In addition, it scales,
	    both matrices by the two numbers that are passed in as parameters: *this = a * *this + b * other. */
	void add(const Matrix& other, double scaleA = 1.0, double scaleB = 1.0);
	/** this method subtracts the matrix that is passed in as a parameter from the current matrix. In addition, it scales,
	    both matrices by the two numbers that are passed in as parameters: *this = a * *this - b * other. */
	void sub(const Matrix& other, double scaleA = 1.0, double scaleB = 1.0);
};

// Free-standing smoke test for the class above - testing purposes only.
MATHLIB_DECLSPEC void testMatrixClass();
{ "alphanum_fraction": 0.6652202937, "avg_line_length": 31.8723404255, "ext": "h", "hexsha": "bf663bcf43c9e3e289f2cd6dc604eb6e650c2ab4", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "58778f148e65749e1dfc443043e9fc054ca3ff4d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "MontyThibault/centre-of-mass-awareness", "max_forks_repo_path": "Cartwheel/cartwheel-3d/MathLib/Matrix.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "58778f148e65749e1dfc443043e9fc054ca3ff4d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "MontyThibault/centre-of-mass-awareness", "max_issues_repo_path": "Cartwheel/cartwheel-3d/MathLib/Matrix.h", "max_line_length": 168, "max_stars_count": null, "max_stars_repo_head_hexsha": "58778f148e65749e1dfc443043e9fc054ca3ff4d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "MontyThibault/centre-of-mass-awareness", "max_stars_repo_path": "Cartwheel/cartwheel-3d/MathLib/Matrix.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1420, "size": 5992 }
#include <gsl/gsl_complex.h>
#include <gsl/gsl_complex_math.h>
#include <stdlib.h>

/*
 * Thin pointer-based wrappers around the GSL complex-number API.
 *
 * GSL passes and returns gsl_complex by value; these shims take and return
 * results through pointers instead, so the functions are callable from an
 * FFI that cannot express small-struct-by-value calling conventions.
 * Each wrapper is a direct, behavior-preserving forward to the
 * gsl_complex_* routine of the same name.
 */

/* Representation */

gsl_complex *alloc_gsl_complex(void)
{
  /* NOTE(review): result may be NULL on allocation failure; callers must check. */
  return malloc(sizeof(gsl_complex));
}

void free_gsl_complex(gsl_complex *c)
{
  free(c);
}

gsl_complex_float *alloc_gsl_complex_float(void)
{
  return malloc(sizeof(gsl_complex_float));
}

void free_gsl_complex_float(gsl_complex_float *c)
{
  free(c);
}

void mgsl_complex_rect(double x, double y, gsl_complex *res)
{
  *res = gsl_complex_rect(x, y);
}

void mgsl_complex_float_rect(float x, float y, gsl_complex_float *res)
{
  GSL_SET_COMPLEX(res, x, y);
}

void mgsl_complex_polar(double r, double theta, gsl_complex *res)
{
  *res = gsl_complex_polar(r, theta);
}

void mgsl_complex_float_polar(float r, float theta, gsl_complex_float *res)
{
  /* Compute in double precision, then narrow into the float struct. */
  gsl_complex z = gsl_complex_polar((double)r, (double)theta);
  GSL_SET_COMPLEX(res, GSL_REAL(z), GSL_IMAG(z));
}

/* Properties */

double mgsl_complex_arg(gsl_complex *c)    { return gsl_complex_arg(*c); }
double mgsl_complex_abs(gsl_complex *c)    { return gsl_complex_abs(*c); }
double mgsl_complex_abs2(gsl_complex *c)   { return gsl_complex_abs2(*c); }
double mgsl_complex_logabs(gsl_complex *c) { return gsl_complex_logabs(*c); }

/* Arithmetic operators */

void mgsl_complex_add(gsl_complex *a, gsl_complex *b, gsl_complex *res) { *res = gsl_complex_add(*a, *b); }
void mgsl_complex_sub(gsl_complex *a, gsl_complex *b, gsl_complex *res) { *res = gsl_complex_sub(*a, *b); }
void mgsl_complex_mul(gsl_complex *a, gsl_complex *b, gsl_complex *res) { *res = gsl_complex_mul(*a, *b); }
void mgsl_complex_div(gsl_complex *a, gsl_complex *b, gsl_complex *res) { *res = gsl_complex_div(*a, *b); }

void mgsl_complex_add_real(gsl_complex *a, double x, gsl_complex *res) { *res = gsl_complex_add_real(*a, x); }
void mgsl_complex_sub_real(gsl_complex *a, double x, gsl_complex *res) { *res = gsl_complex_sub_real(*a, x); }
void mgsl_complex_mul_real(gsl_complex *a, double x, gsl_complex *res) { *res = gsl_complex_mul_real(*a, x); }
void mgsl_complex_div_real(gsl_complex *a, double x, gsl_complex *res) { *res = gsl_complex_div_real(*a, x); }

void mgsl_complex_add_imag(gsl_complex *a, double y, gsl_complex *res) { *res = gsl_complex_add_imag(*a, y); }
void mgsl_complex_sub_imag(gsl_complex *a, double y, gsl_complex *res) { *res = gsl_complex_sub_imag(*a, y); }
void mgsl_complex_mul_imag(gsl_complex *a, double y, gsl_complex *res) { *res = gsl_complex_mul_imag(*a, y); }
void mgsl_complex_div_imag(gsl_complex *a, double y, gsl_complex *res) { *res = gsl_complex_div_imag(*a, y); }

void mgsl_complex_conjugate(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_conjugate(*z); }
void mgsl_complex_inverse(gsl_complex *z, gsl_complex *res)   { *res = gsl_complex_inverse(*z); }
void mgsl_complex_negative(gsl_complex *z, gsl_complex *res)  { *res = gsl_complex_negative(*z); }

/* Elementary Complex Functions */

void mgsl_complex_sqrt(gsl_complex *z, gsl_complex *res)                  { *res = gsl_complex_sqrt(*z); }
void mgsl_complex_sqrt_real(double x, gsl_complex *res)                   { *res = gsl_complex_sqrt_real(x); }
void mgsl_complex_pow(gsl_complex *z, gsl_complex *a, gsl_complex *res)   { *res = gsl_complex_pow(*z, *a); }
void mgsl_complex_pow_real(gsl_complex *z, double x, gsl_complex *res)    { *res = gsl_complex_pow_real(*z, x); }
void mgsl_complex_exp(gsl_complex *z, gsl_complex *res)                   { *res = gsl_complex_exp(*z); }
void mgsl_complex_log(gsl_complex *z, gsl_complex *res)                   { *res = gsl_complex_log(*z); }
void mgsl_complex_log10(gsl_complex *z, gsl_complex *res)                 { *res = gsl_complex_log10(*z); }
void mgsl_complex_log_b(gsl_complex *z, gsl_complex *b, gsl_complex *res) { *res = gsl_complex_log_b(*z, *b); }

/* Complex Trigonometric Functions */

void mgsl_complex_sin(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_sin(*z); }
void mgsl_complex_cos(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_cos(*z); }
void mgsl_complex_tan(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_tan(*z); }
void mgsl_complex_sec(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_sec(*z); }
void mgsl_complex_csc(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_csc(*z); }
void mgsl_complex_cot(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_cot(*z); }

/* Inverse Complex Trigonometric Functions */

void mgsl_complex_arcsin(gsl_complex *z, gsl_complex *res)  { *res = gsl_complex_arcsin(*z); }
void mgsl_complex_arcsin_real(double z, gsl_complex *res)   { *res = gsl_complex_arcsin_real(z); }
void mgsl_complex_arccos(gsl_complex *z, gsl_complex *res)  { *res = gsl_complex_arccos(*z); }
void mgsl_complex_arccos_real(double z, gsl_complex *res)   { *res = gsl_complex_arccos_real(z); }
void mgsl_complex_arctan(gsl_complex *z, gsl_complex *res)  { *res = gsl_complex_arctan(*z); }
void mgsl_complex_arcsec(gsl_complex *z, gsl_complex *res)  { *res = gsl_complex_arcsec(*z); }
void mgsl_complex_arcsec_real(double z, gsl_complex *res)   { *res = gsl_complex_arcsec_real(z); }
void mgsl_complex_arccsc(gsl_complex *z, gsl_complex *res)  { *res = gsl_complex_arccsc(*z); }
void mgsl_complex_arccsc_real(double z, gsl_complex *res)   { *res = gsl_complex_arccsc_real(z); }
void mgsl_complex_arccot(gsl_complex *z, gsl_complex *res)  { *res = gsl_complex_arccot(*z); }

/* Complex Hyperbolic Functions */

void mgsl_complex_sinh(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_sinh(*z); }
void mgsl_complex_cosh(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_cosh(*z); }
void mgsl_complex_tanh(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_tanh(*z); }
void mgsl_complex_sech(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_sech(*z); }
void mgsl_complex_csch(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_csch(*z); }
void mgsl_complex_coth(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_coth(*z); }

/* Inverse Complex Hyperbolic Functions */

void mgsl_complex_arcsinh(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_arcsinh(*z); }
void mgsl_complex_arccosh(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_arccosh(*z); }
void mgsl_complex_arccosh_real(double z, gsl_complex *res)  { *res = gsl_complex_arccosh_real(z); }
void mgsl_complex_arctanh(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_arctanh(*z); }
void mgsl_complex_arctanh_real(double z, gsl_complex *res)  { *res = gsl_complex_arctanh_real(z); }
void mgsl_complex_arcsech(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_arcsech(*z); }
void mgsl_complex_arccsch(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_arccsch(*z); }
void mgsl_complex_arccoth(gsl_complex *z, gsl_complex *res) { *res = gsl_complex_arccoth(*z); }
{ "alphanum_fraction": 0.7312201476, "avg_line_length": 20.1428571429, "ext": "c", "hexsha": "3a62916a16c344f5c08abdb2372ab3c5775df08d", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c11bdcd5c118c2b564a30e746e12afeb865bf25d", "max_forks_repo_licenses": [ "Artistic-2.0" ], "max_forks_repo_name": "frithnanth/raku-Math-Libgsl-Complex", "max_forks_repo_path": "src/complex.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "c11bdcd5c118c2b564a30e746e12afeb865bf25d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Artistic-2.0" ], "max_issues_repo_name": "frithnanth/raku-Math-Libgsl-Complex", "max_issues_repo_path": "src/complex.c", "max_line_length": 75, "max_stars_count": 1, "max_stars_repo_head_hexsha": "c11bdcd5c118c2b564a30e746e12afeb865bf25d", "max_stars_repo_licenses": [ "Artistic-2.0" ], "max_stars_repo_name": "frithnanth/raku-Math-Libgsl-Complex", "max_stars_repo_path": "src/complex.c", "max_stars_repo_stars_event_max_datetime": "2022-03-22T14:05:54.000Z", "max_stars_repo_stars_event_min_datetime": "2022-03-22T14:05:54.000Z", "num_tokens": 2089, "size": 6909 }
#include "linear.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <lauxlib.h>
#include <cblas.h>
#include <lapacke.h>

/*
 * Lua bindings for single-precision BLAS/LAPACK vectors and matrices.
 * Vectors carry (size, inc, values); matrices carry (rows, cols, order, ld,
 * values).  Both are userdata; `ref` != LUA_NOREF means the storage is
 * borrowed from another object kept alive via the registry.
 */

/* matrix orders */
static const char * const ORDERS[] = { "row", "col", NULL };

/* checks an order argument ("row"/"col", default "row") */
static CBLAS_ORDER checkorder (lua_State *L, int index) {
	switch (luaL_checkoption(L, index, "row", ORDERS)) {
	case 0:
		return CblasRowMajor;
	case 1:
		return CblasColMajor;
	}
	/* not reached */
	assert(0);
	return (CBLAS_ORDER)0;
}

/* checks a transpose argument ("notrans"/"trans", default "notrans") */
static CBLAS_TRANSPOSE checktranspose (lua_State *L, int index) {
	static const char * const TRANSPOSES[] = { "notrans", "trans", NULL };

	switch (luaL_checkoption(L, index, "notrans", TRANSPOSES)) {
	case 0:
		return CblasNoTrans;
	case 1:
		return CblasTrans;
	}
	/* not reached */
	assert(0);
	return (CBLAS_TRANSPOSE)0;
}

/* translates a transpose for LAPACK ('N'/'T' character form) */
static char lapacktranspose (CBLAS_TRANSPOSE transpose) {
	switch (transpose) {
	case CblasNoTrans:
		return 'N';
	case CblasTrans:
		return 'T';
	default:
		/* not reached */
		assert(0);
		return '\0';
	}
}

/* returns an int value from the table at the stack top; dfl < 0 means the
   field is required */
static int intvalue (lua_State *L, const char *key, int dfl) {
	int result, isinteger;

	lua_getfield(L, -1, key);
	if (!lua_isnil(L, -1)) {
		result = lua_tointegerx(L, -1, &isinteger);
		if (!isinteger) {
			luaL_error(L, "bad field " LUA_QS, key);
		}
	} else {
		if (dfl < 0) {
			luaL_error(L, "missing field " LUA_QS, key);
		}
		result = dfl;
	}
	lua_pop(L, 1);
	return result;
}

/* returns an option value (index into options[]) from the table at the stack
   top; dfl == NULL means the field is required */
static int optionvalue (lua_State *L, const char *key, const char *dfl, const char *options[]) {
	const char *str;
	int i;

	lua_getfield(L, -1, key);
	if (!lua_isnil(L, -1)) {
		str = lua_tostring(L, -1);
		if (str == NULL) {
			luaL_error(L, "bad field " LUA_QS, key);
		}
	} else {
		if (dfl == NULL) {
			luaL_error(L, "missing field " LUA_QS, key);
		}
		str = dfl;
	}
	lua_pop(L, 1);
	for (i = 0; options[i] != NULL; i++) {
		if (strcmp(options[i], str) == 0) {
			return i;
		}
	}
	luaL_error(L, "bad option " LUA_QS " in field " LUA_QS, str, key);
	return 0; /* not reached */
}

/* raises a linear argument error */
static int argerror (lua_State *L, int index) {
	return luaL_argerror(L, index, lua_pushfstring(L, "vector, or matrix "
			"expected, got %s", luaL_typename(L, index)));
}

/* pushes a new vector onto the stack */
static struct vector *newvector (lua_State *L, int size) {
	return lualinear_newvector(L, size);
}

/* pushes an existing vector onto the stack */
static struct vector *wrapvector (lua_State *L, int size, float *values) {
	return lualinear_wrapvector(L, size, values);
}

/* creates a new vector */
static int vector (lua_State *L) {
	int size;

	/* process arguments */
	size = luaL_checkinteger(L, 1);
	luaL_argcheck(L, size >= 1, 1, "bad dimension");

	/* create */
	newvector(L, size);
	return 1;
}

/* vector length implementation */
static int vector_len (lua_State *L) {
	struct vector *x;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	lua_pushinteger(L, x->size);
	return 1;
}

/* vector index implementation (1-based, honors the vector stride) */
static int vector_index (lua_State *L) {
	struct vector *x;
	int index;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	index = luaL_checkinteger(L, 2);
	luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index");
	lua_pushnumber(L, x->values[(size_t)(index - 1) * x->inc]);
	return 1;
}

/* matrix vector newindex implementation */
static int vector_newindex (lua_State *L) {
	struct vector *x;
	int index;
	float value;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	index = luaL_checkinteger(L, 2);
	luaL_argcheck(L, index >= 1 && index <= x->size, 2, "bad index");
	value = luaL_checknumber(L, 3);
	x->values[(size_t)(index - 1) * x->inc] = value;
	return 0;
}

/* vector next function */
static int vector_next (lua_State *L) {
	struct vector *x;
	int index;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	index = luaL_checkinteger(L, 2);
	if (index >= 0 && index < x->size) {
		lua_pushinteger(L, index + 1);
		/* NOTE(review): indexes values[index] without multiplying by
		   x->inc, unlike vector_index above; for strided vectors
		   (e.g. from tvector) this looks wrong - confirm. */
		lua_pushnumber(L, x->values[(size_t)index]);
		return 2;
	}
	lua_pushnil(L);
	return 1;
}

/* vector ipairs function */
static int vector_ipairs (lua_State *L) {
	luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	lua_pushcfunction(L, vector_next);
	lua_pushvalue(L, 1);
	lua_pushinteger(L, 0);
	return 3;
}

/* returns the string representation of a vector */
static int vector_tostring (lua_State *L) {
	struct vector *x;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	lua_pushfstring(L, "vector: %p", x);
	return 1;
}

/* frees a vector; owned storage is freed, borrowed storage is unref'd */
static int vector_free (lua_State *L) {
	struct vector *x;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x->ref == LUA_NOREF) {
		free(x->values);
	} else {
		luaL_unref(L, LUA_REGISTRYINDEX, x->ref);
	}
	return 0;
}

/* pushes a new matrix onto the stack */
static struct matrix *newmatrix (lua_State *L, int rows, int cols, CBLAS_ORDER order) {
	return lualinear_newmatrix(L, rows, cols, order);
}

/* pushes an existing matrix onto the stack */
static struct matrix *wrapmatrix (lua_State *L, int rows, int cols, CBLAS_ORDER order, float *values) {
	return lualinear_wrapmatrix(L, rows, cols, order, values);
}

/* creates a new matrix */
static int matrix (lua_State *L) {
	int rows, cols;
	CBLAS_ORDER order;

	/* process arguments */
	rows = luaL_checkinteger(L, 1);
	luaL_argcheck(L, rows >= 1, 1, "bad dimension");
	cols = luaL_checkinteger(L, 2);
	luaL_argcheck(L, cols >= 1, 2, "bad dimension");
	order = checkorder(L, 3);

	/* create */
	newmatrix(L, rows, cols, order);
	return 1;
}

/* returns the length of a matrix (its major dimension) */
static int matrix_len (lua_State *L) {
	struct matrix *X;

	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	switch (X->order) {
	case CblasRowMajor:
		lua_pushinteger(L, X->rows);
		break;
	case CblasColMajor:
		lua_pushinteger(L, X->cols);
		break;
	}
	return 1;
}

/* matrix index implementation; returns a vector view of one major row/col */
static int matrix_index (lua_State *L) {
	struct matrix *X;
	int index, size;
	struct vector *x;

	/* process arguments */
	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	index = luaL_checkinteger(L, 2);
	luaL_argcheck(L, index >= 1, 2, "bad index");
	switch (X->order) {
	case CblasRowMajor:
		luaL_argcheck(L, index <= X->rows, 2, "bad index");
		size = X->cols;
		break;
	case CblasColMajor:
		luaL_argcheck(L, index <= X->cols, 2, "bad index");
		size = X->rows;
		break;
	default:
		/* not reached */
		size = -1;
		assert(0);
	}

	/* create vector; anchor the matrix in the registry so the view
	   cannot outlive its storage */
	x = wrapvector(L, size, &X->values[(size_t)(index - 1) * X->ld]);
	lua_pushvalue(L, 1);
	x->ref = luaL_ref(L, LUA_REGISTRYINDEX);
	return 1;
}

/* matrix next function */
static int matrix_next (lua_State *L) {
	struct matrix *X;
	int index, majorsize, minorsize;
	struct vector *x;

	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	index = luaL_checkinteger(L, 2);
	switch (X->order) {
	case CblasRowMajor:
		majorsize = X->rows;
		minorsize = X->cols;
		break;
	case CblasColMajor:
		majorsize = X->cols;
		minorsize = X->rows;
		break;
	default:
		/* not reached */
		assert(0);
		return 0;
	}
	if (index >= 0 && index < majorsize) {
		lua_pushinteger(L, index + 1);
		x = wrapvector(L, minorsize, &X->values[(size_t)index * X->ld]);
		lua_pushvalue(L, 1);
		x->ref = luaL_ref(L, LUA_REGISTRYINDEX);
		return 2;
	}
	lua_pushnil(L);
	return 1;
}

/* matrix ipairs function */
static int matrix_ipairs (lua_State *L) {
	luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	lua_pushcfunction(L, matrix_next);
	lua_pushvalue(L, 1);
	lua_pushinteger(L, 0);
	return 3;
}

/* returns the string representation of a matrix */
static int matrix_tostring (lua_State *L) {
	struct matrix *X;

	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	lua_pushfstring(L, "matrix: %p", X);
	return 1;
}

/* frees a matrix; owned storage is freed, borrowed storage is unref'd */
static int matrix_free (lua_State *L) {
	struct matrix *X;

	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X->ref == LUA_NOREF) {
		free(X->values);
	} else {
		luaL_unref(L, LUA_REGISTRYINDEX, X->ref);
	}
	return 0;
}

/* returns the type of a linear object ("vector", "matrix", or nil) */
static int type (lua_State *L) {
	if (luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE) != NULL) {
		lua_pushliteral(L, "vector");
		return 1;
	}
	if (luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE) != NULL) {
		lua_pushliteral(L, "matrix");
		return 1;
	}
	lua_pushnil(L);
	return 1;
}

/* returns the size of a linear object: size for vectors; rows, cols, order
   for matrices */
static int size (lua_State *L) {
	struct vector *x;
	struct matrix *X;

	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		lua_pushinteger(L, x->size);
		return 1;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		lua_pushinteger(L, X->rows);
		lua_pushinteger(L, X->cols);
		lua_pushstring(L, ORDERS[X->order == CblasRowMajor ? 0 : 1]);
		return 3;
	}
	return argerror(L, 1);
}

/* transposed vector: a strided (inc = ld) view along the minor dimension */
static int tvector (lua_State *L) {
	struct matrix *X;
	int index, size;
	struct vector *x;

	/* process arguments */
	X = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	index = luaL_checkinteger(L, 2);
	luaL_argcheck(L, index >= 1, 2, "bad index");
	switch (X->order) {
	case CblasRowMajor:
		luaL_argcheck(L, index <= X->cols, 2, "bad index");
		size = X->rows;
		break;
	case CblasColMajor:
		luaL_argcheck(L, index <= X->rows, 2, "bad index");
		size = X->cols;
		break;
	default:
		/* not reached */
		size = -1;
		assert(0);
	}

	/* create vector */
	x = wrapvector(L, size, &X->values[index - 1]);
	x->inc = X->ld;
	lua_pushvalue(L, 1);
	x->ref = luaL_ref(L, LUA_REGISTRYINDEX);
	return 1;
}

/* subvector or submatrix (shallow view; ranges default to the full extent) */
static int sub (lua_State *L) {
	struct vector *x, *s;
	struct matrix *X, *S;

	/* process arguments */
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		int start, end;

		start = luaL_optinteger(L, 2, 1);
		luaL_argcheck(L, start >= 1 && start <= x->size, 2,
				"bad index");
		end = luaL_optinteger(L, 3, x->size);
		luaL_argcheck(L, end >= start && end <= x->size, 3,
				"bad index");
		s = wrapvector(L, end - start + 1, &x->values[
				(size_t)(start - 1) * x->inc]);
		s->inc = x->inc;
		lua_pushvalue(L, 1);
		s->ref = luaL_ref(L, LUA_REGISTRYINDEX);
		return 1;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		int rowstart, rowend, colstart, colend;

		/* argument order follows the storage order: major range
		   first, then minor range */
		switch (X->order){
		case CblasRowMajor:
			rowstart = luaL_optinteger(L, 2, 1);
			luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows,
					2, "bad index");
			colstart = luaL_optinteger(L, 3, 1);
			luaL_argcheck(L, colstart >= 1 && colstart <= X->cols,
					3, "bad index");
			rowend = luaL_optinteger(L, 4, X->rows);
			luaL_argcheck(L, rowend >= rowstart
					&& rowend <= X->rows, 4, "bad index");
			colend = luaL_optinteger(L, 5, X->cols);
			luaL_argcheck(L, colend >= colstart
					&& colend <= X->cols, 5, "bad index");
			S = wrapmatrix(L, rowend - rowstart + 1,
					colend - colstart + 1, X->order,
					&X->values[(size_t)(rowstart - 1)
					* X->ld + colstart - 1]);
			break;
		case CblasColMajor:
			colstart = luaL_optinteger(L, 2, 1);
			luaL_argcheck(L, colstart >= 1 && colstart <= X->cols,
					2, "bad index");
			rowstart = luaL_optinteger(L, 3, 1);
			luaL_argcheck(L, rowstart >= 1 && rowstart <= X->rows,
					3, "bad index");
			colend = luaL_optinteger(L, 4, X->cols);
			luaL_argcheck(L, colend >= colstart
					&& colend <= X->cols, 4, "bad index");
			rowend = luaL_optinteger(L, 5, X->rows);
			luaL_argcheck(L, rowend >= rowstart
					&& rowend <= X->rows, 5, "bad index");
			S = wrapmatrix(L, rowend - rowstart + 1,
					colend - colstart + 1, X->order,
					&X->values[(size_t)(colstart - 1)
					* X->ld + rowstart - 1]);
			break;
		default:
			/* not reached */
			assert(0);
			return 0;
		}
		S->ld = X->ld;
		lua_pushvalue(L, 1);
		S->ref = luaL_ref(L, LUA_REGISTRYINDEX);
		return 1;
	}
	return argerror(L, 1);
}

/* unwinds matrices into a vector (last argument); matrices are copied out in
   their own storage order until the vector is full */
static int unwind (lua_State *L) {
	struct vector *x;
	int index, i, j, k;
	size_t base;
	struct matrix *X;

	if (lua_gettop(L) == 0) {
		return luaL_error(L, "wrong number of arguments");
	}
	x = luaL_checkudata(L, lua_gettop(L), LUALINEAR_VECTOR_METATABLE);
	index = 1;
	i = 0;
	while (i < x->size) {
		X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE);
		luaL_argcheck(L, X->rows * X->cols <= x->size - i, index,
				"matrix too large");
		switch (X->order) {
		case CblasRowMajor:
			for (j = 0; j < X->rows; j++) {
				base = (size_t)j * X->ld;
				for (k = 0; k < X->cols; k++) {
					x->values[(size_t)i * x->inc] =
							X->values[base + k];
					i++;
				}
			}
			break;
		case CblasColMajor:
			for (j = 0; j < X->cols; j++) {
				base = (size_t)j * X->ld;
				for (k = 0; k < X->rows; k++) {
					x->values[(size_t)i * x->inc] =
							X->values[base + k];
					i++;
				}
			}
			break;
		}
		index++;
	}
	return 0;
}

/* reshapes a vector (first argument) into matrices; the inverse of unwind */
static int reshape (lua_State *L) {
	struct vector *x;
	int index, i, j, k;
	size_t base;
	struct matrix *X;

	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	index = 2;
	i = 0;
	while (i < x->size) {
		X = luaL_checkudata(L, index, LUALINEAR_MATRIX_METATABLE);
		luaL_argcheck(L, X->rows * X->cols <= x->size - i, index,
				"matrix too large");
		switch (X->order) {
		case CblasRowMajor:
			for (j = 0; j < X->rows; j++) {
				base = (size_t)j * X->ld;
				for (k = 0; k < X->cols; k++) {
					X->values[base + k] = x->values[
							(size_t)i * x->inc];
					i++;
				}
			}
			break;
		case CblasColMajor:
			for (j = 0; j < X->cols; j++) {
				base = (size_t)j * X->ld;
				for (k = 0; k < X->rows; k++) {
					X->values[base + k] = x->values[
							(size_t)i * x->inc];
					i++;
				}
			}
			break;
		}
		index++;
	}
	return 0;
}

/* converts a vector or matrix to a table (inverse of tolinear below) */
static int totable (lua_State *L) {
	struct vector *x;
	struct matrix *X;
	int i, j;
	const float *value;

	/* check and process arguments */
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		lua_createtable(L, 0, 3);
		lua_pushliteral(L, "vector");
		lua_setfield(L, -2, "type");
		lua_pushinteger(L, x->size);
		lua_setfield(L, -2, "length");
		lua_createtable(L, x->size, 0);
		value = x->values;
		for (i = 0; i < x->size; i++) {
			lua_pushnumber(L, *value);
			lua_rawseti(L, -2, i + 1);
			value += x->inc;
		}
		lua_setfield(L, -2, "values");
		return 1;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		lua_createtable(L, 0, 5);
		lua_pushliteral(L, "matrix");
		lua_setfield(L, -2, "type");
		lua_pushinteger(L, X->rows);
		lua_setfield(L, -2, "rows");
		lua_pushinteger(L, X->cols);
		lua_setfield(L, -2, "cols");
		switch (X->order) {
		case CblasRowMajor:
			lua_pushliteral(L, "rowmajor");
			lua_setfield(L, -2, "order");
			lua_createtable(L, X->rows, 0);
			for (i = 0; i < X->rows; i++) {
				lua_createtable(L, X->cols, 0);
				value = &X->values[(size_t)i * X->ld];
				for (j = 0; j < X->cols; j++) {
					lua_pushnumber(L, *value++);
					lua_rawseti(L, -2, j + 1);
				}
				lua_rawseti(L, -2, i + 1);
			}
			lua_setfield(L, -2, "values");
			break;
		case CblasColMajor:
			lua_pushliteral(L, "colmajor");
			lua_setfield(L, -2, "order");
			lua_createtable(L, X->cols, 0);
			for (i = 0; i < X->cols; i++) {
				lua_createtable(L, X->rows, 0);
				value = &X->values[(size_t)i * X->ld];
				for (j = 0; j < X->rows; j++) {
					lua_pushnumber(L, *value++);
					lua_rawseti(L, -2, j + 1);
				}
				lua_rawseti(L, -2, i + 1);
			}
			lua_setfield(L, -2, "values");
			break;
		}
		return 1;
	}
	return argerror(L, 1);
}

/* converts a table (as produced by totable) to a vector or matrix */
static int tolinear (lua_State *L) {
	static const char *types[] = { "vector", "matrix", NULL };
	static const char *orders[] = { "rowmajor", "colmajor", NULL };
	struct vector *x;
	struct matrix *X;
	int size, rows, cols, major, minor;
	CBLAS_ORDER order;
	int i, j;
	int isnum;
	float *value;

	/* check arguments */
	luaL_checktype(L, 1, LUA_TTABLE);
	lua_settop(L, 1);

	/* handle types */
	switch (optionvalue(L, "type", NULL, types)) {
	case 0: /* vector */
		size = intvalue(L, "length", -1);
		if (size < 1) {
			return luaL_error(L, "bad field " LUA_QS, "length");
		}
		x = newvector(L, size);
		lua_getfield(L, 1, "values");
		if (lua_type(L, -1) != LUA_TTABLE) {
			return luaL_error(L, "bad field " LUA_QS, "values");
		}
		value = x->values;
		for (i = 0; i < size; i++) {
			lua_rawgeti(L, -1, i + 1);
			*value++ = lua_tonumberx(L, -1, &isnum);
			if (!isnum) {
				return luaL_error(L, "bad value at index %d",
						i + 1);
			}
			lua_pop(L, 1);
		}
		lua_pop(L, 1);
		return 1;
	case 1: /* matrix */
		rows = intvalue(L, "rows", -1);
		if (rows < 1) {
			return luaL_error(L, "bad field " LUA_QS, "rows");
		}
		cols = intvalue(L, "cols", -1);
		if (cols < 1) {
			return luaL_error(L, "bad field " LUA_QS, "cols");
		}
		switch (optionvalue(L, "order", NULL, orders)) {
		case 0:
			order = CblasRowMajor;
			major = rows;
			minor = cols;
			break;
		case 1:
			order = CblasColMajor;
			major = cols;
			minor = rows;
			break;
		default:
			/* not reached */
			assert(0);
			return 0;
		}
		X = newmatrix(L, rows, cols, order);
		lua_getfield(L, 1, "values");
		if (lua_type(L, -1) != LUA_TTABLE) {
			return luaL_error(L, "bad field " LUA_QS, "values");
		}
		for (i = 0; i < major; i++) {
			value = &X->values[i * X->ld];
			lua_rawgeti(L, -1, i + 1);
			if (lua_type(L, -1) != LUA_TTABLE) {
				return luaL_error(L, "bad value at index %d",
						i + 1);
			}
			for (j = 0; j < minor; j++) {
				lua_rawgeti(L, -1, j + 1);
				*value++ = lua_tonumberx(L, -1, &isnum);
				if (!isnum) {
					return luaL_error(L, "bad value at "
							"index (%d,%d)",
							i + 1, j + 1);
				}
				lua_pop(L, 1);
			}
			lua_pop(L, 1);
		}
		lua_pop(L, 1);
		return 1;
	}
	/* not reached */
	assert(0);
	return 0;
}

/* invokes the DOT subprogram (x' y) */
static int dot (lua_State *L) {
	struct vector *x, *y;
	float dot;

	/* check and process arguments */
	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE);
	luaL_argcheck(L, y->size == x->size, 2, "dimension mismatch");

	/* invoke subprogram */
	dot = cblas_sdot(x->size, x->values, x->inc, y->values, y->inc);
	lua_pushnumber(L, dot);
	return 1;
}

/* invokes the NRM2 subprogram (||x||_2) */
static int nrm2 (lua_State *L) {
	struct vector *x;
	float nrm2;

	/* check and process arguments */
	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);

	/* invoke subprogram */
	nrm2 = cblas_snrm2(x->size, x->values, x->inc);
	lua_pushnumber(L, nrm2);
	return 1;
}

/* invokes the ASUM subprogram (sigma |x|) */
static int asum (lua_State *L) {
	struct vector *x;
	float asum;

	/* check and process arguments */
	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);

	/* invoke subprogram */
	asum = cblas_sasum(x->size, x->values, x->inc);
	lua_pushnumber(L, asum);
	return 1;
}

/* invokes the IAMAX subprogram (argmax |x|); converts to a 1-based index */
static int iamax (lua_State *L) {
	struct vector *x;
	int iamax;

	/* check and process arguments */
	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);

	/* invoke subprogram */
	iamax = cblas_isamax(x->size, x->values, x->inc);
	lua_pushinteger(L, iamax + 1);
	return 1;
}

/* sum implementation; OpenMP-parallel for large inputs */
static float _sum (const float *values, int size, int inc) {
	float sum;
	int i;

	sum = 0.0;
#pragma omp parallel for private(i) schedule(auto) \
		if(size >= LUALINEAR_OMP_MINSIZE) reduction(+:sum)
	for (i = 0; i < size; i++) {
		sum += values[(size_t)i * inc];
	}
	return sum;
}

/* sum implementation (sigma x_i); for a matrix, stores per-row (or
   per-column, when "trans") sums into the vector y */
static int sum (lua_State *L) {
	struct vector *x, *y;
	struct matrix *X;
	int i;

	/* check and process arguments */
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		lua_pushnumber(L, _sum(x->values, x->size, x->inc));
		return 1;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE);
		switch (checktranspose(L, 3)) {
		case CblasNoTrans:
			switch (X->order) {
			case CblasRowMajor:
				luaL_argcheck(L, y->size == X->rows, 2,
						"dimension mismatch");
				for (i = 0; i < X->rows; i++) {
					y->values[(size_t)i * y->inc] = _sum(
							&X->values[(size_t)i
							* X->ld], X->cols, 1);
				}
				break;
			case CblasColMajor:
				luaL_argcheck(L, y->size == X->cols, 2,
						"dimension mismatch");
				for (i = 0; i < X->cols; i++) {
					y->values[(size_t)i * y->inc] = _sum(
							&X->values[(size_t)i
							* X->ld], X->rows, 1);
				}
				break;
			}
			break;
		case CblasTrans:
			switch (X->order) {
			case CblasRowMajor:
				luaL_argcheck(L, y->size == X->cols, 2,
						"dimension mismatch");
				for (i = 0; i < X->cols; i++) {
					y->values[(size_t)i * y->inc] = _sum(
							&X->values[(size_t)i],
							X->rows, X->ld);
				}
				break;
			case CblasColMajor:
				luaL_argcheck(L, y->size == X->rows, 2,
						"dimension mismatch");
				for (i = 0; i < X->rows; i++) {
					y->values[(size_t)i * y->inc] = _sum(
							&X->values[(size_t)i],
							X->cols, X->ld);
				}
				break;
			}
			break;
		default:
			/* not reached */
			assert(0);
			break;
		}
		return 0;
	}
	return argerror(L, 1);
}

/* xy function: s(size, x, incx, y, incy, alpha) */
typedef void(*xyfunction)(int, float *, int, float *, int, float);

/* invokes an (x,y) subprogram */
static int xy (lua_State *L, xyfunction s, int hasy, int hasalpha) {
	int index, i;
	float alpha;
	struct vector *x, *y;
	struct matrix *X, *Y;

	/* check and process arguments */
	index = 2;
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		if (hasy) {
			y = luaL_testudata(L, 2, LUALINEAR_VECTOR_METATABLE);
			Y = luaL_testudata(L, 2, LUALINEAR_MATRIX_METATABLE);
			if (y == NULL && Y == NULL) {
				return argerror(L, 2);
			}
			index++;
		} else {
			y = x;
			Y = NULL;
		}
		if (hasalpha) {
			alpha = luaL_optnumber(L, index, 1.0);
			index++;
		} else {
			alpha = 0.0;
		}
		if (y != NULL) {
			/* invoke subprogram on vector-vector */
			luaL_argcheck(L, y->size == x->size, 2,
					"dimension mismatch");
			s(x->size, x->values, x->inc, y->values, y->inc,
					alpha);
			return 0;
		}
		/* invoke subprogram on vector-matrix */
		/* NOTE(review): in the four luaL_argcheck calls below the
		   condition and argument-number parameters appear swapped -
		   luaL_argcheck(L, cond, arg, msg) - so the condition passed
		   is the constant 1 and the dimension check never fires.
		   Confirm and fix upstream. */
		switch (checktranspose(L, index)) {
		case CblasNoTrans:
			switch (Y->order) {
			case CblasRowMajor:
				luaL_argcheck(L, 1, x->size == Y->cols,
						"dimension mismatch");
				for (i = 0; i < Y->rows; i++) {
					s(x->size, x->values, x->inc,
							&Y->values[(size_t)i
							* Y->ld], 1, alpha);
				}
				break;
			case CblasColMajor:
				luaL_argcheck(L, 1, x->size == Y->rows,
						"dimension mismatch");
				for (i = 0; i < Y->cols; i++) {
					s(x->size, x->values, x->inc,
							&Y->values[(size_t)i
							* Y->ld], 1, alpha);
				}
				break;
			}
			break;
		case CblasTrans:
			switch (Y->order) {
			case CblasRowMajor:
				luaL_argcheck(L, 1, x->size == Y->rows,
						"dimension mismatch");
				for (i = 0; i < Y->rows; i++) {
					s(x->size, x->values, x->inc,
							&Y->values[(size_t)i],
							Y->ld, alpha);
				}
				break;
			case CblasColMajor:
				luaL_argcheck(L, 1, x->size == Y->cols,
						"dimension mismatch");
				for (i = 0; i < Y->cols; i++) {
					s(x->size, x->values, x->inc,
							&Y->values[(size_t)i],
							Y->ld, alpha);
				}
				break;
			}
			break;
		default:
			/* not reached */
			assert(0);
		}
		return 0;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		if (hasy) {
			Y = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE);
			luaL_argcheck(L, X->order == Y->order, 2,
					"order mismatch");
luaL_argcheck(L, X->rows == Y->rows && X->cols == Y->cols, 2, "dimension mismatch"); index++; } else { Y = X; } if (hasalpha) { alpha = luaL_optnumber(L, index, 1.0); index++; } else { alpha = 0.0; } /* invoke subprogram on matrix-matrix */ switch (X->order) { case CblasRowMajor: for (i = 0; i < X->rows; i++) { s(X->cols, &X->values[(size_t)i * X->ld], 1, &Y->values[(size_t)i * Y->ld], 1, alpha); } break; case CblasColMajor: for (i = 0; i < X->cols; i++) { s(X->rows, &X->values[(size_t)i * X->ld], 1, &Y->values[(size_t)i * Y->ld], 1, alpha); } break; } return 0; } return argerror(L, 1); } /* wraps the SWAP subprogram */ static void _swap (int size, float *x, int incx, float *y, int incy, float alpha) { (void)alpha; cblas_sswap(size, x, incx, y, incy); } /* invokes the SWAP subprogram (y <-> x) */ static int swap (lua_State *L) { return xy(L, _swap, 1, 0); } /* wraps the COPY subprogram */ static void _copy (int size, float *x, int incx, float *y, int incy, float alpha) { (void)alpha; cblas_scopy(size, x, incx, y, incy); } /* invokes the COPY subprogram (y <- x) */ static int copy (lua_State *L) { return xy(L, _copy, 1, 0); } /* wraps the AXPY subprogram */ static void _axpy (int size, float *x, int incx, float *y, int incy, float alpha) { cblas_saxpy(size, alpha, x, incx, y, incy); } /* invokes the AXPY subprogram (y <- alpha x + y) */ static int axpy (lua_State *L) { return xy(L, _axpy, 1, 1); } /* wraps the SCAL subprogram */ static void _scal (int size, float *x, int incx, float *y, int incy, float alpha) { (void)y; (void)incy; cblas_sscal(size, alpha, x, incx); } /* invokes the SCAL subprogram (x <- alpha x) */ static int scal (lua_State *L) { return xy(L, _scal, 0, 1); } /* set operation implementation */ static void _set (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { x[(size_t)i * incx] = alpha; } } 
/* performs a set operation (x <- alpha) */ static int set (lua_State *L) { return xy(L, _set, 0, 1); } /* uniform RNG implementation */ static void _uniform (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; (void)alpha; for (i = 0; i < size; i++) { *x = (float)random() * (1.0 / ((float)RAND_MAX + 1.0)); x += incx; } } /* performs a uniform operation (x <- uniform) */ static int uniform (lua_State *L) { return xy(L, _uniform, 0, 0); } /* normal RNG implementation */ static void _normal (int size, float *x, int incx, float *y, int incy, float alpha) { int i; float u1, u2, r, s, c; (void)y; (void)incy; (void)alpha; for (i = 0; i < size - 1; i += 2) { do { u1 = (float)random() * (1.0 / (float)RAND_MAX); u2 = (float)random() * (1.0 / (float)RAND_MAX); } while (u1 <= -DBL_MAX); r = sqrt(-2.0 * logf(u1)); sincosf(2 * M_PI * u2, &s, &c); *x = r * c; x += incx; *x = r * s; x += incx; } if (i < size) { do { u1 = (float)random() * (1.0 / (float)RAND_MAX); u2 = (float)random() * (1.0 / (float)RAND_MAX); } while (u1 <= -DBL_MAX); *x = sqrtf(-2.0 * logf(u1)) * cosf(2 * M_PI * u2); x += incx; } } /* performs a normal operation (x <- normal) */ static int normal (lua_State *L) { return xy(L, _normal, 0, 0); } /* inc operation implementation */ static void _inc (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)y; (void)incy; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { x[(size_t)i * incx] += alpha; } } /* performs a inc operation (x <- x + alpha) */ static int inc (lua_State *L) { return xy(L, _inc, 0, 1); } /* element-wise multiplication implementation, alpha = 1 */ static void _mul1 (int size, float *x, int incx, float *y, int incy, float alpha) { int i; (void)alpha; #pragma omp parallel for private(i) schedule(auto) \ if(size >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < size; i++) { y[(size_t)i * incy] *= x[(size_t)i * incx]; } } /* 
element-wise multiplication implementation, alpha = -1 */
static void _mulm1 (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	int	i;

	(void)alpha;
	/* alpha = -1 is implemented as y <- y ./ x */
	#pragma omp parallel for private(i) schedule(auto) \
			if(size >= LUALINEAR_OMP_MINSIZE)
	for (i = 0; i < size; i++) {
		y[(size_t)i * incy] /= x[(size_t)i * incx];
	}
}

/* element-wise multiplication implementation, alpha = any */
static void _mul (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	int	i;

	#pragma omp parallel for private(i) schedule(auto) \
			if(size >= LUALINEAR_OMP_MINSIZE)
	for (i = 0; i < size; i++) {
		y[(size_t)i * incy] *= pow(x[(size_t)i * incx], alpha);
	}
}

/*
 * performs element-wise multiplication (y <- x^alpha .* y); dispatches
 * to the specialized implementations for alpha = 1 and alpha = -1
 */
static int mul (lua_State *L) {
	float	alpha;

	alpha = luaL_optnumber(L, 3, 1.0);
	if (alpha == 1.0) {
		return xy(L, _mul1, 1, 1);
	}
	if (alpha == -1.0) {
		return xy(L, _mulm1, 1, 1);
	}
	return xy(L, _mul, 1, 1);
}

/* power raising operation implementation */
static void _pow (int size, float *x, int incx, float *y, int incy,
		float alpha) {
	int	i;

	(void)y;
	(void)incy;
	#pragma omp parallel for private(i) schedule(auto) \
			if(size >= LUALINEAR_OMP_MINSIZE)
	for (i = 0; i < size; i++) {
		x[(size_t)i * incx] = pow(x[(size_t)i * incx], alpha);
	}
}

/* performs element-wise power raising (x <- x^alpha) */
static int powx (lua_State *L) {
	return xy(L, _pow, 0, 1);
}

/* apply function */
typedef float(*applyfunction)(float);

/*
 * applies a function to a number (pushing the result), or element-wise
 * and in place to a vector or matrix; the parallel flag enables OpenMP
 * (it is disabled for Lua callbacks, which must stay on one thread);
 * note the apply parameter intentionally shadows the function name
 */
static int apply (lua_State *L, applyfunction apply, int parallel) {
	struct vector	*x;
	struct matrix	*X;
	int		i, j;
	size_t		base;

	if (lua_type(L, 1) == LUA_TNUMBER) {
		lua_pushnumber(L, apply(lua_tonumber(L, 1)));
		return 1;
	}
	x = luaL_testudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	if (x != NULL) {
		#pragma omp parallel for private(i) schedule(auto) \
				if(parallel && x->size >= LUALINEAR_OMP_MINSIZE)
		for (i = 0; i < x->size; i++) {
			x->values[(size_t)i * x->inc] =
					apply(x->values[(size_t)i * x->inc]);
		}
		return 0;
	}
	X = luaL_testudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	if (X != NULL) {
		switch (X->order) {
		case CblasRowMajor:
			/* outer loop walks rows; inner contiguous run may
			   be parallelized */
			for (i = 0; i < X->rows; i++) {
				base = (size_t)i * X->ld;
				#pragma omp parallel for private(j) \
						schedule(auto) \
						if(parallel && X->cols \
						>= LUALINEAR_OMP_MINSIZE)
				for (j = 0; j < X->cols; j++) {
					X->values[base + j] = apply(
							X->values[base + j]);
				}
			}
			break;
		case CblasColMajor:
			for (i = 0; i < X->cols; i++) {
				base = (size_t)i * X->ld;
				#pragma omp parallel for private(j) \
						schedule(auto) \
						if(parallel && X->rows \
						>= LUALINEAR_OMP_MINSIZE)
				for (j = 0; j < X->rows; j++) {
					X->values[base + j] = apply(
							X->values[base + j]);
				}
			}
			break;
		}
		return 0;
	}
	return luaL_argerror(L, 1, lua_pushfstring(L, "number, vector, or "
			"matrix expected, got %s", luaL_typename(L, 1)));
}

/* sign function implementation; returns x itself for zero (and NaN) */
static float _sign (float x) {
	if (x > 0) {
		return 1;
	}
	if (x < 0) {
		return -1;
	}
	return x;
}

/* sign function */
static int sign (lua_State *L) {
	return apply(L, _sign, 1);
}

/* abs function implementation */
static float _abs (float x) {
	return fabs(x);
}

/* abs function */
static int absx (lua_State *L) {
	return apply(L, _abs, 1);
}

/* exp function */
static int expx (lua_State *L) {
	return apply(L, expf, 1);
}

/* log function */
static int logx (lua_State *L) {
	return apply(L, logf, 1);
}

/* logistic function implementation */
static float _logistic (float z) {
	return 1.0 / (1.0 + expf(-z));
}

/* logistic function */
static int logistic (lua_State *L) {
	return apply(L, _logistic, 1);
}

/* tanh function */
static int tanhx (lua_State *L) {
	return apply(L, tanhf, 1);
}

/* softplus function implementation */
static float _softplus (float x) {
	return logf(1 + expf(x));
}

/* softplus function */
static int softplus (lua_State *L) {
	return apply(L, _softplus, 1);
}

/* rectifier function implementation */
static float _rectifier (float x) {
	return x > 0.0 ?
x : 0.0;
}

/* rectifier function */
static int rectifier (lua_State *L) {
	return apply(L, _rectifier, 1);
}

/* current Lua state; thread-local so _apply can reach the Lua function
   stored on its stack */
static __thread lua_State *TL;

/* apply function implementation: calls the Lua function at the top of
   TL's stack with x and returns its numeric result */
static float _apply (float x) {
	float	result;

	lua_pushvalue(TL, -1);
	lua_pushnumber(TL, x);
	lua_call(TL, 1, 1);
	result = lua_tonumber(TL, -1);
	lua_pop(TL, 1);
	return result;
}

/* apply function: element-wise application of a Lua function; runs
   serially (parallel = 0) since the Lua state is not thread-safe */
static int applyx (lua_State *L) {
	luaL_checktype(L, 2, LUA_TFUNCTION);
	lua_settop(L, 2);
	TL = L;
	return apply(L, _apply, 0);
}

/* invokes the GEMV subprogram (y <- alpha A x + beta y) */
static int gemv (lua_State *L) {
	struct matrix	*A;
	struct vector	*x, *y;
	float		alpha, beta;
	CBLAS_TRANSPOSE	ta;
	int		m, n;

	/* check and process arguments; m x n are the dimensions of op(A) */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	x = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE);
	y = luaL_checkudata(L, 3, LUALINEAR_VECTOR_METATABLE);
	alpha = luaL_optnumber(L, 4, 1.0);
	beta = luaL_optnumber(L, 5, 0.0);
	ta = checktranspose(L, 6);
	m = ta == CblasNoTrans ? A->rows : A->cols;
	n = ta == CblasNoTrans ? A->cols : A->rows;
	luaL_argcheck(L, x->size == n, 2, "dimension mismatch");
	luaL_argcheck(L, y->size == m, 3, "dimension mismatch");

	/* invoke subprogram */
	cblas_sgemv(A->order, ta, A->rows, A->cols, alpha, A->values,
			A->ld, x->values, x->inc, beta, y->values, y->inc);
	return 0;
}

/* invokes the GER subprogram (A <- alpha x y' + A) */
static int ger (lua_State *L) {
	struct vector	*x, *y;
	struct matrix	*A;
	float		alpha;

	/* check and process arguments */
	x = luaL_checkudata(L, 1, LUALINEAR_VECTOR_METATABLE);
	y = luaL_checkudata(L, 2, LUALINEAR_VECTOR_METATABLE);
	A = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE);
	alpha = luaL_optnumber(L, 4, 1.0);
	luaL_argcheck(L, x->size == A->rows, 1, "dimension mismatch");
	luaL_argcheck(L, y->size == A->cols, 2, "dimension mismatch");

	/* invoke subprogram */
	cblas_sger(A->order, A->rows, A->cols, alpha, x->values, x->inc,
			y->values, y->inc, A->values, A->ld);
	return 0;
}

/* invokes the GEMM subprogram (C <- alpha A B + beta C) */
static int gemm (lua_State *L) {
	struct matrix	*A, *B, *C;
	float		alpha, beta;
	CBLAS_TRANSPOSE	ta, tb;
	int		m, n, ka, kb;

	/* check and process arguments; ka/kb are the shared inner
	   dimensions of op(A) and op(B) */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, B->order == A->order, 2, "order mismatch");
	C = luaL_checkudata(L, 3, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, C->order == A->order, 3, "order mismatch");
	alpha = luaL_optnumber(L, 4, 1.0);
	beta = luaL_optnumber(L, 5, 0.0);
	ta = checktranspose(L, 6);
	tb = checktranspose(L, 7);
	m = ta == CblasNoTrans ? A->rows : A->cols;
	n = tb == CblasNoTrans ? B->cols : B->rows;
	ka = ta == CblasNoTrans ? A->cols : A->rows;
	kb = tb == CblasNoTrans ?
B->rows : B->cols;
	luaL_argcheck(L, ka == kb, 2, "dimension mismatch");

	/* invoke subprogram */
	cblas_sgemm(A->order, ta, tb, m, n, ka, alpha, A->values, A->ld,
			B->values, B->ld, beta, C->values, C->ld);
	return 0;
}

/* invokes the GESV subprogram: solves A X = B in place; A is overwritten
   with its LU factors and B with the solution; pushes the LAPACK return
   code (0 on success) */
static int gesv (lua_State *L) {
	struct matrix	*A, *B;
	int		*ipiv, result;

	/* check and process arguments */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, A->rows == A->cols, 1, "not square");
	B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, B->order == A->order, 2, "order mismatch");
	luaL_argcheck(L, B->rows == A->rows, 2, "dimension mismatch");

	/* invoke subprogram */
	ipiv = calloc(A->rows, sizeof(lapack_int));
	if (ipiv == NULL) {
		return luaL_error(L, "cannot allocate indexes");
	}
	result = LAPACKE_sgesv(A->order, A->rows, B->cols, A->values, A->ld,
			ipiv, B->values, B->ld);
	free(ipiv);
	lua_pushinteger(L, result);
	return 1;
}

/* invokes the GELS subprogram: least-squares solution of an over- or
   under-determined system, in place in B; pushes the LAPACK return code */
static int gels (lua_State *L) {
	struct matrix	*A, *B;
	char		ta;

	/* check and process arguments; B must have max(rows, cols) rows */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, B->order == A->order, 2, "order mismatch");
	ta = lapacktranspose(checktranspose(L, 3));
	luaL_argcheck(L, B->rows == (A->rows >= A->cols ?
			A->rows : A->cols), 2, "dimension mismatch");

	/* invoke subprogram */
	lua_pushinteger(L, LAPACKE_sgels(A->order, ta, A->rows, A->cols,
			B->cols, A->values, A->ld, B->values, B->ld));
	return 1;
}

/* calculates the inverse of a matrix in place via LU factorization;
   pushes the LAPACK return code (non-zero means singular) */
static int inv (lua_State *L) {
	struct matrix	*A;
	int		*ipiv, result;

	/* check and process arguments */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, A->rows == A->cols, 1, "not square");

	/* invoke subprograms */
	ipiv = calloc(A->rows, sizeof(lapack_int));
	if (ipiv == NULL) {
		return luaL_error(L, "cannot allocate indexes");
	}
	result = LAPACKE_sgetrf(A->order, A->rows, A->cols, A->values, A->ld,
			ipiv);
	if (result != 0) {
		free(ipiv);
		lua_pushinteger(L, result);
		return 1;
	}
	result = LAPACKE_sgetri(A->order, A->rows, A->values, A->ld, ipiv);
	free(ipiv);
	lua_pushinteger(L, result);
	return 1;
}

/* calculates the determinant of a matrix from the diagonal of its LU
   factorization, computed on a scratch copy so A is left untouched */
static int det (lua_State *L) {
	struct matrix	*A;
	float		*copy, *d, *s, det;
	int		n, *ipiv, result, neg, i;

	/* check and process arguments */
	A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE);
	luaL_argcheck(L, A->rows == A->cols, 1, "not square");
	n = A->rows;

	/* copy matrix: pack the n major-dimension vectors (stride A->ld)
	   into a dense n x n scratch buffer */
	copy = calloc((size_t)n * n, sizeof(float));
	if (copy == NULL) {
		return luaL_error(L, "cannot allocate values");
	}
	d = copy;
	s = A->values;
	for (i = 0; i < n; i++) {
		memcpy(d, s, (size_t)n * sizeof(float));
		d += n;
		s += A->ld;
	}

	/* invoke subprograms */
	ipiv = calloc(n, sizeof(lapack_int));
	if (ipiv == NULL) {
		free(copy);
		return luaL_error(L, "cannot allocate indexes");
	}
	result = LAPACKE_sgetrf(A->order, n, n, copy, n, ipiv);
	if (result != 0) {
		/* singular matrix: determinant is zero */
		free(copy);
		free(ipiv);
		lua_pushnumber(L, 0.0);
		return 1;
	}

	/* calculate determinant as the product of the U diagonal; each
	   row interchange recorded in ipiv flips the sign */
	det = 1.0;
	neg = 0;
	for (i = 0; i < n; i++) {
		det *= copy[(size_t)i * n + i];
		if (ipiv[i] != i + 1) {
			neg = !neg;
		}
	}
	free(copy);
	free(ipiv);
	lua_pushnumber(L, neg ?
-det : det); return 1; } /* calculates the covariance of a matrix */ static int cov (lua_State *L) { struct matrix *A, *B; int ddof, i, j, k; float *means, *v, *vi, *vj, sum; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->cols == B->rows, 2, "dimension mismatch"); luaL_argcheck(L, B->rows == B->cols, 2, "not square"); ddof = luaL_optinteger(L, 3, 0); luaL_argcheck(L, ddof >= 0 && ddof < A->rows, 3, "bad ddof"); /* calculate means */ means = calloc((size_t)A->cols, sizeof(float)); if (means == NULL) { return luaL_error(L, "cannot allocate values"); } switch (A->order) { case CblasRowMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += *v; v += A->ld; } means[i] = sum / A->rows; } break; case CblasColMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[(size_t)i * A->ld]; for (j = 0; j < A->rows; j++) { sum += *v; v++; } means[i] = sum / A->rows; } break; } /* calculate covariance */ switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[i]; vj = &A->values[j]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi += A->ld; vj += A->ld; } B->values[(size_t)i * B->ld + j] = B->values[ (size_t)j * B->ld + i] = sum / (A->rows - ddof); } } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { 
sum = 0.0; vi = &A->values[(size_t)i * A->ld]; vj = &A->values[(size_t)j * A->ld]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi++; vj++; } B->values[(size_t)i * B->ld + j] = B->values[ (size_t)j * B->ld + i] = sum / (A->rows - ddof); } } break; } free(means); return 0; } /* calculates the correlation of a matrix */ static int corr (lua_State *L) { struct matrix *A, *B; int i, j, k; float *means, *stds, *v, *vi, *vj, sum; /* check and process arguments */ A = luaL_checkudata(L, 1, LUALINEAR_MATRIX_METATABLE); B = luaL_checkudata(L, 2, LUALINEAR_MATRIX_METATABLE); luaL_argcheck(L, A->cols == B->rows, 2, "dimension mismatch"); luaL_argcheck(L, B->rows == B->cols, 2, "not square"); /* calculate means and stds */ means = calloc((size_t)A->cols, sizeof(float)); if (means == NULL) { return luaL_error(L, "cannot allocate values"); } stds = calloc((size_t)A->cols, sizeof(float)); if (stds == NULL) { free(means); return luaL_error(L, "cannot allocate values"); } switch (A->order) { case CblasRowMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += *v; v += A->ld; } means[i] = sum / A->rows; sum = 0.0; v = &A->values[i]; for (j = 0; j < A->rows; j++) { sum += (*v - means[i]) * (*v - means[i]); v += A->ld; } stds[i] = sqrt(sum); } break; case CblasColMajor: #pragma omp parallel for private(i, j, sum, v) schedule(auto) \ if(A->rows * A->cols >= LUALINEAR_OMP_MINSIZE) for (i = 0; i < A->cols; i++) { sum = 0.0; v = &A->values[(size_t)i * A->ld]; for (j = 0; j < A->rows; j++) { sum += *v; v++; } means[i] = sum / A->rows; sum = 0.0; v = &A->values[(size_t)i * A->ld]; for (j = 0; j < A->rows; j++) { sum += (*v - means[i]) * (*v - means[i]); v++; } stds[i] = sqrt(sum); } break; } /* calculate correlation */ switch (A->order) { case CblasRowMajor: for (i = 0; i < A->cols; i++) { #pragma 
omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[i]; vj = &A->values[j]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi += A->ld; vj += A->ld; } B->values[(size_t)i * B->ld + j] = B->values[ (size_t)j * B->ld + i] = sum / (stds[i] * stds[j]); } } break; case CblasColMajor: for (i = 0; i < A->cols; i++) { #pragma omp parallel for private(j, k, sum, vi, vj) \ schedule(auto) if(A->rows * (A->cols \ - i) >= LUALINEAR_OMP_MINSIZE) for (j = i; j < A->cols; j++) { sum = 0.0; vi = &A->values[(size_t)i * A->ld]; vj = &A->values[(size_t)j * A->ld]; for (k = 0; k < A->rows; k++) { sum += (*vi - means[i]) * (*vj - means[j]); vi++; vj++; } B->values[(size_t)i * B->ld + j] = B->values[ (size_t)j * B->ld + i] = sum / (stds[i] * stds[j]); } } break; } free(means); free(stds); return 0; } /* * Exported functions. */ int luaopen_linear (lua_State *L) { static const luaL_Reg FUNCTIONS[] = { { "vector", vector }, { "matrix", matrix }, { "type", type }, { "size", size }, { "tvector", tvector }, { "sub", sub }, { "unwind", unwind }, { "reshape", reshape }, { "totable", totable }, { "tolinear", tolinear }, { "dot", dot }, { "nrm2", nrm2 }, { "asum", asum }, { "iamax", iamax }, { "sum", sum }, { "swap", swap }, { "copy", copy }, { "axpy", axpy }, { "scal", scal }, { "set", set }, { "uniform", uniform }, { "normal", normal }, { "inc", inc }, { "mul", mul }, { "pow", powx }, { "sign", sign }, { "abs", absx }, { "exp", expx }, { "log", logx }, { "logistic", logistic }, { "tanh", tanhx }, { "softplus", softplus }, { "rectifier", rectifier }, { "apply", applyx }, { "gemv", gemv }, { "ger", ger }, { "gemm", gemm }, { "gesv", gesv }, { "gels", gels }, { "inv", inv }, { "det", det }, { "cov", cov }, { "corr", corr }, { NULL, NULL } }; /* register functions */ #if LUA_VERSION_NUM >= 502 luaL_newlib(L, FUNCTIONS); #else luaL_register(L, 
luaL_checkstring(L, 1), FUNCTIONS); #endif /* vector metatable */ luaL_newmetatable(L, LUALINEAR_VECTOR_METATABLE); lua_pushcfunction(L, vector_len); lua_setfield(L, -2, "__len"); lua_pushcfunction(L, vector_index); lua_setfield(L, -2, "__index"); lua_pushcfunction(L, vector_newindex); lua_setfield(L, -2, "__newindex"); lua_pushcfunction(L, vector_ipairs); lua_setfield(L, -2, "__ipairs"); lua_pushcfunction(L, vector_tostring); lua_setfield(L, -2, "__tostring"); lua_pushcfunction(L, vector_free); lua_setfield(L, -2, "__gc"); lua_pop(L, 1); /* matrix metatable */ luaL_newmetatable(L, LUALINEAR_MATRIX_METATABLE); lua_pushcfunction(L, matrix_len); lua_setfield(L, -2, "__len"); lua_pushcfunction(L, matrix_index); lua_setfield(L, -2, "__index"); lua_pushcfunction(L, matrix_ipairs); lua_setfield(L, -2, "__ipairs"); lua_pushcfunction(L, matrix_tostring); lua_setfield(L, -2, "__tostring"); lua_pushcfunction(L, matrix_free); lua_setfield(L, -2, "__gc"); lua_pop(L, 1); return 1; }
{ "alphanum_fraction": 0.5999652898, "avg_line_length": 23.8099173554, "ext": "c", "hexsha": "e04cb06194fd86400dee21c3fae83e5be8cbd61e", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2022-03-14T09:33:02.000Z", "max_forks_repo_forks_event_min_datetime": "2020-08-05T23:58:02.000Z", "max_forks_repo_head_hexsha": "6bf65edd89ae89589f6d4ae76ddc7f2f934e5a79", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "anaef/lua-linear", "max_forks_repo_path": "linear.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "6bf65edd89ae89589f6d4ae76ddc7f2f934e5a79", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "anaef/lua-linear", "max_issues_repo_path": "linear.c", "max_line_length": 74, "max_stars_count": 4, "max_stars_repo_head_hexsha": "6bf65edd89ae89589f6d4ae76ddc7f2f934e5a79", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "anaef/lua-linear", "max_stars_repo_path": "linear.c", "max_stars_repo_stars_event_max_datetime": "2022-03-14T09:33:01.000Z", "max_stars_repo_stars_event_min_datetime": "2019-07-21T00:09:52.000Z", "num_tokens": 15951, "size": 46096 }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "parmt_utils.h" #ifdef PARMT_USE_INTEL #include <mkl_cblas.h> #else #include <cblas.h> #endif #include "iscl/memory/memory.h" /*! * @brief Converts the NED based fundamental faults to Green's functions which * scale the NED based moment tensor terms s.t. \f$ u = G m \f$. * where \f$ m = \{ m_{xx}, m_{yy}, m_{zz}, m_{xy}, m_{xz}, m_{yz} \} \f$ * are the NED moment tensor terms and \f$ u \f$ is the estimate for * the icomp'th component with source receiver azimuth, az, and given * channel orientation encapsulated by cmpaz and cmpinc. * * @param[in] npgrns number of points in greens functions * @param[in] ldg leading dimension of G (>= 6) * @param[in] icomp =1 for vertical Green's functions. * =2 for north (channel 2) Green's functions. * =3 for east (channel 3) Green's functions. * @param[in] azin source to receiver azimuth (degrees) * @param[in] bazin receiver to source azimuth (degrees). if you * do not know then set this to: * fmod(az + 180.0, 360.0) * @param[in] cmpaz component azimuth (0 north, +90 east) * @param[in] cmpinc component inclinantion (-90 up, 0 east/north, +90 down) * @param[in] ZDS vertical greens fn for 90 degree dip slip [npgrns] * @param[in] ZSS vertical greens fn for vertical strike slip [npgrns] * @param[in] ZDD vertical greens fn for 45 degree dip slip [npgrns] * @param[in] ZEX vertical greens fn for an explosion [npgrns] * @param[in] RDS radial greens fn for 90 degree dip slip [npgrns] * @param[in] RSS radial greens fn for vertical strike slip [npgrns] * @param[in] RDD radial greens fn for 45 degree dip slip [npgrns] * @param[in] REX radial greens fn for an explosion [npgrns] * @param[in] TDS transverse greens fn for 90 degree dip slip [npgrns] * @param[in] TSS transverse greens fn for vertical strike slip [npgrns] * * @param[out] G row major Green's functions matrix [npgrns x ldg]. 
* the columns are ordered: * \f$ \{ G_{xx}, G_{yy}, G_{zz}, * G_{xy}, G_{xz}, G_{yz} \} \f$ * * @result 0 indicates success * * @author Ben Baker * * @copyright ISTI distributed under Apache 2 * */ int parmt_utils_ff2mtGreensMatrix64f(const int npgrns, const int ldg, const int icomp, const double azin, const double bazin, const double cmpaz, const double cmpinc, const double *__restrict__ ZDS, const double *__restrict__ ZSS, const double *__restrict__ ZDD, const double *__restrict__ ZEX, const double *__restrict__ RDS, const double *__restrict__ RSS, const double *__restrict__ RDD, const double *__restrict__ REX, const double *__restrict__ TDS, const double *__restrict__ TSS, double *__restrict__ G) { double *Gxx, *Gyy, *Gzz, *Gxy, *Gxz, *Gyz; int ierr; ierr = 0; if (npgrns < 1 || ldg < 6 || G == NULL) { if (npgrns < 1) { fprintf(stderr, "%s: No points in greens fns\n", __func__); } if (ldg < 6){fprintf(stderr, "%s: ldg must be >= 6\n", __func__);} if (G == NULL){fprintf(stderr, "%s: Error G is NULL\n", __func__);} return -1; } Gxx = memory_calloc64f(npgrns); Gyy = memory_calloc64f(npgrns); Gzz = memory_calloc64f(npgrns); Gxy = memory_calloc64f(npgrns); Gxz = memory_calloc64f(npgrns); Gyz = memory_calloc64f(npgrns); ierr = parmt_utils_ff2mtGreens64f(npgrns, icomp, azin, bazin, cmpaz, cmpinc, ZDS, ZSS, ZDD, ZEX, RDS, RSS, RDD, REX, TDS, TSS, Gxx, Gyy, Gzz, Gxy, Gxz, Gyz); if (ierr != 0) { fprintf(stderr, "%s: Failed to compute greens functions columns\n", __func__); } else { cblas_dcopy(npgrns, Gxx, 1, &G[0], ldg); cblas_dcopy(npgrns, Gyy, 1, &G[1], ldg); cblas_dcopy(npgrns, Gzz, 1, &G[2], ldg); cblas_dcopy(npgrns, Gxy, 1, &G[3], ldg); cblas_dcopy(npgrns, Gxz, 1, &G[4], ldg); cblas_dcopy(npgrns, Gyz, 1, &G[5], ldg); } memory_free64f(&Gxx); memory_free64f(&Gyy); memory_free64f(&Gzz); memory_free64f(&Gxy); memory_free64f(&Gxz); memory_free64f(&Gyz); return ierr; } //============================================================================// /*! 
 * @brief Converts the NED based fundamental faults to Green's functions which
 *        scale the NED based moment tensor terms s.t. \f$ u = G m \f$.
 *        where \f$ m = \{ m_{xx}, m_{yy}, m_{zz}, m_{xy}, m_{xz}, m_{yz} \} \f$
 *        are the NED moment tensor terms and \f$ u \f$ is the estimate for
 *        the icomp'th component with source receiver azimuth, az, and given
 *        channel orientation encapsulated by cmpaz and cmpinc.
 *
 * @param[in] npgrns   number of points in greens functions
 * @param[in] icomp    =1 for vertical Green's functions.
 *                     =2 for north (channel 2) Green's functions.
 *                     =3 for east (channel 3) Green's functions.
 * @param[in] azin     source to receiver azimuth (degrees)
 * @param[in] bazin    receiver to source azimuth (degrees).  if you
 *                     do not know then set this to:
 *                     fmod(az + 180.0, 360.0)
 * @param[in] cmpaz    component azimuth (0 north, +90 east)
 * @param[in] cmpinc   component inclinantion (-90 up, 0 east/north, +90 down)
 * @param[in] ZDS      vertical greens fn for 90 degree dip slip [npgrns]
 * @param[in] ZSS      vertical greens fn for vertical strike slip [npgrns]
 * @param[in] ZDD      vertical greens fn for 45 degree dip slip [npgrns]
 * @param[in] ZEX      vertical greens fn for an explosion [npgrns]
 * @param[in] RDS      radial greens fn for 90 degree dip slip [npgrns]
 * @param[in] RSS      radial greens fn for vertical strike slip [npgrns]
 * @param[in] RDD      radial greens fn for 45 degree dip slip [npgrns]
 * @param[in] REX      radial greens fn for an explosion [npgrns]
 * @param[in] TDS      transverse greens fn for 90 degree dip slip [npgrns]
 * @param[in] TSS      transverse greens fn for vertical strike slip [npgrns]
 *
 * @param[out] Gxx     greens function scaling mxx moment tensor term [npgrns]
 * @param[out] Gyy     greens function scaling myy moment tensor term [npgrns]
 * @param[out] Gzz     greens function scaling mzz moment tensor term [npgrns]
 * @param[out] Gxy     greens function scaling mxy moment tensor term [npgrns]
 * @param[out] Gxz     greens function scaling mxz moment tensor term [npgrns]
 * @param[out] Gyz     greens function scaling myz moment tensor term [npgrns]
 *
 * @result 0 indicates success
 *
 * @author Ben Baker
 *
 * @copyright ISTI distributed under Apache 2
 *
 */
int parmt_utils_ff2mtGreens64f(const int npgrns, const int icomp,
                               const double azin, const double bazin,
                               const double cmpaz, const double cmpinc,
                               const double *__restrict__ ZDS,
                               const double *__restrict__ ZSS,
                               const double *__restrict__ ZDD,
                               const double *__restrict__ ZEX,
                               const double *__restrict__ RDS,
                               const double *__restrict__ RSS,
                               const double *__restrict__ RDD,
                               const double *__restrict__ REX,
                               const double *__restrict__ TDS,
                               const double *__restrict__ TSS,
                               double *__restrict__ Gxx,
                               double *__restrict__ Gyy,
                               double *__restrict__ Gzz,
                               double *__restrict__ Gxy,
                               double *__restrict__ Gxz,
                               double *__restrict__ Gyz)
{
    double az, baz, c2a, ca, caz, cos_caz, cost,
           gxx_e, gxy_e, gxz_e, gyy_e, gyz_e, gzz_e,
           gxx_n, gxy_n, gxz_n, gyy_n, gyz_n, gzz_n,
           gxx_r, gxy_r, gxz_r, gyy_r, gyz_r, gzz_r,
           gxx_t, gxy_t, gxz_t, gyy_t, gyz_t, gzz_t,
           gxx_z, gxy_z, gxz_z, gyy_z, gyz_z, gzz_z,
           rex, rdd, rds, rss, s2a, sa, sin_caz, sint,
           tds, tss, xscal, zex, zdd, zds, zss;
    int i;
    const double pi180 = M_PI/180.0;
    const double half = 0.5;
    const double sixth = 1.0/6.0;
    const double third = 1.0/3.0;
    //------------------------------------------------------------------------//
    //
    // Argument validation: only the fundamental faults actually needed by
    // the requested component are required to be non-NULL.
    //
    if (icomp < 1 || icomp > 3)
    {
        fprintf(stderr, "%s: Invalid component: %d\n", __func__, icomp);
        return -1;
    }
    if (icomp == 1)
    {
        if (ZDS == NULL || ZSS == NULL || ZDD == NULL || ZEX == NULL)
        {
            if (ZDS == NULL){fprintf(stderr, "%s: zds is NULL\n", __func__);}
            if (ZSS == NULL){fprintf(stderr, "%s: zss is NULL\n", __func__);}
            if (ZDD == NULL){fprintf(stderr, "%s: zdd is NULL\n", __func__);}
            if (ZEX == NULL){fprintf(stderr, "%s: zex is NULL\n", __func__);}
            return -1;
        }
    }
    else
    {
        if (RDS == NULL || RSS == NULL || RDD == NULL || REX == NULL ||
            TDS == NULL || TSS == NULL)
        {
            if (RDS == NULL){fprintf(stderr, "%s: rds is NULL\n", __func__);}
            if (RSS == NULL){fprintf(stderr, "%s: rss is NULL\n", __func__);}
            if (RDD == NULL){fprintf(stderr, "%s: rdd is NULL\n", __func__);}
            if (REX == NULL){fprintf(stderr, "%s: rex is NULL\n", __func__);}
            if (TDS == NULL){fprintf(stderr, "%s: tds is NULL\n", __func__);}
            if (TSS == NULL){fprintf(stderr, "%s: tss is NULL\n", __func__);}
            return -1;
        }
    }
    // Get the backazimuth (taken as given; see bazin doc above)
    az = azin;
    baz = bazin; //fmod(az + 180.0, 360.0);
    // Convert to radians
    az = az*pi180;
    baz = baz*pi180;
    caz = cmpaz*pi180;
    // Compute the geometric factors
    sa = sin(az);
    ca = cos(az);
    s2a = sin(2.*az);
    c2a = cos(2.*az);
    sint = sin(baz);
    cost = cos(baz);
    cos_caz = cos(caz);
    sin_caz = sin(caz);
    // the cmpaz will come from the metadata so fix that: for the east
    // (channel 3) component shift the component azimuth by -90 degrees
    if (icomp == 3)
    {
        cos_caz = cos(caz - M_PI/2.0);
        sin_caz = sin(caz - M_PI/2.0);
    }
    // Set the columns (Minson et. al. 2008); a component inclination of
    // +90 (down) flips the polarity
    xscal = 1.0;
    if (fabs(cmpinc - 90.0) < 1.e-4){xscal =-1.0;}
    if (icomp == 1)
    {
        // Vertical component: combine the vertical fundamental faults
        #pragma omp simd
        for (i=0; i<npgrns; i++)
        {
            zss = ZSS[i];
            zdd = ZDD[i];
            zds = ZDS[i];
            zex = ZEX[i];
            // compute the greens function corresponding to the mt
            gxx_z = half*(zss*c2a) - sixth*zdd + third*zex;
            gyy_z =-half*(zss*c2a) - sixth*zdd + third*zex;
            gzz_z = third*(zdd + zex);
            gxy_z = zss*s2a;
            gxz_z = zds*ca;
            gyz_z = zds*sa;
            // copy and include polarity so it matches the observation
            Gxx[i] = xscal*gxx_z;
            Gyy[i] = xscal*gyy_z;
            Gzz[i] = xscal*gzz_z;
            Gxy[i] = xscal*gxy_z;
            Gxz[i] = xscal*gxz_z;
            Gyz[i] = xscal*gyz_z;
        } // Loop on points
    }
    else
    {
        // North or 2 component
        if (icomp == 2)
        {
            // Loop on rows (data points)
            //#pragma omp simd
            for (i=0; i<npgrns; i++)
            {
                rss = RSS[i];
                rdd = RDD[i];
                rds = RDS[i];
                rex = REX[i];
                tds = TDS[i];
                tss = TSS[i];
                // radial-component Green's functions
                gxx_r = half*(rss*c2a) - sixth*rdd + third*rex;
                gyy_r =-half*(rss*c2a) - sixth*rdd + third*rex;
                gzz_r = third*(rdd + rex);
                gxy_r = rss*s2a;
                gxz_r = rds*ca;
                gyz_r = rds*sa;
                // transverse-component Green's functions
                gxx_t = half*tss*s2a;
                gyy_t =-half*tss*s2a;
                gzz_t = 0.0;
                gxy_t =-tss*c2a;
                gxz_t = tds*sa;
                gyz_t =-tds*ca;
                // Rotate from (r, t) -> (n, e)
                // n = cos(baz-180)*r - sin(baz-180)*t
                //   =-cos(baz)*r + sin(baz)*t
                // e = sin(baz-180)*r + cos(baz-180)*t
                //   =-sin(baz)*r - cos(baz)*t
                gxx_n =-cost*gxx_r + sint*gxx_t;
                gxx_e =-sint*gxx_r - cost*gxx_t;
                gyy_n =-cost*gyy_r + sint*gyy_t;
                gyy_e =-sint*gyy_r - cost*gyy_t;
                gzz_n =-cost*gzz_r + sint*gzz_t;
                gzz_e =-sint*gzz_r - cost*gzz_t;
                gxy_n =-cost*gxy_r + sint*gxy_t;
                gxy_e =-sint*gxy_r - cost*gxy_t;
                gxz_n =-cost*gxz_r + sint*gxz_t;
                gxz_e =-sint*gxz_r - cost*gxz_t;
                gyz_n =-cost*gyz_r + sint*gyz_t;
                gyz_e =-sint*gyz_r - cost*gyz_t;
                // Rotate from (n, e) -> (1, 2) around az
                // 1 = cos(comp_az)*n + sin(comp_az)*e
                Gxx[i] = cos_caz*gxx_n + sin_caz*gxx_e;
                Gyy[i] = cos_caz*gyy_n + sin_caz*gyy_e;
                Gzz[i] = cos_caz*gzz_n + sin_caz*gzz_e;
                Gxy[i] = cos_caz*gxy_n + sin_caz*gxy_e;
                Gxz[i] = cos_caz*gxz_n + sin_caz*gxz_e;
                Gyz[i] = cos_caz*gyz_n + sin_caz*gyz_e;
            } // Loop on points
        }
        // East or 3 component
        else
        {
            // Loop on rows (data points)
            #pragma omp simd
            for (i=0; i<npgrns; i++)
            {
                rss = RSS[i];
                rdd = RDD[i];
                rds = RDS[i];
                rex = REX[i];
                tds = TDS[i];
                tss = TSS[i];
                // radial-component Green's functions
                gxx_r = half*(rss*c2a) - sixth*rdd + third*rex;
                gyy_r =-half*(rss*c2a) - sixth*rdd + third*rex;
                gzz_r = third*(rdd + rex);
                gxy_r = rss*s2a;
                gxz_r = rds*ca;
                gyz_r = rds*sa;
                // transverse-component Green's functions
                gxx_t = half*tss*s2a;
                gyy_t =-half*tss*s2a;
                gzz_t = 0.0;
                gxy_t =-tss*c2a;
                gxz_t = tds*sa;
                gyz_t =-tds*ca;
                // Rotate from (r, t) -> (n, e)
                // n = cos(baz-180)*r - sin(baz-180)*t
                //   =-cos(baz)*r + sint(baz)*t
                // e = sin(baz-180)*r + cos(baz-180)*t
                //   =-sin(baz)*r - cos(baz)*t
                gxx_n =-cost*gxx_r + sint*gxx_t;
                gxx_e =-sint*gxx_r - cost*gxx_t;
                gyy_n =-cost*gyy_r + sint*gyy_t;
                gyy_e =-sint*gyy_r - cost*gyy_t;
                gzz_n =-cost*gzz_r + sint*gzz_t;
                gzz_e =-sint*gzz_r - cost*gzz_t;
                gxy_n =-cost*gxy_r + sint*gxy_t;
                gxy_e =-sint*gxy_r - cost*gxy_t;
                gxz_n =-cost*gxz_r + sint*gxz_t;
                gxz_e =-sint*gxz_r - cost*gxz_t;
                gyz_n =-cost*gyz_r + sint*gyz_t;
                gyz_e =-sint*gyz_r - cost*gyz_t;
                // Rotate from (n, e) -> (1, 2) around az
                // 2 =-sin(comp_az)*n + cos(comp_az)*e
                Gxx[i] =-sin_caz*gxx_n + cos_caz*gxx_e;
                Gyy[i] =-sin_caz*gyy_n + cos_caz*gyy_e;
                Gzz[i] =-sin_caz*gzz_n + cos_caz*gzz_e;
                Gxy[i] =-sin_caz*gxy_n + cos_caz*gxy_e;
                Gxz[i] =-sin_caz*gxz_n + cos_caz*gxz_e;
                Gyz[i] =-sin_caz*gyz_n + cos_caz*gyz_e;
            } // Loop on points
        } // End check on 1 or 2 component
    } // End check on component
    return 0;
}
{ "alphanum_fraction": 0.4882508093, "avg_line_length": 42.4478371501, "ext": "c", "hexsha": "e7943f724e6e5dcc1a84d6e9b6a5fd37ef8dd60f", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "2b4097df02ef5e56407d40e821d5c7155c2e4416", "max_forks_repo_licenses": [ "Intel" ], "max_forks_repo_name": "bakerb845/parmt", "max_forks_repo_path": "utils/ff2mtGreens.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "2b4097df02ef5e56407d40e821d5c7155c2e4416", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Intel" ], "max_issues_repo_name": "bakerb845/parmt", "max_issues_repo_path": "utils/ff2mtGreens.c", "max_line_length": 80, "max_stars_count": null, "max_stars_repo_head_hexsha": "2b4097df02ef5e56407d40e821d5c7155c2e4416", "max_stars_repo_licenses": [ "Intel" ], "max_stars_repo_name": "bakerb845/parmt", "max_stars_repo_path": "utils/ff2mtGreens.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4838, "size": 16682 }
#pragma once #include <gsl/gsl_matrix.h> #include <gsl/gsl_vector.h> #include <tiny_dnn/tiny_dnn.h> #include "defines.h" #include "matrix_vector_ops.h" #include "genann.h" #define NUM_STATES 12 #define NUM_INPUTS 4 // Dynamics model from G. Gremillion, S. Humbert paper "System Identification of // a Quadrotor Micro Air Vehicle" (equation 3 in paper) // Augmented to include position dynamics in hover // Dynamics and controls matrix: // cos(ps)*u -sin(ps)*v 0 0 0 0 0 0 0 0 0 0 0 0 0 0 // sin(ps)*u cos(ps)*v 0 0 0 0 0 0 0 0 0 0 0 0 0 0 // 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 // 0 0 0 X_u*u 0 0 0 0 0 0 X_th*th 0 0 0 0 0 // 0 0 0 0 Y_v*v 0 0 0 0 Y_ph*ph 0 0 0 0 0 0 // 0 0 0 0 0 Z_w*w 0 0 0 0 0 0 0 0 0 Z_thr*d_thr // 0 0 0 0 0 0 L_p*p 0 0 L_ph*ph 0 0 L_la*d_la 0 0 0 // 0 0 0 0 0 0 0 M_q*q 0 0 M_th*th 0 0 M_lo*d_lo 0 0 // 0 0 0 0 0 0 0 0 N_r*r 0 0 0 0 0 N_ya*d_ya 0 // 0 0 0 0 0 0 ph_p*p 0 0 0 0 0 ph_la*d_la 0 0 0 // 0 0 0 0 0 0 0 th_q*q 0 0 0 0 0 th_lo*d_lo 0 0 // 0 0 0 0 0 0 0 0 ps_r*r 0 0 0 0 0 ps_ya*d_ya 0 // // Parameters: // X_u = -0.27996 // Y_v = -0.22566 // Z_w = -1.2991 // L_p = -2.5110 // M_q = -2.4467 // N_r = -0.4948 // X_th = -10.067 // Y_ph = 9.8648 // L_ph = -21.358 // M_th = -18.664 // ph_p = 0.9655 // th_q = 0.9634 // ps_r = 0.6748 // Z_thr = -39.282 // L_la = 11.468 // M_lo = 9.5711 // N_ya = 3.5647 // ph_la = 0.0744 // th_lo = 0.0594 // ps_ya = 0.0397 // // State vector: // x y z u v w p q r phi theta psi // // Control vector: // del_lat del_lon del_yaw del_thrust int dynamics(gsl_vector *dy, double t, const gsl_vector *y, const gsl_vector *u); int setupDynamics(); int teardownDynamics(); gsl_vector *feedback(gsl_vector *yd, gsl_vector *y); int setupFeedback(); int teardownFeedback(); gsl_vector *nnFeedback(gsl_vector *yd, gsl_vector *y); int setupNnFeedback(int dataShape[2]); int setupNnFeedback(FILE *in); int teardownNnFeedback(); genann *getFeedbackNn();
{ "alphanum_fraction": 0.4737451737, "avg_line_length": 35.9722222222, "ext": "h", "hexsha": "a1335b98eaa3606832c78856715d6defdf1119ae", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "d04a8fa496ec414e2cffdc70ee0beda85e0d7cb4", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "umd-agrc/SimpleControlSim", "max_forks_repo_path": "dynamics.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "d04a8fa496ec414e2cffdc70ee0beda85e0d7cb4", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "umd-agrc/SimpleControlSim", "max_issues_repo_path": "dynamics.h", "max_line_length": 107, "max_stars_count": null, "max_stars_repo_head_hexsha": "d04a8fa496ec414e2cffdc70ee0beda85e0d7cb4", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "umd-agrc/SimpleControlSim", "max_stars_repo_path": "dynamics.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1147, "size": 2590 }
/**
 * Subroutines used mostly for working on catalogs
 * in the tasks SEX2GOL and GOL2AF.
 *
 * Howard Bushouse, STScI, 04-Mar-2011, version 1.4
 * Changed FATAL error to WARN3 when get_valid_entries returns no valid
 * magnitudes for an object.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_interp.h>
#include "aXe_utils.h"
#include "aXe_grism.h"
#include "spc_cfg.h"
#include "disp_conf.h"
#include "aper_conf.h"
#include "trace_conf.h"
#include "spc_CD.h"
#include "spc_wl_calib.h"
#include "aper_conf.h"
#include "specmodel_utils.h"
#include "model_utils.h"
#include "spc_fluxcube.h"
#include "spc_model.h"
#include "crossdisp_utils.h"
#include "spc_utils.h"
#include "spc_sex.h"

// define SQUARE
#define SQR(x) ((x)*(x))

/**
 *
 * Function: create_SexObject
 * Allocates and creates a SExtractor object from
 * a line of a SExtractor catalog.
 *
 * Parameters:
 * @param actinfo  - the header structure with the column names
 * @param line     - the line to be transformed into a SexObject
 * @param waves    - vector with the wavelengths of the magnitude columns
 * @param cnums    - vector with the column numbers of the magnitude columns
 * @param magcol   - column number of the magnitude column selected
 *                   for mag_auto
 * @param fullinfo - decides whether a column is optional or not
 *
 * Returns:
 * @return o - the newly created SexObject
 */
SexObject *
create_SexObject(const colinfo *actinfo, char *line, const gsl_vector * waves,
                 const gsl_vector * cnums, const px_point backwin_cols,
                 const px_point modinfo_cols, const int magcol,
                 const int fullinfo)
{
  gsl_vector *v;
  gsl_vector *mags;
  gsl_vector *wavs;
  SexObject *o;
  int i=0;

  // transform the data in the line to a vector
  lv1ws (line);
  v = string_to_gsl_array (line);
  // NOTE(review): v does not appear to be freed before returning --
  // possible memory leak; confirm against gsl_vector ownership rules.

  // allocate space for the SexObject
  o = (SexObject *) malloc (sizeof (SexObject));
  if (o == NULL)
    aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                 "create_SexObject: Could not allocate memory.");

  // successively fill the data into the SexObject.
  // for 'fullinfo=1' missing data results in an error.
  // otherwise NaN is stored into the SexObject
  o->number = (int) get_col_value (actinfo, "NUMBER", v, 1);
  o->xy_image.x = get_col_value (actinfo, "X_IMAGE", v,fullinfo);
  o->xy_image.y = get_col_value (actinfo, "Y_IMAGE", v,fullinfo);
  o->xy_world.ra = get_col_value (actinfo, "X_WORLD", v,1);
  o->xy_world.dec = get_col_value (actinfo, "Y_WORLD", v,1);
  o->el_image.a = get_col_value (actinfo, "A_IMAGE", v,fullinfo);
  o->el_image.b = get_col_value (actinfo, "B_IMAGE", v,fullinfo);
  o->el_image.theta = get_col_value (actinfo, "THETA_IMAGE", v,fullinfo);
  o->el_world.a = get_col_value (actinfo, "A_WORLD", v,1);
  o->el_world.b = get_col_value (actinfo, "B_WORLD", v,1);

  // Some clarification is needed for the next two lines.
  // to have get_col_value and get_col_value2 is not
  // satisfying!! PROBLEM
  o->el_world.theta = get_col_value2 (actinfo, "THETA_WORLD", v,0);
  if (o->el_world.theta < -900000){
    // fall back to THETA_SKY when THETA_WORLD is absent/invalid
    //  o->el_world.theta = get_col_value (actinfo, "THETA_WORLD", v,0);
    //  if (isnan(o->el_world.theta)){
    o->el_world.theta = get_col_value (actinfo, "THETA_SKY", v,1);
  }

  // transfer the background-window size; -1 marks "not present"
  if (backwin_cols.x != -1)
    {
      o->backwindow.x = get_col_value (actinfo, "BACKWINDOW", v,1);
      o->backwindow.y = -1;
      // old code for FORS2-MXU
      //  o->backwindow.y = get_col_value (actinfo, "BACKWIN_LOW", v,1);
    }
  else
    {
      o->backwindow.x = -1;
      o->backwindow.y = -1;
    }

  // transfer the index for the model spectrum
  if (modinfo_cols.x > -1)
    o->modspec = (int)get_col_value(actinfo, "MODSPEC", v,1);
  else
    o->modspec = -1;

  // transfer the index value for the object shape
  if (modinfo_cols.y > -1)
    o->modimage = (int)get_col_value(actinfo, "MODIMAGE", v,1);
  else
    o->modimage = -1;

  // check whether the MAG_AUTO column exists
  if ((int)waves->size == 1 && (int)gsl_vector_get (waves, 0) == 0)
    {
      // fill the MAG_AUTO value;
      // put the magnitude and wavelength vectors to NULL
      o->magnitudes = NULL;
      o->lambdas = NULL;
      o->mag_auto = get_col_value (actinfo, "MAG_AUTO", v, 1);
    }
  else
    {
      // create the vectors for the magnitude values
      mags = gsl_vector_alloc ((int)waves->size);
      wavs = gsl_vector_alloc ((int)waves->size);

      // fill the magnitude values into the vectors and store them in
      // the SexObject.
      for (i=0; i < (int)waves->size; i++){
        gsl_vector_set(mags, i,
                       gsl_vector_get (v, (int)gsl_vector_get (cnums, i)-1));
        gsl_vector_set(wavs, i, gsl_vector_get (waves, i));
      }

      // fill the MAG_AUTO value
      o->magnitudes = mags;
      o->lambdas = wavs;
      o->mag_auto = gsl_vector_get (v, magcol-1);
    }

  return o;
}

/**
 * Function: SexObject_fprintf
 * Function to print the attributes of a SexObject
 * to an output stream.
 *
 * Parameters:
 * @param output - an output file or stream
 * @param o      - a pointer to a SexObject
 */
void
SexObject_fprintf (FILE * output, SexObject * o)
{
  fprintf (output, "NUMBER: %d\n", o->number);
  fprintf (output, "X_IMAGE: %e\n", o->xy_image.x);
  fprintf (output, "Y_IMAGE: %e\n", o->xy_image.y);
  fprintf (output, "X_WORLD: %e\n", o->xy_world.ra);
  fprintf (output, "Y_WORLD: %e\n", o->xy_world.dec);
  fprintf (output, "A_IMAGE: %e\n", o->el_image.a);
  fprintf (output, "B_IMAGE: %e\n", o->el_image.b);
  fprintf (output, "THETA_IMAGE: %e\n", o->el_image.theta);
  fprintf (output, "A_WORLD: %e\n", o->el_world.a);
  fprintf (output, "B_WORLD: %e\n", o->el_world.b);
  fprintf (output, "THETA_WORLD: %e\n", o->el_world.theta);
  fprintf (output, "MAG_AUTO: %e\n", o->mag_auto);
}

/**
 * Function: sobs_to_vout
 * The function transforms a SexObject into
 * a vector.
 *
 * Parameters:
 * @param sobs - the SexObject to be transformed
 *
 * Returns:
 * @return vout - the vector with the SexObject data
 */
gsl_vector *
sobs_to_vout(const SexObject *sobs)
{
  int nentries, i;
  int count=0;

  // determine the size of the vector:
  // 11 fixed entries plus either MAG_AUTO or the per-band magnitudes
  if (sobs->magnitudes){
    nentries = 11+sobs->magnitudes->size;
  }
  else{
    nentries = 12;
  }
  // optional entries enlarge the vector
  if(sobs->backwindow.x != -1)
    nentries = nentries+1;
    // old FORS2-MXU code:
    //nentries = nentries+2;
  if(sobs->modspec != -1)
    nentries++;
  if(sobs->modimage != -1)
    nentries++;

  // allocate space for the vector
  gsl_vector *vout = gsl_vector_alloc (nentries);

  // fill the vector (order must match make_GOL_header())
  gsl_vector_set (vout, count++, sobs->number);
  gsl_vector_set (vout, count++, sobs->xy_world.ra);
  gsl_vector_set (vout, count++, sobs->xy_world.dec);
  gsl_vector_set (vout, count++, sobs->el_world.a);
  gsl_vector_set (vout, count++, sobs->el_world.b);
  gsl_vector_set (vout, count++, sobs->el_world.theta);
  gsl_vector_set (vout, count++, sobs->xy_image.x);
  gsl_vector_set (vout, count++, sobs->xy_image.y);
  gsl_vector_set (vout, count++, sobs->el_image.a);
  gsl_vector_set (vout, count++, sobs->el_image.b);
  gsl_vector_set (vout, count++, sobs->el_image.theta);
  if(sobs->backwindow.x != -1)
    {
      gsl_vector_set (vout, count++, sobs->backwindow.x);
      // old code for FORS2-MXU
      //gsl_vector_set (vout, count++, sobs->backwindow.y);
    }
  if(sobs->modspec != -1)
    gsl_vector_set (vout, count++, sobs->modspec);
  if(sobs->modimage != -1)
    gsl_vector_set (vout, count++, sobs->modimage);

  // see whether there is MAG_AUTO
  if (sobs->magnitudes){
    // fill in the magnitude values
    for (i=0; i < (int)sobs->magnitudes->size; i++)
      gsl_vector_set (vout, count++, gsl_vector_get(sobs->magnitudes, i));
  }
  else{
    // fill in the MAG_AUTO value
    gsl_vector_set (vout, count++, sobs->mag_auto);
  }

  // return the vector
  return vout;
}

/**
 * Function: catalog_to_wcs
 * This function reads a sextractor catalog and using an input and an output
 * CD matrix, converts the world ra and dec coordinates to the output CD
 * matrix's image
 pixel coordinates.  Non existing (i.e. NaN) world RA and
 * DEC values are generated using
 * the pixel coordinates and the input CD matrix
 *
 * Parameters:
 * @param - infile a pointer to a char array containing the name of the
 *          input catalog
 * @param - outfile a pointer to a char array containing the name of the
 *          output catalog
 * @param - from_wcs a pointer to an existing WorldCoor structure containing
 *          the input catalog CD matrix
 * @param - to_wcs a pointer to an existing WorldCoor structure containing
 *          the output catalog CD matrix
 * @param - overwrite_wcs if set to 1 then input world coordinates are
 *          recomputed first using the from_wcs and the x,y image values
 * @param - overwrite_img if set to 1 then the x,y coordinates of the input
 *          image are first computed using the from_wcs and the
 *          world ra and dec info
 */
void
catalog_to_wcs (char grismfile[], int hdunum, char infile[], char outfile[],
                struct WorldCoor *from_wcs, struct WorldCoor *to_wcs,
                int distortion, int overwrite_wcs, int overwrite_img)
{
  FILE *fin, *fout;
  gsl_vector *v;
  gsl_vector *v_out;
  gsl_vector *waves;
  gsl_vector *cnums;
  gsl_matrix *coeffs=NULL;
  char Buffer[CATBUFFERSIZE];
  char line[CATBUFFERSIZE], str[CATBUFFERSIZE];
  int i, hasmags=0, magcencol = 0;
  SexObject *o;
  colinfo * actcatinfo;
  px_point pixmax;
  px_point backwin_cols;
  px_point modinfo_cols;

  // parse the catalog header for the column layout
  actcatinfo = get_sex_col_descr (infile);

  // locate the magnitude columns; fatal if there are none
  hasmags = has_magnitudes(actcatinfo);
  if (!hasmags)
    {
      aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                   "No magnitudes in file %s", infile);
    }
  else{
    waves = gsl_vector_alloc (hasmags);
    cnums = gsl_vector_alloc (hasmags);
    hasmags = get_magcols(actcatinfo, waves, cnums);
    // pick the column closest to 800nm as the MAG_AUTO substitute
    magcencol = get_magauto_col(waves, cnums, 800.0);
  }

  backwin_cols = has_backwindow(actcatinfo);
  modinfo_cols = has_modelinfo(actcatinfo);

  if (!(fin = fopen (infile, "r")))
    {
      aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                   "Cannot open catalog file %s", infile);
    }
  if (!(fout = fopen (outfile, "w")))
    {
      aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                   "Cannot open catalog file %s", outfile);
    }

  // both world and image coordinate columns are mandatory here
  if (check_worldcoo_input(actcatinfo, 0))
    aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                 "Catalogue %s does not have all WCO columns\n", infile);
  if (check_imagecoo_input(actcatinfo))
    aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                 "Catalogue %s does not have all image coo columns\n", infile);

  // write the GOL header to the output catalog
  make_GOL_header(fout, actcatinfo, waves, cnums, backwin_cols, modinfo_cols);

  if (distortion)
    coeffs = get_crossdisp_matrix(grismfile, hdunum);

  pixmax = get_npixels (grismfile, hdunum);

  // process the catalog line by line
  while (fgets (line, CATBUFFERSIZE, fin))
    {
      // skip invalid entries and comment lines
      if (!(line_is_valid (actcatinfo, line)))
        continue;
      if (line[0] == ';')
        continue;

      /* Create a vector containing the information */
      lv1ws (line);
      v = string_to_gsl_array (line);

      //      o = create_SexObject (sex_col_desc, line);
      o = create_SexObject (actcatinfo, line, waves, cnums, backwin_cols,
                            modinfo_cols, magcencol, 1);

      // transform the coordinates when both WCS's are given
      if ( (from_wcs!=NULL)&&(to_wcs!=NULL) )
        {
          //    if (distortion){
          //      fprintf(stdout, "(%f,%f) &&-->&& ", o->xy_image.x, o->xy_image.y);
          //      o->xy_image = undistort_point(coeffs, pixmax, o->xy_image);
          //    }
          fill_missing_WCS_coordinates (o, from_wcs, overwrite_wcs);
          fill_missing_image_coordinates (o, from_wcs, overwrite_img);
          compute_new_image_coordinates (o, to_wcs);
          if (distortion){
            o->xy_image = distort_point(coeffs, pixmax, o->xy_image);
            //      fprintf(stdout, "(%f,%f)\n\n", o->xy_image.x, o->xy_image.y);
          }
        }

      // serialize the object and write it out
      v_out = sobs_to_vout(o);
      sprintf (Buffer, "%8.5g ", gsl_vector_get (v_out, 0));
      for (i = 1; i < (int)v_out->size; i++)
        {
          sprintf (str, " %8.5g ", gsl_vector_get (v_out, i));
          strcat (Buffer, str);
        }
      strcat (Buffer, "\n");
      fputs (Buffer, fout);

      gsl_vector_free (v_out);
      gsl_vector_free (v);
    }

  if (coeffs)
    gsl_matrix_free(coeffs);

  fclose (fin);
  fclose (fout);
}

/**
 * Function:catalog_to_wcs_nodim
 * This function reads a sextractor catalog and using an input and an output
 * CD matrix, converts the world ra and dec coordinates to the output
 * CD matrix's image pixel coordinates. Non existing (i.e.
 NaN) world RA
 * and DEC values are generated using the pixel coordinates and the input
 * CD matrix
 *
 * Parameters:
 * @param infile  - a pointer to a char array containing the name of the
 *                  input catalog
 * @param outfile - a pointer to a char array containing the name of the
 *                  output catalog
 * @param from_wcs - a pointer to an existing WorldCoor structure containing
 *                   the input catalog CD matrix
 * @param to_wcs   - a pointer to an existing WorldCoor structure
 *                   containing the output catalog CD matrix
 * @param overwrite_wcs - if set to 1 then input world coordinates are
 *                        recomputed first using the from_wcs and the
 *                        x,y image values
 * @param overwrite_img - if set to 1 then the x,y coordinates of the
 *                        input image are first computed using the
 *                        from_wcs and the world ra and dec info
 */
void
catalog_to_wcs_nodim (char infile[], char outfile[],
                      struct WorldCoor *grism_wcs,
                      int overwrite_wcs, int overwrite_img)
{
  FILE *fin, *fout;
  gsl_vector *v;
  gsl_vector *v_out;
  gsl_vector *waves;
  gsl_vector *cnums;
  char Buffer[CATBUFFERSIZE];
  char line[CATBUFFERSIZE], str[CATBUFFERSIZE];
  int i;
  SexObject *o;
  int compute_imcoos=0;
  int th_sky=0;
  int checksum, hasmags=0, magcencol=0;
  colinfo * actcatinfo;
  px_point backwin_cols;
  px_point modinfo_cols;

  // parse the catalog header for the column layout
  actcatinfo = get_sex_col_descr (infile);

  // locate the magnitude columns; fatal if there are none
  hasmags = has_magnitudes(actcatinfo);
  if (!hasmags)
    {
      aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                   "No magnitudes in file %s", infile);
    }
  else{
    waves = gsl_vector_alloc (hasmags);
    cnums = gsl_vector_alloc (hasmags);
    hasmags = get_magcols(actcatinfo, waves, cnums);
    // pick the column closest to 800nm as the MAG_AUTO substitute
    magcencol = get_magauto_col(waves, cnums, 800.0);
  }

  backwin_cols = has_backwindow(actcatinfo);
  modinfo_cols = has_modelinfo(actcatinfo);

  if (!(fin = fopen (infile, "r")))
    {
      aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                   "Cannot open catalog file %s", infile);
    }
  if (!(fout = fopen (outfile, "w")))
    {
      aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                   "Cannot open catalog file %s", outfile);
    }

  //  checksum = check_worldcoo_input(sex_col_desc);
  th_sky=1;
  checksum = check_worldcoo_input(actcatinfo, 1);
  if (checksum)
    aXe_message (aXe_M_FATAL, __FILE__, __LINE__,
                 "Catalogue %s does not have all necessary columns\n", infile);

  // image coordinates are recomputed only when their columns are missing
  //  if (check_imagecoo_input(sex_col_desc))
  if (check_imagecoo_input(actcatinfo))
    compute_imcoos=1;

  // write the GOL header to the output catalog
  make_GOL_header(fout, actcatinfo, waves, cnums, backwin_cols, modinfo_cols);

  // process the catalog line by line
  while (fgets (line, CATBUFFERSIZE, fin))
    {
      /* If line is not a valid catalog entry, just continue */
      if (!(line_is_valid (actcatinfo, line)))
        continue;
      /* if line starts with ";", just continue */
      if (line[0] == ';')
        continue;

      /* Create a vector containing the information */
      lv1ws (line);
      v = string_to_gsl_array (line);

      o = create_SexObject (actcatinfo, line, waves, cnums, backwin_cols,
                            modinfo_cols, magcencol, 0);

      if (compute_imcoos)
        compute_new_image_sexobject (o, grism_wcs, th_sky);

      // serialize the object and write it out
      v_out = sobs_to_vout(o);
      sprintf (Buffer, "%8.5g ", gsl_vector_get (v_out, 0));
      for (i = 1; i < (int)v_out->size; i++)
        {
          sprintf (str, " %8.5g ", gsl_vector_get (v_out, i));
          strcat (Buffer, str);
        }
      strcat (Buffer, "\n");
      fputs (Buffer, fout);

      gsl_vector_free (v_out);
      gsl_vector_free (v);
    }

  fclose (fin);
  fclose (fout);

  free (actcatinfo);
}

/**
 * Function: SexMags_to_beamFlux
 * Fill a beam structure with the flux and shape information
 * contained in a SexObject structure.
 *
 * Parameters:
 * @param sobj    - pointer to a SexObject
 * @param actbeam - the beam structure to be filled
 *
 * Returns:
 * @return -
 */
void
SexMags_to_beamFlux(SexObject * sobj, beam *actbeam)
{
  int nvalid=0;
  int jj=0;
  int j=0;
  double fval=0.0;
  gsl_vector *flux;

  // set 'default' shapes
  actbeam->awidth = -1.0;
  actbeam->bwidth = -1.0;
  actbeam->aorient = -1.0;

  // set a 'default' flux
  actbeam->flux = NULL;

  /* if flux/wavelength information exists */
  if (sobj->magnitudes)
    {
      // get the number of valid entries in 'sobj->magnitudes'
      nvalid = get_valid_entries(sobj->magnitudes);
      if (!nvalid)
        aXe_message (aXe_M_WARN3, __FILE__, __LINE__,
                     "No valid magnitude for object %i !\n", sobj->number);

      // allocate the flux vector (lambda/flux pairs)
      flux = gsl_vector_alloc (2*(nvalid));

      // initialize a counter
      jj=0;

      // go over all magnitude values
      for (j=0; j < (int)sobj->magnitudes->size; j++)
        {
          // check whether the current entry is valid
          if (is_valid_entry(gsl_vector_get(sobj->magnitudes, j)))
            {
              // get the current entry, already converted to flux
              fval = get_flambda_from_magab(gsl_vector_get(sobj->magnitudes, j),
                                            gsl_vector_get(sobj->lambdas, j));

              // set the wavelength value and the flux value
              gsl_vector_set(flux, 2*jj, gsl_vector_get(sobj->lambdas, j));
              gsl_vector_set(flux, 2*jj+1, fval);

              // enhance the counter
              jj++;
            }
        }

      // transfer the flux values
      // to the beam
      actbeam->flux = flux;

      // transfer the shape values
      // to the beam
      actbeam->awidth = sobj->el_image.a;
      actbeam->bwidth = sobj->el_image.b;
      actbeam->aorient = sobj->el_image.theta;
    }
}

/**
 * Function: SexObject_to_slitgeom
 * Fill a beam structure with the optimized geometry information.
 * The quantities are computed from the object shape and
 * the trace angle.
 *
 * Parameters:
 * @param sobj        - pointer to a SexObject
 * @param trace_angle - pointer to a SexObject
 * @param actbeam     - the beam structure to be filled
 *
 * Returns:
 * @return -
 */
void
SexObject_to_slitgeom(const aperture_conf *conf, const SexObject * sobj,
                      const double trace_angle, beam *actbeam)
{
  double theta=0.0;
  double orient=0.0;
  double tmp_angle, cos_tmp_angle;
  double A11, A12, A22;
  d_point obj_size;

  // convert the SExtractor angle to rad in the right quadrant
  orient = (180.0 + sobj->el_image.theta) / 180.0 * M_PI;
  while (orient > M_PI)
    orient = orient - M_PI;

  // compute angle between object
  // orientation and trace
  theta = orient - trace_angle;

  // check the object size, possibly setting
  // it to the size of point-like object
  obj_size = check_object_size(conf, sobj, actbeam->ID);

  /*
  // determine the three matrix elements
  A11 = SQR(cos(theta) / sobj->el_image.a) + SQR(sin(theta) / sobj->el_image.b);
  A12 = cos(theta) * sin(theta) * (1.0/(SQR(sobj->el_image.a)) - 1.0/SQR(sobj->el_image.b));
  A22 = SQR(sin(theta) / sobj->el_image.a) + SQR(cos(theta) / sobj->el_image.b);

  // compute a temporary angle
  // and its cosine
  tmp_angle = atan(A12 / A11);
  cos_tmp_angle = cos(tmp_angle);

  // compute the slit length
  if (cos_tmp_angle > 0.01)
    actbeam->slitgeom[0] = sqrt(A11)*sobj->el_image.a*sobj->el_image.b / cos_tmp_angle;
  else
    actbeam->slitgeom[0] = sqrt(A11)*sobj->el_image.a*sobj->el_image.b / 0.01;

  // compute the slit orientation
  actbeam->slitgeom[1] = tmp_angle + trace_angle + M_PI_2;

  // compute the slit length
  actbeam->slitgeom[2] = 1.0 / sqrt(A11);

  // compute a modified B_IMAGE value, defined to keep the object area constant
  actbeam->slitgeom[3] = sobj->el_image.a*sobj->el_image.b / actbeam->slitgeom[0];
  */

  // determine the three matrix elements
  // NOTE(review): A22 is computed but never used below -- confirm whether
  // it can be dropped or is kept for documentation purposes.
  A11 = SQR(cos(theta) / obj_size.x) + SQR(sin(theta) / obj_size.y);
  A12 = cos(theta) * sin(theta) * (1.0/(SQR(obj_size.x)) - 1.0/SQR(obj_size.y));
  A22 = SQR(sin(theta) / obj_size.x) + SQR(cos(theta) / obj_size.y);

  // compute a temporary angle
  // and its cosine
  tmp_angle = atan(A12 / A11);
  cos_tmp_angle = cos(tmp_angle);

  // compute the slit length; the cosine is clipped at 0.01 to avoid
  // a blow-up for near-perpendicular configurations
  if (cos_tmp_angle > 0.01)
    actbeam->slitgeom[0] = sqrt(A11)*obj_size.x*obj_size.y / cos_tmp_angle;
  else
    actbeam->slitgeom[0] = sqrt(A11)*obj_size.x*obj_size.y / 0.01;

  // compute the slit orientation
  actbeam->slitgeom[1] = tmp_angle + trace_angle + M_PI_2;

  // compute the slit width
  actbeam->slitgeom[2] = 1.0 / sqrt(A11);

  // compute a modified B_IMAGE value, defined to keep the object area constant
  actbeam->slitgeom[3] = obj_size.x*obj_size.y / actbeam->slitgeom[0];
}

/**
 * Function: fill_corner_ignore
 * The method computes and fills the beam corner information
 * and the the ignore flag into a beam structure.
 *
 * Parameters:
 * @param sobj    - pointer to a SexObject
 * @param obs     - a pointer to the data array containing the image
 * @param conf    - pointer to the configuration structure
 * @param beamID  - the beam ID
 * @param dmag    - number of magnitudes to add to the magnitudes cutoffs
 * @param actbeam - the beam structure to be filled
 *
 * Returns:
 * @return -
 */
void
fill_corner_ignore(SexObject * sobj, observation * const obs,
                   aperture_conf *conf, int beamID, float dmag,
                   beam *actbeam)
{
  double mmag_extract;
  double mmag_mark;

  // get the extraction and the mark magnitudes
  mmag_extract = conf->beam[beamID].mmag_extract + dmag;
  mmag_mark = conf->beam[beamID].mmag_mark + dmag;

  // make a default for the
  // ignore flag
  actbeam->ignore = 0;

  /* magnitude cut */
  // PROBLEM: there is a logical flaw inside
  // whhat happend when (mag > mag_mark) && (mag < mmag_extract) ????
  // of course this has only relevance when mag_mark < mmag_extract
  //  if ((sobj->mag_auto <= mmag_mark)&&(sobj->mag_auto <= mmag_extract))
  // That's now an easy fix, but makes sense..
  if (sobj->mag_auto <= mmag_extract)
    actbeam->ignore = 0;       // this object will be extracted
  if ((sobj->mag_auto <= mmag_mark)&&(sobj->mag_auto > mmag_extract))
    actbeam->ignore = 2;       // this object will be not be extracted
  if ((sobj->mag_auto > mmag_mark)&&(sobj->mag_auto > mmag_extract))
    actbeam->ignore = 1;       // this object will be ignored

  // fill the boundary box values;
  // returns if the beams is completely out of the image
  if (!fill_object_bbox (obs, actbeam, 2, conf->beam[beamID].offset.dx0,
                         conf->beam[beamID].offset.dx1))
    actbeam->ignore = 1;
}

/**
 * Function: set_extraction_parameters
 * The function computes and fills the extraction width and the
 * extraction orientation into a beam structure. Depending on the
 * input parameters and the object shape, different methods
 * are deployed.
 *
 * Parameters:
 * @param sobj     - pointer to a SexObject
 * @param bck_mode - pointer for background mode
 * @param mfwhm    - the fwhm multiplicator constant to apply to
 *                   determine the width of the aperture box for the object.
 * @param dmag     - number of magnitudes to add to the magnitudes cutoffs
 * @param auto_reorient - if set then this task tries to optimize
 *                        the orientation of the
 * extraction slit - Should in general be left at 1 so that strange
 * geomety is avoided. If set to 2 the extraction is
 * forced to be vertical (90 deg.)
 * @param trace_angle - the trace angle
 * @param actbeam     - the beam structure to be filled
 *
 * Returns:
 * @return -
 */
void
set_extraction_parameters(SexObject * sobj, int bck_mode, float mfwhm,
                          int auto_reorient, double trace_angle,
                          beam *actbeam)
{
  double orient;
  double theta;
  double theta_deg;
  double trace_dist;
  double dya;
  double dyb;
  //int iturn=0;

  // convert the orientation to rad;
  // turn into the right quadrant
  orient = (180.0 + sobj->el_image.theta) / 180.0 * M_PI;
  while (orient > M_PI)
    orient = orient - M_PI;

  // compute the relative between the trace angle
  // and object orientation
  theta = orient - trace_angle;

  // convert theta to deg;
  // convert it into the range:
  // 0.0 < theta_deg < 180.0
  theta_deg = theta / M_PI * 180.;
  while (theta_deg < 0.0)
    theta_deg += 180.0;
  while (theta_deg > 180.0)
    theta_deg -= 180.0;

  // that's the 'normal', slanted extraction
  if (auto_reorient == 0)
    {
      // take the major half axis value * mfwhm
      // as width and its orientation
      // as extraction direction
      actbeam->width = sobj->el_image.a * mfwhm;
      actbeam->orient = orient;

      // give a warning for small angles
      // between the orientation direction and
      // the trace angle.
      if (fabs(theta_deg )< MIN_DIFFANGLE || fabs(theta_deg-180.0) < MIN_DIFFANGLE)
        aXe_message (aXe_M_WARN4, __FILE__, __LINE__,
                     "aXe_GOL2AF: Object ID: %i : The angle between the extraction orientation and "
                     "the trace is less than %5.2f degrees. You may get severe problems"
                     " down to core dumps in the 1D extraction later on!\n",
                     sobj->number, MIN_DIFFANGLE);
    }
  // thats the slanted, adjusted extraction
  else if (auto_reorient == 1)
    {
      // use the optimized parameters
      actbeam->width = actbeam->slitgeom[0] * mfwhm;
      actbeam->orient = actbeam->slitgeom[1];
    }
  // thats the perpendicular extraction
  else if(auto_reorient == 2)
    {
      // extraction angle is perpendicular to trace
      actbeam->orient = trace_angle + M_PI_2;
      if (mfwhm < 0.0)
        {
          // take the mfwhm value as width;
          actbeam->width = -1.0 * mfwhm;
        }
      else
        {
          // compute the projections of the major and minor
          // half axis to the extraction direction
          dya = fabs (sobj->el_image.a * sin (theta));
          dyb = fabs (sobj->el_image.b * sin (M_PI_2 - theta));
          // use the larger for extraction width
          actbeam->width = mfwhm*MAX(dya,dyb);
        }
    }

  // this piece of code was specifically added
  // for NICMOS HLA, in order to enlarge the background
  // area around bright, point-like objects.
  if (sobj->backwindow.x != -1 && bck_mode)
    {
      // compute the projected distance to the trace
      trace_dist = sin(actbeam->orient - trace_angle) * actbeam->width;

      // check whether the distance is too small
      if (trace_dist < sobj->backwindow.x)
        // elongate the width such that the projected distance
        // equals the minimum value given in the SexObject
        actbeam->width = sobj->backwindow.x / sin(actbeam->orient - trace_angle);
    }
}

/**
 * Function: check_object_size
 * Returns the object half-axis sizes, replacing them with the configured
 * point-like object size when the object area falls below that threshold.
 * NOTE(review): beamID is currently unused in this function.
 */
d_point
check_object_size(const aperture_conf *conf, const SexObject *sobj,
                  const int beamID)
{
  double obj_size = 0.0;
  d_point ret;

  // set the input values
  // as default
  ret.x = sobj->el_image.a;
  ret.y = sobj->el_image.b;

  // check whether a minimum
  // size is defined
  if (conf->pobjsize < 0.0)
    // return the value
    return ret;

  // compute the object size
  obj_size = sobj->el_image.a * sobj->el_image.b;

  // check whether the object is point-like
  if (obj_size < SQR(conf->pobjsize))
    {
      // transfer the point-like
      // sizes to the return
      ret.x = conf->pobjsize;
      ret.y = conf->pobjsize;
    }

  // return the resulting
  // object size
  return ret;
}

/**
 * Function: SexObject_to_beam
 * Fill an beam structure with the information contained in a SexObject
 * structure plus further information derived via the confiugration file.
 *
 * Parameters:
 * @param sobj     - pointer to a SexObject
 * @param obs      - a pointer to the data array containing the image
 * @param conf     - pointer to the configuration structure
 * @param conffile - the name of the aperture configuration file
 * @param mfwhm    - the fwhm multiplicator constant to apply to
 *                   determine the width of the aperture box for the object.
 * @param dmag     - number of magnitudes to add to the magnitudes cutoffs
 * @param auto_reorient - if set then this task tries to optimize
 *                        the orientation of the
 * extraction slit - Should in general be left at 1 so that strange
 * geomety is avoided. If set to 2 the extraction is
 * forced to be vertical (90 deg.)
 * @param bck_mode - pointer for background mode
 * @param beamID   - the beam ID
 * @param actbeam  - the beam structure to be filled
 *
 * Returns:
 * @return -
 */
void
SexObject_to_beam(SexObject * sobj, observation * const obs,
                  aperture_conf *conf, char conffile[], float mfwhm,
                  float dmag, int auto_reorient, int bck_mode, int beamID,
                  beam *actbeam)
{
  double trace_angle;
  d_point pixel;
  tracestruct *trace;

  // set the beam ID
  actbeam->ID = conf->beam[beamID].ID;

  // set the model template ID's
  actbeam->modspec = sobj->modspec;
  actbeam->modimage = sobj->modimage;

  // Adjust for possibly non (0,0) ref point of the 2D field dependence;
  // get the geometrical description of the trace at position "pixel"
  pixel.x = sobj->xy_image.x - 1.0 - conf->refx;
  pixel.y = sobj->xy_image.y - 1.0 - conf->refy;
  trace = get_tracestruct_at_pos (conffile, conf->beam[beamID].ID, pixel);

  // transfer trace information to the beam
  actbeam->spec_trace = vector_to_trace_polyN(trace->pol);

  // compute the local trace angle
  trace_angle = atan2(actbeam->spec_trace->deriv (0, actbeam->spec_trace->data),1.0);

  // set the reference point
  actbeam->refpoint.x = sobj->xy_image.x - 1.0 + trace->offset.x;
  actbeam->refpoint.y = sobj->xy_image.y - 1.0 + trace->offset.y;

  // fill the flux and shape information
  SexMags_to_beamFlux(sobj, actbeam);

  // fill in the slit geometry
  SexObject_to_slitgeom(conf, sobj, trace_angle, actbeam);

  // actually set the extraction parameters
  set_extraction_parameters(sobj,bck_mode,mfwhm, auto_reorient, trace_angle,
                            actbeam);

  // set the beam corners and the ignore flag
  fill_corner_ignore(sobj, obs, conf, beamID, dmag, actbeam);
}

/**
 * Function: SexObject_to_objectII
 * Fill an object structure with the information contained in a SexObject
 * structure This function uses a configuration file to compute the full
 * list of beams for each object.
 *
 * Parameters:
 * @param sobj     - pointer to a SexObject
 * @param obs      - a pointer to the data array containing the image
 * @param conf     - pointer to the configuration structure
 * @param conffile - the name of the aperture configuration file
 * @param mfwhm    - the fwhm multiplicator constant to apply to
 *                   determine the width of the aperture box for the object.
 * @param dmag     - number of magnitudes to add to the magnitudes cutoffs
 * @param auto_reorient - if set then this task tries to optimize
 *                        the orientation of the
 * extraction slit - Should in general be left at 1 so that strange
 * geomety is avoided. If set to 2 the extraction is
 * forced to be vertical (90 deg.)
* @param bck_mode - pointer for background mode * * Returns: * @return a pointer to a newly allocated object structure */ object * SexObject_to_objectII(SexObject * sobj, observation * const obs, aperture_conf *conf, char conffile[], float mfwhm, float dmag, int auto_reorient, int bck_mode) { int i=0; object *ob; // allocate an object ob = (object *)malloc(sizeof(object)); // store the object specific // information ob->ID = sobj->number; ob->nbeams = conf->nbeams; ob->grism_obs = obs; // go over all beams for (i = 0; i < conf->nbeams; i++) // fill the current beam SexObject_to_beam(sobj, obs, conf, conffile, mfwhm, dmag, auto_reorient, bck_mode, i, &(ob->beams[i])); // return the object return ob; } /** * Function: SexObject_to_object * Fill an object structure with the information contained in a SexObject * structure This function uses a configuration file to compute the full * list of beams for each object. * * Parameters: * @param sobj - pointer to a SexObject * @param obs - a pointer to the data array containing the image * @param conffile - the name of the aperture configuration file * @param maxmag - upper magniture bound. If object has a magnitude * greater than this has its ignore flag set to 1, * zero otherwise. * @param mfwhm - the fwhm multiplicator constant to apply to * determine the width of the aperture box for the object. * @param dmag - number of magnitudes to add to the magnitudes cutoffs * @param auto_reorient - if set then this task tries to optimize * the orientation of the * extraction slit - Should in general be left at 1 so that strange * geomety is avoided. If set to 2 the extraction is * forced to be vertical (90 deg.) 
* * Returns: * @return a pointer to a newly allocated object structure * */ // object * // SexObject_to_object (SexObject * sobj, observation * const obs, // aperture_conf *conf, char conffile[], float mfwhm, // float dmag, // // char conffile[], float mfwhm, float dmag, // int auto_reorient, int bck_mode) // { // int i, dx0, dx1; // beam *b; // object *ob = malloc (sizeof (object)); // d_point pixel; // tracestruct *trace; // // aperture_conf *conf; // float mmag_extract, mmag_mark; // // conf = get_aperture_descriptor (conffile); // int j = 0, jj=0, nvalid=0; // double fval=0.0; // gsl_vector *flux; // float aposang, paposang; // float dya, dyb; // int iturn; // ob->ID = sobj->number; // for (i = 0; i < conf->nbeams; i++) // { // dx0 = conf->beam[i].offset.dx0; // dx1 = conf->beam[i].offset.dx1; // mmag_extract = conf->beam[i].mmag_extract+dmag; // mmag_mark = conf->beam[i].mmag_mark+dmag; // b = &(ob->beams[i]); // b->ID = conf->beam[i].ID; // //--------------------------------------- // // some code for FORS2 MXU // // b->backwindow.x = sobj->backwindow.x; // // b->backwindow.y = sobj->backwindow.y; // b->modspec = sobj->modspec; // b->modimage = sobj->modimage; // /* Adjust for posibly non (0,0) ref point of the 2D field dependence */ // pixel.x = sobj->xy_image.x-1 - conf->refx; // pixel.y = sobj->xy_image.y-1 - conf->refy; // /* get the geometrical description of the trace at position "pixel" */ // trace = // get_tracestruct_at_pos (conffile, conf->beam[i].ID, pixel); // b->spec_trace = vector_to_trace_polyN ( trace->pol ); // b->width = sobj->el_image.a * mfwhm; // /* if flux/wavelength information exists */ // if (sobj->magnitudes) // { // // get the number of valid entries in 'sobj->magnitudes' // nvalid = get_valid_entries(sobj->magnitudes); // if (!nvalid) // aXe_message (aXe_M_WARN3, __FILE__, __LINE__, // "No valid magnitude for object %i !\n", sobj->number); // // allocate the vector for the flux // flux = gsl_vector_alloc (2*(nvalid)); // // fill 
the flux vector with values // jj=0; // for (j=0; j < (int)sobj->magnitudes->size; j++) // { // if (is_valid_entry(gsl_vector_get(sobj->magnitudes, j))) // { // fval = get_flambda_from_magab(gsl_vector_get(sobj->magnitudes, j), gsl_vector_get(sobj->lambdas, j)); // gsl_vector_set(flux, 2*jj, gsl_vector_get(sobj->lambdas, j)); // // gsl_vector_set(flux, 2*jj+1, gsl_vector_get(sobj->magnitudes, j)); // gsl_vector_set(flux, 2*jj+1, fval); // jj++; // } // } // b->flux = flux; // /* fill the structual parameters of the object */ // b->awidth = sobj->el_image.a; // b->bwidth = sobj->el_image.b; // b->aorient = sobj->el_image.theta; // } // // if flux/wavelength information does not exists // else // { // // set everything to dummy values // b->flux = NULL; // b->awidth = -1.0; // b->bwidth = -1.0; // b->aorient = -1.0; // } // /* Convert from SeXtractor angle reference point to aXe's */ // b->orient = (180 + sobj->el_image.theta) / 180. * M_PI; // while (b->orient > M_PI) // b->orient = b->orient - M_PI; // // calculate the angle between // // the extraction direction and the trace // aposang = // b->orient - atan2(b->spec_trace->deriv (0, b->spec_trace->data),1.0); // // transform the angle to degrees // paposang = aposang / M_PI * 180.; // // give a warning for small angles // // between the orientation direction and // // the trace angle. // // BUGFIX comparison should be to paposang not the logical comparison MLS // if ( ( (fabs(paposang) < MIN_DIFFANGLE) || (fabs(paposang-180.0) < MIN_DIFFANGLE) ) && (auto_reorient == 0) ) // aXe_message (aXe_M_WARN4, __FILE__, __LINE__, // "aXe_GOL2AF: Object ID: %i: The angle between the extraction orientation and " // "the trace is less than %5.2f degrees. You may get severe problems" // " down to core dumps in the 1D extraction later on!\n", ob->ID, MIN_DIFFANGLE); // Case when the extraction angle is modified to that the extraction // /* proceeds along the semi-axis which projects farther away from the // * trace i.e. 
we avoid trying to extract spectra in a direction nearly // * parallel the trace */ // if (auto_reorient==1) // { // aposang = // b->orient - atan2(b->spec_trace->deriv (0, // b->spec_trace->data),1.0); // /* February 2004 introduced to make a hardstop in the range for // allowed angles. */ // if (aposang > 0.0){ // paposang = aposang / M_PI * 180.; // } // else{ // paposang = aposang / M_PI * 180. + 360.0; // } // if (paposang > 180.0) // paposang = paposang-180.0; // /* the hard stop is here */ // if (paposang > 30.0 && paposang < 150.0){ // iturn = 0;} // else{ // iturn = 1;} // /*end introduction */ // /* Compute how far each axes extends away from the trace */ // dya = fabs (sobj->el_image.a * sin (aposang)); // dyb = fabs (sobj->el_image.b * sin (M_PI / 2. - aposang)); // /* Select the broader of the two axes */ // if (dya > dyb && iturn == 0) // { // b->width = sobj->el_image.a * mfwhm; // } // else // { // b->width = sobj->el_image.b * mfwhm; // b->orient = b->orient + M_PI / 2.; // } // } // /* Case when we force the extraction to be vertical, // the extraction width is recomputed */ // if (auto_reorient==2) { // // new system for fixed extraction: // // - extraction direction perpendicular // // to the trace // // - the mfwhm is used as a fixed extraction widt in pixels // b->orient = // atan2(b->spec_trace->deriv (0,b->spec_trace->data),1.0)+ M_PI / 2.0; // if (mfwhm < 0.0) // { // b->orient = // atan2(b->spec_trace->deriv (0.0,b->spec_trace->data),1.0)+ M_PI / 2.0; // b->width = -mfwhm; // } // else // { // aposang = b->orient // - atan2(b->spec_trace->deriv (0,b->spec_trace->data),1.0); // /* Compute how far each axes extends away from the trace */ // dya = fabs (sobj->el_image.a * sin (aposang)); // dyb = fabs (sobj->el_image.b * sin (M_PI / 2. 
- aposang)); // b->width = mfwhm*MAX(dya,dyb); // b->orient = // atan2(b->spec_trace->deriv (0,b->spec_trace->data),1.0)+ M_PI / 2.0; // } // } // if (sobj->backwindow.x != -1 && bck_mode) // b->width = MAX(b->width, sobj->backwindow.x); // /* On coordinate systems: // the sexrtractor image positions are given in the iraf system, // which means the value of the lower left pixel is associated // with the coordinate (1.0,1.0). // aXe works in a kind of 'matrix system', where the value of // the lower left pixel is stored in the matrix indices (0,0) // or (0.0,0.0) seen as a coordinate system. // To transform into this system 1.0 is subracted from // both sextractor coordinates. // */ // b->refpoint.x = sobj->xy_image.x-1.0 + trace->offset.x; // b->refpoint.y = sobj->xy_image.y-1.0 + trace->offset.y; // /* magnitude cut */ // // PROBLEM: there is a logical flaw inside // // whhat happend when (mag > mag_mark) && (mag < mmag_extract) ???? // // of course this has only relevance when mag_mark < mmag_extract // // if ((sobj->mag_auto <= mmag_mark)&&(sobj->mag_auto <= mmag_extract)) // // That's now an easy fix, but makes sense.. 
// if (sobj->mag_auto <= mmag_extract) // { // b->ignore = 0; /* This object will be extracted */ // } // if ((sobj->mag_auto <= mmag_mark)&&(sobj->mag_auto > mmag_extract)) // { // b->ignore = 2; /* This object will be not be extracted */ // } // if ((sobj->mag_auto > mmag_mark)&&(sobj->mag_auto > mmag_extract)) // { // b->ignore = 1; /* This object will be ignored */ // } // if (!fill_object_bbox (obs, b, 2, dx0, dx1)) // b->ignore = 1; // b->slitgeom[0] = -1.0; // b->slitgeom[1] = -1.0; // b->slitgeom[2] = -1.0; // b->slitgeom[3] = -1.0; // } // ob->nbeams = conf->nbeams; // ob->grism_obs = obs; // return ob; // } /** * Function: SexObjects_to_oblist * Produces an object list from the data contained in an array of SexObjects * * Parameters: * @param sobjs - an NULL terminated array containing pointers to SexObjects * @param obs - a pointer to the data array containing the image * @param conffile - the name of the aperture configuration file * @param mmag_extract - upper magniture bound. Any object with a magnitude greater than * this has its ignore flag set to 1. * @param mmag_mark - upper magniture bound. Any object with a magnitude greater than * this has its ignore flag set to 2. * @param mfwhm - the fwhm multiplicator constant to apply to determine the width of the * aperture box for the object. * @param dmag - number of magnitudes to add to the magnitudes cutoffs * @param auto_reorient - if set to 1 then this task tries to optimize the orientation of the * extraction slit. Should in general be left at 1 so that strange geomety is avoided. If set to 2 * the extraction is forced to be vertical (90 deg.) * * Return: * @return a pointer to a NULL terminated object array. 
*/ object ** SexObjects_to_oblist (SexObject ** sobjs, observation * const obs, aperture_conf *conf, char conffile[], float mfwhm, float dmag, // char conffile[], float mfwhm, float dmag, int auto_reorient, int bck_mode) { int i, nobjs = 0; object **oblist; /* Find the number of SexObjects in sobjs */ while (sobjs[nobjs]) nobjs++; /* Allocate enough room for a new object list */ oblist = (object **) malloc ((nobjs + 1) * sizeof (object *)); for (i = 0; i < nobjs; i++) { //fprintf(stdout, "Using the old routine...\n"); oblist[i] = SexObject_to_objectII(sobjs[i], obs, conf, conffile, mfwhm, dmag, auto_reorient, bck_mode); } oblist[nobjs] = NULL; return oblist; } /** * Function: SexObjects_to_oblistII * Produces an object list from the data contained in an array of SexObjects * * Parameters: * @param sobjs - an NULL terminated array containing pointers to SexObjects * @param obs - a pointer to the data array containing the image * @param conffile - the name of the aperture configuration file * @param mmag_extract - upper magniture bound. Any object with a magnitude greater than * this has its ignore flag set to 1. * @param mmag_mark - upper magniture bound. Any object with a magnitude greater than * this has its ignore flag set to 2. * @param mfwhm - the fwhm multiplicator constant to apply to determine the width of the * aperture box for the object. * @param dmag - number of magnitudes to add to the magnitudes cutoffs * @param auto_reorient - if set to 1 then this task tries to optimize the orientation of the * extraction slit. Should in general be left at 1 so that strange geomety is avoided. If set to 2 * the extraction is forced to be vertical (90 deg.) * * Return: * @return a pointer to a NULL terminated object array. 
*/ object ** SexObjects_to_oblistII (SexObject ** sobjs, observation * const obs, aperture_conf *conf, char conffile[], float mfwhm, float dmag, int auto_reorient, int bck_mode) { int i, nobjs = 0; object **oblist; fflush(stdout); /* Find the number of SexObjects in sobjs */ while (sobjs[nobjs]) nobjs++; /* Allocate enough room for a new object list */ oblist = (object **) malloc ((nobjs + 1) * sizeof (object *)); for (i = 0; i < nobjs; i++) { //fprintf(stdout, "Using the new routine...\n"); oblist[i] = SexObject_to_objectII(sobjs[i], obs, conf, conffile, mfwhm, dmag, auto_reorient, bck_mode); } oblist[nobjs] = NULL; return oblist; } /** * Function: check_conf_for_slitlessgeom * The functions checks whether a smoothed flux conversion * is possible or not. In case that keywords in the configuration * files are missing, it is NOT possible, and 0 is returned. * * Parameters: * @param conf - the configuarion file structure * @param auto_reorient - integer indicating the extraction method * * Returns * @return is_possible - the paramters used in the smoothing */ int check_conf_for_slitlessgeom(const aperture_conf *conf, const int auto_reorient) //check_conf_for_slitlessgeom(const aperture_conf *conf, const int slitless_geom) { int is_possible=1; if (auto_reorient == 1 && conf->pobjsize < 0.0) // change the switch is_possible = 0; // return the pointer return is_possible; } /** * Function: fill_object_bbox * Fill up the aperture part of the beam of an object structure by looking at the trace polynomial * description associated with this object. * * Parameters: * @param obs - a pointer to the data array containing the image (used for bound checking) * @param b - A pointer to a beam whose aperture must be filled. 
* @param m_width - The width in pixel of the extraction box * @param dxmin - How far (in pixel) to follow the trace on the left side of the spectrum * @param dxmax - How far (in pixel) to follow the trace on the right hand side of the spectrum * * Returns: * @return 1 if bounding box appears to be valied (i.e. non-empty) */ int fill_object_bbox (observation * const obs, beam * b, const float m_width, const int dxmin, const int dxmax) { d_point pmin, pmax; d_point pminl, pminh, pmaxl, pmaxh; float w = b->width/2. * m_width + 2.0; float wcos, wsin; float area; int xmin, xmax, xact; double yact; pmin.x = b->refpoint.x + dxmin; pmin.y = b->refpoint.y + b->spec_trace->func (dxmin, b->spec_trace->data); pmax.x = b->refpoint.x + dxmax; pmax.y = b->refpoint.y + b->spec_trace->func (dxmax, b->spec_trace->data); if (b->spec_trace->type > 1) { if (dxmin < dxmax) { xmin = dxmin; xmax = dxmax; } else { xmin = dxmax; xmax = dxmin; } for (xact = xmin; xact < xmax; xact++) { yact = b->refpoint.y + b->spec_trace->func (xact, b->spec_trace->data); if (yact < pmin.y) pmin.y = yact; if (yact > pmax.y) pmax.y = yact; } wcos = w * cos (b->orient); wsin = w * sin (b->orient); if (wcos > 0) { pmin.x = pmin.x - wcos; pmax.x = pmax.x + wcos; } else { pmin.x = pmin.x + wcos; pmax.x = pmax.x - wcos; } if (wsin > 0) { pmin.y = pmin.y - wsin; pmax.y = pmax.y + wsin; } else { pmin.y = pmin.y + wsin; pmax.y = pmax.y - wsin; } b->corners[0].x = pmin.x; b->corners[0].y = pmin.y; b->corners[1].x = pmax.x; b->corners[1].y = pmin.y; b->corners[2].x = pmax.x; b->corners[2].y = pmax.y; b->corners[3].x = pmin.x; b->corners[3].y = pmax.y; } else { wcos = w * cos (b->orient); wsin = w * sin (b->orient); pminl.x = pmin.x - wcos; pminl.y = pmin.y - wsin; pminh.x = pmin.x + wcos; pminh.y = pmin.y + wsin; pmaxl.x = pmax.x - wcos; pmaxl.y = pmax.y - wsin; pmaxh.x = pmax.x + wcos; pmaxh.y = pmax.y + wsin; b->corners[0].x = pminl.x; b->corners[0].y = pminl.y; b->corners[1].x = pmaxl.x; b->corners[1].y = pmaxl.y; 
b->corners[2].x = pmaxh.x; b->corners[2].y = pmaxh.y; b->corners[3].x = pminh.x; b->corners[3].y = pminh.y; } /* Compute the area of this aperture */ area = 0.5 * (abs (b->corners[0].y - b->corners[4].y) * abs (b->corners[0].x - b->corners[1].x) + abs (b->corners[2].y - b->corners[1].y) * abs (b->corners[3].x - b->corners[2].x)); if (area == 0) { fprintf (stderr, "aper debug: area is zero!\n"); return 0; } return 1; } /** * Function: size_of_sextractor_catalog * A utility function which parses a Sextractor catalog file, * and returns the number of valid catalog entries found in the file * * Parameters: * @param filename - a pointer pointing to a char array containing the * list of a sextractor object output catalog. * Ignores rows starting with a ";" */ int size_of_sextractor_catalog (char filename[]) { FILE *input; char Buffer[CATBUFFERSIZE]; gsl_vector *v; int catsize; int num = 0; colinfo * actcatinfo; actcatinfo = get_sex_col_descr (filename); // catalog_header = get_sex_col_descr (filename); catsize = actcatinfo->numcols; if (!(input = fopen (filename, "r"))) { aXe_message (aXe_M_FATAL, __FILE__, __LINE__, "Could not open Sextractor catalog" "file %s,\n", filename); } while (fgets (Buffer, CATBUFFERSIZE, input)) { if (Buffer[0] == ';') continue; lv1ws (Buffer); v = string_to_gsl_array (Buffer); if (v==NULL) continue; if ((int)v->size == catsize) num++; } return num; } /** * Function: get_SexObject_from_catalog * Parses a Sextractor 2.0 catalog file, and outputs a NULL terminated * array of SexObjects pointers. Ignores rows starting with a ; * * Parameters: * @param filename a pointer pointing to a char array containing the * list of a sextractor object output catalog * * Returns: * @return a pointer to an array of SeXObject pointer. 
NULL terminated */ SexObject ** get_SexObject_from_catalog (char filename[], const double lambda_mark) { FILE *input; char Buffer[CATBUFFERSIZE]; gsl_vector *v; gsl_vector *waves; gsl_vector *cnums; size_t hasmags=0; size_t nobjs; size_t catsize; int i; size_t magcencol=0; SexObject **sobjs, *sobj; colinfo * actcatinfo; px_point backwin_cols; px_point modinfo_cols; actcatinfo = get_sex_col_descr (filename); hasmags = has_magnitudes(actcatinfo); waves = gsl_vector_alloc (hasmags); cnums = gsl_vector_alloc (hasmags); hasmags = get_magcols(actcatinfo, waves, cnums); magcencol = get_magauto_col(waves, cnums, lambda_mark); backwin_cols = has_backwindow(actcatinfo); modinfo_cols = has_modelinfo(actcatinfo); // catsize = count_keys (catalog_header); catsize = actcatinfo->numcols; nobjs = size_of_sextractor_catalog (filename); if (!(input = fopen (filename, "r"))) { aXe_message (aXe_M_FATAL, __FILE__, __LINE__, "Could not open Sextractor catalog" "file %s,\n", filename); } /* Allocate enough room for nobjs+1 SexObject pointers */ sobjs = (SexObject **) malloc ((nobjs + 1) * sizeof (SexObject *)); if (!sobjs) { aXe_message (aXe_M_FATAL, __FILE__, __LINE__, "Out of memory. Couldn't allocate Sextractor Object"); } i = 0; while (fgets (Buffer, CATBUFFERSIZE, input)) { if (Buffer[0] == ';') continue; v = string_to_gsl_array (Buffer); if (v==NULL) continue; if (v->size == catsize) { // BIG BUG!!! // sobj = create_SexObject (catalog_header, Buffer); sobj = create_SexObject (actcatinfo, Buffer, waves, cnums, backwin_cols, modinfo_cols, magcencol, 0); sobjs[i++] = sobj; } } sobjs[i] = NULL; return sobjs; } /** * Function: el_to_ABC_world * Convert a wold coordinate ellipse into a set of 3 points on the * tangent plane point a is at pos of obj, semi major-axis end is point b, * semi minor axis is point c theta is measured from x-axis to semi-major * axis, counter clock wise. 
*/ void el_to_ABC_world (ellipse *el, sky_coord *a, sky_coord *b, sky_coord *c) { a->ra = 0.0; a->dec = 0.0; b->ra = el->a * cos (el->theta / 180. * M_PI); b->dec = el->a * sin (el->theta / 180. * M_PI); c->ra = -1.0 * el->b * sin (el->theta / 180. * M_PI); c->dec = el->b * cos (el->theta / 180. * M_PI); } /* * Function: el_to_ABC_world2 * */ void el_to_ABC_world2 (ellipse *el, sky_coord *a, sky_coord *b, sky_coord *c) { b->ra = a->ra - (el->a * cos (el->theta / 180. * M_PI))/cos(a->dec / 180. * M_PI); b->dec = a->dec + (el->a * sin (el->theta / 180. * M_PI)); c->ra = a->ra - (el->b * cos ((el->theta-90.0)/180.*M_PI))/cos(a->dec / 180. * M_PI); c->dec = a->dec + (el->b * sin ((el->theta-90.0)/180.*M_PI)); } /** * Function: ABC_image_to_el * Convert a set of three image coordinates into an elipse structure * */ void ABC_image_to_el (d_point *a, d_point *b, d_point *c, ellipse *el) { if (b->x != 0.0) { el->theta = atan (b->y / b->x) / M_PI * 180.0; } else { el->theta = 90.0; } el->a = sqrt ((b->x - a->x) * (b->x - a->x) + (b->y - a->y) * (b->y - a->y)); el->b = sqrt ((c->x - a->x) * (c->x - a->x) + (c->y - a->y) * (c->y - a->y)); } /** * Function: ABC_image_to_el2 * Convert a set of three image coordinates into an elipse structure * */ void ABC_image_to_el2 (d_point *a, d_point *b, d_point *c, ellipse *el) { el->theta = atan ((b->y - a->y )/ (b->x - a->x)) / M_PI * 180.0; el->a = sqrt ((b->x - a->x) * (b->x - a->x) + (b->y - a->y) * (b->y - a->y)); el->b = sqrt ((c->x - a->x) * (c->x - a->x) + (c->y - a->y) * (c->y - a->y)); } /** * Function: fill_missing_WCS_coordinates * Update a SexObject structure and replace all missing WCS coordinates * by re-computing them using the world one and the from_wcs wcs. * Re-computes: * peak_world_x * peak_world.y * xy_world.x * xy_world.y * el_world.a * el_world.b * el_world.theta * * @param o - a pointer to an exsting SexObject. 
Should have been checked for validity already
 * @param from_wcs - a pointer to an existing WoldCoord wcs structure
 */
void
fill_missing_WCS_coordinates (SexObject * o, struct WorldCoor *from_wcs,
                              int overwrite)
{
  sky_coord as, bs, cs;
  d_point a, b, c;
  //int offscl;

  /***************************************************************/
  /* If the WCS coordinates are missing, generate them using     */
  /* from_wcs and sextractor image coordinates                   */
  /***************************************************************/
  if ((overwrite) || (isnan (o->xy_world.ra)) || (isnan (o->xy_world.dec)))
    {
      /* recomputes xy_image from the world values */
      /*pix2wcs (from_wcs, o->xy_image.x, o->xy_image.y,
        &(o->xy_world.ra),&(o->xy_world.dec), &offscl); */

      /* recomputes xy_image from the world values */
      // new version requested since wcstools-3.6
      // (the &offscl argument was dropped from the pix2wcs API)
      pix2wcs (from_wcs, o->xy_image.x, o->xy_image.y,
               &(o->xy_world.ra),&(o->xy_world.dec));
    }

  if ((isnan (o->el_world.a)) || (isnan (o->el_world.b)) ||
      (isnan (o->el_world.theta)) || (overwrite))
    {
      /* recomputes el_image them from the world values */

      /* Note the following is an unconventional use of the et_to*()
         routines since we store image coords in as, bs, and cs */
      /* Convert ellipse into a set of 3 coordinates */
      el_to_ABC_world (&(o->el_image), &as, &bs, &cs);

      /* Compute the end point of the axes in the image coord system */
      /*
        pix2wcs (from_wcs, as.ra, as.dec, &(a.x), &(a.y), &offscl);
        pix2wcs (from_wcs, bs.ra, bs.dec, &(b.x), &(b.y), &offscl);
        pix2wcs (from_wcs, cs.ra, cs.dec, &(c.x), &(c.y), &offscl);
      */

      /* Compute the end point of the axes in the image coord system */
      // new version requested since wcstools-3.6
      pix2wcs (from_wcs, as.ra, as.dec, &(a.x), &(a.y));
      pix2wcs (from_wcs, bs.ra, bs.dec, &(b.x), &(b.y));
      pix2wcs (from_wcs, cs.ra, cs.dec, &(c.x), &(c.y));

      /* convert 3 coordinates into ellipse */
      ABC_image_to_el (&a, &b, &c, &(o->el_world));
    }
}

/**
 * Function: fill_missing_image_coordinates
 * Update a SexObject structure and replace all missing image coordinates
 * by re-computing them using the world one and the from_wcs wcs.
 * Re-computes:
 *           peak_image_x
 *           peak_image.y
 *           xy_image.x
 *           xy_image.y
 *           el_image.a
 *           el_image.b
 *           el_image.theta
 *
 * Parameters:
 * @param o        - a pointer to an exsting SexObject. Should have been
 *                   checked for validity already
 * @param from_wcs - a pointer to an existing WoldCoord wcs structure
 */
void
fill_missing_image_coordinates (SexObject *o, struct WorldCoor *from_wcs,
                                int overwrite)
{
  sky_coord as, bs, cs;
  d_point a, b, c;
  int offscl;

  /************************/
  /* If the image coordinates are missing,
     generate them using from_wcs and sextractor world coordinates */
  /************************/
  if ((overwrite) || (isnan (o->xy_image.x)) || (isnan (o->xy_image.y)))
    {
      /* recomputes xy_image from the world values */
      wcs2pix (from_wcs, o->xy_world.ra, o->xy_world.dec,
               &(o->xy_image.x), &(o->xy_image.y), &offscl);
    }

  if ((isnan (o->el_image.a)) || (isnan (o->el_image.b)) ||
      (isnan (o->el_image.theta)) || (overwrite))
    {
      /* recomputes el_image them from the world values */

      /* Convert ellipse into a set of 3 coordinates */
      el_to_ABC_world (&(o->el_world), &as, &bs, &cs);

      /* Compute the end point of the axes in the image coord system.
         NOTE(review): since the ABC_image_to_el() call below is
         disabled, the results in a, b, c are currently unused. */
      wcs2pix (from_wcs, as.ra, as.dec, &(a.x), &(a.y), &offscl);
      wcs2pix (from_wcs, bs.ra, bs.dec, &(b.x), &(b.y), &offscl);
      wcs2pix (from_wcs, cs.ra, cs.dec, &(c.x), &(c.y), &offscl);

      /* DISABLED !!*/
      /* convert 3 coordinates into ellipse */
      //  ABC_image_to_el (&a, &b, &c, &(o->el_image));
    }
}

/**
 * Function: fill_all_missing_image_coordinates
 * Replaces all NaN values in a set of SexObjects with ones computed
 * from the available information and the associated WCS.
 *
 * Parameters:
 * @param sobjs     - an NULL terminated array containing pointers to SexObjects
 * @param from_wcs  - a pointer to an existing WoldCoord wcs structure
 * @param overwrite - forces the re-computation of all image coordinates
 *
 */
void
fill_all_missing_image_coordinates (SexObject ** sobjs,
                                    struct WorldCoor *from_wcs,
                                    int overwrite)
{
  int i, nobjs = 0;

  /* Find the number of SexObjects in sobjs */
  while (sobjs[nobjs])
    nobjs++;

  // fix up every object in turn
  for (i = 0; i < nobjs; i++)
    {
      fill_missing_image_coordinates (sobjs[i], from_wcs, overwrite);
    }
}

/**
 * Function: fill_all_missing_WCS_coordinates
 * Replaces all NaN values in a set of SexObjects with ones computed from the
 * available information and the associated WCS.
 *
 * Parameters:
 * @param sobjs     - an NULL terminated array containing pointers to SexObjects
 * @param from_wcs  - a pointer to an existing WoldCoord wcs structure
 * @param overwrite - forces the re-computation of all image coordinates
 *
 */
void
fill_all_missing_WCS_coordinates (SexObject ** sobjs,
                                  struct WorldCoor *from_wcs,
                                  int overwrite)
{
  int i, nobjs = 0;

  /* Find the number of SexObjects in sobjs */
  while (sobjs[nobjs])
    nobjs++;

  // fix up every object in turn
  for (i = 0; i < nobjs; i++)
    {
      fill_missing_WCS_coordinates (sobjs[i], from_wcs, overwrite);
    }
}

/**
 * Function: compute_new_image_coordinates
 * Function that uses the exisiting world coordinates of a SexObject to
 * compute new pixel coordinates using a (new) WCS.
 *
 * Parameters:
 * @param o      - a pointer to an exsting SexObject. Should have been checked
 *                 for validity already
 * @param to_wcs - a pointer to an existing WoldCoord wcs structure
 *
 */
void
compute_new_image_coordinates (SexObject * o, struct WorldCoor *to_wcs)
{
  sky_coord as, bs, cs;
  d_point a, b, c;
  int offscl;

  /******************************************************************/
  /* uses the existing world coordinates of a SexObject to compute  */
  /* new pixel coordinates using a (new) WCS.                       */
  /*******************************************************************/

  /* recomputes xy_image from the world values */
  wcs2pix (to_wcs, o->xy_world.ra, o->xy_world.dec,
           &(o->xy_image.x), &(o->xy_image.y), &offscl);

  /* recomputes el_image them from the world values */

  /* Convert ellipse into a set of 3 coordinates */
  el_to_ABC_world (&(o->el_world), &as, &bs, &cs);

  /* Compute the end point of the axes in the image coord system.
     NOTE(review): since the ABC_image_to_el() call below is disabled,
     the results in a, b, c are currently unused. */
  wcs2pix (to_wcs, as.ra, as.dec, &(a.x), &(a.y), &offscl);
  wcs2pix (to_wcs, bs.ra, bs.dec, &(b.x), &(b.y), &offscl);
  wcs2pix (to_wcs, cs.ra, cs.dec, &(c.x), &(c.y), &offscl);

  /* DISABLED */
  /* convert 3 coordinates into ellipse */
  //  ABC_image_to_el (&a, &b, &c, &(o->el_image));
}

/**
 * Function: compute_new_image_sexobject
 * Function that uses the exisiting world coordinates of a SexObject to
 * compute new pixel coordinates using a (new) WCS.
 *
 * Parameters:
 * @param o      - a pointer to an exsting SexObject. Should have been checked
 *                 for validity already
 * @param to_wcs - a pointer to an existing WoldCoord wcs structure
 */
void
compute_new_image_sexobject (SexObject * o, struct WorldCoor *to_wcs,
                             int th_sky)
{
  sky_coord bs, cs;
  d_point a, b, c;
  int offscl;

  /******************************************************************/
  /* uses the existing world coordinates of a SexObject to compute  */
  /* new pixel coordinates using a (new) WCS.                       */
  /******************************************************************/

  /* recomputes xy_image from the world values */
  wcs2pix (to_wcs, o->xy_world.ra, o->xy_world.dec,
           &(o->xy_image.x), &(o->xy_image.y), &offscl);

  /* Convert ellipse into a set of 3 coordinates */
  el_to_ABC_world2 (&(o->el_world), &(o->xy_world), &bs, &cs);

  /* Compute the end point of the axes in the image coord system */
  wcs2pix (to_wcs, o->xy_world.ra, o->xy_world.dec, &(a.x), &(a.y), &offscl);
  wcs2pix (to_wcs, bs.ra, bs.dec, &(b.x), &(b.y), &offscl);
  wcs2pix (to_wcs, cs.ra, cs.dec, &(c.x), &(c.y), &offscl);

  /* convert 3 coordinates into ellipse */
  ABC_image_to_el2 (&a, &b, &c, &(o->el_image));

  // optionally shift the position angle convention by 90 degrees
  if (th_sky)
    o->el_image.theta = o->el_image.theta - 90.0;
}

/**
 * Function: compute_all_new_image_coordinates
 * Replaces all SexObjects image coordinates with new ones computed using the
 * the passed WCS.
 *
 * Parameters:
 * @param sobjs  - a NULL terminated array containing pointers to SexObjects
 * @param to_wcs - a pointer to an existing WoldCoord wcs structure
 */
void
compute_all_new_image_coordinates (SexObject ** sobjs,
                                   struct WorldCoor *to_wcs)
{
  int i, nobjs = 0;

  /* Find the number of SexObjects in sobjs */
  while (sobjs[nobjs])
    nobjs++;

  // recompute the coordinates of every object in turn
  for (i = 0; i < nobjs; i++)
    {
      compute_new_image_coordinates (sobjs[i], to_wcs);
    }
}

/**
 * Function: free_SexObjects
 * Free a NULL terminated array of SexObject
 *
 * Parameters:
 * @param sobjs - a NULL terminated array containing pointers to SexObjects
 *
 */
void
free_SexObjects (SexObject ** sobjs)
{
  int i, nobjs = 0;

  /* Find the number of SexObjects in sobjs */
  while (sobjs[nobjs])
    nobjs++;

  for (i = 0; i < nobjs; i++)
    {
      // release the per-object magnitude vectors, if present;
      // lambdas is only allocated together with magnitudes
      if (sobjs[i]->magnitudes){
        gsl_vector_free (sobjs[i]->lambdas);
        gsl_vector_free (sobjs[i]->magnitudes);
      }
      free (sobjs[i]);
      sobjs[i] = NULL;
    }
  free (sobjs);
  // NOTE(review): assigning to the local parameter has no effect
  // on the caller's pointer
  sobjs = NULL;
}
{ "alphanum_fraction": 0.5981047775, "avg_line_length": 32.6911694511, "ext": "c", "hexsha": "3448fc8fdf8cea18bbf4a89b7d21825cba4ee129", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f57de55daf77de21d5868ace08b69090778d5975", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "sosey/pyaxe", "max_forks_repo_path": "cextern/src/spc_sex.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "f57de55daf77de21d5868ace08b69090778d5975", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "sosey/pyaxe", "max_issues_repo_path": "cextern/src/spc_sex.c", "max_line_length": 122, "max_stars_count": null, "max_stars_repo_head_hexsha": "f57de55daf77de21d5868ace08b69090778d5975", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "sosey/pyaxe", "max_stars_repo_path": "cextern/src/spc_sex.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 19225, "size": 68488 }
/* * Copyright (c) 2016-2021 lymastee, All rights reserved. * Contact: lymastee@hotmail.com * * This file is part of the gslib project. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #ifndef fsysdwrite_c5ab9cfc_e66a_40b6_8ea1_29b3bde4f5e3_h #define fsysdwrite_c5ab9cfc_e66a_40b6_8ea1_29b3bde4f5e3_h #include <d3d11.h> #include <d3d10_1.h> #include <dxgi.h> #include <d2d1.h> #include <dwrite.h> #include <gslib/std.h> #include <ariel/sysop.h> #include <ariel/rendersysd3d11.h> __ariel_begin__ typedef unordered_map<font, IDWriteTextFormat*> dwrite_font_map; class fsys_dwrite: public fontsys { public: fsys_dwrite(); virtual ~fsys_dwrite(); virtual void initialize() override; virtual void set_font(const font& f) override; virtual bool query_size(const gchar* str, int& w, int& h, int len = -1) override; virtual bool create_text_image(image& img, const gchar* str, int x, int y, const color& cr, int len = -1) override; virtual bool create_text_texture(texture2d** tex, const gchar* str, int margin, const color& cr, int len = -1) override; virtual void draw(image& img, const gchar* str, int x, int y, const color& cr, int len = -1) override; protected: dwrite_font_map _font_map; IDWriteTextFormat* _current_font; com_ptr<ID3D11Device> _dev11; com_ptr<ID3D10Device1> _dev101; com_ptr<IDWriteFactory> _dwfactory; com_ptr<ID2D1Factory> _d2dfactory; protected: void destroy_font_map(); }; __ariel_end__ #endif
{ "alphanum_fraction": 0.7226027397, "avg_line_length": 36.5, "ext": "h", "hexsha": "7497fb28da675c1e89f36bb62d8d0c1c91bee8b0", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2016-10-19T15:20:58.000Z", "max_forks_repo_forks_event_min_datetime": "2016-10-19T15:20:58.000Z", "max_forks_repo_head_hexsha": "1b165b7a812526c4b2a3179588df9a7c2ff602a6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "lymastee/gslib", "max_forks_repo_path": "include/ariel/fsysdwrite.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "1b165b7a812526c4b2a3179588df9a7c2ff602a6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "lymastee/gslib", "max_issues_repo_path": "include/ariel/fsysdwrite.h", "max_line_length": 124, "max_stars_count": 9, "max_stars_repo_head_hexsha": "1b165b7a812526c4b2a3179588df9a7c2ff602a6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "lymastee/gslib", "max_stars_repo_path": "include/ariel/fsysdwrite.h", "max_stars_repo_stars_event_max_datetime": "2022-02-11T09:44:51.000Z", "max_stars_repo_stars_event_min_datetime": "2016-10-18T09:40:09.000Z", "num_tokens": 695, "size": 2628 }
#ifndef PAHEMISSIONMODEL_H_ #define PAHEMISSIONMODEL_H_ #include "Exception.h" #include <algorithm> #include <array> #include <cmath> #include <iomanip> #include <iostream> #include <numeric> #include <utility> #include <vector> #include <gsl/gsl_errno.h> #include <gsl/gsl_integration.h> #include <gsl/gsl_math.h> #include <gsl/gsl_roots.h> class PAHEmissionModel { public: static double _energy; static double _frequency; static double solveInitialTemperatureFunc(double temperature, void *transitions); static double integralOverHeatCapacity(double temperature, void *transitions); static double heatCapacity(double temperature, void *transitions); static double featureStrength(double temperature, void *transitions); static void convertFromFrequencyToWavelength(std::vector<double> &grid); static void convertFromFrequencyToWavelength(std::array<double, 2> &grid); static void convertFromWavelengthToFrequency(std::vector<double> &grid); static void convertFromFrequencyToWavelength( std::vector<std::vector<std::pair<double, double>>> &transitions); static void convertFromWavelengthToFrequency( std::vector<std::vector<std::pair<double, double>>> &transitions); PAHEmissionModel(); PAHEmissionModel( const std::vector<std::vector<std::pair<double, double>>> &transitions); void setTransitions( const std::vector<std::vector<std::pair<double, double>>> &transitions); void getTransitions( std::vector<std::vector<std::pair<double, double>>> &transitions); void shiftTransitions(double shift); void setGrid(const std::vector<double> &grid); void makeGrid(const std::vector<double> &frange, double step); void makeGrid(double fmin, double fmax, double step); std::vector<double> const &getGrid() const; void printTransitions(); void applyCascadeWithEnergy(double energy, std::vector<double> &temperatures); void applyTemperatureWithEnergy(double energy, std::vector<double> &temperatures); void applyBlackbodyWithTemperature(double temperature); void applyBlackbodyWithTemperatureForEach(const 
std::vector<double> &temperatures); void getSpectraAndConvolveWithLorentianOfFHWM( std::vector<std::vector<double>> &vector, double fwhm = 15); void getSpectraAndConvolveWithGaussianOfFHWM( std::vector<std::vector<double>> &vector, double fwhm = 15); void getSpectraAndConvolveWithDrudeOfFHWM(std::vector<std::vector<double>> &vector, double fwhm = 15); private: static constexpr double TemperatureMin = 2.73; static constexpr double TemperatureMax = 5000.0; static constexpr double RootAccuracy = 1e-4; static constexpr double IntegrationAccuracy = 1e-4; static constexpr int MaxIterations = 150; static constexpr int MaxSteps = 100; static constexpr double PlanckConstant = 6.6260693000000018e-27; static constexpr double SpeedOfLight = 29979245800.0; static constexpr double BoltzmannConstant = 1.3806504e-16; std::vector<std::vector<std::pair<double, double>>> _transitions; std::vector<double> _grid; double _fmin; double _fmax; double solveInitialTemperature(double energy, std::vector<std::pair<double, double>> &transitions); double Lorentzian(double frequency, double centroid, double hwhm); double Gaussian(double frequency, double centroid, double sigma); double Drude(double frequency, double centroid, double fwhm); double Blackbody(double frequency, double temperature); }; inline void PAHEmissionModel::makeGrid(const std::vector<double> &frange, double step) { makeGrid(frange[0], frange[1], step); } inline std::vector<double> const &PAHEmissionModel::getGrid() const { return (_grid); } inline double PAHEmissionModel::Lorentzian(double frequency, double centroid, double hwhm) { return ((1.0 / M_PI) * hwhm / (pow(frequency - centroid, 2) + pow(hwhm, 2))); } inline double PAHEmissionModel::Gaussian(double frequency, double centroid, double sigma) { return ((1.0 / (sigma * sqrt(2.0 * M_PI))) * exp(-pow(frequency - centroid, 2) / (2.0 * pow(sigma, 2)))); } inline double PAHEmissionModel::Drude(double frequency, double centroid, double fwhm) { return ((2.0 / (fwhm * M_PI)) * 
pow(fwhm / centroid, 2) / (pow(centroid / frequency - frequency / centroid, 2) + pow(fwhm / centroid, 2))); } inline double PAHEmissionModel::Blackbody(double frequency, double temperature) { return ( (2.0 * PlanckConstant * SpeedOfLight * SpeedOfLight * pow(frequency, 3)) / (exp(PlanckConstant * SpeedOfLight * frequency / (BoltzmannConstant * temperature)) - 1.0)); } #endif /* PAHEMISSIONMODEL_H_ */
{ "alphanum_fraction": 0.6846092504, "avg_line_length": 29.8571428571, "ext": "h", "hexsha": "68357558af6090cc35b6d0e7e026250556e86f38", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-12-23T18:00:50.000Z", "max_forks_repo_forks_event_min_datetime": "2021-12-23T18:00:50.000Z", "max_forks_repo_head_hexsha": "3bd9dc62360c8b97239b4922db32c70c83eb6822", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "PAHdb/CPP-Backend", "max_forks_repo_path": "include/PAHEmissionModel.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "3bd9dc62360c8b97239b4922db32c70c83eb6822", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "PAHdb/CPP-Backend", "max_issues_repo_path": "include/PAHEmissionModel.h", "max_line_length": 80, "max_stars_count": null, "max_stars_repo_head_hexsha": "3bd9dc62360c8b97239b4922db32c70c83eb6822", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "PAHdb/CPP-Backend", "max_stars_repo_path": "include/PAHEmissionModel.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1204, "size": 5016 }
#include <gsl/gsl_test.h> #include <gsl/gsl_ieee_utils.h> #include <gsl/gsl_math.h> #include "gsl_cblas.h" #include "tests.h" void test_rotm (void) { const double flteps = 1e-4, dbleps = 1e-6; { int N = 1; float h[] = { -1.0f, -4.44982e+03f, -15.5826f, 7.091334e+04f, 2.95912e+04f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { -3.956017e+04f }; float y_expected[] = { -1.657054e+04f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 654)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 655)"); } }; }; { int N = 1; float h[] = { 0.0f, 15.9728f, 6.400638e+03f, 1.733082e-05f, 1.524511e-04f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { -0.0340097f }; float y_expected[] = { -218.182f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 656)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 657)"); } }; }; { int N = 1; float h[] = { 1.0f, 5.688411e+04f, 5.914789e+03f, 0.00210473f, 0.0231019f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { -1.93462e+03f }; float y_expected[] = { 0.0210629f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 658)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 659)"); } }; }; { int N = 1; float h[] = { -2.0f, -0.582083f, 0.00103161f, -3.429851e-05f, 7.411469e-05f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { -0.034f }; float y_expected[] = { -0.56f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], 
x_expected[i], flteps, "srotm(case 660)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 661)"); } }; }; { int N = 1; float h[] = { -1.0f, 115.163f, -6.715448e+04f, -258.695f, -16.2552f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { 140.954f }; float y_expected[] = { 2.292355e+03f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 662)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 663)"); } }; }; { int N = 1; float h[] = { 0.0f, -3.314862e+03f, -442.976f, -214.586f, -25.9716f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { 120.134f }; float y_expected[] = { 14.5012f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 664)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 665)"); } }; }; { int N = 1; float h[] = { 1.0f, -1.177304e+03f, -1.236662e-04f, -0.186585f, 1.15841f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { 39.4683f }; float y_expected[] = { -0.614711f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 666)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 667)"); } }; }; { int N = 1; float h[] = { -2.0f, -88.9796f, 0.808226f, 1.106582e-05f, -0.00862288f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { -0.034f }; float y_expected[] = { -0.56f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 668)"); } }; { int i; for (i = 0; i < 1; i++) { 
gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 669)"); } }; }; { int N = 1; float h[] = { -1.0f, -0.00225865f, 8.338551e+04f, -1.98282f, -2.409905e-05f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { 1.11046f }; float y_expected[] = { -2.835107e+03f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 670)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 671)"); } }; }; { int N = 1; float h[] = { 0.0f, 0.258779f, 74.2802f, 0.923299f, 4.847128e+03f }; float X[] = { -0.034f }; int incX = 1; float Y[] = { -0.56f }; int incY = -1; float x_expected[] = { -0.551048f }; float y_expected[] = { -3.08553f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 672)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 673)"); } }; }; { int N = 1; double h[] = { -1.0, -8.00850735044, 0.0204647351647, 1.898461360078e-04, -4.32701487194 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { -6.72728115497 }; double y_expected[] = { 3.09369795149 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 674)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 675)"); } }; }; { int N = 1; double h[] = { 0.0, 1.230610998905e+04, 210.056650134, 9.20757074452, 2.072879691524e+03 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { -5.70658279935 }; double y_expected[] = { 175.736586112 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 676)"); } }; { int i; for (i = 0; i < 1; i++) { 
gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 677)"); } }; }; { int N = 1; double h[] = { 1.0, -1.244580625511e+03, 1.11154682624, 2.269384716089e-05, -0.0143785338883 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { -1.046158725429e+03 }; double y_expected[] = { -0.829776862405 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 678)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 679)"); } }; }; { int N = 1; double h[] = { -2.0, 293.927527276, -2.614737743134e+03, 10.3164975867, -7.947030813329e+03 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { 0.84 }; double y_expected[] = { -0.711 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 680)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 681)"); } }; }; { int N = 1; double h[] = { -1.0, -0.0178609251786, 0.00983044958941, 105.944529127, 1.687350579234e-05 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { -75.3415633866 }; double y_expected[] = { 0.00824558059248 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 682)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 683)"); } }; }; { int N = 1; double h[] = { 0.0, 6.241999071283e-05, 2.495425882445e+03, 304.604891146, 1.604644714854e+04 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { -215.734077605 }; double y_expected[] = { 2.095446741254e+03 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 
684)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 685)"); } }; }; { int N = 1; double h[] = { 1.0, -0.058097639487, 8.386083625428e+03, -10.5233229994, 184.653245391 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { -0.759802017169 }; double y_expected[] = { -132.128457473 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 686)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 687)"); } }; }; { int N = 1; double h[] = { -2.0, -92.8754629217, 1.467547244529e-04, -3.197881072301e-04, -1.89874629713 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { 0.84 }; double y_expected[] = { -0.711 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 688)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 689)"); } }; }; { int N = 1; double h[] = { -1.0, -0.0961996230646, -2.248344186185e-05, -316.856396787, 1.663969157848e+03 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { 225.204090432 }; double y_expected[] = { -1.183082090116e+03 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 690)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 691)"); } }; }; { int N = 1; double h[] = { 0.0, -201.862043128, 4.999906166451e-04, -0.0653365534487, 586.454083328 }; double X[] = { 0.84 }; int incX = 1; double Y[] = { -0.711 }; int incY = -1; double x_expected[] = { 0.886454289502 }; double y_expected[] = { -0.710580007882 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], 
x_expected[i], dbleps, "drotm(case 692)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 693)"); } }; }; { int N = 1; float h[] = { -1.0f, 162.86f, 1.379231e-04f, 9.67285f, 0.929218f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { 106.173f }; float y_expected[] = { 0.358765f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 694)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 695)"); } }; }; { int N = 1; float h[] = { 0.0f, 537.387f, -21.6404f, -1.017074e+03f, -1.730546e-05f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { -391.961f }; float y_expected[] = { -13.2258f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 696)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 697)"); } }; }; { int N = 1; float h[] = { 1.0f, -1.339977e-05f, 0.00522784f, 2.020352e-05f, -0.0654088f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { 0.385992f }; float y_expected[] = { -0.654248f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 698)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 699)"); } }; }; { int N = 1; float h[] = { -2.0f, -50.922f, 31.5261f, -0.194913f, 0.206417f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { 0.629f }; float y_expected[] = { 0.386f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 700)"); } }; { int i; for (i = 0; i < 1; i++) { 
gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 701)"); } }; }; { int N = 1; float h[] = { -1.0f, 1.15659f, 2.599832e+04f, 435.891f, 1.546671e+03f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { 168.981f }; float y_expected[] = { 1.694996e+04f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 702)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 703)"); } }; }; { int N = 1; float h[] = { 0.0f, 3.359889e-04f, -0.00134822f, -12.9136f, -5.655622e+04f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { -4.35566f }; float y_expected[] = { 0.385152f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 704)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 705)"); } }; }; { int N = 1; float h[] = { 1.0f, 2.75119e-05f, 1.70314f, 18.4063f, 185.731f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { 0.386017f }; float y_expected[] = { 71.063f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 706)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 707)"); } }; }; { int N = 1; float h[] = { -2.0f, -1.031009e-04f, -3.378602e+04f, 7.869358e-05f, 157.303f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { 0.629f }; float y_expected[] = { 0.386f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 708)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 709)"); } }; }; { int N = 1; 
float h[] = { -1.0f, 0.00207419f, -89.9374f, -1.40414f, -25.1433f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { -0.540694f }; float y_expected[] = { -66.276f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 710)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 711)"); } }; }; { int N = 1; float h[] = { 0.0f, -4.972562e+04f, 3.65698e-05f, 632.116f, 0.195207f }; float X[] = { 0.629f }; int incX = -1; float Y[] = { 0.386f }; int incY = 1; float x_expected[] = { 244.626f }; float y_expected[] = { 0.386023f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 712)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 713)"); } }; }; { int N = 1; double h[] = { -1.0, 8.64768339859, -105.906731008, -347.053994991, -1.28802789909 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { 218.021288159 }; double y_expected[] = { 72.2119146942 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 714)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 715)"); } }; }; { int N = 1; double h[] = { 0.0, 0.926057152065, 3.315158944851e-04, -1.203638835886e+03, 0.00197484344868 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { 775.673049147 }; double y_expected[] = { -0.645223441713 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 716)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 717)"); } }; }; { int N = 1; double h[] = { 1.0, 
-9.404298701289e-05, -0.00380843381223, -0.0767212569647, -3.66628238398 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { -0.644936615027 }; double y_expected[] = { 3.03875213767 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 718)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 719)"); } }; }; { int N = 1; double h[] = { -2.0, 0.0900662226146, 0.00250500071094, 6.46624826995, -2.159443948633e-05 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { -0.674 }; double y_expected[] = { -0.645 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 720)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 721)"); } }; }; { int N = 1; double h[] = { -1.0, 8.011686652935e+03, -23.8989526115, -1.104879849207e+04, 0.108740065261 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { 1.726598223305e+03 }; double y_expected[] = { 16.0377567181 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 722)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 723)"); } }; }; { int N = 1; double h[] = { 0.0, 5.162681717012e-05, 48.059409562, -4.701209666609e+04, -6.80333644488e+04 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { 3.032212834963e+04 }; double y_expected[] = { -33.0370420448 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 724)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, 
"drotm(case 725)"); } }; }; { int N = 1; double h[] = { 1.0, -5.554806445579e-04, 5.101973060197e+04, -5.932040237374e+03, 3.91045757161 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { -0.644625606046 }; double y_expected[] = { -1.84824513369 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 726)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 727)"); } }; }; { int N = 1; double h[] = { -2.0, -1.697234884626e-05, 101.466514367, 5.772202675851e+03, -6.884724590773e-04 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { -0.674 }; double y_expected[] = { -0.645 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 728)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 729)"); } }; }; { int N = 1; double h[] = { -1.0, -0.0199779342753, 13.013123509, -17.8393347684, 0.129333249919 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { 11.5198360534 }; double y_expected[] = { -8.85426519126 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 730)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 731)"); } }; }; { int N = 1; double h[] = { 0.0, -6.673799053773e+04, 587.759435538, 3.493966594965e+04, 2.098374142331e-05 }; double X[] = { -0.674 }; int incX = -1; double Y[] = { -0.645 }; int incY = 1; double x_expected[] = { -2.253675853752e+04 }; double y_expected[] = { -396.794859553 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 732)"); } }; { int i; for (i = 0; i 
< 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 733)"); } }; }; { int N = 1; float h[] = { -1.0f, 0.070033f, 0.034824f, -0.00740144f, -0.153474f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { -0.00701131f }; float y_expected[] = { 0.0119423f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 734)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 735)"); } }; }; { int N = 1; float h[] = { 0.0f, 7.618016e-04f, -0.00396806f, -92.8408f, -0.0018571f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { 9.4516f }; float y_expected[] = { -0.10256f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 736)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 737)"); } }; }; { int N = 1; float h[] = { 1.0f, -5.833806e+03f, 0.00265668f, -587.573f, 0.0972416f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { 647.449f }; float y_expected[] = { 0.100984f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 738)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 739)"); } }; }; { int N = 1; float h[] = { -2.0f, -8.93339e+04f, -5.16022e-05f, 2.589784e-05f, -7.52586f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { -0.111f }; float y_expected[] = { -0.103f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 740)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 
741)"); } }; }; { int N = 1; float h[] = { -1.0f, 0.125135f, 0.00586453f, 1.100694e-05f, -0.0137436f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { -0.0138912f }; float y_expected[] = { 7.64631e-04f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 742)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 743)"); } }; }; { int N = 1; float h[] = { 0.0f, -0.0996414f, 0.00505806f, 1.321441e-05f, 1.151406e-04f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { -0.111001f }; float y_expected[] = { -0.103561f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 744)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 745)"); } }; }; { int N = 1; float h[] = { 1.0f, 8.18165f, 169.902f, -1.453316e-05f, 1.539957e+03f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { -1.01116f }; float y_expected[] = { -158.505f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 746)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 747)"); } }; }; { int N = 1; float h[] = { -2.0f, 1.827623e-04f, -0.0528808f, 24.7305f, 328.39f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { -0.111f }; float y_expected[] = { -0.103f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 748)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 749)"); } }; }; { int N = 1; float h[] = { -1.0f, -0.0876053f, 
7.858704e+04f, -4.758389e+03f, -0.0114841f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { 490.124f }; float y_expected[] = { -8.72316e+03f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 750)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 751)"); } }; }; { int N = 1; float h[] = { 0.0f, 0.00192188f, -1.031412e-05f, -0.00123957f, 0.312197f }; float X[] = { -0.111f }; int incX = -1; float Y[] = { -0.103f }; int incY = -1; float x_expected[] = { -0.110872f }; float y_expected[] = { -0.102999f }; cblas_srotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], flteps, "srotm(case 752)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], flteps, "srotm(case 753)"); } }; }; { int N = 1; double h[] = { -1.0, -0.0253351881542, -0.105247702585, -7.18405641016, -5.409804811228e+04 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 0.21037864911 }; double y_expected[] = { 1.622920078085e+03 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 754)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 755)"); } }; }; { int N = 1; double h[] = { 0.0, 8.503080247483e+03, -6.186691885896e-05, -0.201279925805, -5.810746179529e-05 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 0.209038397774 }; double y_expected[] = { -0.0300125589845 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 756)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 757)"); } }; }; { int N = 1; double h[] = { 
1.0, 0.351101212426, 64.9574703355, 3.015315809025e-05, -5.291308403203e-04 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 0.0412735461225 }; double y_expected[] = { -0.202984126075 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 758)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 759)"); } }; }; { int N = 1; double h[] = { -2.0, 0.0220262018719, -0.00311338149392, -70.6413298654, 31.8952671416 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 0.203 }; double y_expected[] = { -0.03 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 760)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 761)"); } }; }; { int N = 1; double h[] = { -1.0, 1.549812806922e+04, -4.868519165134e+04, -5.230242596804e+04, 1.58043443456e+04 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 4.715192777093e+03 }; double y_expected[] = { -1.035722423559e+04 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 762)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 763)"); } }; }; { int N = 1; double h[] = { 0.0, -3.30917942895, -0.0100316602276, -0.0222191220411, -0.0881815578726 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 0.203666573661 }; double y_expected[] = { -0.0320364270262 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 764)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, 
"drotm(case 765)"); } }; }; { int N = 1; double h[] = { 1.0, 5.68327898035, 1.646867755046e-04, -0.106527931872, -28.2458905362 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 1.12370563301 }; double y_expected[] = { 0.644376716086 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 766)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 767)"); } }; }; { int N = 1; double h[] = { -2.0, 2.20585352008, 1.117638462348e+03, -0.116329468158, 0.00362096329059 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 0.203 }; double y_expected[] = { -0.03 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 768)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 769)"); } }; }; { int N = 1; double h[] = { -1.0, -0.00182683798892, -2.288460066516e-05, -37.55844708, -9.54075659826e-05 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 1.12638256429 }; double y_expected[] = { -1.783346955549e-06 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 770)"); } }; { int i; for (i = 0; i < 1; i++) { gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 771)"); } }; }; { int N = 1; double h[] = { 0.0, 1.02690456955e-04, -20.1292302013, -1.703870486677e-04, 5.17477399477 }; double X[] = { 0.203 }; int incX = -1; double Y[] = { -0.03 }; int incY = -1; double x_expected[] = { 0.203005111611 }; double y_expected[] = { -4.11623373087 }; cblas_drotm(N, X, incX, Y, incY, h); { int i; for (i = 0; i < 1; i++) { gsl_test_rel(X[i], x_expected[i], dbleps, "drotm(case 772)"); } }; { int i; for (i = 0; i < 1; i++) { 
gsl_test_rel(Y[i], y_expected[i], dbleps, "drotm(case 773)"); } }; }; }
{ "alphanum_fraction": 0.4804074126, "avg_line_length": 23.3763227513, "ext": "c", "hexsha": "084b7a800988ac779f89649a2332f7af85cbc146", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2015-10-02T01:32:59.000Z", "max_forks_repo_forks_event_min_datetime": "2015-10-02T01:32:59.000Z", "max_forks_repo_head_hexsha": "91e70bc88726ee680ec6e8cbc609977db3fdcff9", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "ICML14MoMCompare/spectral-learn", "max_forks_repo_path": "code/em/treba/gsl-1.0/cblas/test_rotm.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "91e70bc88726ee680ec6e8cbc609977db3fdcff9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "ICML14MoMCompare/spectral-learn", "max_issues_repo_path": "code/em/treba/gsl-1.0/cblas/test_rotm.c", "max_line_length": 106, "max_stars_count": 14, "max_stars_repo_head_hexsha": "91e70bc88726ee680ec6e8cbc609977db3fdcff9", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "ICML14MoMCompare/spectral-learn", "max_stars_repo_path": "code/em/treba/gsl-1.0/cblas/test_rotm.c", "max_stars_repo_stars_event_max_datetime": "2021-06-10T11:31:28.000Z", "max_stars_repo_stars_event_min_datetime": "2015-12-18T18:09:25.000Z", "num_tokens": 14688, "size": 35345 }
// // Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE.md file in the project root for full license information. // // CPUMatrix.h : template implementation of all matrix functions on the CPU side // #pragma once #include "Basics.h" #include "File.h" #include "CPUMatrix.h" #include "TensorOps.h" #include <assert.h> #include <stdexcept> #include <omp.h> #include <math.h> #include <random> #include <chrono> #include <exception> #include <thread> #include <iostream> #include <algorithm> #pragma warning(push) #pragma warning(disable:4244) // 'conversion' conversion from 'type1' to 'type2', possible loss of data #include <boost/random/normal_distribution.hpp> #pragma warning(pop) #include <boost/random/uniform_real_distribution.hpp> #ifdef _WIN32 #define NOMINMAX #include "Windows.h" #else #include <cfloat> #endif #ifdef LEAKDETECT #include <vld.h> #endif #pragma warning(disable : 4100) // unreferenced formal parameter; "struct TensorOpReduction<ElemType, OPFN, typename ReductionOp, N, -1>" trigger this #pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this #pragma warning(disable : 4244) // unreachable code; triggered for unknown reasons #pragma warning(disable : 4702) // conversion from 'double' to 'float' #ifdef USE_MKL // requires MKL 10.0 and above #include <mkl.h> #else #ifdef _MSC_VER // Visual Studio doesn't define standard complex types properly #define HAVE_LAPACK_CONFIG_H #define LAPACK_COMPLEX_STRUCTURE #endif #include <cblas.h> #include <lapacke.h> #endif #define SWAP(a, b) \ { \ (a) ^= (b); \ (b) ^= (a); \ (a) ^= (b); \ } #define IDX2C(i, j, ld) (((j) * (ld)) + (i)) // 0 based indexing namespace Microsoft { namespace MSR { namespace CNTK { #pragma region Helpful Enum Definitions enum class MatrixOrder { RowMajor = 101, // row-major arrays ColMajor = 102 // column-major arrays }; enum class MatrixTranspose : char { NoTrans = 'N', // trans='N' 
Trans = 'T', // trans='T' ConjTrans = 'C' // trans='C' }; enum class SymMatrixType : char { Up = 'U', // symmetric matrix is stored in the upper part Low = 'L', // symmetric matrix is stored in thelower part Full = 'F', // full populated NotSymmetric = 'N' // not a symmetric matrix }; enum class MatrixOpSide : char { Left = 'L', // left multiply Right = 'R', // right multiply }; #pragma endregion Helpful Enum Definitions #pragma region Constructors and Destructor template <class ElemType> CPUMatrix<ElemType>::CPUMatrix() { ZeroInit(); } // helper to allocate an array of ElemType // Use this instead of new[] to get NaN initialization for debugging. template <class ElemType> static ElemType* NewArray(size_t n) { ElemType* p = new ElemType[n](); #if 0 // _DEBUG ElemType nan = Matrix<ElemType>::MakeNan(__LINE__); for (size_t i = 0; i < n; i++) p[i] = nan; #endif return p; } template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols) { ZeroInit(); m_numRows = numRows; m_numCols = numCols; SetSizeAllocated(GetNumElements()); if (GetNumElements() != 0) { SetBuffer(NewArray<ElemType>(GetNumElements()), GetNumElements() * sizeof(ElemType)); } } template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags) { ZeroInit(); SetValue(numRows, numCols, pArray, matrixFlags); } //copy constructor, deep copy template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& deepCopyFrom) { ZeroInit(); SetValue(deepCopyFrom); } //assignment operator, deep copy template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(const CPUMatrix<ElemType>& deepCopyFrom) { SetValue(deepCopyFrom); return *this; } //move constructor, shallow copy template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(CPUMatrix<ElemType>&& moveFrom) : Base(/* shallow */ true) { ShallowCopyFrom(moveFrom); moveFrom.ZeroValues(); } // Shortcut of default 
constructor + shallow copy, to avoid one initialization template <class ElemType> CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& shallowCopyFrom, bool shallow) : Base(shallow) { ShallowCopyFrom(shallowCopyFrom); } //move assignment operator, shallow copy template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(CPUMatrix<ElemType>&& moveFrom) { if (this != &moveFrom) { ShallowCopyFrom(moveFrom); // release the pointer from the source object so that the destructor won't release it twice moveFrom.ZeroValues(); } return *this; } template <class ElemType> void CPUMatrix<ElemType>::Clear() { ZeroInit(); } #pragma endregion Constructors and Destructor #pragma region Basic Operators template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::ColumnSlice(size_t startColumn, size_t numCols) const { if (startColumn + numCols > m_numCols) InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) m_numCols); CPUMatrix<ElemType> slice(*this, /* shallow= */ true); slice.m_numCols = numCols; slice.m_sliceViewOffset = m_sliceViewOffset + startColumn * m_numRows; return slice; } // set this(:, 0:numCols-1) = fromMatrix(:, startColumn : startColumn+numCols-1) // TODO: why not say *this = ColumnSlice()? 
template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols) { if (startColumn + numCols > fromMatrix.m_numCols) InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) fromMatrix.m_numCols); Clear(); ShallowCopyFrom(fromMatrix); m_numCols = numCols; m_sliceViewOffset = fromMatrix.m_sliceViewOffset + startColumn * m_numRows; return *this; } // set this(: , startColumn:startColumn+numCols-1)= fromMatrix; template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols) { if (startColumn + numCols > m_numCols) LogicError("The slice is out of range of the destination matrix."); if (numCols > fromMatrix.GetNumCols()) InvalidArgument("The slice (%d) is out of range of the source matrix (%d).", (int) numCols, (int) fromMatrix.GetNumCols()); if (m_numRows != fromMatrix.m_numRows) LogicError("The number of rows in source and destination matrices do not match"); memcpy(Data() + startColumn * m_numRows, fromMatrix.Data(), numCols * m_numRows * sizeof(ElemType)); return *this; } template <class ElemType> void CPUMatrix<ElemType>::CopyColumnsStrided(const CPUMatrix<ElemType>& fromMatrix, size_t numCols, size_t srcNumColsStride, size_t destNumColsStride) { if ((((numCols - 1) * srcNumColsStride) + 1) > fromMatrix.m_numCols) LogicError("The numCols to copy and srcNumColsStride specified is out of range of the source matrix."); if ((((numCols - 1) * destNumColsStride) + 1) > m_numCols) LogicError("The numCols to copy and srcNumColsStride specified is out of range of the destination matrix."); if (m_numRows != fromMatrix.m_numRows) LogicError("The number of rows in source and destination matrices do not match"); long n = (long) numCols, m = (long) m_numRows; auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) 
{ // four-way unrolling for (size_t i = 0; i < (m & ~3); i += 4) { us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride); us(i + 1, j * destNumColsStride) = fromMatrix(i + 1, j * srcNumColsStride); us(i + 2, j * destNumColsStride) = fromMatrix(i + 2, j * srcNumColsStride); us(i + 3, j * destNumColsStride) = fromMatrix(i + 3, j * srcNumColsStride); } // handle remaining for (size_t i = m & ~3; i < m; i++) { us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride); } } } //for each column of a, we add all rows of a to this starting from startIndex template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows) { if (a.GetNumRows() != numRows) LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows."); if (startIndex + numRows > GetNumRows()) LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows()."); if (a.GetNumCols() != GetNumCols()) LogicError("AddToRowSliceValuesOf: columns does not match."); long n = (long) a.GetNumCols(), m = (long) numRows; auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (size_t i = 0, startRow = startIndex; i < (m & ~3); i += 4, startRow += 4) { us(startRow, j) = a(i, j); us(startRow + 1, j) = a(i + 1, j); us(startRow + 2, j) = a(i + 2, j); us(startRow + 3, j) = a(i + 3, j); } // handle remaining stuffs for (size_t i = m & ~3, startRow = startIndex + (m & ~3); i < m; i++, startRow++) { us(startRow, j) = a(i, j); } } return *this; } //for each column of a, we assign numRows starting from startIndex to this template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows) { if (startIndex + numRows > a.GetNumRows()) LogicError("AssignRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows()."); RequireSize(numRows, a.GetNumCols()); long 
n = (long) a.GetNumCols(); // note: OpenMP requires loop indices to be long, not size_t long k = (long) a.GetNumRows(); #pragma omp parallel for for (long j = 0; j < n; j++) { // memory copy might be faster? memcpy(Data() + j * numRows, a.Data() + j * k + startIndex, sizeof(ElemType) * numRows); // //four-way unrolling // for (long i=0, startRow = startIndex; i<(m & ~3); i+=4, startRow+=4) // { // us(i,j) = a(startRow,j); // us(i+1,j) = a(startRow+1,j); // us(i+2,j) = a(startRow+2,j); // us(i+3,j) = a(startRow+3,j); // } // //handle remaining stuffs // for (long i=m & ~3, startRow = startIndex+(m & ~3); i<m; i++, startRow++) // { // us(i,j) = a(startRow,j); // } } return *this; } //for the row slice of this starting from startIndex we add a to it. template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows) { if (a.IsEmpty()) LogicError("AddToRowSliceValuesOf: input matrix a is empty."); if (a.GetNumRows() != numRows) LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows."); if (startIndex + numRows > GetNumRows()) LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows()."); if (a.GetNumCols() != GetNumCols()) LogicError("AddToRowSliceValuesOf: columns does not match."); long n = (long) a.GetNumCols(), m = (long) numRows; auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4) { us(startRow, j) += a(i, j); us(startRow + 1, j) += a(i + 1, j); us(startRow + 2, j) += a(i + 2, j); us(startRow + 3, j) += a(i + 3, j); } // handle remaining stuffs for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++) { us(startRow, j) += a(i, j); } } return *this; } //for each column of this, we add row slice of a starting from startIndex template <class ElemType> CPUMatrix<ElemType>& 
CPUMatrix<ElemType>::AddWithRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows) { if (a.IsEmpty()) LogicError("AddWithRowSliceValuesOf: input matrix a is empty."); if (GetNumRows() != numRows) LogicError("AddWithRowSliceValuesOf: GetNumRows() != numRows."); if (startIndex + numRows > a.GetNumRows()) LogicError("AddWithRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows()."); if (a.GetNumCols() != GetNumCols()) LogicError("AddWithRowSliceValuesOf: columns does not match."); long n = (long) a.GetNumCols(), m = (long) numRows; auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4) { us(i, j) += a(startRow, j); us(i + 1, j) += a(startRow + 1, j); us(i + 2, j) += a(startRow + 2, j); us(i + 3, j) += a(startRow + 3, j); } // handle remaining stuffs for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++) { us(i, j) += a(startRow, j); } } return *this; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::Diagonal() const { if (m_numRows != m_numCols) LogicError("Diagonal can be called only for square matrix. (rows=%d, cols=%d)", (int) m_numRows, (int) m_numCols); CPUMatrix<ElemType> diag(1, m_numCols); auto& us = *this; #pragma omp parallel for for (long i = 0; i < m_numRows; i++) { diag(0, (size_t) i) = us(i, i); } return diag; } template <class ElemType> void CPUMatrix<ElemType>::MinusOneAt(CPUMatrix<ElemType>& c, const size_t position) { if (position < c.GetNumElements()) c.Data()[position] -= 1.0; else RuntimeError("MinusOneAt: position is out of CPU matrix size"); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRepeatOf(const CPUMatrix<ElemType>& a, const size_t numRowRepeats, const size_t numColRepeats) { if (this == &a) LogicError("AssignRepeatOf: a is the same as [this]. 
Does not support inplace repeat."); if (a.IsEmpty()) LogicError("AssignRepeatOf: Matrix a is empty."); RequireSize(a.GetNumRows() * numRowRepeats, a.GetNumCols() * numColRepeats); long n = (long) a.GetNumCols(), m = (long) a.GetNumRows(); auto& us = *this; #pragma omp parallel for for (long q = 0; q < numColRepeats; q++) { for (long p = 0; p < numRowRepeats; p++) { long colOffset = q * n; for (long j = 0; j < n; j++, colOffset++) { long rowOffset = p * m; // four-way unrolling for (long i = 0; i < (m & ~3); i += 4, rowOffset += 4) { us(rowOffset, colOffset) = a(i, j); us(rowOffset + 1, colOffset) = a(i + 1, j); us(rowOffset + 2, colOffset) = a(i + 2, j); us(rowOffset + 3, colOffset) = a(i + 3, j); } // handle remaining stuffs for (long i = m & ~3; i < m; i++, rowOffset++) { us(rowOffset, colOffset) = a(i, j); } } } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowRepeatValuesOf(const CPUMatrix<ElemType>& a, const size_t numRepeats) { if (a.IsEmpty()) LogicError("AddToRowRepeatValuesOf: input matrix a is empty."); if (a.GetNumRows() != GetNumRows() * numRepeats) LogicError("AddToRowRepeatValuesOf: a.GetNumRows() != GetNumRows() * numRepeats."); long n = (long) a.GetNumCols(), m = (long) GetNumRows(); auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { for (long k = 0; k < numRepeats; k++) { us(i, j) += a(k * m + i, j); us(i + 1, j) += a(k * m + i + 1, j); us(i + 2, j) += a(k * m + i + 2, j); us(i + 3, j) += a(k * m + i + 3, j); } } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { for (long k = 0; k < numRepeats; k++) { us(i, j) += a(k * m + i, j); } } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber) { a; posNumber; negNumber; shiftNumber; 
NOT_IMPLEMENTED; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddFoldedPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber) { a; posNumber; negNumber; shiftNumber; NOT_IMPLEMENTED; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::Transpose() { if (IsEmpty()) LogicError("Transpose: Matrix is empty."); CPUMatrix<ElemType> c; c.AssignTransposeOf(*this); return c; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTransposeOf(const CPUMatrix<ElemType>& a) { if (this == &a) LogicError("AssignTransposeOf: a is the same as [this]. Does not support inplace transpose."); if (a.IsEmpty()) LogicError("AssignTransposeOf: Matrix a is empty."); RequireSize(a.GetNumCols(), a.GetNumRows()); long n = (long) a.GetNumCols(), m = (long) a.GetNumRows(); auto& us = *this; #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(j, i) = a(i, j); us(j, i + 1) = a(i + 1, j); us(j, i + 2) = a(i + 2, j); us(j, i + 3) = a(i + 3, j); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(j, i) = a(i, j); } } return *this; } // dst[i] = src[i] * alpha + dst[i] * beta // scale a column vector and add it to another // The usual special case: If beta = 0, then dst[] is not read, and may be uninitialized or NaN. 
template <class ElemType> static void ScaleAndAddColumn(ElemType beta, ElemType* dst, const ElemType* src, size_t numRows, ElemType alpha) { if (alpha != 1) // rare case: just do the full thing for (size_t i = 0; i < numRows; i++) dst[i] = beta * dst[i] + alpha * src[i]; else if (beta == 1) // used in backprop for (size_t i = 0; i < numRows; i++) dst[i] += src[i]; else if (beta == 0) // plain assignment memcpy(dst, src, sizeof(ElemType) * numRows); else // alpha=1, arbitrary beta: also rare case for (size_t i = 0; i < numRows; i++) dst[i] = beta * dst[i] + src[i]; } // *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoGatherColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha) { if (idx.GetNumRows() != 1) // index is 1-dimensional only InvalidArgument("DoGatherColumnsOf: Map must be a row vector."); if (beta) VerifySize(a.GetNumRows(), idx.GetNumCols()); else Resize(a.GetNumRows(), idx.GetNumCols()); auto& us = *this; // race-condition consideration: Since this loops over independent output columns, this has no race condition. Cf. DoScatterColumnsOf(). #pragma omp parallel for // TODO: Depending in circumstance, it may be more efficient to parallelize over rows. foreach_column(jOut, us) { auto jInF = idx(0, jOut); // this is the column we need to get if (std::isnan(jInF) || jInF < 0) // negative index means gap continue; size_t jIn = (size_t)jInF; if (jIn >= a.GetNumCols()) InvalidArgument("DoGatherColumnsOf: Map out of bounds. 
%ld >= %ld", (long int)jIn, (long int)a.GetNumCols()); ScaleAndAddColumn(beta, &us(0,jOut), &a(0,jIn), us.GetNumRows(), alpha); } return *this; } // *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoScatterColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha) { if (idx.GetNumRows() != 1) // index is 1-dimensional only InvalidArgument("DoScatterColumnsOf: Map must be a row vector."); if (idx.GetNumCols() != a.GetNumCols()) InvalidArgument("DoScatterColumnsOf: Map must have width of input vector."); if (a.GetNumRows() != GetNumRows()) InvalidArgument("DoScatterColumnsOf: Output must have same height as input vector."); auto& us = *this; // pre-scale with beta upfront // Scatter may add more than one source column to the same target, so we must pre-scale with beta, and then just keep adding. Scale(beta, us); // if beta is 0, then this will be a memset() // race-condition consideration: If idx[] references the same target column multiple times, this can have a race condition, // and hence cannot use parallelism. //#pragma omp parallel for // TODO: Depending in circumstance, it may be more efficient to parallelize over rows. 
foreach_column(jIn, a) { auto jOutF = idx(0, jIn); // this is the column we copy/add into if (std::isnan(jOutF) || jOutF < 0) // negative index means gap continue; size_t jOut = (size_t)jOutF; if (jOut >= GetNumCols()) InvalidArgument("DoGatherColumnsOf: Map out of bounds."); ScaleAndAddColumn(/*beta=*/(ElemType)1, &us(0, jOut), &a(0, jIn), us.GetNumRows(), alpha); } return *this; } template <class ElemType> void CPUMatrix<ElemType>::SetValue(const ElemType v) { if (IsEmpty()) LogicError("SetValue: Matrix is empty."); bool isFinite = std::numeric_limits<ElemType>::is_integer || std::isfinite((double) v); if (isFinite && v == 0) { memset(Data(), 0, sizeof(ElemType) * GetNumElements()); } else { ElemType* bufPtr = Data(); long m = (long) GetNumElements(); // 2-way thread parallelism is sufficient for the memory bound // operation of just setting the values of an array. const unsigned SETVALUE_NUM_THREADS = 2; UNUSED(SETVALUE_NUM_THREADS); // in case OMP is turned off. #pragma omp parallel for num_threads(SETVALUE_NUM_THREADS) // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { bufPtr[i] = v; bufPtr[i + 1] = v; bufPtr[i + 2] = v; bufPtr[i + 3] = v; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { bufPtr[i] = v; } } } template <class ElemType> void CPUMatrix<ElemType>::MaskColumnsValue(const CPUMatrix<char>& columnsMask, ElemType val, size_t numColsPerMaskEntry) { if (GetNumCols() != (columnsMask.GetNumCols() * numColsPerMaskEntry)) RuntimeError("MaskColumnsValue: Matrix number of columns must equal 'column mask number of columns * numColsPerMaskEntry'."); auto& us = *this; long n = (long)columnsMask.GetNumCols(), m = (long) GetNumRows(); #pragma omp parallel for for (long j = 0; j < n; j++) { if (columnsMask(0, j) == 1) continue; for (long k = 0; k < numColsPerMaskEntry; ++k) { // four-way unrolling for (size_t i = 0; i < (m & ~3); i += 4) { us(i, (j * numColsPerMaskEntry) + k) = val; us(i + 1, (j * numColsPerMaskEntry) + k) = val; us(i + 2, 
(j * numColsPerMaskEntry) + k) = val;
                us(i + 3, (j * numColsPerMaskEntry) + k) = val;
            }
            // handle remaining
            for (size_t i = m & ~3; i < m; i++)
            {
                us(i, (j * numColsPerMaskEntry) + k) = val;
            }
        }
    }
}

// Copy a raw buffer into column j of this matrix.
// Assumes colPointer (when non-null) holds at least GetNumRows() elements -- TODO confirm at call sites.
// A null colPointer is silently ignored (deliberate no-op, not an error).
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType* colPointer, size_t j)
{
    if (IsEmpty())
        LogicError("SetColumn: Matrix is empty.");
    if (colPointer == NULL)
        return;

    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, j) = colPointer[i];
        us(i + 1, j) = colPointer[i + 1];
        us(i + 2, j) = colPointer[i + 2];
        us(i + 3, j) = colPointer[i + 3];
    }
    // handle remaining stuffs
    for (long i = m & ~3; i < m; i++)
    {
        us(i, j) = colPointer[i];
    }
}

// Set every entry of column j to the scalar 'val'.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType val, size_t j)
{
    if (IsEmpty())
        LogicError("SetColumn: Matrix is empty.");

    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, j) = val;
        us(i + 1, j) = val;
        us(i + 2, j) = val;
        us(i + 3, j) = val;
    }
    // handle remaining stuffs
    for (long i = m & ~3; i < m; i++)
    {
        us(i, j) = val;
    }
}

// Copy a column vector (GetNumRows() x 1 matrix) into column j of this matrix.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const CPUMatrix<ElemType>& valMat, size_t j)
{
    if (IsEmpty())
        LogicError("SetColumn: Matrix is empty.");
    if (valMat.GetNumRows() != GetNumRows() || valMat.GetNumCols() != 1)
        LogicError("The valMat matrix has incorrect number of rows or columns.");

    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, j) = valMat(i, 0);
        us(i + 1, j) = valMat(i + 1, 0);
        us(i + 2, j) = valMat(i + 2, 0);
        us(i + 3, j) = valMat(i + 3, 0);
    }
    // handle remaining stuffs
    for (long i = m & ~3; i < m; i++)
    {
        us(i, j) = valMat(i, 0);
    }
}

// Deep-copy another CPU matrix into this one; self-assignment is a no-op.
// Delegates to the (rows, cols, pArray, flags) overload with flags == 0, i.e. this
// matrix takes a fresh copy of the data (it does not alias deepCopyFrom's buffer).
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUMatrix<ElemType>& deepCopyFrom)
{
    if (this == &deepCopyFrom)
        return;
    SetValue(deepCopyFrom.GetNumRows(), deepCopyFrom.GetNumCols(), deepCopyFrom.Data(), 0);
}

#if 0
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUMatrix<ElemType>& /*deepCopyFrom*/)
{
    NOT_IMPLEMENTED;
}

template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUSparseMatrix<ElemType>& deepCopyFrom)
{
    deepCopyFrom.AssignColumnSliceToDense(*this, 0, deepCopyFrom.GetNumCols());
}

template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUSparseMatrix<ElemType>& /*deepCopyFrom*/)
{
    NOT_IMPLEMENTED;
}
#endif

// Populate this matrix from a raw array.
// matrixFlags:
//  - matrixFlagDontOwnBuffer: adopt pArray as an externally managed buffer (no copy);
//  - matrixFormatRowMajor:    pArray is row-major and must be transposed into our
//                             column-major layout (done per column via cblas_?copy with stride numCols).
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
    if (pArray == nullptr && numRows * numCols > 0)
        InvalidArgument("Invalid pArray. pArray == nullptr, but matrix is of size %d * %d = %d.", (int)numRows, (int)numCols, (int)(numRows * numCols));

    SetFormat(matrixFormatDense);
    SetComputeDeviceId(CPUDEVICE);

    // if it's externally managed, then populate the structure
    if (matrixFlags & matrixFlagDontOwnBuffer)
    {
        // free previous array allocation if any before overwriting
        delete[] Buffer();

        m_numRows = numRows;
        m_numCols = numCols;
        SetBuffer(pArray, GetNumElements() * sizeof(ElemType), true);
        SetSizeAllocated(GetNumElements());
    }
    else
    {
        RequireSize(numRows, numCols);
        if (!IsEmpty())
        {
            if (!(matrixFlags & matrixFormatRowMajor)) // compatible to internal structure
                memcpy(Data(), pArray, GetNumElements() * sizeof(ElemType));
            else // need to transpose
            {
                ElemType* bufPtr = Data();
                auto& us = *this;
                // ElemType is float or double only; dispatch on size to pick the BLAS copy routine.
                if (sizeof(ElemType) == sizeof(double))
                {
#pragma omp parallel for
                    foreach_column (j, us)
                    {
                        // source column j of the row-major input is strided by numCols
                        cblas_dcopy((int) numRows, reinterpret_cast<double*>(pArray + j), (int) numCols, reinterpret_cast<double*>(bufPtr + LocateColumn(j)), 1);
                    }
                }
                else
                {
#pragma omp parallel for
                    foreach_column (j, us)
                    {
                        {
#pragma warning(suppress : 4244)
                            cblas_scopy((int) numRows, reinterpret_cast<float*>(pArray + j), (int) numCols, reinterpret_cast<float*>(bufPtr + LocateColumn(j)), 1);
                        }
                    }
                }
            }
        }
    }
}

// Set every diagonal entry to the scalar v; the matrix must be square.
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const ElemType v)
{
    if (GetNumRows() != GetNumCols())
        LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");

    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, i) = v;
        us(i + 1, i + 1) = v;
        us(i + 2, i + 2) = v;
        us(i + 3, i + 3) = v;
    }
    // handle remaining stuffs
    for (long i = m & ~3; i < m; i++)
    {
        us(i, i) = v;
    }
}

// Set the diagonal from a vector (row or column); a 1x1 vector broadcasts as a scalar.
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const CPUMatrix<ElemType>& vector)
{
    if (IsEmpty() || vector.IsEmpty())
        LogicError("SetDiagonalValue: Matrix is empty.");

    if (GetNumRows() != GetNumCols())
        LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");

    if (vector.GetNumRows() != 1 && vector.GetNumCols() != 1)
        LogicError("SetDiagonalValue: input vector must be a vector.");

    if (vector.GetNumElements() == 1) // reduce to simple form
        SetDiagonalValue(vector(0, 0));
    else if (vector.GetNumRows() != GetNumRows() && vector.GetNumCols() != GetNumRows())
        LogicError("SetDiagonalValue: input vector's dimension does not agree with [this].");
    else
    {
        auto& us = *this;

        long m = (long) GetNumRows();
        if (vector.GetNumRows() == 1) // row vector
        {
#pragma omp parallel for
            // four-way unrolling
            for (long i = 0; i < (m & ~3); i += 4)
            {
                us(i, i) = vector(0, i);
                us(i + 1, i + 1) = vector(0, i + 1);
                us(i + 2, i + 2) = vector(0, i + 2);
                us(i + 3, i + 3) = vector(0, i + 3);
            }
            // handle remaining stuffs
            for (long i = m & ~3; i < m; i++)
            {
                us(i, i) = vector(0, i);
            }
        }
        else
        {
#pragma omp parallel for
            // four-way unrolling
            for (long i = 0; i < (m & ~3); i += 4)
            {
                us(i, i) = vector(i, 0);
                us(i + 1, i + 1) = vector(i + 1, 0);
                us(i + 2, i + 2) = vector(i + 2, 0);
                us(i + 3, i + 3) = vector(i + 3, 0);
            }
            // handle remaining stuffs
            for (long i = m & ~3; i < m; i++)
            {
                us(i, i) = vector(i, 0);
            }
        }
    }
}

template <class ElemType>
void
// Fill the whole matrix with i.i.d. uniform draws from [low, high).
// seed == USE_TIME_BASED_SEED selects a wall-clock seed; otherwise the run is reproducible.
CPUMatrix<ElemType>::SetUniformRandomValue(const ElemType low, const ElemType high, unsigned long seed)
{
    if (IsEmpty())
        LogicError("SetUniformRandomValue: Matrix is empty.");

    std::mt19937_64 generator;
    generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::uniform_real_distribution<ElemType> r(low, high);

    ElemType* bufPtr = Data();
    long m = (long) GetNumElements();
    // four-way unrolling (no OMP here: a single generator instance must be consumed sequentially)
    for (long i = 0; i < (m & ~3); i += 4)
    {
        bufPtr[i] = r(generator);
        bufPtr[i + 1] = r(generator);
        bufPtr[i + 2] = r(generator);
        bufPtr[i + 3] = r(generator);
    }
    // handle remaining stuffs
    for (long i = m & ~3; i < m; i++)
    {
        bufPtr[i] = r(generator);
    }
}

// Fill the whole matrix with i.i.d. Gaussian draws N(mean, sigma).
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    if (sigma <= 0)
        InvalidArgument("SetUniformRandomValue: sigma must be a positive value.");

    if (IsEmpty())
        LogicError("SetUniformRandomValue: Matrix is empty.");

    auto& us = *this;
    std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<ElemType> r(mean, sigma);
    // #pragma omp parallel for   // is it thread safe?
    foreach_coord (i, j, us)
    {
        us(i, j) = r(generator);
    }
}

// NOTE(review): despite the name, this ASSIGNS fresh Gaussian draws (us(i, j) = r(...)),
// it does not add to the existing values -- confirm whether callers expect accumulation.
template <class ElemType>
void CPUMatrix<ElemType>::AddGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    if (sigma <= 0)
        InvalidArgument("SetUniformRandomValue: sigma must be a positive value.");

    if (IsEmpty())
        LogicError("SetUniformRandomValue: Matrix is empty.");

    auto& us = *this;
    std::mt19937_64 generator;
    generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<ElemType> r(mean, sigma);

    long m = (long) GetNumRows(), n = (long) GetNumCols();
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = r(generator);
            us(i + 1, j) = r(generator);
            us(i + 2, j) = r(generator);
            us(i + 3, j) = r(generator);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = r(generator);
        }
    }
}

//maskRate: percentage of values masked out (similar to dropout rate)
//scaleValue: which scale value to set to the left ones (unmasked items).
// Each entry independently becomes 0 with probability maskRate, else scaleValue.
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomMask(const ElemType maskRate, const ElemType scaleValue, RNGHandle& rngHandle)
{
    if (IsEmpty())
        LogicError("SetUniformRandomValue: Matrix is empty.");

    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");

    auto& us = *this;
    boost::random::uniform_real_distribution<ElemType> r(0, 1);
    long m = (long) GetNumRows(), n = (long) GetNumCols();
    ElemType v;
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            v = r(cpuRNGHandle->Generator());
            us(i, j) = v <= maskRate ? 0 : scaleValue;
            v = r(cpuRNGHandle->Generator());
            us(i + 1, j) = v <= maskRate ? 0 : scaleValue;
            v = r(cpuRNGHandle->Generator());
            us(i + 2, j) = v <= maskRate ? 0 : scaleValue;
            v = r(cpuRNGHandle->Generator());
            us(i + 3, j) = v <= maskRate ? 0 : scaleValue;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            v = r(cpuRNGHandle->Generator());
            us(i, j) = v <= maskRate ? 0 : scaleValue;
        }
    }
}

// Adagrad update. *this holds the accumulated squared gradients; 'gradients' is
// scaled in place by 1/sqrt(accumulator + floor). Returns the average scaling
// multiplier when requested (for learning-rate diagnostics), else 1.
template <class ElemType>
ElemType CPUMatrix<ElemType>::Adagrad(CPUMatrix<ElemType>& gradients, const bool needAveMultiplier)
{
    ElemType aveMultiplier = 0;

    // lazily (re)initialize the accumulator to match the gradient's shape
    if (IsEmpty() || gradients.GetNumCols() != GetNumCols() || gradients.GetNumRows() != GetNumRows())
    {
        RequireSize(gradients.GetNumRows(), gradients.GetNumCols());
        SetValue(0.0);
    }

    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols())
        LogicError("The matrix gradients must have the same rows and columns as this matrix.");

    ElemType *a = Data(), *d_v = gradients.Data();
    size_t n = GetNumElements();

    const ElemType floor = 1e-16f; // guards against division by zero
    ElemType a0, a1, a2, a3;

    // disable omp here because aveMultiper needs to be added atomically. however, it seems the result is incorrect even if rmp atomic and amp critical are used.
    // #pragma omp parallel for
    for (long i = 0; i < (n & ~3); i += 4) // four-way unrolling
    {
        a[i] += d_v[i] * d_v[i];
        a[i + 1] += d_v[i + 1] * d_v[i + 1];
        a[i + 2] += d_v[i + 2] * d_v[i + 2];
        a[i + 3] += d_v[i + 3] * d_v[i + 3];

        a0 = sqrt(a[i] + floor);
        a1 = sqrt(a[i + 1] + floor);
        a2 = sqrt(a[i + 2] + floor);
        a3 = sqrt(a[i + 3] + floor);

        d_v[i] /= a0;
        d_v[i + 1] /= a1;
        d_v[i + 2] /= a2;
        d_v[i + 3] /= a3;

        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0 + 1 / a1 + 1 / a2 + 1 / a3;
        }
    }

    // get the last few elements if any
    for (long i = n & ~3; i < n; i++)
    {
        a[i] += d_v[i] * d_v[i];
        a0 = sqrt(a[i] + floor);
        d_v[i] /= a0;

        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0;
        }
    }

    if (needAveMultiplier && n > 0)
        return aveMultiplier / n;
    else
        return 1;
}

// FSAdagrad: Adagrad-style scaling with exponential smoothing plus momentum.
// *this holds two slabs side by side: [smoothed squared grads | momentum].
template <class ElemType>
void CPUMatrix<ElemType>::FSAdagrad(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample, ElemType momentum, ElemType adaWeight, ElemType adaMul, bool unitGainMomentum)
{
    auto unitGainFactor = ElemType(unitGainMomentum ?
(1.0 - momentum) : 1.0);

    // state matrix is twice as wide as the gradient: [smoothAda | smoothMom]
    size_t numColsNeeded = 2 * gradients.GetNumCols();

    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0);
    }

    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");

    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();     // first slab: smoothed squared gradients
    ElemType* smoothMom = Data() + n; // second slab: momentum accumulator
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = grad[i];
        ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
        smoothAda[i] = adaSqr;
        if (adaSqr != 0.0f)
        {
            ElemType ada = sqrt(adaSqr);
            ElemType w = adaMul * ((ElemType) 1.0 / ada);

            if (w > 10.0f) // cap the per-element multiplier
                w = 10.0f;
            g *= w;
        }

        if (momentum > 0.0f)
        {
            g = momentum * smoothMom[i] + unitGainFactor * g;
            smoothMom[i] = g;
        }

        g *= learnRatePerSample;
        val[i] -= g;
    }
}

// Adam-style update (no bias correction visible here; adaMul is applied by the caller).
// *this holds [smoothed squared grads | momentum], same layout as FSAdagrad.
template <class ElemType>
void CPUMatrix<ElemType>::Adam(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample, ElemType momentum, ElemType adaWeight, ElemType adaMul, bool unitGainMomentum)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols();
    auto unitGainFactor = ElemType(unitGainMomentum ? (1.0 - momentum) : 1.0);

    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0);
    }

    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");

    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();
    ElemType* smoothMom = Data() + n;
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = grad[i];
        ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
        smoothAda[i] = adaSqr;
        ElemType ada = sqrt(adaSqr);
        ElemType w = adaMul * (ElemType)( 1.0 / (ada + 1e-8)); // epsilon keeps the divisor positive
        g = momentum * smoothMom[i] + unitGainFactor * g;
        smoothMom[i] = g;
        val[i] -= g * w * learnRatePerSample;
    }
}

// RMSProp with per-element adaptive step sizes (Rprop-style sign agreement).
// *this is three slabs wide: [avars (moving avg of grad^2) | signs (previous grad sign) | steps].
template <class ElemType>
ElemType CPUMatrix<ElemType>::RmsProp(CPUMatrix<ElemType>& gradients, ElemType RMS_GAMMA, ElemType RMS_WGT_INC, ElemType RMS_WGT_MAX, ElemType RMS_WGT_DEC, ElemType RMS_WGT_MIN, const bool needAveMultiplier)
{
    const ElemType floor = 1e-6f;

    size_t n = gradients.GetNumElements();
    ElemType* curr_grad = gradients.Data();

    if (IsEmpty() || GetNumCols() < gradients.GetNumCols() * 3)
    {
        RequireSize(gradients.GetNumRows(), gradients.GetNumCols() * 3);
        SetValue(0.0);

        ElemType* avars = Data();         // accumulated variances for RMS scaling
        ElemType* steps = Data() + 2 * n; // current step size

        // initialize moving average of gradient-squared
        for (long i = 0; i < n; i++)
            avars[i] = curr_grad[i] * curr_grad[i];

        // initialize starting step size
        for (long i = 0; i < n; i++)
            steps[i] = ElemType(0.02);
    }

    ElemType* avars = Data();         // accumulated variances for RMS scaling
    ElemType* signs = Data() + n;     // sign of previous gradient
    ElemType* steps = Data() + 2 * n; // current step size

    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols() * 3)
        LogicError("The matrix gradients does not have expected dimensions.");

    ElemType ONE_MINUS_GAMMA = ElemType(1.0) - RMS_GAMMA;
    // int upd[] = {
    //     2,2,0,
    //     2,2,0,
    //     1,1,1,
    //     2,2,0,
    //     1,2,1,
    //     0,2,2,
    //     1,1,1,
    //     0,2,2,
    //     0,2,2,
    // };

    // for (long i=0; i<n; i++)
    // {
    //     avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
    //     // grad sign base 3: 0->neg, 1->zero, 2->pos
    //     const int grad_sign = 1 + (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
    //     // signs[i] contains three consecutive grad_sign
    //     signs[i] = 3*(int(signs[i]) % 9) + grad_sign;
    //     switch(upd[int(signs[i])])
    //     {
    //     case 0:
    //         steps[i] = max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
    //         break;
    //     case 2:
    //         steps[i] = min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
    //         break;
    //     }
    //     curr_grad[i] *= steps[i] / sqrt(avars[i] + floor);
    // }

    ElemType aveMultiplier = 0, a;
    for (long i = 0; i < n; i++)
    {
        avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
        // grad_sign in {-1, 0, +1}
        const int grad_sign = (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));

        // grow the step while the gradient keeps its sign, shrink on a sign flip
        if (signs[i] * grad_sign > 0)
            steps[i] = std::min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
        else
            steps[i] = std::max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);

        a = steps[i] / sqrt(avars[i] + floor);
        curr_grad[i] *= a;
        signs[i] = (ElemType) grad_sign;

        if (needAveMultiplier)
            aveMultiplier += a;
    }

    if (needAveMultiplier)
        return aveMultiplier / n;
    else
        return 1;
}

// AdaDelta update. *this holds [smoothAda (avg of grad^2) | smoothX2 (avg of update^2)].
template <class ElemType>
void CPUMatrix<ElemType>::AdaDelta(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learningRate, ElemType rho, ElemType epsilon)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols();

    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0);
    }

    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");

    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();
    ElemType* smoothX2 = Data() + n;
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    for (long i = 0; i < n; i++)
    {
        ElemType g = grad[i];
        ElemType adaSqr = rho * smoothAda[i] + (1 - rho) * g * g;
        smoothAda[i] = adaSqr;
        ElemType x2 = smoothX2[i];
        ElemType deltaX = -sqrt(x2 + epsilon) / sqrt(adaSqr + epsilon) * g;
        smoothX2[i] = rho * smoothX2[i] + (1 - rho) * deltaX * deltaX;
        val[i] += learningRate * deltaX;
    }
}

// Reinterpret the element buffer with a new shape; element count must be preserved.
template <class ElemType>
void CPUMatrix<ElemType>::Reshape(const size_t numRows, const size_t numCols)
{
    if (numRows * numCols != GetNumElements())
        InvalidArgument("Reshape: Total number of elements does not match.");

    m_numRows = numRows;
    m_numCols = numCols;
}

// RequireSize() -- Tests if the matrix is the right size. If not, resizes the matrix. This avoids the VerifyResizable check if we're already the right size.
template <class ElemType>
void CPUMatrix<ElemType>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
    if (GetNumRows() != numRows || GetNumCols() != numCols)
        Resize(numRows, numCols, growOnly);
}

// Resize() -- change matrix size
// This function is cheap if the matrix size does not change.
// Current content is not preserved.
// If growOnly is true, resize will not reallocate memory if the current memory is large enough (i.e., will not shrink).
// If this object does not own its memory then new memory cannot be allocated (one can still shrink and/or reshape).
template <class ElemType>
void CPUMatrix<ElemType>::Resize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
    if (GetNumRows() == numRows && GetNumCols() == numCols)
        return;

    VerifyResizable(__func__);

    size_t numElements = numRows * numCols;
    if (numElements > GetSizeAllocated() ||                 // grow allocation
        (!growOnly && numElements != GetSizeAllocated()))   // shrink allocation (not if 'growOnly')
    {
        // reallocate buffer
        ElemType* pArray = nullptr;
        if (numElements > 0)
        {
            pArray = NewArray<ElemType>(numElements);
        }
        // success: update the object
        delete[] Buffer();
        SetBuffer(pArray, numElements * sizeof(ElemType));
        SetSizeAllocated(numElements);
    }

    // success
    m_sliceViewOffset = 0;
    m_numRows = numRows;
    m_numCols = numCols;
}

// allocated by the callee but should be deleted by the caller
// TODO: change to use STL vector instead
template <class ElemType>
ElemType* CPUMatrix<ElemType>::CopyToArray() const
{
    size_t numElements = GetNumElements();
    if (numElements != 0)
    {
        ElemType* arrayCopyTo = NewArray<ElemType>(numElements);
        memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
        return arrayCopyTo;
    }
    else
    {
        return nullptr;
    }
}

//memory will be allocated by the callee if not enough but need to be deleted by the caller after it's done
//return number of elements copied
template <class ElemType>
size_t CPUMatrix<ElemType>::CopyToArray(ElemType*& arrayCopyTo, size_t& currentArraySize) const
{
    size_t numElements = GetNumElements();

    if (numElements > currentArraySize)
    {
        // BUGFIX: the buffer comes from NewArray<ElemType> (operator new[]); releasing it
        // with scalar 'delete' is undefined behavior -- must use 'delete[]'.
        delete[] arrayCopyTo;
        arrayCopyTo = NewArray<ElemType>(numElements);
        currentArraySize = numElements;
    }

    if (numElements != 0)
    {
        memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
    }

    return numElements;
}

template <typename ElemType>
void CPUMatrix<ElemType>::CopySection(size_t /*numRows*/, size_t /*numCols*/, ElemType* /*dst*/, size_t /*colStride*/) const
{
    // REVIEW alexeyk: currently not used by CPU, but implement when possible.
RuntimeError("Not implemented."); } template <class ElemType> inline size_t CPUMatrix<ElemType>::LocateColumn(const size_t col) const { // For performance reason avoid extra validation in release. assert(col == 0 || col < GetNumCols()); return col * m_numRows; // matrix in column-wise storage } template <class ElemType> inline size_t CPUMatrix<ElemType>::LocateElement(const size_t row, const size_t col) const { // For performance reason avoid extra validation in release. assert(row < m_numRows); return LocateColumn(col) + row; // matrix in column-wise storage } #pragma endregion Basic Operators #pragma region Member BLAS Functions template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(ElemType alpha) { return AssignSumOf(alpha, *this); } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(ElemType alpha) const { CPUMatrix<ElemType> c(GetNumRows(), GetNumCols()); c.AssignSumOf(alpha, *this); return c; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const ElemType alpha, const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSumOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = alpha + a(i, j); us(i + 1, j) = alpha + a(i + 1, j); us(i + 2, j) = alpha + a(i + 2, j); us(i + 3, j) = alpha + a(i + 3, j); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = alpha + a(i, j); } } return *this; } //if [this] and a have same dimension then [this]=[this]+a //if a is a column vector, add to all columns of [this] //if a is a row vector, add to all rows of [this] //if a is a scalar, add it to all elements. 
template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(const CPUMatrix<ElemType>& a) { // if (a.GetNumElements() == 1) // *this += a(0,0); // else ScaleAndAdd(1, a, *this); return *this; } //if [this] and a have same dimension then OUTPUT=[this]+a //if a is a column vector, add to all columns of [this] //if a is a row vector, add to all rows of [this] template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(const CPUMatrix<ElemType>& a) const { if (GetNumElements() == 1) { CPUMatrix<ElemType> c(a); c += (*this)(0, 0); return c; } else if (a.GetNumElements() == 1) { CPUMatrix<ElemType> c(*this); c += a(0, 0); return c; } else { CPUMatrix<ElemType> c(*this); // this implementation will introduce a copy overhead. but make resue of the code c += a; return c; } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b) { if (a.GetNumElements() == 1) { SetValue(b); (*this) += a; } else { SetValue(a); (*this) += b; } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(ElemType alpha) { return AssignDifferenceOf(*this, alpha); } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(ElemType alpha) const { CPUMatrix<ElemType> c(GetNumRows(), GetNumCols()); c.AssignDifferenceOf(*this, alpha); return c; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const ElemType alpha, const CPUMatrix<ElemType>& a) { auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = alpha - a(i, j); us(i + 1, j) = alpha - a(i + 1, j); us(i + 2, j) = alpha - a(i + 2, j); us(i + 3, j) = alpha - a(i + 3, j); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) 
{ us(i, j) = alpha - a(i, j); } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const ElemType alpha) { auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); long m = (long) GetNumRows(), n = (long) GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { us(i, j) = a(i, j) - alpha; us(i + 1, j) = a(i + 1, j) - alpha; us(i + 2, j) = a(i + 2, j) - alpha; us(i + 3, j) = a(i + 3, j) - alpha; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { us(i, j) = a(i, j) - alpha; } } return *this; } //if [this] and a have same dimension then [this]=[this]-a //if a is a column vector, minus it from all columns of [this] //if a is a row vector, minus it from all rows of [this] template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(const CPUMatrix<ElemType>& a) { ScaleAndAdd(-1, a, *this); return *this; } //if [this] and a have same dimension then output=[this]-a //if a is a column vector, minus it from all columns of [this] //if a is a row vector, minus it from all rows of [this] template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(const CPUMatrix<ElemType>& a) const { CPUMatrix<ElemType> c(*this); // this implementation will introduce a copy overhead. 
but make resue of the code c -= a; return c; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b) { if (this != &a) { RequireSize(a.GetNumRows(), a.GetNumCols()); SetValue(a); } (*this) -= b; return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator*=(ElemType alpha) { Scale(alpha, *this); return *this; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(ElemType alpha) const { CPUMatrix<ElemType> c(GetNumRows(), GetNumCols()); Scale(alpha, *this, c); return c; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const ElemType alpha, const CPUMatrix<ElemType>& a) { Scale(alpha, a, *this); return *this; } // [this]=a*b template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB) { if (a.GetNumElements() == 1) { if (transposeB) AssignTransposeOf(b); (*this) *= a(0, 0); } else if (b.GetNumElements() == 1) { if (transposeA) AssignTransposeOf(a); (*this) *= b(0, 0); } else Multiply(a, transposeA, b, transposeB, *this); return *this; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(const CPUMatrix<ElemType>& a) const { auto& us = *this; if (GetNumElements() == 1) { CPUMatrix<ElemType> c; c.AssignProductOf(us(0, 0), a); return c; } else if (a.GetNumElements() == 1) { CPUMatrix<ElemType> c; c.AssignProductOf(a(0, 0), us); return c; } else { CPUMatrix<ElemType> c; Multiply(*this, a, c); return c; } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator/=(ElemType alpha) { (*this) *= 1 / alpha; return (*this); } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::operator/(ElemType alpha) const { return ((*this) * (1 / alpha)); } //element-wise power template <class ElemType> 
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator^=(ElemType alpha)
{
    auto& us = *this;
    ElementWisePower(alpha, us, us);
    return us;
}

//element-wise power
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator^(ElemType alpha) const
{
    CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
    ElementWisePower(alpha, *this, c);
    return c;
}

// [this] = a .^ power (element-wise)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementPowerOf(const CPUMatrix<ElemType>& a, const ElemType power)
{
    ElementWisePower(power, a, *this);
    return *this;
}

//[this]=[this] .* a (we cannot override operator .* in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    return AssignElementProductOf(*this, a);
}

//[this]=[this] ./ a (we cannot override operator ./ in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementDivideBy(const CPUMatrix<ElemType>& a)
{
    return AssignElementDivisionOf(*this, a);
}

//[this]=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOf: Matrix is empty.");

    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementProductOf: The input matrix dimensions do not match.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = a(i, j) * b(i, j);
            us(i + 1, j) = a(i + 1, j) * b(i + 1, j);
            us(i + 2, j) = a(i + 2, j) * b(i + 2, j);
            us(i + 3, j) = a(i + 3, j) * b(i + 3, j);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = a(i, j) * b(i, j);
        }
    }
    return *this;
}

//[this] +=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AddElementProductOf: Matrix is empty.");

    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AddElementProductOf : The input matrix dimensions do not match.");

    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == GetNumCols()))
        InvalidArgument("AddElementProductOf : The input matrix dimensions do not match [this].");

    auto& us = *this;

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) += a(i, j) * b(i, j);
            us(i + 1, j) += a(i + 1, j) * b(i + 1, j);
            us(i + 2, j) += a(i + 2, j) * b(i + 2, j);
            us(i + 3, j) += a(i + 3, j) * b(i + 3, j);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) += a(i, j) * b(i, j);
        }
    }
    return *this;
}

//[this]=a ./ b
// TODO: This clips the divisor by a small value. Is that really what one would want?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementDivisionOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementDivisionOf: Matrix is empty.");

    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementDivisionOf : The input matrix dimensions do not match.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    // divisors within (-smallValue, smallValue) are clamped to +/-smallValue to avoid blow-up
    ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        ElemType v = b(i, j);

        if (v >= 0 && v < smallValue)
            us(i, j) = a(i, j) / smallValue;
        else if (v < 0 && v > -smallValue)
            us(i, j) = a(i, j) / (-smallValue);
        else
            us(i, j) = a(i, j) / v;
    }

    return *this;
}

// Multiply every column of [this] element-wise by the column vector a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty() || IsEmpty())
        LogicError("ColumnElementMultiplyWith: Matrix is empty.");

    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
        InvalidArgument("ColumnElementMultiplyWith: The input matrix should be a col vector and match [this]'s rows.");

    auto& us = *this;

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) *= a(i, 0);
            us(i + 1, j) *= a(i + 1, 0);
            us(i + 2, j) *= a(i + 2, 0);
            us(i + 3, j) *= a(i + 3, 0);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) *= a(i, 0);
        }
    }

    return *this;
}

// Multiply every row of [this] element-wise by the row vector a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty() || IsEmpty())
        LogicError("RowElementMultiplyWith: Matrix is empty.");

    if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
        InvalidArgument("RowElementMultiplyWith: The input matrix should be a row vector and match [this]'s columns.");

    auto& us = *this;

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        ElemType v = a(0, j);
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) *= v;
            us(i + 1, j) *= v;
            us(i + 2, j) *= v;
            us(i + 3, j) *= v;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) *= v;
        }
    }

    return *this;
}

// Divide every row of [this] element-wise by the row vector a (divisor clamped away from zero).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementDivideBy(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty() || IsEmpty())
        LogicError("RowElementDivideBy: Matrix is empty.");

    if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
        InvalidArgument("RowElementDivideBy: The input matrix should be a row vector and match [this]'s columns.");

    auto& us = *this;

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        ElemType v = a(0, j);
        if (v >= 0 && v < EPS_IN_INVERSE)
            v = EPS_IN_INVERSE;
        else if (v < 0 && v > -EPS_IN_INVERSE)
            v = (-EPS_IN_INVERSE);

        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) /= v;
            us(i + 1, j) /= v;
            us(i + 2, j) /= v;
            us(i + 3, j) /= v;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) /= v;
        }
    }

    return *this;
}

// Divide every column of [this] element-wise by the column vector a (divisor clamped away from zero).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementDivideBy(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty() || IsEmpty())
        LogicError("ColumnElementDivideBy: Matrix is empty.");

    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
        InvalidArgument("ColumnElementDivideBy: The input matrix should be a col vector and match [this]'s rows.");

    auto& us = *this;

    long m = (long) GetNumRows(), n = (long) GetNumCols();

    ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        for (long i = 0; i < m; i++)
        {
            ElemType v = a(i, 0);
            if (v >= 0 && v < smallValue)
                us(i, j) /= smallValue;
            else if (v < 0 && v > -smallValue)
                us(i, j) /= (-smallValue);
            else
                us(i, j) /= v;
        }
    }

    return *this;
}

//[this]=1 ./ a
// [this] = 1 ./ [this] element-wise, clamping near-zero inputs (see AssignElementInverseOf).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementInverse()
{
    return AssignElementInverseOf(*this);
}

// [this] = 1 ./ a element-wise; inputs inside (-EPS_IN_INVERSE, EPS_IN_INVERSE)
// are clamped to +/-EPS_IN_INVERSE so the reciprocal cannot overflow.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementInverseOf(const CPUMatrix<ElemType>& a)
{
    ElemType smallValue = EPS_IN_INVERSE;

    if (a.IsEmpty())
        LogicError("AssignElementInverseOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        if (a(i, j) < 0 && a(i, j) > -smallValue)
            us(i, j) = 1 / (-smallValue);
        else if (a(i, j) >= 0 && a(i, j) < smallValue)
            us(i, j) = 1 / smallValue;
        else
            us(i, j) = 1 / a(i, j);
    }
    return *this;
}

//[this]=sigmoid([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoid()
{
    return AssignSigmoidOf(*this);
}

// [this] = sigmoid(a) element-wise, using the numerically stable form that
// never exponentiates a positive argument (avoids overflow of exp()).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignSigmoidOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        if (a(i, j) >= 0)
            us(i, j) = 1 / (1 + exp(-a(i, j)));
        else
        {
            ElemType v = exp(a(i, j));
            us(i, j) = v / (1 + v);
        }
    }
    return *this;
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLinearRectifierDerivative()
{
    return AssignLinearRectifierDerivativeOf(*this);
}

// [this](i,j) = (a(i,j) > 0) ? 1 : 0 — derivative of the linear rectifier.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLinearRectifierDerivativeOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignLinearRectifierDerivativeOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
            us(i + 1, j) = a(i + 1, j) > 0.0f ? 1.0f : 0.0f;
            us(i + 2, j) = a(i + 2, j) > 0.0f ? 1.0f : 0.0f;
            us(i + 3, j) = a(i + 3, j) > 0.0f ? 1.0f : 0.0f;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
        }
    }
    return *this;
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoidDerivative()
{
    return AssignSigmoidDerivativeOf(*this);
}

// [this](i,j) = v*(1-v) with v = a(i,j) — sigmoid derivative expressed in
// terms of the sigmoid output value.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidDerivativeOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignSigmoidDerivativeOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            ElemType v = a(i, j);
            us(i, j) = v * (1 - v);
            ElemType v1 = a(i + 1, j);
            us(i + 1, j) = v1 * (1 - v1);
            ElemType v2 = a(i + 2, j);
            us(i + 2, j) = v2 * (1 - v2);
            ElemType v3 = a(i + 3, j);
            us(i + 3, j) = v3 * (1 - v3);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            ElemType v = a(i, j);
            us(i, j) = v * (1 - v);
        }
    }
    return *this;
}

//[this]=tanh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTanh()
{
    return AssignTanhOf(*this);
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTanhOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignTanhOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = tanh(a(i, j));
            us(i + 1, j) = tanh(a(i + 1, j));
            us(i + 2, j) = tanh(a(i + 2, j));
            us(i + 3, j) = tanh(a(i + 3, j));
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = tanh(a(i, j));
        }
    }
    return *this;
}

//[this]=softmax([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLogSoftmax(const bool isColWise)
{
    return AssignLogSoftmaxOf(*this, isColWise);
}

// [this] = log(softmax(a)), per column (isColWise) or per row; the max of each
// vector is subtracted before exponentiation to avoid overflow.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogSoftmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
    if (a.IsEmpty())
        LogicError("AssignLogSoftmaxOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    if (isColWise)
    {
#pragma omp parallel for
        foreach_column (j, a)
        {
            // we need to extract max before applying exp to avoid overflow
            ElemType maxV = a(0, j);
            foreach_row (i, a)
                maxV = std::max(maxV, a(i, j));

            ElemType sum = 0;
            foreach_row (i, a)
                sum += exp(us(i, j) = a(i, j) - maxV);
            sum = log(sum);
            foreach_row (i, us)
                us(i, j) -= sum;
        }
    }
    else
    {
#pragma omp parallel for
        foreach_row (i, a)
        {
            // we need to extract max before applying exp to avoid overflow
            ElemType maxV = a(i, 0);
            foreach_column (j, a)
                maxV = std::max(maxV, a(i, j));

            ElemType sum = 0;
            foreach_column (j, a)
                sum += exp(us(i, j) = a(i, j) - maxV);
            sum = log(sum);
            foreach_column (j, us)
                us(i, j) -= sum;
        }
    }
    return *this;
}

//[this]=hardmax([this])
//the max element is 1 else is 0
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceHardmax(const bool isColWise)
{
    return AssignHardmaxOf(*this, isColWise);
}

// [this] = hardmax(a): per column (or row) set the first occurrence of the
// maximum to 1 and everything else to 0.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignHardmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
    if (a.IsEmpty())
        LogicError("AssignHardmaxOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    if (isColWise)
    {
#pragma omp parallel for
        foreach_column (j, a)
        {
            // we need to extract max
            ElemType maxV = a(0, j);
            long maxI = 0;
            foreach_row (i, a)
            {
                if (maxV < a(i, j))
                {
                    maxV = a(i, j);
                    maxI = i;
                }
            }

            foreach_row (i, us)
                us(i, j) = (i == maxI) ? 1.0f : 0.0f;
        }
    }
    else
    {
#pragma omp parallel for
        foreach_row (i, a)
        {
            // we need to extract max
            ElemType maxV = a(i, 0);
            long maxJ = 0;
            foreach_column (j, a)
            {
                if (maxV < a(i, j))
                {
                    maxV = a(i, j);
                    maxJ = j;
                }
            }

            foreach_column (j, us)
                us(i, j) = (j == maxJ) ? 1.0f : 0.0f;
        }
    }
    return *this;
}

//[this]=sqrt([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSqrt()
{
    return AssignSqrtOf(*this);
}

//to prevent negative values caused by floating operations, we force inputs to be >=0
//this may, however, hide problems in the caller.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSqrtOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignSqrtOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = sqrt(max((ElemType)0, a(i, j)));
            us(i + 1, j) = sqrt(max((ElemType)0, a(i + 1, j)));
            us(i + 2, j) = sqrt(max((ElemType)0, a(i + 2, j)));
            us(i + 3, j) = sqrt(max((ElemType)0, a(i + 3, j)));
        }
        // remaining
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = sqrt(max((ElemType)0, a(i, j)));
        }
    }
    return *this;
}

//[this]=exp([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceExp()
{
    return AssignExpOf(*this);
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignExpOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignExpOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = exp(a(i, j));
            us(i + 1, j) = exp(a(i + 1, j));
            us(i + 2, j) = exp(a(i + 2, j));
            us(i + 3, j) = exp(a(i + 3, j));
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = exp(a(i, j));
        }
    }
    return *this;
}

//[this]=abs([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAbs()
{
    return AssignAbsOf(*this);
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAbsOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignAbsOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(i, j) = abs(a(i, j));
            us(i + 1, j) = abs(a(i + 1, j));
            us(i + 2, j) = abs(a(i + 2, j));
            us(i + 3, j) = abs(a(i + 3, j));
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(i, j) = abs(a(i, j));
        }
    }
    return *this;
}

//[this]=log([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog()
{
    return AssignLogOf(*this);
}

//[this]=log10([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog10()
{
    return AssignLog10Of(*this);
}

// [this] = log(a); inputs below EPS_IN_LOG are mapped to the precomputed
// LOG_OF_EPS_IN_LOG instead of producing -inf/NaN.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignLogOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        if (v < EPS_IN_LOG)
        {
            us(i, j) = LOG_OF_EPS_IN_LOG;
        }
        else
            us(i, j) = log(v);
    }
    return *this;
}

// [this] = log10(a); rejects non-positive inputs, and maps tiny positive inputs
// (below EPS_IN_LOG) to LOG10_OF_EPS_IN_LOG.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLog10Of(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignLog10Of: Matrix a is empty."); // fixed: previously misreported as AssignLogOf

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        if (v <= 0)
            LogicError("AssignLog10Of: Log10 can only be applied to numbers larger than 0."); // fixed: previously misreported as AssignLogOf
        else if (v < EPS_IN_LOG)
        {
            us(i, j) = LOG10_OF_EPS_IN_LOG;
        }
        else
            us(i, j) = log10(v);
    }
    return *this;
}

//[this]=cos([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosine()
{
    return AssignCosineOf(*this);
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCosineOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignCosineOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        us(i, j) = cos(v);
    }
    return *this;
}

//[this]=-sin([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceNegativeSine()
{
    return AssignNegativeSineOf(*this);
}

template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNegativeSineOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignNegativeSineOf: Matrix a is empty."); // fixed: previously misreported as AssignCosineOf

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        us(i, j) = -sin(v);
    }
    return *this;
}

//Threshold truncating: this[i] = max( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateBottom(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("InplaceTruncateBottom: Matrix is empty.");

    auto& us = *this;
    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            if (us(i, j) < threshold)
                us(i, j) = threshold;
            if (us(i + 1, j) < threshold)
                us(i + 1, j) = threshold;
            if (us(i + 2, j) < threshold)
                us(i + 2, j) = threshold;
            if (us(i + 3, j) < threshold)
                us(i + 3, j) = threshold;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            if (us(i, j) < threshold)
                us(i, j) = threshold;
        }
    }
    return *this;
}

// Clamp every element into [-|threshold|, |threshold|].
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncate(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("InplaceTruncate: Matrix is empty.");

    auto& us = *this;
    ElemType locThresholdPos = abs(threshold);
    ElemType locThresholdNeg = -locThresholdPos; // fixed local-name typo (was locTHresholdNeg)

    long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            if (us(i, j) > locThresholdPos)
                us(i, j) = locThresholdPos;
            else if (us(i, j) < locThresholdNeg)
                us(i, j) = locThresholdNeg;

            if (us(i + 1, j) > locThresholdPos)
                us(i + 1, j) = locThresholdPos;
            else if (us(i + 1, j) < locThresholdNeg)
                us(i + 1, j) = locThresholdNeg;

            if (us(i + 2, j) > locThresholdPos)
                us(i + 2, j) = locThresholdPos;
            else if (us(i + 2, j) < locThresholdNeg)
                us(i + 2, j) = locThresholdNeg;

            if (us(i + 3, j) > locThresholdPos)
                us(i + 3, j) = locThresholdPos;
            else if (us(i + 3, j) < locThresholdNeg)
                us(i + 3, j) = locThresholdNeg;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            if (us(i, j) > locThresholdPos)
                us(i, j) = locThresholdPos;
            else if (us(i, j) < locThresholdNeg)
                us(i, j) = locThresholdNeg;
        }
    }
    return *this;
}

//x= x-threshold if x>threshold, x+threshold if x<-threshold, 0 otherwise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSoftThreshold(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("InplaceSoftThreshold: Matrix is empty."); // fixed: previously misreported as InplaceTruncate

    long m = (long) GetNumElements();
    ElemType* bufPtr = Data();
#pragma omp parallel for
    for (long i = 0; i < (m & ~3); i += 4) // four-way unrolling
    {
        if (bufPtr[i] > threshold)
            bufPtr[i] -= threshold;
        else if (bufPtr[i] < -threshold)
            bufPtr[i] += threshold;
        else
            bufPtr[i] = 0;

        if (bufPtr[i + 1] > threshold)
            bufPtr[i + 1] -= threshold;
        else if (bufPtr[i + 1] < -threshold)
            bufPtr[i + 1] += threshold;
        else
            bufPtr[i + 1] = 0;

        if (bufPtr[i + 2] > threshold)
            bufPtr[i + 2] -= threshold;
        else if (bufPtr[i + 2] < -threshold)
            bufPtr[i + 2] += threshold;
        else
            bufPtr[i + 2] = 0;

        if (bufPtr[i + 3] > threshold)
            bufPtr[i + 3] -= threshold;
        else if (bufPtr[i + 3] < -threshold)
            bufPtr[i + 3] += threshold;
        else
            bufPtr[i + 3] = 0;
    }
    // handle remaining stuffs
    for (long i = m & ~3; i < m; i++)
    {
        if (bufPtr[i] > threshold)
            bufPtr[i] -= threshold;
        else if (bufPtr[i] < -threshold)
            bufPtr[i] += threshold;
        else
            bufPtr[i] = 0;
    }
    return *this;
}

//Threshold truncating: this[i] = max( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateBottomOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
    if (a.IsEmpty())
        LogicError("AssignTruncateBottomOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        if (a(i, j) < threshold)
            us(i, j) = threshold;
        else
            us(i, j) = a(i, j);
    }
    return *this;
}

//Threshold truncating: this[i] = min( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateTop(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("InplaceTruncateTop: Matrix is empty.");

    auto& us = *this;
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        if (us(i, j) > threshold)
            us(i, j) = threshold;
    }
    return *this;
}

//Threshold truncating: this[i] = min( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateTopOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
    if (a.IsEmpty())
        LogicError("AssignTruncateTopOf: Matrix a is empty.");

    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());

#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        if (a(i, j) > threshold)
            us(i, j) = threshold;
        else
            us(i, j) = a(i, j);
    }
    return *this;
}

//Threshold truncating: this[i] = 0 if abs(this[i]<threshold).
template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetToZeroIfAbsLessThan(const ElemType threshold) { if (IsEmpty()) LogicError("SetToZeroIfAbsLessThan: Matrix is empty."); auto& us = *this; #pragma omp parallel for foreach_coord (i, j, us) { if (abs(us(i, j)) < threshold) us(i, j) = 0; } return *this; } //sum of all abs(elements) template <class ElemType> ElemType CPUMatrix<ElemType>::SumOfAbsElements() const { if (IsEmpty()) LogicError("SumOfAbsElements: Matrix is empty."); if (sizeof(ElemType) == sizeof(double)) { return (ElemType) cblas_dasum((int) GetNumElements(), reinterpret_cast<double*>(Data()), 1); } else { #pragma warning(suppress : 4244) return cblas_sasum((int) GetNumElements(), reinterpret_cast<float*>(Data()), 1); } } //sum of all elements template <class ElemType> ElemType CPUMatrix<ElemType>::SumOfElements() const { if (IsEmpty()) LogicError("SumOfElements: Matrix is empty."); ElemType sum = 0; long m = (long) GetNumElements(); // note: OpenMP requires loop indices to be long, not size_t ElemType* bufPtr = Data(); //four-way unrolling #pragma omp parallel for reduction(+ : sum) for (long i = 0; i < (m & ~3); i += 4) { sum += bufPtr[i] + bufPtr[i + 1] + bufPtr[i + 2] + bufPtr[i + 3]; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { sum += bufPtr[i]; } return sum; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOfElements(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSumOfElements: Matrix a is empty."); auto& us = *this; us.RequireSize(1, 1); us(0, 0) = a.SumOfElements(); return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignOneHot(const CPUMatrix<ElemType>& a, vector<size_t>& shape, size_t axis) { if (a.IsEmpty()) LogicError("AssignOneHot: Matrix a is empty."); if (axis >= shape.size()) LogicError("AssignOneHot: axis is not correct"); size_t item_size = 1; for (size_t i = 0; i < shape.size() && i < axis; i++) item_size *= 
shape[i]; size_t num_class = shape[axis]; auto& us = *this; auto nCols = a.GetNumCols(); auto nRows = num_class * a.GetNumRows(); us.RequireSize(nRows, nCols); ElemType* bufPtr = Data(); ElemType* aBufPtr = a.Data(); memset(bufPtr, 0, sizeof(ElemType) * nRows *nCols); #pragma omp parallel for for (long i = 0; i < a.GetNumElements(); i++) { if (aBufPtr[i] >= 0 && aBufPtr[i] < num_class) { size_t block_id = i / item_size; size_t item_id = i % item_size; bufPtr[block_id * num_class * item_size + item_id + item_size * (size_t)aBufPtr[i]] = 1; } } return *this; } template <class ElemType> bool CPUMatrix<ElemType>::IsEqualTo(const CPUMatrix<ElemType>& a, const ElemType threshold /*= 1e-8*/) const { return AreEqual(*this, a, threshold); } template <class ElemType> void CPUMatrix<ElemType>::VectorSum(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c, const bool isColWise) { if (a.IsEmpty()) LogicError("VectorSum: Input matrix a is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow if (isColWise) // col-wise { c.RequireSize(1, n); #pragma omp parallel for foreach_column (j, a) { ElemType v = 0; foreach_row (i, a) { #pragma omp atomic v += a(i, j); } c(0, j) = v; } } else { c.RequireSize(m, 1); #pragma omp parallel for foreach_row (i, a) { ElemType v = 0; foreach_column (j, a) { #pragma omp atomic v += a(i, j); } c(i, 0) = v; } } } template <class ElemType> void CPUMatrix<ElemType>::VectorNorm1(CPUMatrix<ElemType>& c, const bool isColWise) const { if (IsEmpty()) LogicError("VectorNorm1: Matrix is empty."); auto& us = *this; const int m = (int) us.GetNumRows(); const int n = (int) us.GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow if (isColWise) // col-wise { c.RequireSize(1, n); #pragma omp parallel for foreach_column (j, us) { ElemType v = 0; foreach_row (i, us) { #pragma omp atomic v += abs(us(i, j)); } c(0, j) = v; } } 
else { c.RequireSize(m, 1); #pragma omp parallel for foreach_row (i, us) { ElemType v = 0; foreach_column (j, us) { #pragma omp atomic v += abs(us(i, j)); } c(i, 0) = v; } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm1Of(CPUMatrix<ElemType>& a, const bool isColWise) { a.VectorNorm1(*this, isColWise); return *this; } template <class ElemType> void CPUMatrix<ElemType>::VectorNorm2(CPUMatrix<ElemType>& c, const bool isColWise) const { if (IsEmpty()) LogicError("VectorNorm2: Matrix is empty."); auto& us = *this; const int m = (int) us.GetNumRows(); const int n = (int) us.GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow ElemType* bufPtr = us.Data(); if (isColWise) // col-wise { c.RequireSize(1, n); if (sizeof(ElemType) == sizeof(double)) { #pragma omp parallel for foreach_column (j, c) { c(0, j) = (ElemType) cblas_dnrm2(m, reinterpret_cast<double*>(bufPtr + us.LocateColumn(j)), 1); } } else { #pragma omp parallel for foreach_column (j, c) { #pragma warning(suppress : 4244) c(0, j) = cblas_snrm2(m, reinterpret_cast<float*>(bufPtr + us.LocateColumn(j)), 1); } } } else { c.RequireSize(m, 1); if (sizeof(ElemType) == sizeof(double)) { #pragma omp parallel for foreach_row (i, c) { c(i, 0) = cblas_dnrm2(n, reinterpret_cast<double*>(bufPtr + i), m); } } else { #pragma omp parallel for foreach_row (i, c) { #pragma warning(suppress : 4244) c(i, 0) = cblas_snrm2(n, reinterpret_cast<float*>(bufPtr + i), m); } } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm2Of(CPUMatrix<ElemType>& a, const bool isColWise) { a.VectorNorm2(*this, isColWise); return *this; } template <class ElemType> void CPUMatrix<ElemType>::VectorNormInf(CPUMatrix<ElemType>& c, const bool isColWise) const { if (IsEmpty()) LogicError("VectorNormInf: Matrix is empty."); auto& us = *this; const int m = (int) us.GetNumRows(); const int n = (int) us.GetNumCols(); assert(m > 0 && n > 0); // 
converting from size_t to int may cause overflow if (isColWise) // col-wise { c.RequireSize(1, n); // #pragma omp parallel for foreach_column (j, us) { ElemType v = 0; foreach_row (i, us) { v = std::max(v, abs(us(i, j))); } c(0, j) = v; } } else { c.RequireSize(m, 1); // #pragma omp parallel for foreach_row (i, us) { ElemType v = 0; foreach_column (j, us) { v = std::max(v, abs(us(i, j))); } c(i, 0) = v; } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNormInfOf(CPUMatrix<ElemType>& a, const bool isColWise) { a.VectorNormInf(*this, isColWise); return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignInnerProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool isColWise) { InnerProduct(a, b, *this, isColWise); return *this; } //column-wise crossproduct template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignKhatriRaoProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AssignKhatriRaoProductOf: Matrix is empty."); long cols = (long) a.GetNumCols(); if (cols != b.GetNumCols()) InvalidArgument("a.GetNumCols() != b.GetNumCols()"); long rowsA = (long) a.GetNumRows(); long rowsB = (long) b.GetNumRows(); RequireSize(rowsA * rowsB, cols); #ifdef __INTEL_COMPILER // TODO: check this #pragma simd statement #endif #pragma omp parallel for for (long k = 0; k < cols; k++) { long jj = 0; for (long j = 0; j < rowsB; j++) { for (long i = 0; i < rowsA; i++) { (*this)(jj++, k) = a(i, k) * b(j, k); } } } return *this; } //column-wise reshaped product. Used to compute KhatriRaoProduct Gradient // this = reshape each column of a from (K1xK2,1) to (K1, K2) // if each column of a is not transposed, each (K1, K2) times each column of b (K2, frames). 
// the output is a (K1, frames) matrix // if each column of a is tranposed, each (K1, K2)^T times each column of b(K1, frames) and output is (K2, frames) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddColumnReshapeProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool transposeAColumn) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AddColumnReshapeProductOf: Matrix is empty."); long cols = (long) a.GetNumCols(); if (cols != b.GetNumCols()) InvalidArgument("AddColumnReshapeProductOf: a.GetNumCols() != b.GetNumCols()"); long rowsA = (long) a.GetNumRows(); long rowsB = (long) b.GetNumRows(); if (rowsA % rowsB != 0) InvalidArgument("AddColumnReshapeProductOf: number of rows in a should be multiples of that in b."); long rowsC = rowsA / rowsB; if (rowsC != GetNumRows() || cols != GetNumCols()) InvalidArgument("AddColumnReshapeProductOf: This matrix does not have the right size."); auto& us = *this; if (transposeAColumn) { // find nrows and ncols of tbe reshaped a long nrows = rowsB; long ncols = rowsC; #ifdef __INTEL_COMPILER // TODO: check this #pragma simd statement #endif #pragma omp parallel for foreach_column (t, a) { size_t k = 0; for (size_t j = 0; j < ncols; j++) // row and col is transposed { ElemType v = 0; for (size_t i = 0; i < nrows; i++) { v += a(k, t) * b(i, t); k++; } us(j, t) += v; } } } else { size_t ncols = rowsB; size_t nrows = rowsC; #ifdef __INTEL_COMPILER // TODO: check this #pragma simd statement #endif #pragma omp parallel for foreach_column (t, a) { size_t k = 0; for (size_t j = 0; j < ncols; j++) { for (size_t i = 0; i < nrows; i++) { us(i, t) += a(k, t) * b(j, t); k++; } } } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithScaleOf(ElemType alpha, const CPUMatrix<ElemType>& a) { ScaleAndAdd(alpha, a, *this); return *this; } template <class ElemType> ElemType CPUMatrix<ElemType>::FrobeniusNorm() const { if (IsEmpty()) LogicError("FrobeniusNorm: Matrix 
is empty."); ElemType v = 0; long m = (long) GetNumElements(); ElemType* bufPtr = Data(); //four-way unrolling #pragma omp parallel for reduction(+ : v) for (long i = 0; i < (m & ~3); i += 4) { v += bufPtr[i] * bufPtr[i] + bufPtr[i + 1] * bufPtr[i + 1] + bufPtr[i + 2] * bufPtr[i + 2] + bufPtr[i + 3] * bufPtr[i + 3]; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { v += bufPtr[i] * bufPtr[i]; } return sqrt(v); } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignFrobeniusNormOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignFrobeniusNormOf: Matrix a is empty."); auto& us = *this; us.RequireSize(1, 1); us(0, 0) = a.FrobeniusNorm(); return us; } template <class ElemType> ElemType CPUMatrix<ElemType>::MatrixNormInf() const { if (IsEmpty()) LogicError("MatrixNormInf: Matrix is empty."); auto& us = *this; ElemType v = 0; #pragma omp parallel for foreach_coord (i, j, us) { #pragma omp critical { v = std::max(v, abs(us(i, j))); } } return v; } template <class ElemType> ElemType CPUMatrix<ElemType>::MatrixNorm0() const { if (IsEmpty()) LogicError("MatrixNorm0: Matrix is empty."); auto& us = *this; ElemType v = 0; #pragma omp parallel for foreach_coord (i, j, us) { if (us(i, j) != 0) { #pragma omp critical { ++v; } } } return v; } template <class ElemType> ElemType CPUMatrix<ElemType>::MatrixNorm1() const { if (IsEmpty()) LogicError("MatrixNorm1: Matrix is empty."); auto& us = *this; ElemType sum = 0; #pragma omp parallel for reduction(+ : sum) foreach_coord (i, j, us) { sum += abs(us(i, j)); } return sum; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSignOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AssignSignOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_column (j, us) { foreach_row (i, us) { ElemType v = a(i, j); if (!std::isnan(v)) us(i, j) = (v == (ElemType) 0 ? 
(ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1))); else us(i, j) = v; } } return us; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddSignOf(const CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("AddSignOf: Matrix a is empty."); auto& us = *this; if (this != &a) RequireSize(a.GetNumRows(), a.GetNumCols()); #pragma omp parallel for foreach_column (j, us) { foreach_row (i, us) { ElemType v = a(i, j); if (!std::isnan(v)) us(i, j) += (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1))); else us(i, j) = v; } } return us; } //I decided to use CPUMatrix<ElemType>& maxIndexes instead of integer vector because the result may be used to do additional calculation template <class ElemType> void CPUMatrix<ElemType>::VectorMax(CPUMatrix<ElemType>& maxIndexes, CPUMatrix<ElemType>& maxValues, const bool isColWise, int topK) const { if (IsEmpty()) LogicError("VectorMax: Matrix is empty."); auto& us = *this; const int m = (int) GetNumRows(); const int n = (int) GetNumCols(); if (topK > m) InvalidArgument("VectorMax: TopK must be less or equal than the number of rows"); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow if (isColWise) // col-wise { maxValues.RequireSize(topK, n); maxIndexes.RequireSize(topK, n); if (topK == 1) { #pragma omp parallel for for (int j = 0; j < n; j++) { ElemType v = us(0, j); size_t index = 0; foreach_row (i, us) { if (v < us(i, j)) { index = i; v = us(i, j); } } maxValues(0, j) = v; maxIndexes(0, j) = (ElemType) index; } } else { std::vector<int> indices(m); int i = 0; std::generate(indices.begin(), indices.end(), [&i] { return i++; }); const ElemType* curVal = Data(); ElemType* curIdx = maxIndexes.Data(); ElemType* curMax = maxValues.Data(); for (int icol = 0; icol < n; icol++, curVal += m, curIdx += topK, curMax += topK) { // Partial sort, descending order. 
std::nth_element(indices.begin(), indices.begin() + topK, indices.end(), [curVal](const int& a, const int& b) { return curVal[a] > curVal[b]; }); // REVIEW alexeyk: the following produces warning (see SCL_SECURE_NO_WARNINGS) so use loop instead. // std::transform(indices.begin(), indices.begin() + topK, curIdx, [](const int& a) { return static_cast<ElemType>(a); }); for (int i2 = 0; i2 < topK; i2++) { curIdx[i2] = static_cast<ElemType>(indices[i2]); curMax[i2] = curVal[indices[i2]]; } } } } else { if (topK > 1) RuntimeError("Row-wise TopK max is not supported."); maxValues.RequireSize(m, 1); maxIndexes.RequireSize(m, 1); #pragma omp parallel for for (int i = 0; i < m; i++) { ElemType v = us(i, 0); size_t index = 0; foreach_column (j, us) { if (v < us(i, j)) { index = j; v = us(i, j); } } maxValues(i, 0) = v; maxIndexes(i, 0) = (ElemType) index; } } } template <class ElemType> void CPUMatrix<ElemType>::VectorMin(CPUMatrix<ElemType>& minIndexes, CPUMatrix<ElemType>& minValues, const bool isColWise) const { if (IsEmpty()) LogicError("VectorMin: Matrix is empty."); auto& us = *this; const int m = (int) GetNumRows(); const int n = (int) GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow if (isColWise) // col-wise { minValues.RequireSize(1, n); minIndexes.RequireSize(1, n); #pragma omp parallel for for (int j = 0; j < n; j++) { ElemType v = us(0, j); size_t index = 0; foreach_row (i, us) { if (v > us(i, j)) { index = i; v = us(i, j); } } minValues(0, j) = v; minIndexes(0, j) = (ElemType) index; } } else { minValues.RequireSize(m, 1); minIndexes.RequireSize(m, 1); #pragma omp parallel for for (int i = 0; i < m; i++) { ElemType v = us(i, 0); size_t index = 0; foreach_column (j, us) { if (v > us(i, j)) { index = j; v = us(i, j); } } minValues(i, 0) = v; minIndexes(i, 0) = (ElemType) index; } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNumOfDiff(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& 
b, bool searchInCol) { if (a.GetNumCols() != b.GetNumCols()) throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of columns."); if (!searchInCol && a.GetNumRows() != b.GetNumRows()) throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of rows."); ElemType n = 0; if (!searchInCol) { foreach_coord (i, j, a) { n += (a(i, j) != b(i, j)); } } else { size_t crow = b.GetNumRows(); const ElemType* curCol = b.Data(); for (size_t icol = 0; icol < a.GetNumCols(); icol++, curCol += crow) { auto res = std::find(curCol, curCol + crow, a(0, icol)); if (res == curCol + crow) n++; } } RequireSize(1, 1); // result should be one element (*this)(0, 0) = n; return *this; } #pragma endregion Member BLAS Functions #pragma region Other helper Functions struct PrintRange { // print from begin to skipBegin, then from skipEnd to end // skipBegin = end if no split size_t begin; size_t skipBegin; size_t skipEnd; size_t end; bool IsEmpty() const { return end <= begin; } // examples: // * 3..10 // * -3..-3: include end-3..end and 0..3 PrintRange(ptrdiff_t first, ptrdiff_t last, size_t total) { if (first >= 0 && last >= 0) { begin = (size_t)first; end = (size_t)last + 1; if (end > total) // allow INT_MAX, meaning to end end = total; skipBegin = end; skipEnd = end; } else if (first < 0 && last < 0) { begin = 0; skipBegin = (size_t)(-last); skipEnd = (size_t)(total + first); if (skipEnd <= skipBegin) skipBegin = skipEnd = total; end = total; } else // if other combinations are ever of interest then implement them here LogicError("Print: Bounds must be either both positive or both negative."); } }; // use negative ranges to print corners, e.g. 
Print("name", -3, -3, -3, -3) will print the first 3 and last 3 rows/cols template <class ElemType> void CPUMatrix<ElemType>::Print(const char* matrixName, ptrdiff_t rowFirst, ptrdiff_t rowLast, ptrdiff_t colFirst, ptrdiff_t colLast) const { fprintf(stderr, "\n###### "); if (matrixName != nullptr) fprintf(stderr, "%s ", matrixName); fprintf(stderr, "(%lu, %lu)", (unsigned long)GetNumRows(), (unsigned long)GetNumCols()); if (rowFirst != 0 || colFirst != 0 || (size_t)(rowLast + 1) != GetNumRows() || (size_t)(colLast + 1) != GetNumCols()) fprintf(stderr, " [%ld:%ld, %ld:%ld]", (long)rowFirst, (long)rowLast, (long)colFirst, (long)colLast); fprintf(stderr, " ######\n\n"); if (IsEmpty()) { fprintf(stderr, "(empty)\n"); return; } PrintRange rowRange(rowFirst, rowLast, GetNumRows()); PrintRange colRange(colFirst, colLast, GetNumCols()); if (rowRange.IsEmpty() || colRange.IsEmpty()) { fprintf(stderr, "(empty)\n"); return; } const auto& us = *this; if (rowRange.begin > 0) fprintf(stderr, "...\n"); for (size_t i = rowRange.begin; i < rowRange.end; i++) { if (i == rowRange.skipBegin) // insert ... between the two blocks if any { fprintf(stderr, "...\n"); i = rowRange.skipEnd; } if (colRange.begin > 0) // ... at line start fprintf(stderr, "...\t"); for (size_t j = colRange.begin; j < colRange.end; j++) { if (j == colRange.skipBegin) { fprintf(stderr, "...\t"); j = colRange.skipEnd; } fprintf(stderr, "%.10f\t", us(i, j)); } if (colRange.end < GetNumCols()) // ... at line end fprintf(stderr, "..."); fprintf(stderr, "\n"); } if (rowRange.end < GetNumRows()) fprintf(stderr, "...\n"); } template <class ElemType> void CPUMatrix<ElemType>::Print(const char* matrixName /*=nullptr*/) const { Print(matrixName, 0, GetNumRows() - 1, 0, GetNumCols() - 1); } // file I/O //matrixName is used to verify that correct matrix is read. 
// File deserialization is not supported for CPUMatrix.
template <class ElemType>
void CPUMatrix<ElemType>::ReadFromFile(FILE*, const char* /*matrixName*/)
{
    RuntimeError("not implemented.");
}

//matrixName is used to verify that correct matrix is read.
// File serialization is not supported for CPUMatrix.
template <class ElemType>
void CPUMatrix<ElemType>::WriteToFile(FILE*, const char* /*matrixName*/)
{
    RuntimeError("not implemented.");
}

//assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
// Packs (im2col-style) the convolution input so that each output location's
// receptive field becomes one packed column block; resizes *this accordingly.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPackedConvolutionInput(const CPUMatrix<ElemType>& inputSubBatch,
                                                                       const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
                                                                       const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
                                                                       const size_t kernelWidth, const size_t kernelHeight,
                                                                       const size_t horizontalSubsample, const size_t verticalSubsample,
                                                                       const bool zeroPadding)
{
    if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
        LogicError("Arguments verticalSubsample (or horitzontalSubsample) must be less or equal than kernelHeight (or kernelWidth).");

    const size_t packedInputRows = kernelWidth * kernelHeight * inputChannels;
    const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
    const size_t inputDim = inputWidth * inputHeight * inputChannels;
    const size_t smallBatchSize = inputSubBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
    RequireSize(packedInputRows, packedInputColsPerSample * smallBatchSize);
    if (zeroPadding)
        SetValue((ElemType) 0); // padded positions must read as zero

    const long halfKernelWidth = (long) kernelWidth / 2;
    const long halfKernelHeight = (long) kernelHeight / 2;

#pragma omp parallel for // each input element is copied to many places
    for (long sample = 0; sample < smallBatchSize; sample++)
    {
        for (long id = 0; id < inputDim; id++)
        {
            // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
            // IN_ELEM_COLPOS = sample

            // Decompose the linear input index into (channel, inputRow, inputCol).
            const long y = id / inputHeightTimesChannel;   // inputCol
            const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
            const long x = nXC / (long) inputChannels;     // inputRow
            const long c = nXC % (long) inputChannels;     // channel

            // (x0, y0): first output window containing this pixel; (x1, y1): the
            // pixel's position inside the kernel for that window.
            long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
            if (zeroPadding)
            {
                x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1.0f + halfKernelHeight) / (ElemType)verticalSubsample));  // row : first wrow in which x is in
                x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample);                                                              // first posxInKernel
                y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1.0f + halfKernelWidth) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in
                y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample);                                                             // first posyInKernel
            }
            else
            {
                x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1) / (ElemType)verticalSubsample));  // row : first wrow in which x is in
                x1 = (long) (x - x0 * verticalSubsample);                                                            // first posxInKernel
                y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in
                y1 = (long) (y - y0 * horizontalSubsample);                                                          // first posyInKernel
            }

            assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);

            // PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
            // PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow

            // Broadcast this input value to every packed position that covers it.
            ElemType currentInputValue = inputSubBatch(id, sample);
            long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
            for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
            {
                long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
                for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel
>= 0; wrow++, posxInKernel -= (long) verticalSubsample) { const long packRow = packRowBase + posxInKernel; const long packCol = packColBase + wrow; (*this)(packRow, packCol) = currentInputValue; } packColBase += (long) outputHeight; } } } return *this; } //assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::UnpackConvolutionInput(CPUMatrix<ElemType>& inputSubBatch, const size_t inputWidth, const size_t inputHeight, const size_t inputChannels, const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/, const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample, const bool zeroPadding) const { if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth) LogicError("Arguments verticalSubsample (or horizonSubsample) must be less than or equal to kernelHeight (or kernelWidth)."); const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel const size_t inputDim = inputWidth * inputHeight * inputChannels; const size_t smallBatchSize = inputSubBatch.GetNumCols(); const long inputHeightTimesChannel = (long) (inputHeight * inputChannels); const long halfKernelWidth = (long) kernelWidth / 2; const long halfKernelHeight = (long) kernelHeight / 2; #pragma omp parallel for // each input element is copied to many places for (long sample = 0; sample < smallBatchSize; sample++) { for (long id = 0; id < inputDim; id++) { // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels) // IN_ELEM_COLPOS = sample const long y = id / inputHeightTimesChannel; // inputCol const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels const long x = nXC / (long) inputChannels; // inputRow const long c = nXC % (long) inputChannels; // channel long x0 = 
0, y0 = 0, x1 = 0, y1 = 0; if (zeroPadding) { x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1.0f + halfKernelHeight) / (ElemType) verticalSubsample)); // row : first wrow in which x is in x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1.0f + halfKernelWidth) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel } else { x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1) / (ElemType) verticalSubsample)); // row : first wrow in which x is in x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel } assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth); // PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight) // PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow ElemType currentInputValue = inputSubBatch(id, sample); long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight); for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample) { long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight); for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample) { const long packRow = packRowBase + posxInKernel; const long packCol = packColBase + wrow; currentInputValue += (*this)(packRow, packCol); } packColBase += (long) outputHeight; } inputSubBatch(id, sample) = 
currentInputValue; } } return inputSubBatch; } //assume each column is an input sample. Each sample is stored in (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignMaxPoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels, const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/, const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample, const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) { const long inputHeightTimesChannel = (long) (inputHeight * channels); const long outputHeightTimesChannel = (long) (outputHeight * channels); const size_t batchSize = inputBatch.GetNumCols(); RequireSize(outputSizePerSample, batchSize); // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels) // IN_ELEM_COLPOS = sample // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels) // OUT_ELEM_COLPOS = sample #pragma omp parallel for for (long sample = 0; sample < (long) batchSize; sample++) { for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++) { const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels const long x = (long) (nXC / channels); // wrow const long c = (long) (nXC % channels); // channel ElemType maxVal = -FLT_MAX; ElemType minVal = FLT_MAX; const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c); for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++) { long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel; for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++) { const ElemType val = 
inputBatch(rowInInput, sample); // pf[rowInWindow*channels]; maxVal = std::max(maxVal, val); minVal = std::min(minVal, val); rowInInput += (long) channels; } } (*this)(outputIndexWithinSample, sample) = maxVal; } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddMaxPoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch, const CPUMatrix<ElemType>& inputBatch, const CPUMatrix<ElemType>& outputBatch, const size_t channels, const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample, const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/, const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample) { size_t batchSize = inputBatch.GetNumCols(); const long inputHeightTimesChannel = (long) (inputHeight * channels); const long outputHeightTimesChannel = (long) (outputHeight * channels); // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels) // IN_ELEM_COLPOS = sample // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels) // OUT_ELEM_COLPOS = sample #pragma omp parallel for for (long sample = 0; sample < batchSize; sample++) { for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++) { const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*chanels const long x = (long) (nXC / channels); // row in input const long c = (long) (nXC % channels); // channel long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? 
x / verticalSubsample : outputHeight - 1); // inclusive end
            long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start
            long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1);   // inclusive end

            ElemType inputValue = inputBatch(inputIndexWithinSample, sample);
            for (long outY = startOutY; outY <= endOutY; outY++)
            {
                for (long outX = startOutX; outX <= endOutX; outX++)
                {
                    long outputIndex = (long) (outY * outputHeightTimesChannel + outX * channels + c);
                    // Only windows whose forward max equals this input value route gradient here.
                    if (inputValue == outputBatch(outputIndex, sample))
                        (*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample);
                }
            }
        }
    }

    return *this;
}

// Average pooling over sliding windows; *this is resized to one output value
// per (channel, output row, output col) per sample.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAveragePoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
                                                                     const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
                                                                     const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
                                                                     const size_t windowWidth, const size_t windowHeight,
                                                                     const size_t horizontalSubsample, const size_t verticalSubsample)
{
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    const size_t batchSize = inputBatch.GetNumCols();
    const size_t windowSize = windowWidth * windowHeight;
    RequireSize(outputSizePerSample, batchSize);

    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample

#pragma omp parallel for
    for (long sample = 0; sample < batchSize; sample++)
    {
        for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
        {
            // Decompose the linear output index into (channel, wrow, wcol).
            const long y = outputIndexWithinSample / outputHeightTimesChannel;   // wcol
            const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
            const long x = (long) (nXC / channels);                              // wrow
            const long c = (long) (nXC % channels);                              // channel

            ElemType sum = 0;
            const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
            for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
            {
                long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
                for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
                {
                    sum += inputBatch(rowInInput, sample);
                    rowInInput += (long) channels;
                }
            }

            (*this)(outputIndexWithinSample, sample) = sum / windowSize;
        }
    }

    return *this;
}

// Accumulates into *this the average-pooling gradient: each input location
// receives 1/windowSize of every covering window's output gradient.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddAveragePoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch,
                                                                    const size_t channels,
                                                                    const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
                                                                    const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
                                                                    const size_t windowWidth, const size_t windowHeight,
                                                                    const size_t horizontalSubsample, const size_t verticalSubsample)
{
    size_t batchSize = outputGradientBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    const long windowSize = (long) (windowWidth * windowHeight);

    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample

#pragma omp parallel for
    for (long sample = 0; sample < batchSize; sample++)
    {
        for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
        {
            const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input
            const long nXC = inputIndexWithinSample %
inputHeightTimesChannel; // channel + row*chanels const long x = nXC / (long) channels; // row in input const long c = nXC % (long) channels; // channel long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / (long) verticalSubsample : outputHeight - 1); // inclusive end long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end for (long outY = startOutY; outY <= endOutY; outY++) { for (long outX = startOutX; outX <= endOutX; outX++) { long outputIndex = outY * outputHeightTimesChannel + outX * (long) channels + c; (*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample) / windowSize; } } } } return *this; } #pragma endregion Other Helper Functions template <class ElemType> void CPUMatrix<ElemType>::ConvolutionForward(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++) { for (size_t row = 0; row < output.GetNumRows(); row++) { int colBase = mpRowCol(row, 0); int ivBase = mpRowIwht(row, 0); assert(0 <= colBase && colBase < GetNumRows()); ElemType sum = 0; int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < GetNumRows()); sum += kernel.Data()[ivBase + skip + i] * (*this)(colBase + dcol, sample); } output(row, sample) = sum; } } } template <class ElemType> void 
CPUMatrix<ElemType>::ConvolutionBackwardData(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& grad) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++) { for (size_t row = 0; row < GetNumRows(); row++) { int colBase = mpRowCol(row, 0); int ivBase = mpRowIwht(row, 0); assert(0 <= colBase && colBase < grad.GetNumRows()); ElemType curGrad = (*this)(row, sample); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows()); grad(colBase + dcol, sample) += curGrad * kernel.Data()[ivBase + skip + i]; } } } } template <class ElemType> void CPUMatrix<ElemType>::ConvolutionBackwardKernel(const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& kernelGrad) const { // Do NOT parallelize these loops! 
for (size_t sample = 0; sample < GetNumCols(); sample++) { for (size_t row = 0; row < GetNumRows(); row++) { int colBase = mpRowCol(row, 0); int ivBase = mpRowIwht(row, 0); assert(0 <= colBase && colBase < in.GetNumRows()); ElemType curGrad = (*this)(row, sample); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < in.GetNumRows()); kernelGrad.Data()[ivBase + skip + i] += curGrad * in(colBase + dcol, sample); } } } } template <class ElemType> void CPUMatrix<ElemType>::UnrollConvolutionInput(size_t unrollCols, size_t mapOutSize, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const { size_t batchSize = GetNumCols(); #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)batchSize; sample++) { for (size_t row = 0; row < mapOutSize; row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < GetNumRows()); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < GetNumRows()); output.Data()[(row * batchSize + sample) * unrollCols + skip + i] = (*this)(colBase + dcol, sample); } } } } template <class ElemType> void CPUMatrix<ElemType>::UnrollConvolutionOutput(size_t unrollCols, size_t mapInCount, size_t mapOutCount, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const { if (mpRowCol.GetNumRows() % mapOutCount != 0) InvalidArgument("The number of rows in mpRowCol must be multiple of mapOutCount."); size_t mapOutSize = mpRowCol.GetNumRows() / mapOutCount; size_t batchSize = GetNumCols(); size_t kernelSize = runs(1, 0); 
if (kernelSize % mapInCount != 0) InvalidArgument("kernelSize must be multiple of mapInCount."); size_t kernelMapSize = kernelSize / mapInCount; #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++) { for (size_t row = 0; row < mapOutSize; row++) { int colBase = mpRowCol(row, 0); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < std::min(size, (int)kernelMapSize); i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); size_t isrc = row; size_t idst = ((colBase + dcol) * batchSize + sample) * unrollCols + ((skip + i) % kernelMapSize) * mapOutCount; for (size_t outMap = 0; outMap < mapOutCount; outMap++, isrc += mapOutSize) { assert(isrc < GetNumElements()); assert(idst + outMap < output.GetNumElements()); output.Data()[idst + outMap] = (*this)(isrc, sample); } } } } } template <class ElemType> void CPUMatrix<ElemType>::UnrollConvolutionInputForKernelBackprop(size_t mapOutSize, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const { size_t batchSize = GetNumCols(); size_t unrollCols = mapOutSize * batchSize; #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)batchSize; sample++) { for (size_t row = 0; row < mapOutSize; row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < GetNumRows()); int i0 = mpRowRun(row, 0); int skip = runs(i0++, 0); int size = runs(i0++, 0); int imask = i0 + size; for (int i = 0; i < size; i++) { if (runs(imask + i, 0) == 0) continue; int dcol = runs(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < GetNumRows()); size_t idst = (skip + i) * unrollCols + row * batchSize + sample; assert(idst < output.GetNumElements()); output.Data()[idst] = (*this)(colBase + dcol, sample); } } } } template <class ElemType> void CPUMatrix<ElemType>::MaxPoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& 
mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++) { for (size_t row = 0; row < output.GetNumRows(); row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < GetNumRows()); assert(std::numeric_limits<ElemType>::has_infinity); ElemType res = -std::numeric_limits<ElemType>::infinity(); int i0 = mpRowIndices(row, 0); int size = indices(i0++, 0); assert(size > 0); for (int i = 0; i < size; i++) { int dcol = indices(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < GetNumRows()); res = std::max(res, (*this)(colBase + dcol, sample)); } output(row, sample) = res; } } } template <class ElemType> void CPUMatrix<ElemType>::MaxPoolingBackward(const CPUMatrix<ElemType>& out, const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& grad) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++) { for (size_t row = 0; row < GetNumRows(); row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < grad.GetNumRows()); int i0 = mpRowIndices(row, 0); int size = indices(i0++, 0); assert(size > 0); ElemType g = (*this)(row, sample); ElemType m = out(row, sample); for (int i = 0; i < size; i++) { int dcol = indices(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows()); if (in(colBase + dcol, sample) >= m) { #pragma omp atomic grad(colBase + dcol, sample) += g; break; } } } } } // For each image, for each ROI, this function treats that ROI as an image // and does max pooling so that it has output size pooledHeight x pooledWidth. 
// It loops over each location in the output tensor, computes which ROI // and image should populate that location, computes the subset of the image // corresponding to the ROI and which pixels in that subset should go into the // output location, then takes the max value over that window. // src: Images [W x H x C x N] // roiData: ROIs [4 x numROIs x N], // dst: Pooled ROIs [PW x PH x C x numROIs x N] // argmax: max positions [PW x PH x C x numROIs x N] // where PW = Pooled Width, PH = Pooled Height, C = Channels, N = Batch Size template <class ElemType> void CPUMatrix<ElemType>::ROIPoolingForward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height, const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& output, CPUMatrix<ElemType>& argmax) const { size_t roiOutputSize = pooledHeight * pooledWidth * channels; #pragma omp parallel for for (int imgIdx = 0; imgIdx < numImg; imgIdx++) { auto img = ColumnSlice(imgIdx, 1); auto rois = roiData.ColumnSlice(imgIdx, 1); #pragma omp parallel for for (int roiIdx = 0; roiIdx < numRois; roiIdx++) { // each ROI is 4 elements: (x, y, w, h). int base = roiIdx * 4; // scaled ROI numbers (relative to original image size) // roi points are doubles that represent location relative to image ElemType scX = rois(base, (ElemType)0); ElemType scY = rois(base + (ElemType)1, (ElemType)0); ElemType scW = rois(base + (ElemType)2, (ElemType)0); ElemType scH = rois(base + (ElemType)3, (ElemType)0); // compute actual spatial location of the ROI in our featuremap. 
size_t x = (size_t)round(scX * width); size_t y = (size_t)round(scY * height); ElemType roiW = (ElemType)max(round(scW * width), (ElemType)1); ElemType roiH = (ElemType)max(round(scH * height), (ElemType)1); const ElemType winW = roiW / (ElemType)pooledWidth; const ElemType winH = roiH / (ElemType)pooledHeight; // inspired by Ross Girshick fast-rcnn caffe cpu: https://github.com/rbgirshick/fast-rcnn // loop over spatial locations in output. #pragma omp parallel for for (int outw = 0; outw < pooledWidth; outw++) { for (int outh = 0; outh < pooledHeight; outh++) { // compute the top left corner of the input // spatial window corresponding to this output unit size_t hstart = (size_t)floor(outh * winH); size_t wstart = (size_t)floor(outw * winW); // compute bottom right corner (not included) size_t hend = (size_t)ceil((outh + 1) * winH); size_t wend = (size_t)ceil((outw + 1) * winW); // offset window based on ROI top left corner. // these indices are into the input slice. hstart = min(max(hstart + y, (size_t)0), height); wstart = min(max(wstart + x, (size_t)0), width); hend = min(max(hend + y, (size_t)0), height); wend = min(max(wend + x, (size_t)0), width); bool isempty = (hend <= hstart) || (wend <= wstart); for (size_t c = 0; c < channels; c++) { // [W x H x C x R x N]; R = ROIs per image size_t outputIdx = roiIdx * roiOutputSize + outw + outh * pooledWidth + c * pooledHeight * pooledWidth; size_t maxidx = 0; ElemType maxval = isempty ? (ElemType)0 : -FLT_MAX; size_t baseIdx = c * height * width; for (size_t h = hstart; h < hend; h++) { for (size_t w = wstart; w < wend; w++) { // stored argmax indices are relative to the current channel. size_t dataIdx = w + h * width; if (img(baseIdx + dataIdx, 0) > maxval) { maxval = img(baseIdx + dataIdx, 0); maxidx = dataIdx; } } } output(outputIdx, imgIdx) = maxval; argmax(outputIdx, imgIdx) = maxidx; } } } } } } // This function loops over locations in the input to the ROIPoolingNode (image locations). 
// It loops over the ROIs corresponding to that image, seeing which ones could contain the current location // in their output. For each ROI, it checks the argmax data to see if that ROI indeed chose // this pixel location as the maximum. If so, it increments the gradient term for the input location. template <class ElemType> void CPUMatrix<ElemType>::ROIPoolingBackward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height, const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& grad, CPUMatrix<ElemType>& argmax) const { // loop over images in the batch. #pragma omp parallel for for (int imgIdx = 0; imgIdx < numImg; imgIdx++) { // ROIs for this image. length 4*numRois; auto rois = roiData.ColumnSlice(imgIdx, 1).Data(); // gradient values for all ROIs from this image. length numRois*pooledHeight*pooledWidth*channels; auto pooledGrad = ColumnSlice(imgIdx, 1).Data(); auto argmaxCol = argmax.ColumnSlice(imgIdx, 1).Data(); // loop over spatial locations in the image. #pragma omp parallel for for (int w = 0; w < width; w++) { #pragma omp parallel for for (int h = 0; h < width; h++) { // loop over the ROIs seeing which ones contain this location. for (int roiN = 0; roiN < numRois; roiN++) { // each ROI is 4 elements: (x, y, w, h). int roiOffset = roiN * 4; // ROI data is relative to original image size size_t roiStartW = (size_t)round(rois[roiOffset + 0] * width); size_t roiStartH = (size_t)round(rois[roiOffset + 1] * height); size_t roiWidth = max((size_t)round(rois[roiOffset + 2] * width), (size_t)1); size_t roiHeight = max((size_t)round(rois[roiOffset + 3] * height), (size_t)1); // skip this ROI if it doesn't contain the current input location. 
const bool inROI = (w >= roiStartW && w < roiStartW + roiWidth && h >= roiStartH && h < roiStartH + roiHeight); if (!inROI) continue; ElemType winH = (ElemType)roiHeight / (ElemType)pooledHeight; ElemType winW = (ElemType)roiWidth / (ElemType)pooledWidth; // what pooled nodes in the output for this ROI could have pooled this input location? size_t phstart = (size_t)((h - roiStartH) / winH); size_t pwstart = (size_t)((w - roiStartW) / winW); size_t phend = (size_t)(ceil((h - roiStartH + 1) / winH)); size_t pwend = (size_t)(ceil((w - roiStartW + 1) / winW)); phstart = min(max(phstart, (size_t)0), pooledHeight); phend = min(max(phend, (size_t)0), pooledHeight); pwstart = min(max(pwstart, (size_t)0), pooledWidth); pwend = min(max(pwend, (size_t)0), pooledWidth); for (size_t c = 0; c < channels; c++) { ElemType gradient = 0; // [W x H x C x N] size_t index = w + h*width + c*height*width; // go right up to channel c of the current ROI. size_t offset = (roiN * channels + c) * pooledWidth * pooledHeight; const ElemType* offsetPoolGrad = pooledGrad + offset; const ElemType* offsetArgmax = argmaxCol + offset; for (size_t ph = phstart; ph < phend; ph++) { for (size_t pw = pwstart; pw < pwend; pw++) { if ((size_t)offsetArgmax[ph * pooledWidth + pw] == (w + h * width)) gradient += offsetPoolGrad[ph * pooledWidth + pw]; } } grad(index, imgIdx) = gradient; } } } } } } template <class ElemType> void CPUMatrix<ElemType>::MaxUnpooling(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, const CPUMatrix<ElemType>& poolInput, CPUMatrix<ElemType>& input) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++) { for (size_t row = 0; row < GetNumRows(); row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < input.GetNumRows()); int i0 = mpRowIndices(row, 0); int size = indices(i0++, 0); assert(size > 0); ElemType curMax = poolInput(colBase + indices(i0, 0), sample); ElemType 
prevMax = curMax; int imax = 0; for (int i = 1; i < size; i++) { int dcol = indices(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < poolInput.GetNumRows()); curMax = std::max(curMax, poolInput(colBase + dcol, sample)); if (curMax > prevMax) { prevMax = curMax; imax = i; } } int dcol = indices(i0 + imax, 0); assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows()); input(colBase + dcol, sample) = (*this)(row, sample); //int i = (int)poolIn(row, sample); //assert(0 <= i && i < size); //int dcol = indices(i0 + i, 0); //assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows()); //input(colBase + dcol, sample) = (*this)(row, sample); } } } template <class ElemType> void CPUMatrix<ElemType>::AveragePoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output, const bool poolIncludePad) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++) { for (size_t row = 0; row < output.GetNumRows(); row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < GetNumRows()); ElemType sum = 0; int i0 = mpRowIndices(row, 0); int size = indices(i0++, 0); assert(size > 0); for (int i = 0; i < size; i++) { int dcol = indices(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < GetNumRows()); sum += (*this)(colBase + dcol, sample); } // Note that we divide by size which is the number of actual elements (does not include padding). 
// if poolIncludePad == true, use avg_pool_include_pad if (poolIncludePad) size = indices(0, 0); output(row, sample) = sum / size; } } } template <class ElemType> void CPUMatrix<ElemType>::AveragePoolingBackward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& grad, const bool poolIncludePad) const { #pragma omp parallel for for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++) { for (size_t row = 0; row < GetNumRows(); row++) { int colBase = mpRowCol(row, 0); assert(0 <= colBase && colBase < grad.GetNumRows()); int i0 = mpRowIndices(row, 0); int size = indices(i0++, 0); int tmp = size; if (poolIncludePad) size = indices(0, 0); assert(size > 0); ElemType g = (*this)(row, sample) / size; size = tmp; for (int i = 0; i < size; i++) { int dcol = indices(i0 + i, 0); assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows()); #pragma omp atomic grad(colBase + dcol, sample) += g; } } } } template <class ElemType> void CPUMatrix<ElemType>::BatchNormalizationForward(const CPUMatrix<ElemType>& scale, const CPUMatrix<ElemType>& bias, bool inferenceOnly, double expAvgFactor, double blendFactor, CPUMatrix<ElemType>& runMean, CPUMatrix<ElemType>& runVariance, CPUMatrix<ElemType>& out, double epsilon, CPUMatrix<ElemType>& saveMean, CPUMatrix<ElemType>& saveInvStdDev) const { if (GetNumRows() % scale.GetNumRows() != 0) LogicError("The number of rows of this matrx must be multiple of the number of rows of the scale matrix."); if (!inferenceOnly || expAvgFactor != 0 || blendFactor != 1) RuntimeError("Batch normalization training on CPU is not yet implemented."); saveMean.Resize(0, 0); // only doing inference: these two are not produced saveInvStdDev.Resize(0, 0); bool spatial = GetNumRows() != scale.GetNumRows(); if (spatial) { size_t spatialSize = GetNumRows() / scale.GetNumRows(); #pragma omp parallel for for (long icol = 0; icol < out.GetNumCols(); icol++) { for (long irow = 0; irow < 
out.GetNumRows(); irow++) { size_t imap = irow / spatialSize; ElemType stdDev = sqrt(runVariance(imap, 0) + epsilon); out(irow, icol) = scale(imap, 0) * ((*this)(irow, icol) - runMean(imap, 0)) / stdDev + bias(imap, 0); } } } else { #pragma omp parallel for for (long icol = 0; icol < out.GetNumCols(); icol++) { for (long irow = 0; irow < out.GetNumRows(); irow++) { ElemType stdDev = sqrt(runVariance(irow, 0) + epsilon); out(irow, icol) = scale(irow, 0) * ((*this)(irow, icol) - runMean(irow, 0)) / stdDev + bias(irow, 0); } } } } template <class ElemType> void CPUMatrix<ElemType>::BatchNormalizationBackward(const CPUMatrix<ElemType>& in, CPUMatrix<ElemType>& grad, const CPUMatrix<ElemType>& scale, double blendFactor, const CPUMatrix<ElemType>& saveMean, const CPUMatrix<ElemType>& saveInvStdDev, CPUMatrix<ElemType>& scaleGrad, CPUMatrix<ElemType>& biasGrad) const { UNUSED(in); UNUSED(grad); UNUSED(scale); UNUSED(blendFactor), UNUSED(saveMean); UNUSED(saveInvStdDev); UNUSED(scaleGrad); UNUSED(biasGrad); RuntimeError("Batch normalization training on CPU is not yet implemented."); } #pragma region Static BLAS Functions /// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = alpha * op(a) * op(b) + beta*c</summary> /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> /// <param name="transposeA">Whether matrix a is transposed</param> /// <param name="b">Input matrix</param> /// <param name="transposeB">Whether matrix b is transposed</param> /// <param name="beta">Scalar</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::MultiplyAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB, ElemType beta, CPUMatrix<ElemType>& c, shared_ptr<QuantizedMultiplier<ElemType>> pQuantizedMultiplier) { if (a.IsEmpty() || b.IsEmpty()) return; int m, n, 
k, l; int lda, ldb, ldc; CBLAS_TRANSPOSE mklTransA; CBLAS_TRANSPOSE mklTransB; if (transposeA) { m = (int) a.GetNumCols(); k = (int) a.GetNumRows(); lda = k; mklTransA = CBLAS_TRANSPOSE::CblasTrans; } else { m = (int) a.GetNumRows(); k = (int) a.GetNumCols(); lda = m; mklTransA = CBLAS_TRANSPOSE::CblasNoTrans; } if (transposeB) { l = (int) b.GetNumCols(); n = (int) b.GetNumRows(); ldb = n; mklTransB = CBLAS_TRANSPOSE::CblasTrans; } else { l = (int) b.GetNumRows(); n = (int) b.GetNumCols(); ldb = l; mklTransB = CBLAS_TRANSPOSE::CblasNoTrans; } assert(m > 0 && k > 0 && l > 0 && n > 0); // converting from size_t to int may cause overflow if (k != l) InvalidArgument("CPUMatrix<ElemType>::MultiplyAndWeightedAdd : The inner dimensions of a and b must match."); if (beta == 0) c.RequireSize(m, n); else c.VerifySize(m, n); // Can't resize if beta != 0 ldc = (int) c.GetNumRows(); if (pQuantizedMultiplier == nullptr) { if (sizeof(ElemType) == sizeof(double)) { cblas_dgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<double*>(a.Data()), lda, reinterpret_cast<double*>(b.Data()), ldb, beta, reinterpret_cast<double*>(c.Data()), ldc); } else { #pragma warning(suppress : 4244) cblas_sgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<float*>(a.Data()), lda, reinterpret_cast<float*>(b.Data()), ldb, beta, reinterpret_cast<float*>(c.Data()), ldc); } } else { // TODO: support transpose product if (mklTransA == CBLAS_TRANSPOSE::CblasTrans || mklTransB == CBLAS_TRANSPOSE::CblasTrans) LogicError("Quantized multiplier currently doesn't support transpose."); pQuantizedMultiplier->Multiply(m, n, k, a.Data(), b.Data(), c.Data()); } } template <class ElemType> void CPUMatrix<ElemType>::Multiply1x1AndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType beta, CPUMatrix<ElemType>& c) { if (a.GetNumElements() != 1) InvalidArgument("the argument a must 
be a scalar"); // a is a scalar ElemType f = alpha * a.Get00Element(); if (beta == 0) // don't even read the memory if beta is 0 #pragma omp parallel for foreach_coord (i, j, c) c(i, j) = b(i, j) * f; else #pragma omp parallel for foreach_coord (i, j, c) c(i, j) = b(i, j) * f + c(i, j) * beta; } template <class ElemType> void CPUMatrix<ElemType>::ColumnwiseScaleAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& v, ElemType beta, CPUMatrix<ElemType>& c) { if (v.GetNumRows() != 1 && v.GetNumCols() != 1) InvalidArgument("the argument v must be a vector"); // v is a vector if (beta == 0) c.RequireSize(a.GetNumRows(), a.GetNumCols()); else c.VerifySize(a.GetNumRows(), a.GetNumCols()); // Can't resize if beta != 0 const ElemType* vd = v.Data(); if (beta == 0) // don't even read the memory if beta is 0 #pragma omp parallel for foreach_coord(i, j, c) c(i, j) = alpha * a(i, j) * vd[j]; else #pragma omp parallel for foreach_coord(i, j, c) c(i, j) = alpha * a(i, j) * vd[j] + c(i, j) * beta; } /* compute singular value decomposition as A = U*SIGMA*VT W is used as temp working memory */ template <class ElemType> void CPUMatrix<ElemType>::SVD(const CPUMatrix<ElemType>& A, CPUMatrix<ElemType>& SIGMA, CPUMatrix<ElemType>& U, CPUMatrix<ElemType>& VT, CPUMatrix<ElemType>& W) { if (A.IsEmpty()) LogicError("SVD: input matrix is empty."); int info; int m, n, lda, ldu, ldvt; m = (int) A.GetNumRows(); n = (int) A.GetNumCols(); W.GetNumRows(); // W is used as temp working memory lda = m; ldu = m; ldvt = n; U.RequireSize(m, m); SIGMA.RequireSize(std::min(m, n), 1); VT.RequireSize(n, n); if (sizeof(ElemType) == sizeof(double)) { #ifdef USE_MKL double wkopt; int lwork = -1; dgesvd("All", "All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, &wkopt, &lwork, &info); lwork = (int) wkopt; W.RequireSize(lwork, 1); dgesvd("All", 
"All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, reinterpret_cast<double*>(W.Data()), &lwork, &info); #else std::vector<double> superb(std::max(std::min(m, n) - 1, 1)); info = LAPACKE_dgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<double*>(A.Data()), (int) lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), (int) ldu, reinterpret_cast<double*>(VT.Data()), (int) ldvt, &superb[0]); #endif } else { #ifdef USE_MKL float wkopt; int lwork = -1; sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, &wkopt, &lwork, &info); lwork = (int) wkopt; W.RequireSize(lwork, 1); sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, reinterpret_cast<float*>(W.Data()), &lwork, &info); #else std::vector<float> superb(std::max(std::min(m, n) - 1, 1)); info = LAPACKE_sgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<float*>(A.Data()), (int) lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), (int) ldu, reinterpret_cast<float*>(VT.Data()), (int) ldvt, &superb[0]); #endif } if (info > 0) { RuntimeError("The algorithm computing SVD failed to converge.\n"); } } /// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b) + c</summary> /// <param name="a">Input matrix</param> /// <param name="transposeA">Whether matrix a is transposed</param> /// <param name="b">Input matrix</param> /// <param name="transposeB">Whether matrix b is transposed</param> /// <param name="c">Resulting matrix, user is responsible for 
allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::MultiplyAndAdd(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB, CPUMatrix<ElemType>& c) { return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 1.0, c); } template <class ElemType> void CPUMatrix<ElemType>::AssignSoftmaxSum(const CPUMatrix<ElemType>& softmax, CPUMatrix<ElemType>& c) { ElemType log_likelihood = 0.0; size_t batch_size = GetNumCols(); #pragma omp parallel for reduction(+ : log_likelihood) for (int instance_id = 0; instance_id < batch_size; instance_id++) { int sample = (int) (*this)(0, instance_id); log_likelihood += softmax(instance_id, sample); } c(0, 0) = -log_likelihood; } template <class ElemType> void CPUMatrix<ElemType>::AssignNCEUnnormalizedEval(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& c) //this: samples+probs // a: hidden // b: embedding // tmp: softmax // c: loglikelihood { ElemType log_likelihood = 0.0; size_t batch_size = GetNumCols(); #pragma omp parallel for reduction(+ : log_likelihood) for (int instance_id = 0; instance_id < batch_size; instance_id++) { int sample = -(int) (*this)(0, instance_id); ElemType score = bias(sample, 0); for (int dim = 0; dim < b.GetNumRows(); dim++) score += b(dim, sample) * a(dim, instance_id); log_likelihood += score; } c(0, 0) = -log_likelihood; } //samples+prob gradient hidden embedding embedding/hidden //a.m_CPUMatrix->AssignNCEDerivative(*tmp.m_CPUMatrix, *a.m_CPUMatrix, *b.m_CPUMatrix, inputIndex, *c.m_CPUMatrix); template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNCEDerivative(const CPUMatrix<ElemType>& tmp, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t inputIndex, CPUMatrix<ElemType>& c) { size_t sample_size = GetNumRows() / 2; size_t batch_size = GetNumCols(); if (inputIndex == 1) { #pragma omp parallel for for (int 
instance_id = 0; instance_id < batch_size; instance_id++) for (int sample_id = 0; sample_id < sample_size; sample_id++) { int sample = (int) (*this)(2 * sample_id, instance_id); for (int dim = 0; dim < b.GetNumRows(); dim++) c(dim, instance_id) -= b(dim, sample) * tmp(sample_id, instance_id); } } else if (inputIndex == 2) { int i_blocks = omp_get_num_threads() * 16; // Assume only one block in k direction. // We don't need to explicitly block in the j direction. #pragma omp parallel for for (int ib = 0; ib < i_blocks; ib++) for (int instance_id = 0; instance_id < batch_size; instance_id++) for (int sample_id = 0; sample_id < sample_size; sample_id++) { int sample = (int) (*this)(2 * sample_id, instance_id); if (sample % i_blocks == ib) for (int dim = 0; dim < b.GetNumRows(); dim++) c(dim, sample) -= a(dim, instance_id) * tmp(sample_id, instance_id); } } else if (inputIndex == 3) { // Assume only one block in k direction. // We don't need to explicitly block in the j direction. for (int instance_id = 0; instance_id < batch_size; instance_id++) for (int sample_id = 0; sample_id < sample_size; sample_id++) { int sample = (int) (*this)(2 * sample_id, instance_id); c(0, sample) -= tmp(sample_id, instance_id); } } else InvalidArgument("The argument inputIndex must be 1 or 2 or 3."); return *this; } template <class ElemType> void CPUMatrix<ElemType>::AssignNoiseContrastiveEstimation(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& tmp, CPUMatrix<ElemType>& c) //this: samples+probs // a: hidden // b: embedding // tmp: softmax // c: loglikelihood { double log_likelihood = 0.0; size_t sample_size = GetNumRows() / 2; size_t batch_size = GetNumCols(); size_t num_noise_samples = sample_size - 1; double log_num_noise_samples = std::log(num_noise_samples); #pragma omp parallel for reduction(+ : log_likelihood) for (int instance_id = 0; instance_id < batch_size; instance_id++) for (int sample_id = 0; sample_id < 
sample_size; sample_id++) { int sample = (int) (*this)(2 * sample_id, instance_id); double score = bias(0, sample); for (int dim = 0; dim < b.GetNumRows(); dim++) score += a(dim, instance_id) * b(dim, sample); double sample_prob = -(*this)(2 * sample_id + 1, instance_id); if (sample_id == 0) sample_prob = -sample_prob; double score_noise = log_num_noise_samples + sample_prob; double z = LogAdd(score, score_noise); double logprob = score - z; double logprob_noise = score_noise - z; tmp(sample_id, instance_id) = (ElemType) -std::exp(logprob); if (sample_id == 0) tmp(sample_id, instance_id) += 1; log_likelihood += sample_id == 0 ? logprob : logprob_noise; } c(0, 0) = (ElemType) -log_likelihood; } /// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b)</summary> /// <param name="a">Input matrix</param> /// <param name="transposeA">Whether matrix a is transposed</param> /// <param name="b">Input matrix</param> /// <param name="transposeB">Whether matrix b is transposed</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB, CPUMatrix<ElemType>& c) { return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 0.0, c); } /// <summary>Matrix-matrix multiply with col-major matrices (a and b are not transposed): c = a * b</summary> /// <param name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, false, b, false, 0.0, c); } /// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * 
a + c</summary> /// if a is a column vector, add to all columns of c /// if a is a row vector, add to all rows of c /// if a is a scalar, add to all rows of c /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::ScaleAndAdd(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c) { if (a.IsEmpty() || c.IsEmpty()) LogicError("ScaleAndAdd: one of the input matrices is empty."); if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector { const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int len = m * n; const int incx = 1; const int incy = 1; assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow if ((int) c.GetNumRows() != m || (int) c.GetNumCols() != n) InvalidArgument("Dimension of matrix c does not match dimension of matrix a."); if (sizeof(ElemType) == sizeof(double)) { cblas_daxpy(len, alpha, reinterpret_cast<double*>(a.Data()), incx, reinterpret_cast<double*>(c.Data()), incy); } else { #pragma warning(suppress : 4244) cblas_saxpy(len, alpha, reinterpret_cast<float*>(a.Data()), incx, reinterpret_cast<float*>(c.Data()), incy); } } else if (a.GetNumElements() == 1) // scalar, add to all elements { ElemType v = alpha * a(0, 0); long m = (long) c.GetNumRows(), n = (long) c.GetNumCols(); #pragma omp parallel for for (long j = 0; j < n; j++) { // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { c(i, j) += v; c(i + 1, j) += v; c(i + 2, j) += v; c(i + 3, j) += v; } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { c(i, j) += v; } } } else if (a.GetNumCols() == 1) // col vector, add it to all columns { int m = (int) c.GetNumRows(); if (m != (int) a.GetNumRows()) InvalidArgument("To add column vector, rows should match."); ElemType* aBufPtr = a.Data(); ElemType* cBufPtr = c.Data(); if 
(sizeof(ElemType) == sizeof(double)) { #pragma omp parallel for foreach_column (j, c) { cblas_daxpy(m, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + c.LocateColumn(j)), 1); } } else { #pragma omp parallel for foreach_column (j, c) { #pragma warning(suppress : 4244) cblas_saxpy(m, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + c.LocateColumn(j)), 1); } } } else // row vector, add it to all rows { int m = (int) c.GetNumRows(); int n = (int) c.GetNumCols(); if (n != (int) a.GetNumCols()) InvalidArgument("To add row vector, cols should match."); ElemType* aBufPtr = a.Data(); ElemType* cBufPtr = c.Data(); if (sizeof(ElemType) == sizeof(double)) { #pragma omp parallel for foreach_row (i, c) { cblas_daxpy(n, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + i), m); } } else { #pragma omp parallel for foreach_row (i, c) { #pragma warning(suppress : 4244) cblas_saxpy(n, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + i), m); } } } } /// <summary>c += alpha * (a-b)</summary> /// if a, b, c must have same dim /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::AddScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() && a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols())) { InvalidArgument("AddScaledDifference: a, b, and c must have same dimension."); } if (a.IsEmpty()) LogicError("AddScaledDifference: Input matrix a is empty."); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); ElemType* cBufPtr = c.Data(); long m = (long) c.GetNumElements(); #pragma omp parallel for // 
four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]); cBufPtr[i + 1] += alpha * (aBufPtr[i + 1] - bBufPtr[i + 1]); cBufPtr[i + 2] += alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]); cBufPtr[i + 3] += alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]); } } /// <summary> c = alpha * (a-b)</summary> /// if a, b, c must have same dim /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::AssignScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) { InvalidArgument("AssignScaledDifference: a, b must have same dimension."); } if (a.IsEmpty()) LogicError("AssignScaledDifference: Input matrix a is empty."); if (&c != &a && &c != &b) c.RequireSize(a.GetNumRows(), a.GetNumCols()); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); ElemType* cBufPtr = c.Data(); long m = (long) c.GetNumElements(); #pragma omp parallel for // four-way unrolling for (long i = 0; i < (m & ~3); i += 4) { cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]); cBufPtr[i + 1] = alpha * (aBufPtr[i + 1] - bBufPtr[i + 1]); cBufPtr[i + 2] = alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]); cBufPtr[i + 3] = alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]); } // handle remaining stuffs for (long i = m & ~3; i < m; i++) { cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]); } } // c[ci,cj] += a[ai,aj] template <class ElemType> void CPUMatrix<ElemType>::AddElementToElement(ElemType beta, const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj) { if (ai >= 
a.GetNumRows() || aj >= a.GetNumCols() || ci >= c.GetNumRows() || cj >= c.GetNumCols()) InvalidArgument("AddElementToElement: index out of range."); ElemType us = beta ? beta * c(ci, cj) : 0; // do not multiply if beta is 0, could be a NaN us += a(ai, aj); c(ci, cj) = us; } ////c[ci,cj] += a[ai,aj] //template<class ElemType> //void CPUMatrix<ElemType>::AddLogElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj) //{ // if (ai >= a.GetNumRows() || aj >=a.GetNumCols() || // ci >= c.GetNumRows() || cj >=c.GetNumCols()) // InvalidArgument("AddElementToElement: index out of range."); // // ElemType v = a(ai,aj); // c(ci, cj) += ((v < EPS_IN_LOG) ? LOG_OF_EPS_IN_LOG : log(v)); //} #if 0 // now done as AddElementToElement (beta=0) // c[ci,cj] = a[ai,aj] template <class ElemType> void CPUMatrix<ElemType>::AssignElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj) { if (ai >= a.GetNumRows() || aj >= a.GetNumCols() || ci >= c.GetNumRows() || cj >= c.GetNumCols()) InvalidArgument("AssignElementToElement: index out of range."); c(ci, cj) = a(ai, aj); } #endif /// <summary>c += alpha * (a-b)</summary> /// if a, b, c must have same dim /// <param name="alpha">1X1 matrix</param> /// <param name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::AddScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { if (alpha.GetNumElements() != 1) InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix."); AddScaledDifference(alpha(0, 0), a, b, c); } /// <summary> c = alpha * (a-b)</summary> /// if a, b, c must have same dim /// <param name="alpha">1X1 matrix</param> /// <param 
name="a">Input matrix</param> /// <param name="b">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> void CPUMatrix<ElemType>::AssignScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { if (alpha.GetNumElements() != 1) InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix."); AssignScaledDifference(alpha(0, 0), a, b, c); } /// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a</summary> /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> /// <param name="c">Resulting matrix, user is responsible for allocating this</param> template <class ElemType> /*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c) { if (a.IsEmpty()) LogicError("Scale: Input matrix a is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow c.RequireSize(m, n); ElemType* aBufPtr = a.Data(); ElemType* cBufPtr = c.Data(); if (alpha == 0) { memset(cBufPtr, 0, sizeof(ElemType) * c.GetNumElements()); return; } long size = (long) c.GetNumElements(); #pragma omp parallel for // four-way unrolling for (long i = 0; i < (size & ~3); i += 4) { cBufPtr[i] = alpha * aBufPtr[i]; cBufPtr[i + 1] = alpha * aBufPtr[i + 1]; cBufPtr[i + 2] = alpha * aBufPtr[i + 2]; cBufPtr[i + 3] = alpha * aBufPtr[i + 3]; } // remaining elements for (long i = size & ~3; i < size; i++) { cBufPtr[i] = alpha * aBufPtr[i]; } } /// <summary>Matrix-scalar multiply with col-major matrices: a = alpha * a</summary> /// <param name="alpha">Scalar</param> /// <param name="a">Input matrix</param> template <class ElemType> /*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("Scale: Input matrix a is 
empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int len = m * n; const int incx = 1; assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow if (alpha == 0 && incx == 1) { memset(a.Data(), 0, sizeof(ElemType) * len); } else if (sizeof(ElemType) == sizeof(double)) { cblas_dscal(len, alpha, reinterpret_cast<double*>(a.Data()), incx); } else { #pragma warning(suppress : 4244) cblas_sscal(len, alpha, reinterpret_cast<float*>(a.Data()), incx); } } /// <summary>Matrix multiply with col-major matrices: a = alpha[1,1] * a</summary> /// <param name="alpha">1x1 matrix</param> /// <param name="a">Input matrix</param> template <class ElemType> /*static*/ void CPUMatrix<ElemType>::Scale(CPUMatrix<ElemType> alpha, CPUMatrix<ElemType>& a) { if (a.IsEmpty()) LogicError("Scale: Input matrix a is empty."); if (alpha.GetNumElements() != 1) LogicError("Matrix alpha must be 1x1"); CPUMatrix<ElemType>::Scale(alpha(0, 0), a); } template <class ElemType> void CPUMatrix<ElemType>::InnerProduct(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise) { if (a.IsEmpty() || b.IsEmpty()) LogicError("InnerProduct: one of the input matrices is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int k = (int) b.GetNumRows(); const int l = (int) b.GetNumCols(); assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow if (m != k || n != l) InvalidArgument("InnerProduct: Matrices a and b should have same dimension."); if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product { c.AssignElementProductOf(a, b); } else if (isColWise) // col-wise { c.RequireSize(1, n); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); if (sizeof(ElemType) == sizeof(double)) { #pragma omp parallel for foreach_column (j, c) { c(0, j) = (ElemType) cblas_ddot(m, 
reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1); } } else { #pragma omp parallel for foreach_column (j, c) { #pragma warning(suppress : 4244) c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1); } } } else { c.RequireSize(m, 1); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); if (sizeof(ElemType) == sizeof(double)) { #pragma omp parallel for foreach_row (i, c) { c(i, 0) = cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m); } } else { #pragma omp parallel for foreach_row (i, c) { #pragma warning(suppress : 4244) c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m); } } } } // treat matrices as vectors. do vec(a)^T vec(b) template <class ElemType> ElemType CPUMatrix<ElemType>::InnerProductOfMatrices(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b) { if (a.IsEmpty() || b.IsEmpty()) LogicError("InnerProductOfMatrices: one of the input matrices is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int k = (int) b.GetNumRows(); const int l = (int) b.GetNumCols(); assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow if (m != k || n != l) InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension."); if (sizeof(ElemType) == sizeof(double)) { return (ElemType) cblas_ddot((int) a.GetNumElements(), reinterpret_cast<double*>(a.Data()), 1, reinterpret_cast<double*>(b.Data()), 1); } else { #pragma warning(suppress : 4244) return (ElemType) cblas_sdot((int) a.GetNumElements(), reinterpret_cast<float*>(a.Data()), 1, reinterpret_cast<float*>(b.Data()), 1); } } template <class ElemType> void CPUMatrix<ElemType>::ElementWisePower(ElemType alpha, const CPUMatrix<ElemType>& a, 
CPUMatrix<ElemType>& c) { if (a.IsEmpty()) LogicError("Scale: The input matrix a is empty."); c.RequireSize(a.GetNumRows(), a.GetNumCols()); if (alpha == 2) { #pragma omp parallel for foreach_coord (i, j, c) { c(i, j) = a(i, j) * a(i, j); } } else if (alpha == 3) { #pragma omp parallel for foreach_coord (i, j, c) { c(i, j) = a(i, j) * a(i, j) * a(i, j); } } else { #pragma omp parallel for foreach_coord (i, j, c) { c(i, j) = pow(a(i, j), alpha); } } } template <class ElemType> bool CPUMatrix<ElemType>::AreEqual(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const ElemType threshold /*= 1e-8*/) { if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols()) return false; bool result = true; #pragma omp parallel for foreach_coord (i, j, a) { if (abs(a(i, j) - b(i, j)) > threshold) { result = false; break; } } return result; } // see Matrix<ElemType>::TensorShuffleScaleAndAdd() for comments template <class ElemType> void CPUMatrix<ElemType>::TensorShuffleScaleAndAdd(ElemType keepWeight, const CPUMatrix<ElemType>& a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c) { size_t N = D * S * M * K * T; const auto pa = a.Data(); const auto pb = b.Data(); auto pc = c.Data(); // Note: This code is written to match a GPU implementation. It is not super-efficient on the CPU. for (size_t na = 0; na < N; na++) // loop over all elements { // recover the 5 indices from the loop counter size_t d = na % D; size_t s = (na / D) % S; size_t m = (na / D / S) % M; size_t k = (na / D / S / M) % K; size_t t = (na / D / S / M / K) % T; // compute index for the a and b/c tensors assert(na == (((t * K + k) * M + m) * S + s) * D + d); // input tensor of dimension (D x S x M x K x T) size_t nb = (((t * S + s) * M + m) * K + k) * D + d; // output tensor of dimension (D x K x M x S x T): k/K and s/S swapped assert(nb < N); // perform the computation ElemType cval = keepWeight ? 
keepWeight * pb[nb] : 0; // if weight is 0 then don't bother to read memory (efficiency) or to multiply (NaN-safe) cval += scaleFactor * pa[na]; pc[nb] = cval; } } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::Ones(const size_t rows, const size_t cols) { CPUMatrix<ElemType> c(rows, cols); // will initialize to 0 c.SetValue(1); return c; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::Zeros(const size_t rows, const size_t cols) { CPUMatrix<ElemType> c(rows, cols); // will initialize to 0 c.SetValue(0); return c; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::Eye(const size_t rows) { CPUMatrix<ElemType> c(rows, rows); // will initialize to 0 c.SetDiagonalValue(1); return c; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomUniform(const size_t rows, const size_t cols, const ElemType low, const ElemType high, unsigned long seed) { CPUMatrix<ElemType> c(rows, cols); // will initialize to 0 c.SetUniformRandomValue(low, high, seed); return c; } template <class ElemType> CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomGaussian(const size_t rows, const size_t cols, const ElemType mean, const ElemType sigma, unsigned long seed) { CPUMatrix<ElemType> c(rows, cols); // will initialize to 0 c.SetGaussianRandomValue(mean, sigma, seed); return c; } template <class ElemType> bool CPUMatrix<ElemType>::HasElement(const CPUMatrix<ElemType>& mat, const ElemType v) { bool bHas = false; bool isvFinite = std::isfinite(v); #pragma omp parallel for for (long j = 0; j < mat.GetNumElements(); j++) { #pragma omp flush(bHas) if (!bHas) { ElemType cur = mat.Data()[j]; if (isvFinite && std::isfinite(cur)) { if (cur == v) bHas = true; } else if (std::isnan(v) && std::isnan(cur)) bHas = true; else if (std::isinf(v) && std::isinf(cur) && std::signbit(v) == std::signbit(cur)) bHas = true; } } return bHas; } // CPUMatrix<ElemType>& AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const 
CPUMatrix<ElemType>& b, size_t shift, size_t negnumber); //[this]=a .* b // here, a and b must be two row vectors of the same size, i.e. [1,m] // the inputs are two rwo vectors // the output is a matrix of size(neg+1, col) template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty."); if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols())) InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match."); if (a.GetNumRows() != 1) InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector."); auto& us = *this; if (this != &a) { RequireSize(negnumber + 1, a.GetNumCols()); // RequireSize(a.GetNumRows(), a.GetNumCols()); } long m = (long) GetNumRows(), n = (long) GetNumCols(); // a and b are of size (1,n) // #pragma omp parallel for for (long j = 0; j < n; j++) { us(0, j) = a(0, j) * b(0, j); } for (long j = 0; j < n; j++) { for (long i = 1; i < m; i++) { us(i, j) = a(0, j) * b(0, (j + shift + i - 1) % n); } } return *this; } template <class ElemType> void CPUMatrix<ElemType>::InnerProductWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise, size_t shift, size_t negnumber) { if (a.IsEmpty() || b.IsEmpty()) LogicError("InnerProduct: one of the input matrices is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int k = (int) b.GetNumRows(); const int l = (int) b.GetNumCols(); assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow if (m != k || n != l) InvalidArgument("InnerProduct: Matrices a and b should have same dimension."); if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to 
element-wise product { InvalidArgument("InnerProduct: Both matrices should be normal ones, not vectors"); // c.AssignElementProductOf(a, b); } else if (isColWise) // col-wise { c.RequireSize(negnumber + 1, n); // this line ischanged ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); if (sizeof(ElemType) == sizeof(double)) { for (long j = 0; j < n; j++) { c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1); } for (long j = 0; j < n; j++) { for (long i = 1; i < negnumber + 1; i++) { c(i, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1); } } } else { for (long j = 0; j < n; j++) { c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1); } for (long j = 0; j < n; j++) { for (long i = 1; i < negnumber + 1; i++) { c(i, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1); } } } } else { InvalidArgument("InnerProduct: Rowwise is not supported yet"); c.RequireSize(m, 1); ElemType* aBufPtr = a.Data(); ElemType* bBufPtr = b.Data(); if (sizeof(ElemType) == sizeof(double)) { #pragma omp parallel for foreach_row (i, c) { c(i, 0) = (ElemType) cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m); } } else { #pragma omp parallel for foreach_row (i, c) { #pragma warning(suppress : 4244) c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m); } } } } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::GetARowByIndex(const CPUMatrix<ElemType>& a, size_t index) { if (a.IsEmpty()) LogicError("GetARowByIndex: the input matrices is empty."); 
const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); if (index < 0 || index >= m) LogicError("GetARowByIndex: the row index is out of range."); assert(m > 0 && n > 0); // converting from size_t to int may cause overflow auto& us = *this; RequireSize(1, n); for (long j = 0; j < n; j++) { us(0, j) = a(index, j); } return *this; } // input: a, a row vector // input: b, a matrix. b.col == a.col // input firstmatrixfixed: If true, keep a's order. Otherwise, keep b's order // output: c, a matrix. c.size == b.size /* Example, a = [a1 a2 a3] b = [b11 b12 b13; b21 b22 b23 ] if true: shift = 1 then c = [a1*b12 a2*b13 a3*b11 a1*b22 a2*b23 a3*b21] if shift = 2 then c = [ a1*b13 a2*b11 a3*b12 a1*b23 a2*b21 a3*b22] i.e. we do column-wise shift if false: shift = 1 then c = [a2*b11 a3*b12 a1*b13 a2*b21 a3*b22 a1*b23] shift = 2 then c = [ a3*b11 a1*b12 a2*b13 a3*b21 a1*b22 a2*b23] */ template <class ElemType> void CPUMatrix<ElemType>::ConductRowElementMultiplyWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, size_t shift, bool bFirstmatrixfixed) { if (a.IsEmpty() || b.IsEmpty()) LogicError("InnerProduct: one of the input matrices is empty."); const int m = (int) a.GetNumRows(); const int n = (int) a.GetNumCols(); const int k = (int) b.GetNumRows(); const int l = (int) b.GetNumCols(); assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow if (m != 1 || n != l) InvalidArgument("InnerProduct: Matrices a and b should have same dimension."); c.RequireSize(k, l); // c must the the same size of b if (bFirstmatrixfixed) { for (long j = 0; j < l; j++) { for (long i = 0; i < k; i++) { c(i, j) = a(0, j) * b(i, (j + shift) % l); } } } else { for (long j = 0; j < l; j++) { for (long i = 0; i < k; i++) { c(i, j) = a(0, (j + shift) % l) * b(i, j); } } } } // CPUMatrix<ElemType>& AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift); //[this]=a 
.* b // here, a and b must be two row vectors of the same size, i.e. [1,m]. We will do element product with shift. // inputs are 2 row vectors // output is a row vector template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift) { if (a.IsEmpty() || b.IsEmpty()) LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty."); if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols()) InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match."); if (a.GetNumRows() != 1) InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector."); auto& us = *this; if (this != &a) { RequireSize(1, a.GetNumCols()); // RequireSize(a.GetNumRows(), a.GetNumCols()); } // long m = (long)GetNumRows(), n = (long)GetNumCols(); // a and b are of size (1,n) long n = (long) GetNumCols(); // a and b are of size (1,n) #pragma omp parallel for for (long j = 0; j < n; j++) { us(0, j) = a(0, j) * b(0, (j + shift) % n); } return *this; } #pragma endregion Static BLAS Functions // 'double' version of LogAdd inline double LogAddD(double x, double y) { return LogAdd(x, y); } template <class ElemType> ElemType CPUMatrix<ElemType>::LogSumOfElements() const { ElemType fAlpha = (ElemType) LZERO; ElemType* bufPtr = Data(); for (int k = 0; k < GetNumElements(); k++) fAlpha = (ElemType) LogAddD(fAlpha, bufPtr[k]); return fAlpha; } template <class ElemType> void CPUMatrix<ElemType>::RCRFBackwardCompute(const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& lbls, const CPUMatrix<ElemType>& pair_scores) { int iNumPos = (int) lbls.GetNumCols(); int iNumLab = (int) lbls.GetNumRows(); int lastLbl = -1; for (int ik = 0; ik < lbls.GetNumRows(); ik++) if (lbls(ik, iNumPos - 1) != 0) { lastLbl = ik; break; } beta.RequireSize(iNumLab, iNumPos); for (int t = iNumPos - 1; t >= 0; t--) { 
#pragma omp parallel for for (int k = 0; k < iNumLab; k++) { _rcrfBackwardCompute(t, k, alpha, beta, pair_scores); } } }; // Calculate alpha in forward-backward calculation. equation (6), (7) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf // GPU x dimension corresponds to utterances, y dimension corresponds to phone sequence in each utterance // prob (input): the posterior output from the network // alpha (output): alpha for forward-backward calculation. // phoneSeq (input): phone ID sequence for each utterance in this minibatch, each col is one utterance // phoneBound (input): phone boundary (frame index) of each phone for each utterance in this minibatch, each col is one utterance // uttToChanInd (input): map from utterance ID to minibatch channel ID. We need this because each channel may contain more than one utterance. // uttFrameNum (input): the frame number of each utterance. The size of this vector = the number of all utterances in this minibatch // uttBeginFrame(input): the positon of the first frame of each utterance in the minibatch channel. We need this because each channel may contain more than one utterance. // uttPhoneNum (input): the phone number of each utterance. The size of this vector = the number of all utterances in this minibatch // numChannels (input): channel number in this minibatch // uttNum (input): number of utterances // t (input): time stamp to process // maxPhoneNum (input): the max number of phones between utterances // totalPhoneNum (input): the total number of phones of all utterances // blankTokenId (input): id of the CTC blank token // delayConstraint -- label output delay constraint introduced during training that allows to have shorter delay during inference. // Alpha and Beta scores outside of the delay boundary are set to zero. // Setting this parameter smaller will result in shorted delay between label output during decoding. 
// delayConstraint=-1 means no constraint template<class ElemType> void _assignAlphaScore( const ElemType *prob, ElemType *alphaScore, ElemType *phoneSeq, ElemType *phoneBound, const std::vector<size_t>& uttToChanInd, const std::vector<size_t>& uttFrameNum, const std::vector<size_t>& uttBeginFrame, const std::vector<size_t>& uttPhoneNum, size_t numChannels, const size_t uttNum, const size_t t, const size_t maxPhoneNum, // Maximum length of utterance in this MB const size_t totalPhoneNum, // Total number of phones const size_t blankTokenId, const int delayConstraint) { for (size_t uttId = 0;uttId < uttNum;uttId++) { // Number of phones and frames in this utterance size_t frameNum = uttFrameNum[uttId]; if (t >= frameNum) continue; size_t phoneNum = uttPhoneNum[uttId]; #pragma omp parallel for for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) { // Index of the label in the sequence // Current and previous phone indices in phoneSeq matrix size_t labelid = uttId*maxPhoneNum + phoneSeqId; // Actual current phone label size_t phoneId = (size_t)(phoneSeq[labelid]); // Index of the current frame in minibatch size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId]; // Index of probability of observing phoneId at frame timeId size_t probId = timeId*totalPhoneNum + phoneId; size_t alphaId = maxPhoneNum* timeId + phoneSeqId; // alpha_t(s) if (t == 0) { // Initialize recursion if (phoneSeqId == 1 || phoneSeqId == 2) { alphaScore[alphaId] = prob[probId]; } } else { if (phoneSeqId >= 1) { size_t timeId_1 = timeId - numChannels; // Index corresponding to (t-1) size_t alphaId_0 = maxPhoneNum* timeId_1 + phoneSeqId; // alpha_{t-1}(s) size_t alphaId_1 = alphaId_0 - 1; // alpha_{t-1}(s-1) size_t alphaId_2 = alphaId_0 - 2; // alpha_{t-1}(s-2) ElemType x = LZERO; ElemType ascore; if (phoneSeqId > 2) { size_t labelid_2 = labelid - 2; // if current label is not blank and not equal prev non-blank label if ((size_t)(phoneSeq[labelid]) != blankTokenId && 
phoneId != (size_t)(phoneSeq[labelid_2])) { x = LogAdd(x, alphaScore[alphaId_2]); } } if (phoneSeqId > 1) { x = LogAdd(x, alphaScore[alphaId_1]); } x = LogAdd(x, alphaScore[alphaId_0]); if (phoneId != SIZE_MAX) ascore = prob[probId]; // Probability of observing given label at given time else ascore = 0; alphaScore[alphaId] = (ElemType)x + ascore; if (delayConstraint != -1) { size_t labelid_r = labelid + 2; size_t phoneBoundId_r = (size_t)(phoneBound[labelid_r]); if (phoneId == blankTokenId) { // only constraint right side if (t > phoneBoundId_r + delayConstraint - 1) alphaScore[alphaId] = LZERO; } else if (phoneId != blankTokenId) { if (t > phoneBoundId_r + delayConstraint) alphaScore[alphaId] = LZERO; } } } } } } } // Calculate beta in forward-backward calculation, equation (10), (11) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf // See _assignAlphaScore for the explanation of parameters template<class ElemType> void _assignBetaScore( const ElemType *prob, ElemType *betaScore, ElemType *phoneSeq, ElemType *phoneBound, const std::vector<size_t>& uttToChanInd, const std::vector<size_t>& uttFrameNum, const std::vector<size_t>& uttBeginFrame, const std::vector<size_t>& uttPhoneNum, const size_t numChannels, const size_t uttNum, const long t, const size_t maxPhoneNum, const size_t totalPhoneNum, const size_t blankTokenId, const int delayConstraint) { for (size_t uttId = 0;uttId < uttNum;uttId++) { // Number of phones and frames in this utterance size_t frameNum = uttFrameNum[uttId]; if (t >= frameNum) continue; size_t phoneNum = uttPhoneNum[uttId]; #pragma omp parallel for for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) { size_t labelid = uttId*maxPhoneNum + phoneSeqId; size_t labelid_2 = labelid + 2; size_t phoneId = (LONG64)(phoneSeq[labelid]); size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId]; size_t probId = timeId*totalPhoneNum + phoneId; size_t betaid = maxPhoneNum* timeId + 
phoneSeqId; size_t timeId_1 = timeId + numChannels; size_t betaid_0 = maxPhoneNum* timeId_1 + phoneSeqId; size_t betaid_1 = betaid_0 + 1; size_t betaid_2 = betaid_0 + 2; if (t == frameNum - 1) { if (phoneSeqId == phoneNum - 3 || phoneSeqId == phoneNum - 2) { betaScore[betaid] = prob[probId]; } } else { if (phoneSeqId >= 1) { ElemType x = LZERO; ElemType ascore; if (phoneSeqId < phoneNum - 3) { if (phoneSeq[labelid] != blankTokenId && phoneId != phoneSeq[labelid_2]) { x = LogAdd(x, betaScore[betaid_2]); } } if (phoneSeqId < phoneNum - 2) { x = LogAdd(x, betaScore[betaid_1]); } x = LogAdd(x, betaScore[betaid_0]); if (phoneId != SIZE_MAX) ascore = prob[probId]; else ascore = 0; betaScore[betaid] = (ElemType)x + ascore; if (delayConstraint != -1) { size_t phoneBoundId_r = (size_t)(phoneBound[labelid_2]); if (phoneId == blankTokenId) { if (t > phoneBoundId_r + delayConstraint - 1) betaScore[betaid] = LZERO; } else if (phoneId != blankTokenId) { if (t > phoneBoundId_r + delayConstraint) betaScore[betaid] = LZERO; } } } } } } } // Calculate CTC score. 
equation (8) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf template<class ElemType> void _assignTotalScore(ElemType *betaScore, std::vector<ElemType>& totalScore, const size_t uttNum, const std::vector<size_t>& uttToChanInd, const std::vector<size_t>& uttBeginFrame, const size_t numChannels, const size_t maxPhoneNum) { #pragma omp parallel for for (int uttId = 0; uttId < uttNum; uttId++) { if (uttId < uttNum) { LONG64 alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum; betaScore[alphaId_0] = LogAdd(betaScore[alphaId_0 + 1], betaScore[alphaId_0 + 2]); totalScore[uttId] = betaScore[alphaId_0]; } } } // Calculate derivative, equation (15) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf // See _assignAlphaScore for the explanation of parameters template<class ElemType> void _assignCTCScore( ElemType *CTCscore, ElemType *prob, ElemType *alphaScore, ElemType *betaScore, ElemType *phoneSeq, const size_t uttNum, const std::vector<size_t>& uttToChanInd, const std::vector<size_t>& uttBeginFrame, const std::vector<size_t>& uttPhoneNum, const std::vector<size_t>& uttFrameNum, const size_t numChannels, const size_t maxPhoneNum, const size_t totalPhoneNum) { for (size_t uttId = 0;uttId < uttNum;uttId++) { #pragma omp parallel for for (int t = 0; t < uttFrameNum[uttId]; t++) { size_t phoneNum = uttPhoneNum[uttId]; size_t alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum; size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId]; ElemType P_lx = betaScore[alphaId_0]; for (int s = 1; s < phoneNum - 1; s++) { long phoneId = phoneSeq[uttId*maxPhoneNum + s]; size_t alphaId = maxPhoneNum* timeId + s; size_t probId = timeId*totalPhoneNum + phoneId; if (phoneId != SIZE_MAX) { ElemType logoccu = alphaScore[alphaId] + betaScore[alphaId] - prob[probId] - (ElemType)P_lx; CTCscore[probId] = LogAdd(CTCscore[probId], logoccu); } } for (int 
s = 0; s < totalPhoneNum; s++) { size_t probId = timeId*totalPhoneNum + s; ElemType logoccu = CTCscore[probId]; if (logoccu < LZERO) CTCscore[probId] = 0.0f; else CTCscore[probId] = exp(logoccu); } } } } template<class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCTCScore( const CPUMatrix<ElemType>& prob, CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& phoneSeq, const CPUMatrix<ElemType>& phoneBoundary, ElemType &totalScore, const std::vector<size_t>& uttToChanInd, const std::vector<size_t> & uttBeginFrame, const std::vector<size_t> & uttFrameNum, const std::vector<size_t> & uttPhoneNum, const size_t numParallelSequences, const size_t maxFrameNum, const size_t blankTokenId, const int delayConstraint, const bool isColWise) { // Column wise representation of sequences in input matrices (each column is one sequence/utterance) if (isColWise) { // Total number of phones size_t totalPhoneNum = prob.GetNumRows(); size_t uttNum = uttFrameNum.size(); // Max number of phones in utterances in this minibatch size_t maxPhoneNum = phoneSeq.GetNumRows(); for (size_t t = 0; t < maxFrameNum; t++) { _assignAlphaScore(prob.Data(), alpha.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd, uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint); } for (LONG64 t = maxFrameNum - 1; t >= 0; t--) { _assignBetaScore(prob.Data(), beta.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd, uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint); } std::vector<ElemType> scores(uttNum); _assignTotalScore(beta.Data(), scores, uttNum, uttToChanInd, uttBeginFrame, numParallelSequences, maxPhoneNum); _assignCTCScore(Data(), prob.Data(), alpha.Data(), beta.Data(), phoneSeq.Data(), uttNum, uttToChanInd, uttBeginFrame, uttPhoneNum, uttFrameNum, numParallelSequences, maxPhoneNum, 
totalPhoneNum); for (size_t utt = 0; utt < uttNum; utt++) { totalScore += scores[utt]; } return *this; } else { LogicError("Only ColWise minibatch layout is supported."); } return *this; } /// the kernel function for RCRF backward computation template <class ElemType> void CPUMatrix<ElemType>::_rcrfBackwardCompute(size_t t, size_t k, const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& pair_scores) { size_t iNumLab = alpha.GetNumRows(); size_t iNumPos = alpha.GetNumCols(); ElemType fSum; ElemType fTmp = (ElemType) LZERO; if (t == iNumPos - 1) { fSum = (ElemType) LZERO; for (int j = 0; j < iNumLab; j++) { fSum = (ElemType) LogAddD(fSum, alpha(j, t)); } fTmp = alpha(k, t) - fSum; beta(k, t) = fTmp; } else { for (int j = 0; j < iNumLab; j++) { fSum = (ElemType) LZERO; for (int m = 0; m < iNumLab; m++) { fSum = (ElemType) LogAddD(fSum, alpha(m, t) + pair_scores(j, m)); } fTmp = (ElemType) LogAddD(fTmp, beta(j, t + 1) + alpha(k, t) + pair_scores(j, k) - fSum); } beta(k, t) = fTmp; } } template <class ElemType> void CPUMatrix<ElemType>::RCRFTransGrdCompute(const CPUMatrix<ElemType>& lbls, const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& pair_scores, CPUMatrix<ElemType>& grd) { int iNumPos = (int) alpha.GetNumCols(); int iNumLab = (int) alpha.GetNumRows(); int firstLbl = -1; for (int ik = 0; ik < lbls.GetNumRows(); ik++) if (lbls(ik, 0) != 0) { firstLbl = ik; break; } for (size_t tPos = 0; tPos < iNumPos; tPos++) { CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1); CPUMatrix<ElemType> a; if (tPos > 0) a = alpha.ColumnSlice(tPos - 1, 1); #pragma omp parallel for for (int i = 0; i < iNumLab; i++) { _rcrfTransGrdCompute(i, lbls, alpha, beta, pair_scores, grd, tPos); } // transition score int i = -1; if (tPos == 0) i = firstLbl; else { for (int ik = 0; ik < lbls.GetNumRows(); ik++) if (lbls(ik, tPos - 1) != 0) { i = ik; break; } } int j = -1; for (int ik = 0; ik < lbls.GetNumRows(); ik++) { if 
(lbls(ik, tPos) != 0) { j = ik; break; } } grd(j, i) -= 1.0; } }; template <class ElemType> void CPUMatrix<ElemType>::_rcrfTransGrdCompute(size_t i, const CPUMatrix<ElemType>& lbls, const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& beta, const CPUMatrix<ElemType>& pair_scores, CPUMatrix<ElemType>& grd, const size_t tPos // position ) { int iNumLab = (int) alpha.GetNumRows(); int firstLbl = -1; for (int ik = 0; ik < lbls.GetNumRows(); ik++) if (lbls(ik, 0) != 0) { firstLbl = ik; break; } CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1); CPUMatrix<ElemType> a; if (tPos > 0) a = alpha.ColumnSlice(tPos - 1, 1); { ElemType fTmp = (ElemType) LZERO; for (int j = 0; j < iNumLab; j++) { if (tPos == 0) { if (i == firstLbl) { fTmp = 0; } else { fTmp = (ElemType) LZERO; } } else { fTmp = a(i, 0); } fTmp += pair_scores(j, i); ElemType fSum = (ElemType) LZERO; for (int k = 0; k < iNumLab; k++) { ElemType fTmp2; if (tPos == 0) { if (k == firstLbl) { fTmp2 = 0; } else { fTmp2 = (ElemType) LZERO; } } else { fTmp2 = a(k, 0); } fSum = (ElemType) LogAddD(fSum, fTmp2 + pair_scores(j, k)); } fTmp -= fSum; fTmp += b(j, 0); grd(j, i) += exp(fTmp); } } }; template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::DropFrame(const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& gamma, const ElemType& threshhold) { auto& us = *this; if (us.GetNumCols() != gamma.GetNumCols() || us.GetNumRows() != gamma.GetNumRows()) LogicError("DropFrame: target matrix is not in the same size as gamm matrix."); #pragma omp parallel for foreach_column (j, label) { bool dropframe = false; foreach_row (i, label) { if (fabs(label(i, j) - 1.0f) < 0.1) { if (gamma(i, j) < threshhold) dropframe = true; break; } } foreach_row (i, label) { us(i, j) = 0.0f; } } return *this; } template <class ElemType> CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSequenceError(const ElemType hsmoothingWeight, const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& dnnoutput, const CPUMatrix<ElemType>& 
gamma, ElemType alpha) { auto& us = *this; foreach_coord (i, j, us) us(i, j) += alpha * (label(i, j) - (1 - hsmoothingWeight) * dnnoutput(i, j) - hsmoothingWeight * gamma(i, j)); return *this; } // note: this function does not depend on the <ElemType> parameter template <class ElemType> int CPUMatrix<ElemType>::SetNumThreads(int numThreads) { if (numThreads == 0) // use default return numThreads; int mthreads = (int) std::thread::hardware_concurrency(); if (numThreads <= 0) numThreads = std::max(1, mthreads + numThreads); if (numThreads > mthreads) numThreads = mthreads; #ifdef _OPENMP omp_set_num_threads(numThreads); numThreads = omp_get_max_threads(); #ifdef USE_MKL mkl_set_num_threads(numThreads); #elif defined(USE_OPENBLAS) openblas_set_num_threads(numThreads); #endif #endif return numThreads; } template <class ElemType> int CPUMatrix<ElemType>::GetMaxNumThreads() { int numThreads = (int)std::thread::hardware_concurrency(); #ifdef _OPENMP numThreads = omp_get_max_threads(); #endif return numThreads; } // To ensure Intel MKL calls return the same results on all Intel or Intel compatible CPUs, // the function set CBWR compatible mode. template <class ElemType> void CPUMatrix<ElemType>::SetCompatibleMode() { #ifdef USE_MKL if (mkl_cbwr_set(MKL_CBWR_COMPATIBLE) != MKL_CBWR_SUCCESS) RuntimeError("Could not set MKL compatible mode."); #endif } // ======================================================================= // TensorView support // ======================================================================= // To save time, this makes extensive use of templates and macros. // ----------------------------------------------------------------------- // function to compute the value for a given output location (perform reduction if needed) // ----------------------------------------------------------------------- // perform loop over reduction index m // This function is declared inside a wrapper struct to allow partial specialization (m = -1). 
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int m> struct TensorOpReduction { // reduction case (non-reduction case is specialized) static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled strides[i] = reducingStrides[i][(size_t) m]; double aggregate = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides); for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;) { // advance the pointers for (size_t i = 0; i < N - 1; i++) pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here // need to descend into one loop deeper aggregate = reductionOp(aggregate, TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides)); } // Actually it would be nicer to return double but we keep ElementType so that test don't return different numbers than previous implementation. return static_cast<double>(aggregate); } }; // perform loop over reduction index m // This is the specialized version for m = -1, which terminates the recursion. template <class ElemType, typename OPFN, typename ReductionOp, size_t N> struct TensorOpReduction<ElemType, OPFN, ReductionOp, N, -1> { static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&) { return opfn(pointers); // finally we are doing some work!!! } }; // perform loop over reduction index m, while keeping track of the number of elements and their corresponding indices. 
// This function is declared inside a wrapper struct to allow partial specialization (m = -1). template <class ElemType, size_t N, int m> struct TensorArgOpReduction { static inline std::pair<ElemType, size_t> ReduceAll(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { size_t counter = 0; size_t index = 0; ElemType val = (ElemType)0; switch (reducingOpDims.size()) { case 3: val = TensorArgOpReduction<ElemType, N, 2>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 2: val = TensorArgOpReduction<ElemType, N, 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 1: val = TensorArgOpReduction<ElemType, N, 0>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; case 0: val = TensorArgOpReduction<ElemType, N, -1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); break; default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)reducingOpDims.size()); } return make_pair(val, index); } // reduction case (non-reduction case is specialized) static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp, size_t& counter, size_t& index) { array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled strides[i] = reducingStrides[i][(size_t)m]; ElemType aggregate = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;) { // advance the pointers for (size_t i = 0; i < N - 1; i++) pointers[i] += strides[i]; // note: last 
pointer (result) is unused and untouched here ElemType val = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index); bool update = false; switch (reductionOp) { case ElementWiseOperator::opArgmin: update = (aggregate > val); break; case ElementWiseOperator::opArgmax: update = (aggregate < val); break; } if (update) { aggregate = val; index = counter - 1; } } return aggregate; } }; // perform loop over reduction index m // This is the specialized version for m = -1, which terminates the recursion. template <class ElemType, size_t N> struct TensorArgOpReduction<ElemType, N, -1> { static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, ElementWiseOperator reductionOp, size_t& counter, size_t& index) { counter++; return *pointers[0]; // finally we are doing some work!!! } }; // ----------------------------------------------------------------------- // perform loop over regular index k for N-nary operations (N counting the output) // ----------------------------------------------------------------------- // perform loop over regular index k and reducing index m for N operands (counting the output) template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m, int k> struct TensorOpIteration { static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // non-scalar case: still nested result loops left array<ptrdiff_t, N> strides; for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled strides[i] = regularStrides[i][(size_t) k]; for (size_t dim = regularOpDims[(size_t) k]; dim-- > 0;) { // need to descend into 
one loop deeper TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, k - 1>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); // advance the pointers for (size_t i = 0; i < N; i++) pointers[i] += strides[i]; } } }; // Special version for innermost loop with strides all being 1 and no further reduction. Compiler can use SSE. // This is a very common case, e.g. adding vectors or computing the Sigmoid. template <class ElemType, typename OPFN, typename ReductionOp> struct TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/> { static inline void Loop(ElemType beta, array<ElemType*, 3> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides) { ElemType* pa = pointers[0]; ElemType* pb = pointers[1]; ElemType* pc = pointers[2]; size_t K = regularOpDims[0]; // special-case beta and alpha to allow the compiler to short-circuit it if (beta != 0) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else if (alpha != 1) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no 
reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); // TODO: According to Amit, the VS compiler is not able to vectorize into lambdas. Solution: change the lambda to take an N, or to implement the loop inside (with 1 element by default). // TODO: The signedness of k (required for omp) causes an extra sign-extend. // TODO: OMP adds LOTS of overhead. Do we need a guard, a min size when to use it? } }; // and unary template <class ElemType, typename OPFN, typename ReductionOp> struct TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/> { static inline void Loop(ElemType beta, array<ElemType*, 2> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { ElemType* pa = pointers[0]; ElemType* pb = pointers[1]; size_t K = regularOpDims[0]; // special-case beta and alpha to allow the compiler to short-circuit it if (beta != 0) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else if (alpha != 1) #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else #pragma omp parallel for for (int k = 0; k < (int) K; k++) TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, 
-1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } }; template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m> struct TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, -1> { static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // we are at element level for the result: perform the op (there may still be reduction) ElemType val = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides); // scale val *= alpha; // combine with previous value in target matrix, then write it out auto* pout = pointers.back(); if (beta != 0) val += beta * *pout; // save *pout = val; return; } }; // perform loop over regular index k and reducing index m for N operands (counting the output), the difference // between TensorOpIteration and TensorArgOpIteration, is that the latter store the index of the result, instead of // the result. The reason that they aren't combined is because of performance. 
template <class ElemType, size_t N, int k> struct TensorArgOpIteration { static inline void Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { // non-scalar case: still nested result loops left array<ptrdiff_t, N> strides; for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled strides[i] = regularStrides[i][(size_t)k]; for (size_t dim = regularOpDims[(size_t)k]; dim-- > 0;) { // need to descend into one loop deeper TensorArgOpIteration<ElemType, N, k - 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); // advance the pointers for (size_t i = 0; i < N; i++) pointers[i] += strides[i]; } } }; template <class ElemType, size_t N> struct TensorArgOpIteration<ElemType, N, -1> { static inline void Loop(array<ElemType*, N> pointers, const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp) { // we are at element level for the result: perform the op (there may still be reduction) auto val = TensorArgOpReduction<ElemType, N, 2>::ReduceAll(pointers, reducingOpDims, reducingStrides, reductionOp); auto* pout = pointers.back(); *pout = (ElemType)val.second; return; } }; // ----------------------------------------------------------------------- // map runtime parameters N to template parameters // ----------------------------------------------------------------------- // tensor operation with k+1 dimensions (-1 means scalar) template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int k> static void TensorOpWithRegularLoop(ElemType beta, const array<ElemType*, N>& pointers, ElemType alpha, const OPFN& opfn, ReductionOp reductionOp, const 
SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { size_t dims = reducingOpDims.size(); switch (dims) { case 2: return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 1: return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 0, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 0: { // if all leading dimensions are 1, we can let the compiler do some unrolling bool leadingAllOne = true; for (size_t i = 0; i < N; i++) leadingAllOne &= k >= 0 && regularStrides[i][0] == 1; if (leadingAllOne) // special version that uses a hard-coded increment of 1 for all leading dimensions return TensorOpIteration<ElemType, OPFN, ReductionOp, N, true /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); else return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); } default: LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (int) dims); } } // tensor operation, generalized in number of arguments, operation already provided as a lambda // This function now expands into different k. 
template <class ElemType, typename OPFN, typename ReductionOp, size_t N> static void TensorOpWithFnAndReduction(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp, const array<size_t, N>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled pointers[i] += offsets[i]; size_t dims = regularOpDims.size(); switch (dims) { case 4: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 3>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 3: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 2>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 2: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 1: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 0>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); case 0: return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, -1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides); default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)dims); } } // tensor operation, generalized in number of arguments, operation already provided as a lambda // This function now expands into different reductionOps template <class ElemType, typename OPFN, size_t N> static void TensorOpWithFn(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, ElementWiseOperator reductionOp, const array<size_t, N>& offsets, 
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides) { // BUGBUG: Using always 'double' as type of aggregator even for ElemType==float. Reason: otherwise some e2e test would fail as historically we // used double for aggregator of sum. But: // * for min and max reductions this is meaningless. // * It is not consitent with what we do on GPU, there we aggregate on ElemType. // * It costs performance. // TODO: apdapt e2e tests to run with aggregator of type ElemType. #define CaseTensorOpWithFnAndReduction(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFnAndReduction(beta, pointers, alpha, opfn, [](double a, double b) \ { \ return Op##oper(a, b); \ }, \ offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) switch (reductionOp) { CaseTensorOpWithFnAndReduction(Sum); CaseTensorOpWithFnAndReduction(LogSum); CaseTensorOpWithFnAndReduction(Min); CaseTensorOpWithFnAndReduction(Max); CaseTensorOpWithFnAndReduction(ElementwiseProduct); default: LogicError("Specified ElementWiseOperator op %d not suported as reduction operation.", (int)reductionOp); } } // ----------------------------------------------------------------------- // entry points from Matrix.cpp; also map op to a lambda // ----------------------------------------------------------------------- // perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. 
// Perform unary elementwise operation 'op' on tensor 'a', writing into 'this':
//     this = beta * this + alpha * reduce(op(a))
// (the beta/alpha combination is applied per output element by TensorOpIteration).
// The matrices are reinterpreted as tensors according to offsets/dims/strides.
// Supported reductions for the unary case: opSum, opLogSum, opMin, opMax, opElementwiseProduct.
// This function maps 'op' to a lambda, which is then dispatched on the reduction op.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
                                   const array<size_t, 2>& offsets,
                                   const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
                                   const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
    // validate the reduction op up front; everything else is dispatched below
    if (reductionOp != ElementWiseOperator::opSum    &&
        reductionOp != ElementWiseOperator::opLogSum &&
        reductionOp != ElementWiseOperator::opMin    &&
        reductionOp != ElementWiseOperator::opMax    &&
        reductionOp != ElementWiseOperator::opElementwiseProduct)
        InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented.");

// TODO: Change the lambda to take a pointer and a number of elements, so that we can pass it 1 or 4 elements, in order for it to SSE-vectorize.
// Each case expands to a call of TensorOpWithFn with a lambda that applies Op##oper to the input element.
// pp[0] is the input pointer (a), pp[1] (the last one) is the output pointer (this).
#define CaseUnaryTensorOp(oper)                                                        \
    case ElementWiseOperator::op##oper:                                                \
        return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 2>& pp) \
                              {                                                        \
                                  return Op##oper((*(pp[0])));                         \
                              },                                                       \
                              reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)

    array<ElemType*, 2> pointers = {a.Data(), Data()};
    switch (op)
    {
        // expands CaseUnaryTensorOp for every unary op known to the ElementWiseOperator enum
        ForAllUnaryOps(CaseUnaryTensorOp);
    default:
        LogicError("TensorOp: Unknown unary op code %d.", (int) op);
    }
}

// perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType> void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 3>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum) InvalidArgument("TensorOp (binary): The only permitted binary reduction operation is opSum."); #define CaseBinaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 3>& pp) \ { \ return Op##oper((*(pp[0])), (*(pp[1]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 3> pointers = {a.Data(), b.Data(), Data()}; switch (op) { ForAllBinaryOps(CaseBinaryTensorOp); default: LogicError("TensorOp: Unknown op binary code %d.", (int) op); } } // perform ternary operation 'op' on a, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides // This maps 'op' to a lambda. 
template <class ElemType> void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp, const array<size_t, 4>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides) { if (reductionOp != ElementWiseOperator::opSum) InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum."); #define CaseTernaryTensorOp(oper) \ case ElementWiseOperator::op##oper: \ return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 4>& pp) \ { \ return Op##oper((*(pp[0])), (*(pp[1])), (*(pp[2]))); \ }, \ reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides) array<ElemType*, 4> pointers = {a.Data(), b.Data(), c.Data(), Data()}; switch (op) { ForAllTernaryOps(CaseTernaryTensorOp); default: LogicError("TensorOp: Unknown ternary op code %d.", (int) op); } } template <class ElemType> int CPUMatrix<ElemType>::Argmin() const { int minArg = -1; ElemType minValue = std::numeric_limits<ElemType>::max(); #pragma omp parallel { int localMinArg = -1; ElemType localMinValue = std::numeric_limits<ElemType>::max(); #pragma omp for for (int index = 0; index < (int)GetNumElements(); ++index) { if (localMinValue > Data()[index]) { localMinArg = index; localMinValue = Data()[index]; } // If we have more then one min value, select the one with lower index. else if ((localMinValue == Data()[index]) && (localMinArg > index)) { localMinArg = index; } } #pragma omp critical { if (minValue > localMinValue) { minArg = localMinArg; minValue = localMinValue; } // If we have more then one min value, select the one with lower index. 
else if ((minValue == localMinValue) && (minArg > localMinArg)) { minArg = localMinArg; } } } return minArg; } template <class ElemType> int CPUMatrix<ElemType>::Argmax() const { int maxArg = -1; ElemType maxValue = std::numeric_limits<ElemType>::min(); #pragma omp parallel { int localMaxArg = -1; ElemType localMaxValue = std::numeric_limits<ElemType>::min(); #pragma omp for for (int index = 0; index < (int)GetNumElements(); ++index) { if (localMaxValue < Data()[index]) { localMaxArg = index; localMaxValue = Data()[index]; } // If we have more then one max value, select the one with lower index. else if ((localMaxValue == Data()[index]) && (localMaxArg > index)) { localMaxArg = index; } } #pragma omp critical { if (maxValue < localMaxValue) { maxArg = localMaxArg; maxValue = localMaxValue; } // If we have more then one max value, select the one with lower index. else if ((maxValue == localMaxValue) && (maxArg > localMaxArg)) { maxArg = localMaxArg; } } } return maxArg; } template <class ElemType> int CPUMatrix<ElemType>::ArgOp(ElementWiseOperator reductionOp) const { switch (reductionOp) { case ElementWiseOperator::opArgmin: return Argmin(); break; case ElementWiseOperator::opArgmax: return Argmax(); break; } InvalidArgument("ArgOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented."); return -1; } template <class ElemType> void CPUMatrix<ElemType>::TensorArgOp(const CPUMatrix<ElemType>& a, ElementWiseOperator reductionOp, const array<size_t, 2>& offsets, const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides) { if (reductionOp != ElementWiseOperator::opArgmin && reductionOp != ElementWiseOperator::opArgmax) InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented."); if (GetNumElements() == 1) { Data()[0] = (ElemType) a.ArgOp(reductionOp); } else { 
const size_t N = 2; array<ElemType*, N> pointers = { a.Data(), Data() }; for (size_t i = 0; i < N; i++) pointers[i] += offsets[i]; switch (regularOpDims.size()) { case 2: TensorArgOpIteration<ElemType, N, 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; case 1: TensorArgOpIteration<ElemType, N, 0>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; case 0: TensorArgOpIteration<ElemType, N, -1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp); break; default: LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)regularOpDims.size()); } } } // We use Matrix<char> as the backing store for QuantizedMatrix // Let's explicitly instantiate the methods we need for that purpose template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols); template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols, char* pArray, const size_t matrixFlags); template CPUMatrix<char>::CPUMatrix(); template CPUMatrix<char>::CPUMatrix(CPUMatrix<char> const&); template CPUMatrix<char>::CPUMatrix(CPUMatrix<char>&&); template size_t CPUMatrix<char>::LocateElement(size_t, size_t) const; template CPUMatrix<char> CPUMatrix<char>::ColumnSlice(size_t startColumn, size_t numCols) const; template CPUMatrix<char>& CPUMatrix<char>::operator=(CPUMatrix<char>&&); template void CPUMatrix<char>::SetValue(const char); template void CPUMatrix<char>::SetValue(const size_t numRows, const size_t numCols, char* pArray, size_t matrixFlags); template void CPUMatrix<char>::SetValue(CPUMatrix<char> const&); //template void CPUMatrix<char>::SetValue(GPUMatrix<char> const&); //template void CPUMatrix<char>::SetValue(CPUSparseMatrix<char> const&); //template void CPUMatrix<char>::SetValue(GPUSparseMatrix<char> const&); template void CPUMatrix<char>::RequireSize(const size_t numRows, const size_t numCols, bool 
growOnly); template void CPUMatrix<char>::Resize(const size_t numRows, const size_t numCols, bool growOnly); template char* CPUMatrix<char>::CopyToArray(void) const; template void CPUMatrix<char>::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const; template void CPUMatrix<char>::Reshape(const size_t, const size_t); // Support <short> template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols); template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols, short* pArray, const size_t matrixFlags); template CPUMatrix<short>::CPUMatrix(); template CPUMatrix<short>::CPUMatrix(CPUMatrix<short> const&); template CPUMatrix<short>::CPUMatrix(CPUMatrix<short>&&); template size_t CPUMatrix<short>::LocateElement(size_t, size_t) const; template CPUMatrix<short> CPUMatrix<short>::ColumnSlice(size_t startColumn, size_t numCols) const; template CPUMatrix<short>& CPUMatrix<short>::operator=(CPUMatrix<short>&&); template void CPUMatrix<short>::SetValue(const short); template void CPUMatrix<short>::SetValue(const size_t numRows, const size_t numCols, short* pArray, size_t matrixFlags); template void CPUMatrix<short>::SetValue(CPUMatrix<short> const&); //template void CPUMatrix<short>::SetValue(GPUMatrix<short> const&); //template void CPUMatrix<short>::SetValue(CPUSparseMatrix<short> const&); //template void CPUMatrix<short>::SetValue(GPUSparseMatrix<short> const&); template void CPUMatrix<short>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly); template void CPUMatrix<short>::Resize(const size_t numRows, const size_t numCols, bool growOnly); template short* CPUMatrix<short>::CopyToArray(void) const; template void CPUMatrix<short>::CopySection(size_t numRows, size_t numCols, short* dst, size_t colStride) const; template void CPUMatrix<short>::Reshape(const size_t, const size_t); template CPUMatrix<int>::CPUMatrix(const size_t, const size_t, int*, const size_t); }}}
{ "alphanum_fraction": 0.5698053256, "avg_line_length": 36.0776685588, "ext": "h", "hexsha": "aae1cae785d1ab712567c8bc79a39c7fa2615dac", "lang": "C", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2022-01-06T08:41:32.000Z", "max_forks_repo_forks_event_min_datetime": "2019-08-23T11:42:14.000Z", "max_forks_repo_head_hexsha": "894d9e1a5d65d30cd33803c06a988844bb87fcb7", "max_forks_repo_licenses": [ "RSA-MD" ], "max_forks_repo_name": "vschs007/CNTK", "max_forks_repo_path": "Source/Math/CPUMatrixImpl.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "894d9e1a5d65d30cd33803c06a988844bb87fcb7", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "RSA-MD" ], "max_issues_repo_name": "vschs007/CNTK", "max_issues_repo_path": "Source/Math/CPUMatrixImpl.h", "max_line_length": 272, "max_stars_count": 5, "max_stars_repo_head_hexsha": "894d9e1a5d65d30cd33803c06a988844bb87fcb7", "max_stars_repo_licenses": [ "RSA-MD" ], "max_stars_repo_name": "vschs007/CNTK", "max_stars_repo_path": "Source/Math/CPUMatrixImpl.h", "max_stars_repo_stars_event_max_datetime": "2021-04-20T21:12:52.000Z", "max_stars_repo_stars_event_min_datetime": "2017-08-28T08:27:18.000Z", "num_tokens": 70645, "size": 260589 }
#pragma once #include <gsl/span> /** * \brief Class for managing Bluetooth state. */ class Bluetooth { public: /** * \brief Disconnects a device with matching MAC address from * the first Bluetooth radio it is connected to. * \param macAddress The MAC address to search for. * \return \c true on success. */ static bool disconnectDevice(const gsl::span<uint8_t>& macAddress); };
{ "alphanum_fraction": 0.7081218274, "avg_line_length": 21.8888888889, "ext": "h", "hexsha": "ecdeacd520a0d31ee08aecfc0223570228cc45aa", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4d2ddc15b7db7cc5618c8676c91cf81614921c3c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "SonicFreak94/ds4wizard", "max_forks_repo_path": "ds4wizard-cpp/Bluetooth.h", "max_issues_count": 1, "max_issues_repo_head_hexsha": "4d2ddc15b7db7cc5618c8676c91cf81614921c3c", "max_issues_repo_issues_event_max_datetime": "2020-06-30T04:00:38.000Z", "max_issues_repo_issues_event_min_datetime": "2020-01-29T20:34:26.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "SonicFreak94/ds4wizard", "max_issues_repo_path": "ds4wizard-cpp/Bluetooth.h", "max_line_length": 68, "max_stars_count": 7, "max_stars_repo_head_hexsha": "4d2ddc15b7db7cc5618c8676c91cf81614921c3c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "SonicFreak94/ds4wizard", "max_stars_repo_path": "ds4wizard-cpp/Bluetooth.h", "max_stars_repo_stars_event_max_datetime": "2021-11-13T08:35:31.000Z", "max_stars_repo_stars_event_min_datetime": "2019-02-27T19:23:34.000Z", "num_tokens": 94, "size": 394 }
/* MIT License Copyright (c) 2020 Huy Vo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #ifndef PACMENSL_UTIL_H #define PACMENSL_UTIL_H #define ARMA_DONT_PRINT_ERRORS #define ARMA_DONT_USE_WRAPPER #include <armadillo> #include <petscmat.h> #include <petscvec.h> #include <petscao.h> #include <petscis.h> #include <petscistypes.h> #include <petscoptions.h> #include <petsc.h> #include <petscsys.h> #include <petscconf.h> #include <cassert> #include <memory> #include <mpi.h> #include <zoltan.h> #include <parmetis.h> #include <string> #include <sstream> #include "ErrorHandling.h" #define NOT_COPYABLE_NOT_MOVABLE(object)\ object( const object & ) = delete;\ object &operator=( const object & ) = delete;\ namespace pacmensl { #define SQR1 sqrt(0.1e0) /*! Round to 2 significant digits */ double round2digit(double x); /*! 
Initialize and finalize Parallel context * */ int PACMENSLInit(int *argc, char ***argv, const char *help); int PACMENSLFinalize(); void sequential_action(MPI_Comm comm, std::function<void(void *)> action, void *data); class Environment { public: Environment(); Environment(int *argc, char ***argv, const char *help); ~Environment(); private: bool initialized = false; bool init_petsc = false; // If PETSc or MPI were already set, we do not meddle with them bool init_mpi = false; }; } #endif
{ "alphanum_fraction": 0.7533020878, "avg_line_length": 28.2771084337, "ext": "h", "hexsha": "cee8004f1d47891a82f68449491ad8059afab254", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "d35adb165caef5c8fa992be6fda16e1bfb1dfd4a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "voduchuy/pacmensl", "max_forks_repo_path": "src/Sys/Sys.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "d35adb165caef5c8fa992be6fda16e1bfb1dfd4a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "voduchuy/pacmensl", "max_issues_repo_path": "src/Sys/Sys.h", "max_line_length": 90, "max_stars_count": null, "max_stars_repo_head_hexsha": "d35adb165caef5c8fa992be6fda16e1bfb1dfd4a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "voduchuy/pacmensl", "max_stars_repo_path": "src/Sys/Sys.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 554, "size": 2347 }
/// \file energy.c
/// \brief Sort and fill energy levels to obtain occupancy of each level.
//
//  Copyright (c) 2014, Christian B. Mendl
//  All rights reserved.
//  http://christian.mendl.net
//
//  This program is free software; you can redistribute it and/or
//  modify it under the terms of the Simplified BSD License
//  http://www.opensource.org/licenses/bsd-license.php
//
//  Reference:
//      Christian B. Mendl, Francesc Malet, Paola Gori-Giorgi
//      Wigner localization in quantum dots from Kohn-Sham density functional theory without symmetry breaking
//      Physical Review B 89, 125106 (2014)
//      (preprint http://arxiv.org/abs/1311.6011)
//_______________________________________________________________________________________________________________________
//

#include "energy.h"
#include <stdlib.h>
#include <assert.h>
#include <gsl/gsl_roots.h>
#include <gsl/gsl_errno.h>


/// \brief Allocate memory for energy levels structure
///
/// Reserves (m_max+1)*num energies, indexed as en[n + m*num].
/// NOTE(review): malloc result is not checked — caller assumes success.
void AllocateEnergyLevels(const int m_max, const int num, en_levels_t *levels)
{
	levels->m_max = m_max;
	levels->num = num;
	levels->en = malloc((m_max+1)*num * sizeof(double));
}

/// \brief Free memory of energy levels structure
void FreeEnergyLevels(en_levels_t *levels)
{
	free(levels->en);
	levels->num = 0;
	levels->m_max = 0;
}

//_______________________________________________________________________________________________________________________
//

/// \brief Comparison function used for sorting (qsort-compatible; ascending energy,
/// with (m, n) quantum numbers as tie-breakers for a deterministic order)
static int en_occupancy_cmp(const void *e1, const void *e2)
{
	const en_occupancy_t *a = (en_occupancy_t *)e1;
	const en_occupancy_t *b = (en_occupancy_t *)e2;

	// sort according to energy
	if (a->en < b->en)
	{
		return -1;
	}
	else if (a->en > b->en)
	{
		return 1;
	}

	// should actually never reach this point

	// compare 'm' quantum numbers
	if (a->m < b->m)
	{
		return -1;
	}
	else if (a->m > b->m)
	{
		return 1;
	}

	// compare energy quantum numbers
	if (a->n < b->n)
	{
		return -1;
	}
	else if (a->n > b->n)
	{
		return 1;
	}

	return 0;
}


/// \brief Get the maximum occupancy of an energy level, depending on 'm' quantum number and whether state is spin-polarized
///
/// m != 0 doubles degeneracy (+/- m); a non-spin-polarized state doubles it again (spin up/down),
/// so the result is 1, 2 or 4.
static inline int GetMaxOccupancy(const int m, const bool spinpol)
{
	int occ = 1;
	if (m != 0)
	{
		occ *= 2;
	}
	if (!spinpol)
	{
		occ *= 2;
	}

	return occ;
}

//_______________________________________________________________________________________________________________________
//

/// \brief Fermi-Dirac distribution function: 1 / (exp((en - mu)/kBT) + 1)
static double FermiDirac(const double kBT, const double mu, const double en)
{
	return 1 / (exp((en - mu)/kBT) + 1);
}


/// \brief GSL parameters for NumParticleDifference()
typedef struct
{
	int nelec;		//!< physical number of electrons
	int nlevels;	//!< number of energy levels
	double *en;		//!< energies
	int *occ;		//!< maximum occupancy of each energy level
	double kBT;		//!< k_B T (Boltzmann constant times temperature)
}
gsl_FD_params_t;


/// \brief Calculate difference in number of particles given a Fermi-Dirac distribution with chemical potential 'mu'
///
/// Root of this function in 'mu' yields the chemical potential for exactly 'nelec' electrons.
static double NumParticleDifference(double mu, void *p)
{
	int i;
	const gsl_FD_params_t *params = (gsl_FD_params_t *)p;

	double N = 0;
	for (i = 0; i < params->nlevels; i++)
	{
		N += params->occ[i] * FermiDirac(params->kBT, mu, params->en[i]);
	}

	return N - params->nelec;
}

//_______________________________________________________________________________________________________________________
///
/// \brief Calculate chemical potential 'mu' such that FermiDirac distribution gives exactly 'nelec' electrons
///
/// Uses GSL Brent root bracketing on NumParticleDifference over [0, last stored energy].
/// NOTE(review): assumes the root lies in [0, en_last], i.e. a non-negative chemical
/// potential and energies stored such that the "last" entry is an upper bound — verify
/// against how callers fill 'levels'.
static double CalculateChemicalPotential(const en_levels_t *levels, const int nelec, const bool spinpol, const double kBT)
{
	int m, n, i;
	const int maxiter = 128;

	gsl_FD_params_t params;
	params.nelec   = nelec;
	params.nlevels = (levels->m_max+1)*levels->num;
	params.en      = malloc(params.nlevels * sizeof(double));
	params.occ     = malloc(params.nlevels * sizeof(int));
	params.kBT     = kBT;

	// fill energy levels and occupancies
	for (m = 0; m <= levels->m_max; m++)
	{
		for (n = 0; n < levels->num; n++)
		{
			params.en [n + m*levels->num] = levels->en[n + m*levels->num];
			params.occ[n + m*levels->num] = GetMaxOccupancy(m, spinpol);
		}
	}

	// first interval used in interval bisection method
	double mu_lo = 0.0;
	double mu_hi = levels->en[(levels->m_max+1)*levels->num-1];		// use "last" energy level as upper bound

	gsl_function F;
	F.function = &NumParticleDifference;
	F.params   = &params;

	const gsl_root_fsolver_type *T = gsl_root_fsolver_brent;
	gsl_root_fsolver *s = gsl_root_fsolver_alloc(T);
	gsl_root_fsolver_set(s, &F, mu_lo, mu_hi);

	double mu = 0;
	int status = GSL_CONTINUE;
	for (i = 0; i < maxiter && status == GSL_CONTINUE; i++)
	{
		// one Brent iteration; warn (but keep going) on solver errors
		status = gsl_root_fsolver_iterate(s);
		if (status != GSL_SUCCESS) {
			fprintf(stderr, "CalculateOccupancy() warning: gsl_root_fsolver_iterate() returned with status code %i\n", status);
		}
		mu = gsl_root_fsolver_root(s);
		mu_lo = gsl_root_fsolver_x_lower(s);
		mu_hi = gsl_root_fsolver_x_upper(s);
		// converged once the bracketing interval is below absolute tolerance 1e-8
		status = gsl_root_test_interval(mu_lo, mu_hi, 0, 1e-8);
	}
	if (status != GSL_SUCCESS) {
		fprintf(stderr, "CalculateOccupancy() warning: not converged after %i iterations, status: %i\n", maxiter, status);
	}

	// clean up
	free(params.occ);
	free(params.en);
	gsl_root_fsolver_free(s);

	return mu;
}

//_______________________________________________________________________________________________________________________
///
/// \brief Calculate occupancy of energy levels
///
/// kBT == 0: sort levels by energy and fill from the bottom (Aufbau); occlist->length
/// is the number of (partially) filled levels.
/// kBT > 0: fractional Fermi-Dirac occupancies with chemical potential from
/// CalculateChemicalPotential(); every level appears in the (sorted) output list.
/// Allocates occlist->occupancy; release with FreeOccupancy().
void CalculateOccupancy(const en_levels_t *levels, const int nelec, const bool spinpol, const double kBT, en_occlist_t *occlist)
{
	int i, m, n;

	// accumulate all energies into one list
	occlist->occupancy = malloc((levels->m_max+1)*levels->num * sizeof(en_occupancy_t));

	if (kBT == 0)	// zero temperature
	{
		for (m = 0; m <= levels->m_max; m++)
		{
			for (n = 0; n < levels->num; n++)
			{
				en_occupancy_t *encur = &occlist->occupancy[n + m*levels->num];

				encur->en  = levels->en[n + m*levels->num];
				encur->m   = m;
				encur->n   = n;
				encur->occ = 0;		// initially set to zero
			}
		}

		// sort energies
		qsort(occlist->occupancy, (levels->m_max+1)*levels->num, sizeof(en_occupancy_t), en_occupancy_cmp);

		// fill up energy levels
		// NOTE(review): assumes nelec does not exceed the total capacity of the stored
		// levels; otherwise 'encur' walks past the end of the array — confirm callers.
		en_occupancy_t *encur = occlist->occupancy;
		occlist->length = 1;
		for (i = 0; i < nelec; i++)
		{
			if (encur->occ >= GetMaxOccupancy(encur->m, spinpol))
			{
				encur++;	// use next level
				occlist->length++;
			}
			encur->occ++;
		}
	}
	else	// k_B T > 0
	{
		assert(kBT > 0);

		const double mu = CalculateChemicalPotential(levels, nelec, spinpol, kBT);

		// all energy levels have nonzero occupancy since Fermi-Dirac function is strictly positive
		occlist->length = (levels->m_max+1)*levels->num;

		for (m = 0; m <= levels->m_max; m++)
		{
			for (n = 0; n < levels->num; n++)
			{
				en_occupancy_t *encur = &occlist->occupancy[n + m*levels->num];

				encur->en  = levels->en[n + m*levels->num];
				encur->m   = m;
				encur->n   = n;
				encur->occ = GetMaxOccupancy(m, spinpol) * FermiDirac(kBT, mu, encur->en);
			}
		}

		// sort energies
		qsort(occlist->occupancy, (levels->m_max+1)*levels->num, sizeof(en_occupancy_t), en_occupancy_cmp);
	}
}


/// \brief Free memory of energy level occupancy list
void FreeOccupancy(en_occlist_t *occlist)
{
	free(occlist->occupancy);
	occlist->length = 0;
}
{ "alphanum_fraction": 0.6933351322, "avg_line_length": 25.3835616438, "ext": "c", "hexsha": "3b572ddebbb19e4b1a58c60db7f909ef1369a97e", "lang": "C", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-03-25T22:00:35.000Z", "max_forks_repo_forks_event_min_datetime": "2020-10-26T21:06:00.000Z", "max_forks_repo_head_hexsha": "f149e1d6684ea321b62710638f153d3dce196ca2", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "Arfaouim/quantum-dot", "max_forks_repo_path": "src/energy.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "f149e1d6684ea321b62710638f153d3dce196ca2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "Arfaouim/quantum-dot", "max_issues_repo_path": "src/energy.c", "max_line_length": 128, "max_stars_count": 1, "max_stars_repo_head_hexsha": "f149e1d6684ea321b62710638f153d3dce196ca2", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "Arfaouim/quantum-dot", "max_stars_repo_path": "src/energy.c", "max_stars_repo_stars_event_max_datetime": "2021-06-24T13:59:40.000Z", "max_stars_repo_stars_event_min_datetime": "2021-06-24T13:59:40.000Z", "num_tokens": 2167, "size": 7412 }
/** @file */ #ifndef __CCL_HALOFIT_H_INCLUDED__ #define __CCL_HALOFIT_H_INCLUDED__ #include <gsl/gsl_spline.h> #include <gsl/gsl_spline.h> CCL_BEGIN_DECLS typedef struct halofit_struct { gsl_spline *rsigma; gsl_spline *sigma2; gsl_spline *n_eff; gsl_spline *C; gsl_spline *weff; gsl_spline *omeff; gsl_spline *deeff; } halofit_struct; /* * Allocate a new struct for storing halofit data * @param cosmo Cosmological data * @return int, status of computations */ halofit_struct* ccl_halofit_struct_new(ccl_cosmology *cosmo, int *status); /* * Free a halofit struct * @param hf, pointer to halofit struct to free */ void ccl_halofit_struct_free(halofit_struct *hf); /** * Computes the halofit non-linear power spectrum * @param cosmo: cosmology object containing parameters * @param k: wavenumber in units of Mpc^{-1} * @param a: scale factor normalised to a=1 today * @param status: Status flag: 0 if there are no errors, non-zero otherwise * @param hf: halofit splines for evaluating the power spectrum * @return halofit_matter_power: halofit power spectrum, P(k), units of Mpc^{3} */ double ccl_halofit_power(ccl_cosmology *cosmo, double k, double a, halofit_struct *hf, int *status); CCL_END_DECLS #endif
{ "alphanum_fraction": 0.7514078842, "avg_line_length": 26.4468085106, "ext": "h", "hexsha": "c09956eeea929bd3390dd4788898f57b13b7f30a", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-02-10T07:35:07.000Z", "max_forks_repo_forks_event_min_datetime": "2021-02-10T07:35:07.000Z", "max_forks_repo_head_hexsha": "3a5f9dec72c6ce602ac8b11ceed0ee6c0460a926", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "benediktdiemer/CCL", "max_forks_repo_path": "include/ccl_halofit.h", "max_issues_count": 1, "max_issues_repo_head_hexsha": "3a5f9dec72c6ce602ac8b11ceed0ee6c0460a926", "max_issues_repo_issues_event_max_datetime": "2020-07-28T12:22:35.000Z", "max_issues_repo_issues_event_min_datetime": "2020-07-28T12:22:35.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "benediktdiemer/CCL", "max_issues_repo_path": "include/ccl_halofit.h", "max_line_length": 100, "max_stars_count": null, "max_stars_repo_head_hexsha": "3a5f9dec72c6ce602ac8b11ceed0ee6c0460a926", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "benediktdiemer/CCL", "max_stars_repo_path": "include/ccl_halofit.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 373, "size": 1243 }
#pragma once #include "HttpHeaders.h" #include "Library.h" #include <gsl/span> #include <cstddef> #include <map> #include <string> namespace CesiumAsync { /** * @brief A completed response for a 3D Tiles asset. */ class CESIUMASYNC_API IAssetResponse { public: /** * @brief Default destructor */ virtual ~IAssetResponse() = default; /** * @brief Returns the HTTP response code. */ virtual uint16_t statusCode() const = 0; /** * @brief Returns the HTTP content type */ virtual std::string contentType() const = 0; /** * @brief Returns the HTTP headers of the response */ virtual const HttpHeaders& headers() const = 0; /** * @brief Returns the data of this response */ virtual gsl::span<const std::byte> data() const = 0; }; } // namespace CesiumAsync
{ "alphanum_fraction": 0.655596556, "avg_line_length": 17.6739130435, "ext": "h", "hexsha": "10519057806beb521d144c174ffdb21144ae2c85", "lang": "C", "max_forks_count": 66, "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:38:41.000Z", "max_forks_repo_forks_event_min_datetime": "2021-03-30T15:14:32.000Z", "max_forks_repo_head_hexsha": "9493b9baebea601bd00d8139f2000e41ba4505ef", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "yieryi/cesium-native", "max_forks_repo_path": "CesiumAsync/include/CesiumAsync/IAssetResponse.h", "max_issues_count": 256, "max_issues_repo_head_hexsha": "9493b9baebea601bd00d8139f2000e41ba4505ef", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:44:21.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-30T18:12:28.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "yieryi/cesium-native", "max_issues_repo_path": "CesiumAsync/include/CesiumAsync/IAssetResponse.h", "max_line_length": 54, "max_stars_count": 154, "max_stars_repo_head_hexsha": "9493b9baebea601bd00d8139f2000e41ba4505ef", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "yieryi/cesium-native", "max_stars_repo_path": "CesiumAsync/include/CesiumAsync/IAssetResponse.h", "max_stars_repo_stars_event_max_datetime": "2022-03-30T00:01:43.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-30T14:08:39.000Z", "num_tokens": 208, "size": 813 }
//! \file openmc_driver.h
//! Driver to initialize and run OpenMC in stages
#ifndef ENRICO_OPENMC_DRIVER_H
#define ENRICO_OPENMC_DRIVER_H

#include "enrico/cell_instance.h"
#include "enrico/geom.h"
#include "enrico/neutronics_driver.h"
#include "openmc/cell.h"
#include "openmc/tallies/filter_cell_instance.h"
#include "openmc/tallies/tally.h"
#include <gsl/gsl>
#include <mpi.h>
#include <vector>

namespace enrico {

//! Driver to initialize and run OpenMC in stages
class OpenmcDriver : public NeutronicsDriver {
public:
  //! One-time initialization of OpenMC and member variables
  //! \param comm An existing MPI communicator used to initialize OpenMC
  explicit OpenmcDriver(MPI_Comm comm);

  //! One-time finalization of OpenMC
  ~OpenmcDriver();

  //////////////////////////////////////////////////////////////////////////////
  // NeutronicsDriver interface

  //! Find cells corresponding to a vector of positions
  //! \param positions (x,y,z) coordinates to search for
  //! \return Handles to cells
  std::vector<CellHandle> find(const std::vector<Position>& position) override;

  //! Set the density of the material in a cell
  //! \param cell Handle to a cell
  //! \param rho Density in [g/cm^3]
  void set_density(CellHandle cell, double rho) const override;

  //! Set the temperature of a cell
  //! \param cell Handle to a cell
  //! \param T Temperature in [K]
  void set_temperature(CellHandle cell, double T) const override;

  //! Get the density of a cell
  //! \param cell Handle to a cell
  //! \return Cell density in [g/cm^3]
  double get_density(CellHandle cell) const override;

  //! Get the temperature of a cell
  //! \param cell Handle to a cell
  //! \return Temperature in [K]
  double get_temperature(CellHandle cell) const override;

  //! Get the volume of a cell
  //! \param cell Handle to a cell
  //! \return Volume in [cm^3]
  double get_volume(CellHandle cell) const override;

  //! Determine whether a cell contains fissionable nuclides
  //! \param cell Handle to a cell
  //! \return Whether the cell contains fissionable nuclides
  bool is_fissionable(CellHandle cell) const override;

  //! Number of cells participating in coupling
  //! \return Number of cells
  std::size_t n_cells() const override { return cells_.size(); }

  //! Create energy production tallies
  void create_tallies() override;

  //! Heat source in each coupled cell, normalized by the given total power
  //! (NOTE(review): exact normalization/units defined by the NeutronicsDriver
  //! base class — confirm there)
  //! \param power Total power to normalize against
  //! \return One heat-source value per coupled cell
  xt::xtensor<double, 1> heat_source(double power) const final;

  //! Human-readable label for a cell (for diagnostics/output)
  //! \param cell Handle to a cell
  //! \return Label string
  std::string cell_label(CellHandle cell) const;

  //////////////////////////////////////////////////////////////////////////////
  // Driver interface

  //! Initialization required in each Picard iteration
  void init_step() final;

  //! Runs OpenMC for one Picard iteration
  void solve_step() final;

  //! Writes OpenMC output for given timestep and iteration
  //! \param timestep timestep index
  //! \param iteration iteration index
  void write_step(int timestep, int iteration) final;

  //! Finalization required in each Picard iteration
  void finalize_step() final;

private:
  // Data members
  openmc::Tally* tally_;               //!< Fission energy deposition tally
  openmc::CellInstanceFilter* filter_; //!< Cell instance filter
  std::vector<CellInstance> cells_;    //!< Array of cell instances
  int n_fissionable_cells_;            //!< Number of fissionable cells in model
};

} // namespace enrico

#endif // ENRICO_OPENMC_DRIVER_H
{ "alphanum_fraction": 0.6880896226, "avg_line_length": 31.7009345794, "ext": "h", "hexsha": "d6073e69df182dbbaa87cae89c857999eeee4fdd", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "edd04f1e02caf1c3fae2992e55d9a47e4429655c", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "lebuller/enrico", "max_forks_repo_path": "include/enrico/openmc_driver.h", "max_issues_count": 1, "max_issues_repo_head_hexsha": "72b95ca947804f672e5f1726e169ef6f4889e78e", "max_issues_repo_issues_event_max_datetime": "2020-06-15T15:30:51.000Z", "max_issues_repo_issues_event_min_datetime": "2020-06-14T18:14:35.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "pshriwise/enrico", "max_issues_repo_path": "include/enrico/openmc_driver.h", "max_line_length": 80, "max_stars_count": 1, "max_stars_repo_head_hexsha": "72b95ca947804f672e5f1726e169ef6f4889e78e", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "pshriwise/enrico", "max_stars_repo_path": "include/enrico/openmc_driver.h", "max_stars_repo_stars_event_max_datetime": "2021-04-02T16:21:59.000Z", "max_stars_repo_stars_event_min_datetime": "2021-04-02T16:21:59.000Z", "num_tokens": 804, "size": 3392 }
/** * * @file testing_zgels.c * * PLASMA testing routines * PLASMA is a software package provided by Univ. of Tennessee, * Univ. of California Berkeley and Univ. of Colorado Denver * * @version 2.6.0 * @author Bilel Hadri * @author Hatem Ltaief * @date 2010-11-15 * @precisions normal z -> c d s * **/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <plasma.h> #include <cblas.h> #include <lapacke.h> #include <core_blas.h> #include "testing_zmain.h" #undef REAL #define COMPLEX enum blas_order_type { blas_rowmajor = 101, blas_colmajor = 102 }; enum blas_uplo_type { blas_upper = 121, blas_lower = 122 }; enum blas_cmach_type { blas_base = 151, blas_t = 152, blas_rnd = 153, blas_ieee = 154, blas_emin = 155, blas_emax = 156, blas_eps = 157, blas_prec = 158, blas_underflow = 159, blas_overflow = 160, blas_sfmin = 161}; enum blas_norm_type { blas_one_norm = 171, blas_real_one_norm = 172, blas_two_norm = 173, blas_frobenius_norm = 174, blas_inf_norm = 175, blas_real_inf_norm = 176, blas_max_norm = 177, blas_real_max_norm = 178 }; static void BLAS_error(char *rname, int err, int val, int x) { fprintf( stderr, "%s %d %d %d\n", rname, err, val, x ); abort(); } static void BLAS_zsy_norm(enum blas_order_type order, enum blas_norm_type norm, enum blas_uplo_type uplo, int n, const PLASMA_Complex64_t *a, int lda, double *res) { int i, j; double anorm, v; char rname[] = "BLAS_zsy_norm"; if (order != blas_colmajor) BLAS_error( rname, -1, order, 0 ); if (norm == blas_inf_norm) { anorm = 0.0; if (blas_upper == uplo) { for (i = 0; i < n; ++i) { v = 0.0; for (j = 0; j < i; ++j) { v += cabs( a[j + i * lda] ); } for (j = i; j < n; ++j) { v += cabs( a[i + j * lda] ); } if (v > anorm) anorm = v; } } else { BLAS_error( rname, -3, norm, 0 ); return; } } else { BLAS_error( rname, -2, norm, 0 ); return; } if (res) *res = anorm; } static void BLAS_zge_norm(enum blas_order_type order, enum blas_norm_type norm, int m, int n, const PLASMA_Complex64_t *a, int lda, 
double *res) { int i, j; float anorm, v; char rname[] = "BLAS_zge_norm"; if (order != blas_colmajor) BLAS_error( rname, -1, order, 0 ); if (norm == blas_frobenius_norm) { anorm = 0.0f; for (j = n; j; --j) { for (i = m; i; --i) { v = a[0]; anorm += v * v; a++; } a += lda - m; } anorm = sqrt( anorm ); } else if (norm == blas_inf_norm) { anorm = 0.0f; for (i = 0; i < m; ++i) { v = 0.0f; for (j = 0; j < n; ++j) { v += cabs( a[i + j * lda] ); } if (v > anorm) anorm = v; } } else { BLAS_error( rname, -2, norm, 0 ); return; } if (res) *res = anorm; } static double BLAS_dpow_di(double x, int n) { double rv = 1.0; if (n < 0) { n = -n; x = 1.0 / x; } for (; n; n >>= 1, x *= x) { if (n & 1) rv *= x; } return rv; } static double BLAS_dfpinfo(enum blas_cmach_type cmach) { double eps = 1.0, r = 1.0, o = 1.0, b = 2.0; int t = 53, l = 1024, m = -1021; char rname[] = "BLAS_dfpinfo"; if ((sizeof eps) == sizeof(float)) { t = 24; l = 128; m = -125; } else { t = 53; l = 1024; m = -1021; } /* for (i = 0; i < t; ++i) eps *= half; */ eps = BLAS_dpow_di( b, -t ); /* for (i = 0; i >= m; --i) r *= half; */ r = BLAS_dpow_di( b, m-1 ); o -= eps; /* for (i = 0; i < l; ++i) o *= b; */ o = (o * BLAS_dpow_di( b, l-1 )) * b; switch (cmach) { case blas_eps: return eps; case blas_sfmin: return r; default: BLAS_error( rname, -1, cmach, 0 ); break; } return 0.0; } static int check_orthogonality(int, int, int, PLASMA_Complex64_t*, double); static int check_factorization(int, int, PLASMA_Complex64_t*, PLASMA_Complex64_t*, int, PLASMA_Complex64_t*, double); static int check_solution(int, int, int, PLASMA_Complex64_t*, int, PLASMA_Complex64_t*, PLASMA_Complex64_t*, int, double); int testing_zgels(int argc, char **argv) { int mode = 0; if ( argc < 1 ){ goto usage; } else { mode = atoi(argv[0]); } /* Check for number of arguments*/ if ( ((mode == 0) && (argc != 6)) || ((mode != 0) && (argc != 7)) ){ usage: USAGE("GELS", "MODE M N LDA NRHS LDB [RH]", " - MODE : 0: flat, 1: tree (RH needed)\n" " - M : number 
of rows of the matrix A\n" " - N : number of columns of the matrix A\n" " - LDA : leading dimension of the matrix A\n" " - NRHS : number of RHS\n" " - LDB : leading dimension of the matrix B\n" " - RH : Size of each subdomains\n"); return -1; } int M = atoi(argv[1]); int N = atoi(argv[2]); int LDA = atoi(argv[3]); int NRHS = atoi(argv[4]); int LDB = atoi(argv[5]); int rh; int K = min(M, N); double eps; int info_ortho, info_solution, info_factorization; int i,j; int LDAxN = LDA*N; int LDBxNRHS = LDB*NRHS; PLASMA_Complex64_t *A1 = (PLASMA_Complex64_t *)malloc(LDA*N*sizeof(PLASMA_Complex64_t)); PLASMA_Complex64_t *A2 = (PLASMA_Complex64_t *)malloc(LDA*N*sizeof(PLASMA_Complex64_t)); PLASMA_Complex64_t *B1 = (PLASMA_Complex64_t *)malloc(LDB*NRHS*sizeof(PLASMA_Complex64_t)); PLASMA_Complex64_t *B2 = (PLASMA_Complex64_t *)malloc(LDB*NRHS*sizeof(PLASMA_Complex64_t)); PLASMA_Complex64_t *Q = (PLASMA_Complex64_t *)malloc(LDA*N*sizeof(PLASMA_Complex64_t)); PLASMA_desc *T; /* Check if unable to allocate memory */ if ((!A1)||(!A2)||(!B1)||(!B2)||(!Q)){ printf("Out of Memory \n "); return -2; } if ( mode ) { rh = atoi(argv[6]); PLASMA_Set(PLASMA_HOUSEHOLDER_MODE, PLASMA_TREE_HOUSEHOLDER); PLASMA_Set(PLASMA_HOUSEHOLDER_SIZE, rh); } PLASMA_Alloc_Workspace_zgels(M, N, &T); eps = BLAS_dfpinfo( blas_eps ); /*---------------------------------------------------------- * TESTING ZGELS */ /* Initialize A1 and A2 */ LAPACKE_zlarnv_work(IONE, ISEED, LDAxN, A1); for (i = 0; i < M; i++) for (j = 0; j < N; j++) A2[LDA*j+i] = A1[LDA*j+i] ; /* Initialize B1 and B2 */ LAPACKE_zlarnv_work(IONE, ISEED, LDBxNRHS, B1); for (i = 0; i < M; i++) for (j = 0; j < NRHS; j++) B2[LDB*j+i] = B1[LDB*j+i] ; memset((void*)Q, 0, LDA*N*sizeof(PLASMA_Complex64_t)); for (i = 0; i < K; i++) Q[LDA*i+i] = 1.0; /* PLASMA ZGELS */ PLASMA_zgels(PlasmaNoTrans, M, N, NRHS, A2, LDA, T, B2, LDB); /* PLASMA ZGELS */ if (M >= N) /* Building the economy-size Q */ PLASMA_zungqr(M, N, K, A2, LDA, T, Q, LDA); else /* Building the 
economy-size Q */ PLASMA_zunglq(M, N, K, A2, LDA, T, Q, LDA); printf("\n"); printf("------ TESTS FOR PLASMA ZGELS ROUTINE ------- \n"); printf(" Size of the Matrix %d by %d\n", M, N); printf("\n"); printf(" The matrix A is randomly generated for each test.\n"); printf("============\n"); printf(" The relative machine precision (eps) is to be %e \n",eps); printf(" Computational tests pass if scaled residuals are less than 60.\n"); /* Check the orthogonality, factorization and the solution */ info_ortho = check_orthogonality(M, N, LDA, Q, eps); info_factorization = check_factorization(M, N, A1, A2, LDA, Q, eps); info_solution = check_solution(M, N, NRHS, A1, LDA, B1, B2, LDB, eps); if ((info_solution == 0)&(info_factorization == 0)&(info_ortho == 0)) { printf("***************************************************\n"); printf(" ---- TESTING ZGELS ...................... PASSED !\n"); printf("***************************************************\n"); } else { printf("************************************************\n"); printf(" - TESTING ZGELS ... 
FAILED !\n"); printf("************************************************\n"); } /*------------------------------------------------------------- * TESTING ZGEQRF + ZGEQRS or ZGELQF + ZGELQS */ /* Initialize A1 and A2 */ LAPACKE_zlarnv_work(IONE, ISEED, LDAxN, A1); for (i = 0; i < M; i++) for (j = 0; j < N; j++) A2[LDA*j+i] = A1[LDA*j+i]; /* Initialize B1 and B2 */ LAPACKE_zlarnv_work(IONE, ISEED, LDBxNRHS, B1); for (i = 0; i < M; i++) for (j = 0; j < NRHS; j++) B2[LDB*j+i] = B1[LDB*j+i]; memset((void*)Q, 0, LDA*N*sizeof(PLASMA_Complex64_t)); for (i = 0; i < K; i++) Q[LDA*i+i] = 1.0; if (M >= N) { printf("\n"); printf("------ TESTS FOR PLASMA ZGEQRF + ZGEQRS ROUTINE ------- \n"); printf(" Size of the Matrix %d by %d\n", M, N); printf("\n"); printf(" The matrix A is randomly generated for each test.\n"); printf("============\n"); printf(" The relative machine precision (eps) is to be %e \n", eps); printf(" Computational tests pass if scaled residuals are less than 60.\n"); /* Plasma routines */ PLASMA_zgeqrf(M, N, A2, LDA, T); PLASMA_zungqr(M, N, K, A2, LDA, T, Q, LDA); PLASMA_zgeqrs(M, N, NRHS, A2, LDA, T, B2, LDB); /* Check the orthogonality, factorization and the solution */ info_ortho = check_orthogonality(M, N, LDA, Q, eps); info_factorization = check_factorization(M, N, A1, A2, LDA, Q, eps); info_solution = check_solution(M, N, NRHS, A1, LDA, B1, B2, LDB, eps); if ((info_solution == 0)&(info_factorization == 0)&(info_ortho == 0)) { printf("***************************************************\n"); printf(" ---- TESTING ZGEQRF + ZGEQRS ............ PASSED !\n"); printf("***************************************************\n"); } else{ printf("***************************************************\n"); printf(" - TESTING ZGEQRF + ZGEQRS ... 
FAILED !\n"); printf("***************************************************\n"); } } else { printf("\n"); printf("------ TESTS FOR PLASMA ZGELQF + ZGELQS ROUTINE ------- \n"); printf(" Size of the Matrix %d by %d\n", M, N); printf("\n"); printf(" The matrix A is randomly generated for each test.\n"); printf("============\n"); printf(" The relative machine precision (eps) is to be %e \n", eps); printf(" Computational tests pass if scaled residuals are less than 60.\n"); /* Plasma routines */ PLASMA_zgelqf(M, N, A2, LDA, T); PLASMA_zunglq(M, N, K, A2, LDA, T, Q, LDA); PLASMA_zgelqs(M, N, NRHS, A2, LDA, T, B2, LDB); /* Check the orthogonality, factorization and the solution */ info_ortho = check_orthogonality(M, N, LDA, Q, eps); info_factorization = check_factorization(M, N, A1, A2, LDA, Q, eps); info_solution = check_solution(M, N, NRHS, A1, LDA, B1, B2, LDB, eps); if ( (info_solution == 0) & (info_factorization == 0) & (info_ortho == 0) ) { printf("***************************************************\n"); printf(" ---- TESTING ZGELQF + ZGELQS ............ PASSED !\n"); printf("***************************************************\n"); } else { printf("***************************************************\n"); printf(" - TESTING ZGELQF + ZGELQS ... 
FAILED !\n"); printf("***************************************************\n"); } } /*---------------------------------------------------------- * TESTING ZGEQRF + ZORMQR + ZTRSM */ /* Initialize A1 and A2 */ LAPACKE_zlarnv_work(IONE, ISEED, LDAxN, A1); for (i = 0; i < M; i++) for (j = 0; j < N; j++) A2[LDA*j+i] = A1[LDA*j+i]; /* Initialize B1 and B2 */ memset(B2, 0, LDB*NRHS*sizeof(PLASMA_Complex64_t)); LAPACKE_zlarnv_work(IONE, ISEED, LDBxNRHS, B1); for (i = 0; i < M; i++) for (j = 0; j < NRHS; j++) B2[LDB*j+i] = B1[LDB*j+i]; /* PLASMA ZGEQRF+ ZUNMQR + ZTRSM */ memset((void*)Q, 0, LDA*N*sizeof(PLASMA_Complex64_t)); for (i = 0; i < K; i++) Q[LDA*i+i] = 1.0; if (M >= N) { printf("\n"); printf("------ TESTS FOR PLASMA ZGEQRF + ZUNMQR + ZTRSM ROUTINE ------- \n"); printf(" Size of the Matrix %d by %d\n", M, N); printf("\n"); printf(" The matrix A is randomly generated for each test.\n"); printf("============\n"); printf(" The relative machine precision (eps) is to be %e \n",eps); printf(" Computational tests pass if scaled residuals are less than 60.\n"); PLASMA_zgeqrf(M, N, A2, LDA, T); PLASMA_zungqr(M, N, K, A2, LDA, T, Q, LDA); PLASMA_zunmqr(PlasmaLeft, PlasmaConjTrans, M, NRHS, N, A2, LDA, T, B2, LDB); PLASMA_ztrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit, N, NRHS, 1.0, A2, LDA, B2, LDB); } else { printf("\n"); printf("------ TESTS FOR PLASMA ZGELQF + ZUNMLQ + ZTRSM ROUTINE ------- \n"); printf(" Size of the Matrix %d by %d\n", M, N); printf("\n"); printf(" The matrix A is randomly generated for each test.\n"); printf("============\n"); printf(" The relative machine precision (eps) is to be %e \n",eps); printf(" Computational tests pass if scaled residuals are less than 60.\n"); PLASMA_zgelqf(M, N, A2, LDA, T); PLASMA_ztrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaNonUnit, M, NRHS, 1.0, A2, LDA, B2, LDB); PLASMA_zunglq(M, N, K, A2, LDA, T, Q, LDA); PLASMA_zunmlq(PlasmaLeft, PlasmaConjTrans, N, NRHS, M, A2, LDA, T, B2, LDB); } /* Check the 
orthogonality, factorization and the solution */ info_ortho = check_orthogonality(M, N, LDA, Q, eps); info_factorization = check_factorization(M, N, A1, A2, LDA, Q, eps); info_solution = check_solution(M, N, NRHS, A1, LDA, B1, B2, LDB, eps); if ( (info_solution == 0) & (info_factorization == 0) & (info_ortho == 0) ) { if (M >= N) { printf("***************************************************\n"); printf(" ---- TESTING ZGEQRF + ZUNMQR + ZTRSM .... PASSED !\n"); printf("***************************************************\n"); } else { printf("***************************************************\n"); printf(" ---- TESTING ZGELQF + ZTRSM + ZUNMLQ .... PASSED !\n"); printf("***************************************************\n"); } } else { if (M >= N) { printf("***************************************************\n"); printf(" - TESTING ZGEQRF + ZUNMQR + ZTRSM ... FAILED !\n"); printf("***************************************************\n"); } else { printf("***************************************************\n"); printf(" - TESTING ZGELQF + ZTRSM + ZUNMLQ ... 
FAILED !\n"); printf("***************************************************\n"); } } free(A1); free(A2); free(B1); free(B2); free(Q); PLASMA_Dealloc_Handle_Tile( &T ); return 0; } /*------------------------------------------------------------------- * Check the orthogonality of Q */ static int check_orthogonality(int M, int N, int LDQ, PLASMA_Complex64_t *Q, double eps) { double alpha, beta; double normQ; int info_ortho; int i; int minMN = min(M, N); double *work = (double *)malloc(minMN*sizeof(double)); alpha = 1.0; beta = -1.0; /* Build the idendity matrix USE DLASET?*/ PLASMA_Complex64_t *Id = (PLASMA_Complex64_t *) malloc(minMN*minMN*sizeof(PLASMA_Complex64_t)); memset((void*)Id, 0, minMN*minMN*sizeof(PLASMA_Complex64_t)); for (i = 0; i < minMN; i++) Id[i*minMN+i] = (PLASMA_Complex64_t)1.0; /* Perform Id - Q'Q */ if (M >= N) cblas_zherk(CblasColMajor, CblasUpper, CblasConjTrans, N, M, alpha, Q, LDQ, beta, Id, N); else cblas_zherk(CblasColMajor, CblasUpper, CblasNoTrans, M, N, alpha, Q, LDQ, beta, Id, M); BLAS_zsy_norm( blas_colmajor, blas_inf_norm, blas_upper, minMN, Id, minMN, &normQ ); printf("============\n"); printf("Checking the orthogonality of Q \n"); printf("||Id-Q'*Q||_oo / (N*eps) = %e \n", normQ/(minMN*eps)); if ( isnan(normQ / (minMN * eps)) || isinf(normQ / (minMN * eps)) || (normQ / (minMN * eps) > 60.0) ) { printf("-- Orthogonality is suspicious ! \n"); info_ortho=1; } else { printf("-- Orthogonality is CORRECT ! 
\n"); info_ortho=0; } free(work); free(Id); return info_ortho; } /*------------------------------------------------------------ * Check the factorization QR */ static int check_factorization(int M, int N, PLASMA_Complex64_t *A1, PLASMA_Complex64_t *A2, int LDA, PLASMA_Complex64_t *Q, double eps ) { double Anorm, Rnorm; PLASMA_Complex64_t alpha, beta; int info_factorization; int i,j; PLASMA_Complex64_t *Ql = (PLASMA_Complex64_t *)malloc(M*N*sizeof(PLASMA_Complex64_t)); PLASMA_Complex64_t *Residual = (PLASMA_Complex64_t *)malloc(M*N*sizeof(PLASMA_Complex64_t)); double *work = (double *)malloc(max(M,N)*sizeof(double)); alpha=1.0; beta=0.0; if (M >= N) { /* Extract the R */ PLASMA_Complex64_t *R = (PLASMA_Complex64_t *)malloc(N*N*sizeof(PLASMA_Complex64_t)); memset((void*)R, 0, N*N*sizeof(PLASMA_Complex64_t)); LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,'u', M, N, A2, LDA, R, N); /* Perform Ql=Q*R */ memset((void*)Ql, 0, M*N*sizeof(PLASMA_Complex64_t)); cblas_zgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, N, N, CBLAS_SADDR(alpha), Q, LDA, R, N, CBLAS_SADDR(beta), Ql, M); free(R); } else { /* Extract the L */ PLASMA_Complex64_t *L = (PLASMA_Complex64_t *)malloc(M*M*sizeof(PLASMA_Complex64_t)); memset((void*)L, 0, M*M*sizeof(PLASMA_Complex64_t)); LAPACKE_zlacpy_work(LAPACK_COL_MAJOR,'l', M, N, A2, LDA, L, M); /* Perform Ql=LQ */ memset((void*)Ql, 0, M*N*sizeof(PLASMA_Complex64_t)); cblas_zgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, N, M, CBLAS_SADDR(alpha), L, M, Q, LDA, CBLAS_SADDR(beta), Ql, M); free(L); } /* Compute the Residual */ for (i = 0; i < M; i++) for (j = 0 ; j < N; j++) Residual[j*M+i] = A1[j*LDA+i]-Ql[j*M+i]; BLAS_zge_norm( blas_colmajor, blas_inf_norm, M, N, Residual, M, &Rnorm ); BLAS_zge_norm( blas_colmajor, blas_inf_norm, M, N, A2, LDA, &Anorm ); if (M >= N) { printf("============\n"); printf("Checking the QR Factorization \n"); printf("-- ||A-QR||_oo/(||A||_oo.N.eps) = %e \n",Rnorm/(Anorm*N*eps)); } else { printf("============\n"); 
printf("Checking the LQ Factorization \n"); printf("-- ||A-LQ||_oo/(||A||_oo.N.eps) = %e \n",Rnorm/(Anorm*N*eps)); } if (isnan(Rnorm / (Anorm * N *eps)) || isinf(Rnorm / (Anorm * N *eps)) || (Rnorm / (Anorm * N * eps) > 60.0) ) { printf("-- Factorization is suspicious ! \n"); info_factorization = 1; } else { printf("-- Factorization is CORRECT ! \n"); info_factorization = 0; } free(work); free(Ql); free(Residual); return info_factorization; } /*-------------------------------------------------------------- * Check the solution */ static int check_solution(int M, int N, int NRHS, PLASMA_Complex64_t *A, int LDA, PLASMA_Complex64_t *B, PLASMA_Complex64_t *X, int LDB, double eps) { int info_solution; double Rnorm, Anorm, Xnorm, Bnorm; PLASMA_Complex64_t zone, mzone, zzero; double result; double *work = (double *)malloc(max(M, N)* sizeof(double)); zone = 1.0; mzone = -1.0; zzero = 0.0; BLAS_zge_norm( blas_colmajor, blas_inf_norm, M, N, A, LDA, &Anorm ); BLAS_zge_norm( blas_colmajor, blas_inf_norm, M, NRHS, B, LDB, &Bnorm ); BLAS_zge_norm( blas_colmajor, blas_inf_norm, N, NRHS, X, LDB, &Xnorm ); /* Compute Ax - b */ cblas_zgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, M, NRHS, N, CBLAS_SADDR(zone), A, LDA, X, LDB, CBLAS_SADDR(mzone), B, LDB); /* Compute A' * (Ax - b) */ cblas_zgemm(CblasColMajor, CblasConjTrans, CblasNoTrans, N, NRHS, M, CBLAS_SADDR(zone), A, LDA, B, LDB, CBLAS_SADDR(zzero), X, LDB); BLAS_zge_norm( blas_colmajor, blas_inf_norm, N, NRHS, X, LDB, &Rnorm ); if (getenv("PLASMA_TESTING_VERBOSE")) printf( "||A||_oo=%f\n||X||_oo=%f\n||B||_oo=%f\n||A X - B||_oo=%e\n", Anorm, Xnorm, Bnorm, Rnorm ); result = Rnorm / ( (Anorm*Xnorm+Bnorm)*N*eps ) ; printf("============\n"); printf("Checking the Residual of the solution \n"); printf("-- ||Ax-B||_oo/((||A||_oo||x||_oo+||B||_oo).N.eps) = %e \n", result); if ( isnan(Xnorm) || isinf(Xnorm) || isnan(result) || isinf(result) || (result > 60.0) ) { printf("-- The solution is suspicious ! 
\n"); info_solution = 1; } else{ printf("-- The solution is CORRECT ! \n"); info_solution = 0; } free(work); return info_solution; }
{ "alphanum_fraction": 0.5073445814, "avg_line_length": 33.4687975647, "ext": "c", "hexsha": "acede7c54ce6af472428b92ac78acc2f738f56cd", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "zhuangsc/Plasma-ompss1", "max_forks_repo_path": "testing/testing_zgels.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "zhuangsc/Plasma-ompss1", "max_issues_repo_path": "testing/testing_zgels.c", "max_line_length": 148, "max_stars_count": null, "max_stars_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "zhuangsc/Plasma-ompss1", "max_stars_repo_path": "testing/testing_zgels.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 7098, "size": 21989 }
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include <gsl/gsl> namespace _winml { void LoadSpanFromDisjointBuffers( size_t num_buffers, std::function<gsl::span<byte>(size_t)> get_buffer, gsl::span<byte>& buffer_span); void StoreSpanIntoDisjointBuffers( size_t num_buffers, std::function<gsl::span<byte>(size_t)> get_buffer, gsl::span<byte>& buffer_span); } // namespace _winml
{ "alphanum_fraction": 0.7282377919, "avg_line_length": 23.55, "ext": "h", "hexsha": "9e6c354e4332657d9339484f437440765320649c", "lang": "C", "max_forks_count": 1566, "max_forks_repo_forks_event_max_datetime": "2022-03-31T17:06:50.000Z", "max_forks_repo_forks_event_min_datetime": "2019-05-07T01:30:07.000Z", "max_forks_repo_head_hexsha": "d5175795d2b7f2db18b0390f394a49238f814668", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "dennyac/onnxruntime", "max_forks_repo_path": "winml/lib/Api.Image/inc/DisjointBufferHelpers.h", "max_issues_count": 5730, "max_issues_repo_head_hexsha": "d5175795d2b7f2db18b0390f394a49238f814668", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:55:56.000Z", "max_issues_repo_issues_event_min_datetime": "2019-05-06T23:04:55.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "dennyac/onnxruntime", "max_issues_repo_path": "winml/lib/Api.Image/inc/DisjointBufferHelpers.h", "max_line_length": 60, "max_stars_count": 6036, "max_stars_repo_head_hexsha": "d5175795d2b7f2db18b0390f394a49238f814668", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "dennyac/onnxruntime", "max_stars_repo_path": "winml/lib/Api.Image/inc/DisjointBufferHelpers.h", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:59:54.000Z", "max_stars_repo_stars_event_min_datetime": "2019-05-07T06:03:57.000Z", "num_tokens": 118, "size": 471 }
/* Standalone c program to use viscdisk to run the Pringle (1981) ring problem, with a complex equation of state */ #include <math.h> #include <stdio.h> #include "init.h" #include "vader.h" #include "vader_common.h" #include <gsl/gsl_sf_bessel.h> /* Physical constants */ #define KB 1.3806488e-16 #define MH 1.6737236e-24 #define MU 0.61 #define A 7.5657314e-15 /* Problem parameters */ /* Set this to a non-empty value to restart from a checkpoint */ #define CHECKPOINT "ringrad_00001.vader" /* Grid parmeters */ #define NR 4096 #define RMIN 1.5e10 #define RMAX 1.5e12 #define LINEAR 1 /* EOS parameters */ #define EOS_FUNC 1 #define GAMMA 1.66666666667 #define DELTA 0.0 /* Viscosity parameters */ #define ALPHA_FUNC 1 #define ALPHA 0.0 /* Inner boundary condition */ #define IBC_PRES_TYPE FIXED_TORQUE #define IBC_ENTH_TYPE FIXED_ENTHALPY_VALUE #define IBC_FUNC 1 #define IBC_PRES_VAL 0.0 #define IBC_ENTH_VAL 0.0 /* Outer boundary condition */ #define OBC_PRES_TYPE FIXED_TORQUE #define OBC_ENTH_TYPE FIXED_ENTHALPY_VALUE #define OBC_FUNC 1 #define OBC_PRES_VAL 0.0 #define OBC_ENTH_VAL 0.0 /* Source functions */ #define MASS_SRC_FUNC 0 #define MASS_SRC_VAL 0.0 #define INT_EN_SRC_FUNC 0 #define INT_EN_SRC_VAL 0.0 /* Control parameters */ #define ERR_TOL 1.0e-6 #define DT_TOL 0.1 #define MAX_ITER 40 #define INTERP_ORDER 1 #define MAX_DT_INCREASE 1.5 #define DT_MIN 1.0e-20 #define MAX_STEP 100000 #define USE_BE 0 #define PRE_TIMESTEP 0 #define POST_TIMESTEP 0 #define VERBOSITY 1 /* Output parameters */ #define NSAVE 65 #define OUTFILE "ringrad.out" #define NUSEROUT 0 #define CHECKNAME "ringrad" #define USERREADCHK false #define USERWRITECHK false /* Time parameters */ #define DT_START -1 #define END_TIME 0.128 /* Problem-specific parameters*/ #define MSTAR (1.99e33*1.4) #define RING_MASS 1.99e27 #define INIT_TEMP 1.0e4 #define COL_RATIO 1.0e10 #define RING_LOC 7.5e11 #define NU 5.93e8 #define FZ0 7.5e9 #define GAMMA_GAS (5.0/3.0) /* Main */ int main () { grid *grd; double col0, 
t0, colAnalyt, x, tau; double *col, *pres, *eInt; double *colOut, *presOut, *mBndOut, *eBndOut, *eIntOut, *tOut; double param[5]; double tSave[NSAVE]; bool writeCheckpoint[NSAVE]; char *checkpoint = CHECKPOINT; int i, j, idx; FILE *fp; unsigned long nStep, nIter, nFail, nOut; /* Allocate grid and workspace */ grd = gridInitKeplerian(NR, RMIN, RMAX, MSTAR, LINEAR); /* Allocate data */ col = (double *) calloc(NR, sizeof(double)); pres = (double *) calloc(NR, sizeof(double)); eInt = (double *) calloc(NR, sizeof(double)); /* Initialize column density and pressure arrays, and associated variables */ col0 = RING_MASS / (M_PI*SQR(RING_LOC)); t0 = SQR(RING_LOC)/(12.0*NU); for (i=0, idx=-1; i<NR; i++) if ((grd->r_h[i] < RING_LOC) && (RING_LOC <= grd->r_h[i+1])) idx = i; for (i=0; i<NR; i++) col[i] = RING_MASS / grd->area[idx] / COL_RATIO; col[idx] = RING_MASS / grd->area[idx]; for (i=0; i<NR; i++) { pres[i] = col[i]*INIT_TEMP*KB/(MU*MH) + (1./3.)*FZ0*A*SQR(SQR(INIT_TEMP)); eInt[i] = col[i]*INIT_TEMP*KB/(MU*MH*(GAMMA_GAS-1)) + FZ0*A*SQR(SQR(INIT_TEMP)); } param[0] = NU; param[1] = RING_LOC; param[2] = RING_MASS; param[3] = RING_MASS / grd->area[idx] / COL_RATIO; param[4] = INIT_TEMP*KB/(MU*MH); /* Set output times */ for (i=0; i<NSAVE; i++) { tSave[i] = t0 * i*END_TIME/((float) NSAVE-1); writeCheckpoint[i] = true; } /* Run the simulation */ vader( /* Restart file name; if this is empty or NULL, calculation is run from the start */ checkpoint, /* Start and end time */ 0.0, END_TIME*t0, /* Equation of state parameters */ EOS_FUNC, GAMMA, DELTA, /* Viscosity parameters */ ALPHA_FUNC, ALPHA, /* Inner boundary condition */ IBC_PRES_TYPE, IBC_ENTH_TYPE, IBC_FUNC, IBC_PRES_VAL, IBC_ENTH_VAL, /* Outer boundary condition */ OBC_PRES_TYPE, OBC_ENTH_TYPE, OBC_FUNC, OBC_PRES_VAL, OBC_ENTH_VAL, /* Source functions */ MASS_SRC_FUNC, MASS_SRC_VAL, INT_EN_SRC_FUNC, INT_EN_SRC_VAL, /* Control and method parameters */ DT_START, DT_MIN, DT_TOL, ERR_TOL, MAX_DT_INCREASE, MAX_ITER, INTERP_ORDER, 
MAX_STEP, USE_BE, PRE_TIMESTEP, POST_TIMESTEP, VERBOSITY, /* Output control parameters */ NSAVE, tSave, NUSEROUT, NULL, writeCheckpoint, CHECKNAME, USERREADCHK, USERWRITECHK, /* Computational grid */ &grd, /* Initial conditions, holders for final conditions */ &col, &pres, &eInt, /* User-defined extra parameters */ &param, /* Diagnostic outputs */ &nStep, &nIter, &nFail, &nOut, /* Simulation outputs */ &tOut, &colOut, &presOut, &eIntOut, &mBndOut, &eBndOut, NULL, NULL, NULL ); /* Print diagnostic output */ printf("Total iterations = %ld, failed convergences = %ld\n", nIter, nFail); /* Write output to file */ if (!(fp=fopen(OUTFILE, "w"))) { fprintf(stderr, "Unable to open output file %s!\n", OUTFILE); exit(1); } fprintf(fp, "time/ts x col colExact pres eInt\n"); for (i=0; i<nOut; i++) { for (j=0; j<NR; j++) { x = grd->r_g[j+1]/RING_LOC; tau = tOut[i]/t0; colAnalyt = col0/ (pow(x, 0.25)*tau) * exp(-SQR(x-1.0)/tau) * gsl_sf_bessel_Inu_scaled(0.25, 2*x/tau); if (!isfinite(colAnalyt)) colAnalyt=0.0; if (colAnalyt < RING_MASS / grd->area[idx] / COL_RATIO) colAnalyt = RING_MASS / grd->area[idx] / COL_RATIO; fprintf(fp, "%e %e %e %e %e %e\n", tau, x, colOut[grd->nr*i+j], colAnalyt, presOut[grd->nr*i+j], eIntOut[grd->nr*i+j]); } } fclose(fp); /* Return successful exit */ return(0); }
{ "alphanum_fraction": 0.627039627, "avg_line_length": 27.8055555556, "ext": "c", "hexsha": "6a7e0c9bb6fe0a02fa6128c9890b54369751127f", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-11-20T02:11:17.000Z", "max_forks_repo_forks_event_min_datetime": "2021-11-19T04:41:37.000Z", "max_forks_repo_head_hexsha": "646b3136c39da7152c82a032f8151555ec1e3d44", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "franciscaconcha/amuse-vader", "max_forks_repo_path": "src/amuse/community/vader/src/prob/main_ringrad.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "646b3136c39da7152c82a032f8151555ec1e3d44", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "franciscaconcha/amuse-vader", "max_issues_repo_path": "src/amuse/community/vader/src/prob/main_ringrad.c", "max_line_length": 99, "max_stars_count": null, "max_stars_repo_head_hexsha": "646b3136c39da7152c82a032f8151555ec1e3d44", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "franciscaconcha/amuse-vader", "max_stars_repo_path": "src/amuse/community/vader/src/prob/main_ringrad.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1967, "size": 6006 }
/*
   This code is written by <albanese@fbk.it>.
   (C) 2008 mlpy Developers.

   See DWT in the GSL Library.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <Python.h>
#include <numpy/arrayobject.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <gsl/gsl_wavelet.h>
#include <gsl/gsl_math.h>

/* Allocate the GSL wavelet selected by (wf, k, centered).
 *
 * wf:       'd' (daubechies), 'h' (haar) or 'b' (bspline)
 * k:        member of the chosen family
 * centered: Py_True selects the centered variant of the family
 *
 * Returns a newly allocated gsl_wavelet, or NULL with a Python
 * ValueError set.  Factored out of dwt_dwt/dwt_idwt which previously
 * duplicated this switch verbatim.
 *
 * NOTE: with GSL's default error handler an invalid k aborts inside
 * gsl_wavelet_alloc before the NULL check is reached; the check still
 * matters when the caller has installed a non-aborting handler.
 */
static gsl_wavelet *alloc_wavelet(char wf, int k, PyObject *centered)
{
  const gsl_wavelet_type *type;
  gsl_wavelet *w;

  switch (wf)
    {
    case 'd':
      type = (centered == Py_True) ?
	gsl_wavelet_daubechies_centered : gsl_wavelet_daubechies;
      break;
    case 'h':
      type = (centered == Py_True) ?
	gsl_wavelet_haar_centered : gsl_wavelet_haar;
      break;
    case 'b':
      type = (centered == Py_True) ?
	gsl_wavelet_bspline_centered : gsl_wavelet_bspline;
      break;
    default:
      PyErr_SetString(PyExc_ValueError, "wavelet family is not valid");
      return NULL;
    }

  w = gsl_wavelet_alloc (type, k);
  if (w == NULL)
    PyErr_SetString(PyExc_ValueError, "wavelet member k is not valid");
  return w;
}

/* _dwt.dwt(x, wf, k, centered=False)
 * Forward discrete wavelet transform of a 1d array (in a fresh copy).
 */
static PyObject *dwt_dwt(PyObject *self, PyObject *args, PyObject *keywds)
{
  PyObject *x = NULL;
  PyObject *xcopy = NULL;
  char wf;
  int k, n;
  double *_xcopy;
  PyObject *centered = Py_False;

  gsl_wavelet *w;
  gsl_wavelet_workspace *work;

  /* Parse Tuple*/
  static char *kwlist[] = {"x", "wf", "k", "centered", NULL};
  if (!PyArg_ParseTupleAndKeywords(args, keywds, "Oci|O", kwlist,
				   &x, &wf, &k, &centered))
    return NULL;

  /* Build xcopy: a new, writable, contiguous double array that we
     transform in place and hand back to the caller */
  xcopy = PyArray_FROM_OTF(x, NPY_DOUBLE, NPY_OUT_ARRAY | NPY_ENSURECOPY);
  if (xcopy == NULL) return NULL;
  n = (int) PyArray_DIM(xcopy, 0);
  _xcopy = (double *) PyArray_DATA(xcopy);

  w = alloc_wavelet(wf, k, centered);
  if (w == NULL)
    {
      /* FIX: xcopy was previously leaked on the invalid-wavelet path */
      Py_DECREF(xcopy);
      return NULL;
    }

  work = gsl_wavelet_workspace_alloc (n);
  gsl_wavelet_transform_forward (w, _xcopy, 1, n, work);
  gsl_wavelet_free (w);
  gsl_wavelet_workspace_free (work);

  /* "N" steals our reference to xcopy */
  return Py_BuildValue("N", xcopy);
}

/* _dwt.idwt(X, wf, k, centered=False)
 * Inverse discrete wavelet transform; mirror image of dwt_dwt above.
 */
static PyObject *dwt_idwt(PyObject *self, PyObject *args, PyObject *keywds)
{
  PyObject *x = NULL;
  PyObject *xcopy = NULL;
  char wf;
  int k, n;
  double *_xcopy;
  PyObject *centered = Py_False;

  gsl_wavelet *w;
  gsl_wavelet_workspace *work;

  /* Parse Tuple*/
  static char *kwlist[] = {"X", "wf", "k", "centered", NULL};
  if (!PyArg_ParseTupleAndKeywords(args, keywds, "Oci|O", kwlist,
				   &x, &wf, &k, &centered))
    return NULL;

  /* Build xcopy */
  xcopy = PyArray_FROM_OTF(x, NPY_DOUBLE, NPY_OUT_ARRAY | NPY_ENSURECOPY);
  if (xcopy == NULL) return NULL;
  n = (int) PyArray_DIM(xcopy, 0);
  _xcopy = (double *) PyArray_DATA(xcopy);

  w = alloc_wavelet(wf, k, centered);
  if (w == NULL)
    {
      /* FIX: xcopy was previously leaked on the invalid-wavelet path */
      Py_DECREF(xcopy);
      return NULL;
    }

  work = gsl_wavelet_workspace_alloc (n);
  gsl_wavelet_transform_inverse (w, _xcopy, 1, n, work);
  gsl_wavelet_free (w);
  gsl_wavelet_workspace_free (work);

  return Py_BuildValue("N", xcopy);
}

/* Doc strings: */
static char module_doc[] = "Discrete Wavelet Transform Module from GSL";

static char dwt_dwt_doc[] =
  "Discrete Wavelet Tranform\n\n"
  ":Parameters:\n"
  " x : 1d array_like object (the length is restricted to powers of two)\n"
  " data\n"
  " wf : string ('d': daubechies, 'h': haar, 'b': bspline)\n"
  " wavelet family\n"
  " k : integer\n"
  " member of the wavelet family\n\n"
  " * daubechies : k = 4, 6, ..., 20 with k even\n"
  " * haar : the only valid choice of k is k = 2\n"
  " * bspline : k = 103, 105, 202, 204, 206, 208, 301, 303, 305 307, 309\n"
  " centered : bool\n"
  " align the coefficients of the various sub-bands on edges.\n"
  " Thus the resulting visualization of the coefficients of the\n"
  " wavelet transform in the phase plane is easier to understand.\n\n"
  ":Returns:\n"
  " X : 1d numpy array\n"
  " discrete wavelet transformed data\n\n"
  "Example\n\n"
  ">>> import numpy as np\n"
  ">>> import mlpy.wavelet as wave\n"
  ">>> x = np.array([1,2,3,4,3,2,1,0])\n"
  ">>> wave.dwt(x=x, wf='d', k=6)\n"
  "array([ 5.65685425, 3.41458985, 0.29185347, -0.29185347, -0.28310081,\n"
  " -0.07045258, 0.28310081, 0.07045258])\n";

static char dwt_idwt_doc[] =
  "Inverse Discrete Wavelet Tranform\n\n"
  ":Parameters:\n"
  " X : 1d array_like object\n"
  " discrete wavelet transformed data\n"
  " wf : string ('d': daubechies, 'h': haar, 'b': bspline)\n"
  " wavelet type\n"
  " k : integer\n"
  " member of the wavelet family\n\n"
  " * daubechies : k = 4, 6, ..., 20 with k even\n"
  " * haar : the only valid choice of k is k = 2\n"
  " * bspline : k = 103, 105, 202, 204, 206, 208, 301, 303, 305 307, 309\n\n"
  " centered : bool\n"
  " if the coefficients are aligned\n\n"
  ":Returns:\n"
  " x : 1d numpy array\n"
  " data\n\n"
  "Example:\n\n"
  ">>> import numpy as np\n"
  ">>> import mlpy.wavelet as wave\n"
  ">>> X = np.array([ 5.65685425, 3.41458985, 0.29185347, -0.29185347, -0.28310081,\n"
  "... -0.07045258, 0.28310081, 0.07045258])\n"
  ">>> wave.idwt(X=X, wf='d', k=6)\n"
  "array([ 1.00000000e+00, 2.00000000e+00, 3.00000000e+00,\n"
  " 4.00000000e+00, 3.00000000e+00, 2.00000000e+00,\n"
  " 1.00000000e+00, -3.53954610e-09])\n";

/* Method table */
static PyMethodDef dwt_methods[] = {
  {"dwt", (PyCFunction)dwt_dwt,
   METH_VARARGS | METH_KEYWORDS, dwt_dwt_doc},
  {"idwt", (PyCFunction)dwt_idwt,
   METH_VARARGS | METH_KEYWORDS, dwt_idwt_doc},
  {NULL, NULL, 0, NULL}
};

#if PY_MAJOR_VERSION >= 3

static struct PyModuleDef moduledef = {
  PyModuleDef_HEAD_INIT,
  "_dwt",
  module_doc,
  -1,
  dwt_methods,
  NULL, NULL, NULL, NULL
};

PyObject *PyInit__dwt(void)
{
  PyObject *m;

  m = PyModule_Create(&moduledef);
  if (!m) {
    return NULL;
  }

  import_array();

  return m;
}

#else

PyMODINIT_FUNC init_dwt(void)
{
  PyObject *m;

  m = Py_InitModule3("_dwt", dwt_methods, module_doc);
  if (m == NULL) {
    return;
  }

  import_array();
}

#endif
{ "alphanum_fraction": 0.6078141783, "avg_line_length": 27.4833948339, "ext": "c", "hexsha": "a06cd28c792e3888f559b8f1bbea049562952850", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e52a6e88d4469284a071c0b96d009f6684dbb2ea", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "TonyZPW/CRC4Docker", "max_forks_repo_path": "src/mlpy-3.5.0/mlpy/wavelet/dwt.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "e52a6e88d4469284a071c0b96d009f6684dbb2ea", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "TonyZPW/CRC4Docker", "max_issues_repo_path": "src/mlpy-3.5.0/mlpy/wavelet/dwt.c", "max_line_length": 89, "max_stars_count": 1, "max_stars_repo_head_hexsha": "5ee26f9a590b727693202d8ad3b6460970304bd9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "xuanxiaoliqu/CRC4Docker", "max_stars_repo_path": "src/mlpy-3.5.0/mlpy/wavelet/dwt.c", "max_stars_repo_stars_event_max_datetime": "2020-10-26T12:02:08.000Z", "max_stars_repo_stars_event_min_datetime": "2020-10-26T12:02:08.000Z", "num_tokens": 2359, "size": 7448 }
/*
  NAME:
     read_IC
  PURPOSE:
     read the initial conditions file: an options section (lines handed
     to parse_option, '#' lines skipped, terminated by a blank line)
     followed by K blank-line-separated numeric blocks, one per
     Gaussian.  Each block holds, one number per line: the amplitude,
     the d mean components, the d variance-diagonal entries, and the
     d*(d-1)/2 upper-triangle covariances.
  CALLING SEQUENCE:
     read_IC(char ICfilename[])
  INPUT:
     ICfilename - initial conditions filename
  OUTPUT:
     sets the options and the initial conditions; returns true on
     success, false on failure.
  GLOBALS:
     d, dV, K, gaussians, fixampP, fixmeanP, fixcovarP, maxiter, tol,
     splitnmerge, likeonly, w -- presumably declared in the included
     proj_gauss_main.h / proj_gauss_mixtures.h headers (TODO confirm);
     this routine sets d and dV and fills gaussians[0..K-1].
  NOTE(review):
     the early-return error paths below leave ICfile open (and, in the
     realloc branch, leak mmtemp) -- tolerable in this one-shot driver
     but worth confirming intent.
  REVISION HISTORY:
     2008-09-21 - Written Bovy
*/
#include <stdbool.h>
#include <stdio.h>
#include <math.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#include <proj_gauss_mixtures.h>
#include <proj_gauss_main.h>

bool read_IC(char ICfilename[]){
  FILE *ICfile;
  if ( (ICfile= fopen(ICfilename,"r")) == NULL){
    printf ("Opening the initial conditions file failed...\n");
    return false;
  }
  char line[100];
  /* Options section: every non-comment line is a key = value pair */
  printf("Reading the options section of the initial conditions file...\n");
  while (fgets(line,100,ICfile) != NULL){
    if (line[0] != '#'){
      if (line[0] == '\n') break;  /* blank line ends the section */
      if (parse_option(line) == false){
	printf("One of the lines in the options section of the initial conditions file is corrupted\n");
	printf("Please check the initial conditions file and try again\n");
	return false;
      }
    }
  }
  printf("Successfully read the options\n");
  printf("Reading the initial model parameters...\n");
  //Read first block, establish the dimension d of the modeled quantities
  /* mmtemp is a scratch buffer used as a rolling cursor throughout:
     it is advanced while writing/reading and rewound (-= countd) between
     passes, so statement order below is load-bearing */
  int countd=0;
  double *mmtemp = (double *) malloc (1000000 * sizeof (double));
  while (fgets(line,100,ICfile) != NULL){
    if (line[0] != '#'){
      if (line[0] == '\n') break;
      *(mmtemp++) = atof(line);
      ++countd;
    }
  }
  //now determine d
  /* inverts countd = 1 + d + d*(d+1)/2, i.e. d = (-3+sqrt(1+8*countd))/2 */
  d = (int) (-3 + sqrt(9 + 8 * (countd-1)))/2 ;
  dV = (int) (d*(d+1)/2);
  //allocate the alpha, mm and VV matrices
  int kk;
  for (kk=0; kk != K; ++kk){
    gaussians->mm = gsl_vector_alloc (d);
    gaussians->VV = gsl_matrix_alloc (d,d);;  /* stray extra ';' is a harmless empty statement */
    ++gaussians;
  }
  gaussians -= K;  /* rewind the global cursor to the first Gaussian */
  //first map the mmtemp values on the right alpha, mm, VV
  mmtemp -= countd;  /* rewind to the start of the block just read */
  (*gaussians).alpha = *(mmtemp++);
  int dd;
  for (dd=0; dd != d; ++dd)
    gsl_vector_set(gaussians->mm,dd,*(mmtemp++));
  int dd1,dd2;
  /* variance diagonal first, then the symmetric off-diagonal terms */
  for (dd1=0; dd1 != d; ++dd1)
    gsl_matrix_set(gaussians->VV,dd1,dd1,*(mmtemp++));
  for (dd1=0; dd1 != d-1; ++dd1)
    for (dd2=dd1+1; dd2 != d; ++dd2){
      gsl_matrix_set(gaussians->VV,dd1,dd2,*mmtemp);
      gsl_matrix_set(gaussians->VV,dd2,dd1,*mmtemp);
      mmtemp++;
    }
  ++gaussians;
  //reallocate mmtemp
  /* shrink the 1e6-entry scratch buffer to exactly one block */
  mmtemp -= countd;
  mmtemp = (double *) realloc (mmtemp,countd * sizeof (double) );
  if (mmtemp == NULL){
    printf("Error reallocating memory\n");
    printf("Returning\n");
    return false;
  }
  //Then read the rest of the Gaussians.
  /* same write-rewind-read-rewind dance as above, once per Gaussian */
  for (kk=1; kk != K; ++kk){
    while (fgets(line,100,ICfile) != NULL){
      if (line[0] != '#'){
	if (line[0] == '\n') break;
	*(mmtemp++) = atof(line);
      }
    }
    mmtemp -=countd;
    (*gaussians).alpha = *(mmtemp++);
    for (dd=0; dd != d; ++dd)
      gsl_vector_set(gaussians->mm,dd,*(mmtemp++));
    for (dd1=0; dd1 != d; ++dd1)
      gsl_matrix_set(gaussians->VV,dd1,dd1,*(mmtemp++));
    for (dd1=0; dd1 != d-1; ++dd1)
      for (dd2=dd1+1; dd2 != d; ++dd2){
	gsl_matrix_set(gaussians->VV,dd1,dd2,*mmtemp);
	gsl_matrix_set(gaussians->VV,dd2,dd1,*mmtemp);
	mmtemp++;
      }
    ++gaussians;
    mmtemp -= countd;  /* leaves mmtemp at its allocation base for free() */
  }
  gaussians -= K;
  free(mmtemp);
  fclose(ICfile);
  printf("Successfully read initial model parameters from the initial conditions file\n");

  //Print options
  printf("\nThe options are set to:\n");
  printf("K\t\t=\t");
  printf("%i",K);
  printf("\n");
  printf("maxiter\t\t=\t");
  printf("%lli",maxiter);
  printf("\n");
  printf("tol\t\t=\t");
  printf("%f",tol);
  printf("\n");
  printf("splitnmerge\t=\t");
  printf("%i",splitnmerge);
  printf("\n");
  printf("likeonly\t=\t");
  printf("%i",likeonly);
  printf("\n");
  printf("w\t\t=\t");
  printf("%f",w);
  printf("\n");
  /* the fix* cursors are global pointers; each loop walks K entries and
     then rewinds so later code sees them unchanged */
  printf("fixamp\t\t=\t");
  int ii;
  for (ii=0; ii != K; ++ii){
    printf("%i",*fixampP);
    if (ii < K-1) printf("\t");
    fixampP++;
  }
  fixampP -= K;
  printf("\n");
  printf("fixmean\t\t=\t");
  for (ii=0; ii != K; ++ii){
    printf("%i",*fixmeanP);
    if (ii < K-1) printf("\t");
    fixmeanP++;
  }
  fixmeanP -= K;
  printf("\n");
  printf("fixcovar\t=\t");
  for (ii=0; ii != K; ++ii){
    printf("%i",*fixcovarP);
    if (ii < K-1) printf("\t");
    fixcovarP++;
  }
  fixcovarP -= K;
  printf("\n");

  //Print the initial model parameters
  printf("\nInitial model parameters used:\n\n");
  for (kk=0; kk != K; ++kk){
    printf("Gaussian ");
    printf("%i",kk);
    printf("\n");
    printf("amp\t=\t");
    printf("%f",(*gaussians).alpha);
    printf("\n");
    printf("mean\t=\t");
    for (dd=0; dd != d; ++dd){
      printf("%f",gsl_vector_get(gaussians->mm,dd));
      if (dd < d-1) printf("\t");
    }
    printf("\n");
    printf("covar\t=\t");
    /* diagonal entries, then upper-triangle entries, tab-separated */
    for (dd1=0; dd1 != d; ++dd1)
      printf("%f\t",gsl_matrix_get(gaussians->VV,dd1,dd1));
    for (dd1=0; dd1 != d-1; ++dd1)
      for (dd2=dd1+1; dd2 != d; ++dd2){
	printf("%f\t",gsl_matrix_get(gaussians->VV,dd1,dd2));
      }
    ++gaussians;
    printf("\n\n");
  }
  gaussians -= K;

  return true;
}
{ "alphanum_fraction": 0.5708154506, "avg_line_length": 24.7632850242, "ext": "c", "hexsha": "00a6d18180c9de40345608cd0133c741452bd32b", "lang": "C", "max_forks_count": 26, "max_forks_repo_forks_event_max_datetime": "2021-12-13T03:37:58.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-05T22:21:22.000Z", "max_forks_repo_head_hexsha": "bc6d58199b17cd5329d72f6af3c7ba7e6d2ae780", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "HaifengWangNAOC/Learn-Bovy-Extreme-deconvolution", "max_forks_repo_path": "src/read_IC.c", "max_issues_count": 24, "max_issues_repo_head_hexsha": "bc6d58199b17cd5329d72f6af3c7ba7e6d2ae780", "max_issues_repo_issues_event_max_datetime": "2021-11-19T01:01:22.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-07T01:42:22.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "HaifengWangNAOC/Learn-Bovy-Extreme-deconvolution", "max_issues_repo_path": "src/read_IC.c", "max_line_length": 97, "max_stars_count": 73, "max_stars_repo_head_hexsha": "bc6d58199b17cd5329d72f6af3c7ba7e6d2ae780", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "HaifengWangNAOC/Learn-Bovy-Extreme-deconvolution", "max_stars_repo_path": "src/read_IC.c", "max_stars_repo_stars_event_max_datetime": "2022-01-21T01:27:34.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-22T09:22:38.000Z", "num_tokens": 1704, "size": 5126 }
// Import a generic GeoTIFF (a projected GeoTIFF flavor), including // projection data from its metadata file (ERDAS MIF HFA .aux file) into // our own ASF Tools format (.img, .meta) // // NOTE: // 1. At this time, only supports Albers Equal Area Conic, Lambert Azimuthal // Equal Area, Lambert Conformal Conic, Polar Stereographic, and UTM // 2. There may be some data duplication between the GeoTIFF tag contents // in the TIFF file and the data contents of the metadata (.aux) file. // 3. Data and parameters found in the metadata (.aux) file supercede the // data and parameter values found in the TIFF file // #include <assert.h> #include <ctype.h> #include <stdarg.h> #include "float_image.h" #include "asf_tiff.h" #include <gsl/gsl_math.h> #include <uint8_image.h> #include <spheroids.h> #include <proj.h> #include <libasf_proj.h> #include "asf.h" #include "asf_meta.h" #include "asf_nan.h" #include "asf_import.h" #include "asf_raster.h" #include "asf_tiff.h" #include "geo_tiffp.h" #include "geo_keyp.h" #include "projected_image_import.h" #include "tiff_to_float_image.h" #include "tiff_to_byte_image.h" #include "write_meta_and_img.h" #include "import_generic_geotiff.h" #include "arcgis_geotiff_support.h" #include "geotiff_support.h" #define BAD_VALUE_SCAN_ON #define FLOAT_COMPARE_TOLERANCE(a, b, t) (fabs (a - b) <= t ? 
1: 0) #define IMPORT_GENERIC_FLOAT_MICRON 0.000000001 #ifdef FLOAT_EQUIVALENT #undef FLOAT_EQUIVALENT #endif #define FLOAT_EQUIVALENT(a, b) (FLOAT_COMPARE_TOLERANCE \ (a, b, IMPORT_GENERIC_FLOAT_MICRON)) #define FLOAT_TOLERANCE 0.00001 #define DEFAULT_UTM_SCALE_FACTOR 0.9996 #define DEFAULT_SCALE_FACTOR 1.0 #define UNKNOWN_PROJECTION_TYPE -1 #define NAD27_DATUM_STR "NAD27" #define NAD83_DATUM_STR "NAD83" #define HARN_DATUM_STR "HARN" #define WGS84_DATUM_STR "WGS84" #define HUGHES_DATUM_STR "HUGHES" #define USER_DEFINED_PCS 32767 #define USER_DEFINED_KEY 32767 #define BAND_NAME_LENGTH 12 #define TIFFTAG_GDAL_NODATA 42113 #ifdef USHORT_MAX #undef USHORT_MAX #endif #define USHORT_MAX 65535 #ifdef MAX_RGB #undef MAX_RGB #endif #define MAX_RGB 255 // Do not change the BAND_ID_STRING. It will break ingest of legacy TIFFs exported with this // string in their citation strings. If you are changing the citation string to have some _other_ // identifying string, then use a _new_ definition rather than replace what is in this one. // Then, make sure that your code supports both the legacy string and the new string. // Also see the export library string definitions (which MUST match these here) and // for associated code that needs to reflect the changes you are making here. 
#define BAND_ID_STRING "Color Channel (Band) Contents in RGBA+ order" spheroid_type_t SpheroidName2spheroid(char *sphereName); void check_projection_parameters(meta_projection *mp); int band_float_image_write(FloatImage *oim, meta_parameters *meta_out, const char *outBaseName, int num_bands, int *ignore); int band_byte_image_write(UInt8Image *oim_b, meta_parameters *meta_out, const char *outBaseName, int num_bands, int *ignore); int check_for_vintage_asf_utm_geotiff(const char *citation, int *geotiff_data_exists, short *model_type, short *raster_type, short *linear_units); int check_for_datum_in_string(const char *citation, datum_type_t *datum); int check_for_ellipse_definition_in_geotiff(GTIF *input_gtif, spheroid_type_t *spheroid); int vintage_utm_citation_to_pcs(const char *citation, int *zone, char *hem, datum_type_t *datum, short *pcs); static int UTM_2_PCS(short *pcs, datum_type_t datum, unsigned long zone, char hem); void classify_geotiff(GTIF *input_gtif, short *model_type, short *raster_type, short *linear_units, short *angular_units, int *geographic_geotiff, int *geocentric_geotiff, int *map_projected_geotiff, int *geotiff_data_exists); char *angular_units_to_string(short angular_units); char *linear_units_to_string(short linear_units); void get_look_up_table_name(char *citation, char **look_up_table); // Import an ERDAS ArcGIS GeoTIFF (a projected GeoTIFF flavor), including // projection data from its metadata file (ERDAS MIF HFA .aux file) into // our own ASF Tools format (.img, .meta) // void import_generic_geotiff (const char *inFileName, const char *outBaseName, ...) 
{
  // Ingest a generic GeoTIFF: validate the TIFF's tag configuration, read the
  // geo metadata into an ASF meta_parameters struct, write the .meta file,
  // then stream the pixel data out as the ASF .img binary.
  // Optional variadic argument (after outBaseName): a char* image data type
  // string forwarded to read_generic_geotiff_metadata(), or NULL/omitted.
  TIFF *input_tiff;
  meta_parameters *meta;
  data_type_t data_type;
  int is_scanline_format;
  int is_palette_color_tiff;
  short num_bands;
  short int bits_per_sample, sample_format, planar_config;
  va_list ap;
  char image_data_type[256];
  char *pTmpChar;
  int ignore[MAX_BANDS]; // Array of band flags ...'1' if a band is to be ignored (empty band)
                         // Do NOT allocate less than MAX_BANDS bands since other tools will
                         // receive the 'ignore' array and may assume there are MAX_BANDS in
                         // the array, e.g. read_tiff_meta() in the asf_view tool.

  // Open the input tiff file.
  // NOTE(review): the previous warning handler is saved in oldHandler but is
  // never restored before returning — TIFF warnings stay suppressed process-wide
  // after this call. Confirm whether that is intentional.
  TIFFErrorHandler oldHandler;
  oldHandler = TIFFSetWarningHandler(NULL);
  input_tiff = XTIFFOpen (inFileName, "r");
  if (input_tiff == NULL)
    asfPrintError ("Error opening input TIFF file:\n %s\n", inFileName);

  // Pull the sample format / bit depth / planar config / band count out of the
  // TIFF tags; a nonzero return means the configuration is missing or unsupported.
  if (get_tiff_data_config(input_tiff,
                           &sample_format, // TIFF type (uint == 1, int == 2, float == 3)
                           &bits_per_sample, // 8, 16, or 32
                           &planar_config, // Contiguous == 1 (RGB or RGBA) or separate == 2 (separate bands)
                           &data_type, // ASF datatype, (BYTE, INTEGER16, INTEGER32, or REAL32 ...no complex)
                           &num_bands, // Initial number of bands
                           &is_scanline_format,
                           &is_palette_color_tiff,
                           REPORT_LEVEL_WARNING))
  {
    // Failed to determine tiff info or tiff info was bad
    char msg[1024];
    tiff_type_t t;
    get_tiff_type(input_tiff, &t);
    sprintf(msg, "FOUND TIFF tag data as follows:\n"
        " Sample Format: %s\n"
        " Bits per Sample: %d\n"
        " Planar Configuration: %s\n"
        " Number of Bands: %d\n"
        " Format: %s\n"
        " Colormap: %s\n",
        (sample_format == SAMPLEFORMAT_UINT) ? "Unsigned Integer" :
          (sample_format == SAMPLEFORMAT_INT) ? "Signed Integer" :
            (sample_format == SAMPLEFORMAT_IEEEFP) ? "Floating Point" : "Unknown or Unsupported",
        bits_per_sample,
        (planar_config == PLANARCONFIG_CONTIG) ? "Contiguous (chunky RGB or RGBA etc) / Interlaced" :
          (planar_config == PLANARCONFIG_SEPARATE) ? "Separate planes (band-sequential)" : "Unknown or unrecognized",
        num_bands,
        t.format == SCANLINE_TIFF ? "SCANLINE TIFF" :
        t.format == STRIP_TIFF ? "STRIP TIFF" :
        t.format == TILED_TIFF ? "TILED TIFF" : "UNKNOWN",
        is_palette_color_tiff ? "PRESENT" : "NOT PRESENT");
    // Append format-specific geometry (strip/tile dimensions) to the report.
    // NOTE(review): sprintf(msg, "%s...", msg, ...) copies msg onto itself —
    // overlapping source/destination is undefined behavior per C99 7.19.6.6;
    // consider snprintf into the tail (msg + strlen(msg)) instead.
    switch (t.format) {
      case STRIP_TIFF:
        sprintf(msg, "%s"
                " Rows per Strip: %d\n", msg, t.rowsPerStrip);
        break;
      case TILED_TIFF:
        sprintf(msg, "%s"
                " Tile Width: %d\n"
                " Tile Height: %d\n", msg, t.tileWidth, t.tileLength);
        break;
      case SCANLINE_TIFF:
      default:
        break;
    }
    asfPrintWarning(msg);
    // asfPrintError presumably terminates (it is used after cleanup here and
    // elsewhere with no return handling) — TODO confirm. If it ever returned,
    // the unconditional XTIFFClose below would be a double close.
    XTIFFClose(input_tiff);
    asfPrintError(" Unsupported TIFF type found or required TIFF tags are missing\n"
        " in TIFF File \"%s\"\n\n"
        " TIFFs must contain the following:\n"
        " Sample format: Unsigned or signed integer or IEEE floating point data\n"
        " (ASF is not yet supporting TIFF files with complex number type data),\n"
        " Planar config: Contiguous (Greyscale, RGB, or RGBA) or separate planes (band-sequential.)\n"
        " Bits per sample: 8, 16, or 32\n"
        " Number of bands: 1 through %d bands allowed.\n"
        " Format: Scanline, strip, or tiled\n"
        " Colormap: Present or not present (only valid for 1-band images)\n",
        inFileName, MAX_BANDS);
  }
  // Close the file between the metadata pass and the pixel-data pass; it is
  // reopened below for geotiff_band_image_write().
  XTIFFClose(input_tiff);

  // Get the image data type from the variable arguments list
  va_start(ap, outBaseName);
  pTmpChar = (char *)va_arg(ap, char *);
  va_end(ap);

  // Read the metadata (map-projection data etc) from the TIFF
  asfPrintStatus("\nImporting TIFF/GeoTIFF image to ASF Internal format...\n\n");
  int i;
  for (i=0; i<MAX_BANDS; i++) ignore[i]=0; // Default to ignoring no bands
  if (pTmpChar != NULL) {
    strcpy(image_data_type, pTmpChar);
    meta = read_generic_geotiff_metadata(inFileName, ignore, image_data_type);
  }
  else {
    meta = read_generic_geotiff_metadata(inFileName, ignore, NULL);
  }

  // Write the Metadata file
  meta_write(meta, outBaseName);

  // Write the binary file. num_bands/bits_per_sample/sample_format/planar_config
  // come from the tag scan above; 'ignore' may have been updated by
  // read_generic_geotiff_metadata() to drop empty bands.
  input_tiff = XTIFFOpen (inFileName, "r");
  if (input_tiff == NULL)
    asfPrintError ("Error opening input TIFF file:\n %s\n", inFileName);
  if (geotiff_band_image_write(input_tiff, meta, outBaseName, num_bands, ignore,
                               bits_per_sample, sample_format, planar_config))
  {
    // Nonzero return means the write failed — clean up then abort.
    XTIFFClose(input_tiff);
    meta_free(meta);
    meta=NULL;
    asfPrintError("Unable to write binary image...\n %s\n", outBaseName);
  }
  XTIFFClose(input_tiff);
  if (meta) meta_free(meta);
}

// Read TIFF tags and GeoTIFF keys from inFileName and build the ASF metadata
// structure for it. 'ignore' is a caller-owned MAX_BANDS-sized flag array
// (1 = skip band). Optional variadic argument: a char* image data type string,
// or NULL. Returns a newly allocated meta_parameters the caller must free.
// (Definition continues beyond this excerpt.)
meta_parameters * read_generic_geotiff_metadata(const char *inFileName, int *ignore, ...)
{
  int geotiff_data_exists;
  short num_bands;
  char *bands[MAX_BANDS]; // list of band IDs
  int band_num = 0;
  int count;
  int read_count;
  int ret;
  short model_type=-1;
  short raster_type=-1;
  short linear_units=-1;
  short angular_units=-1;
  double scale_factor;
  char no_data[25];
  TIFF *input_tiff;
  GTIF *input_gtif;
  meta_parameters *meta_out; // Return value
  data_type_t data_type;
  datum_type_t datum;
  va_list ap;

  /***** INITIALIZE PARAMETERS *****/
  /*                               */
  // Create a new metadata object for the image.
  meta_out = raw_init ();
  meta_out->optical = NULL;
  meta_out->thermal = NULL;
  meta_out->projection = meta_projection_init ();
  meta_out->stats = NULL; //meta_stats_init ();
  meta_out->state_vectors = NULL;
  meta_out->location = meta_location_init ();
  meta_out->colormap = NULL; // Updated below if palette color TIFF
  // Don't set any of the deprecated structure elements.
  meta_out->stVec = NULL;
  meta_out->geo = NULL;
  meta_out->ifm = NULL;
  meta_out->info = NULL;
  datum = UNKNOWN_DATUM;

  // Set up convenience pointers
  meta_general *mg = meta_out->general;
  meta_projection *mp = meta_out->projection;
  meta_location *ml = meta_out->location;

  // FIXME:
  // The following function works perfectly fine when called from asf_view.
  // However, over here it resulted in a segmentation fault. Did not get the
  // time to find a solution before the release. - RG
  //meta_out->insar = populate_insar_metadata(inFileName);

  // Init
  mp->spheroid = UNKNOWN_SPHEROID; // meta_projection_init() 'should' initialize this, but doesn't

  // Open the input tiff file.
  input_tiff = XTIFFOpen (inFileName, "r");
  if (input_tiff == NULL) {
    asfPrintError("Error opening input TIFF file:\n %s\n", inFileName);
  }

  // Open the structure that contains the geotiff keys.
input_gtif = GTIFNew (input_tiff); if (input_gtif == NULL) { asfPrintError("Error reading GeoTIFF keys from input TIFF file:\n %s\n", inFileName); } /***** GET WHAT WE CAN FROM THE TIFF FILE *****/ /* */ // Malloc the band names and give them numeric names as defaults // (Updated from citation string later ...if the info exists) for (band_num = 0; band_num < MAX_BANDS; band_num++) { bands[band_num] = MALLOC(sizeof(char) * (BAND_NAME_LENGTH+1)); sprintf (bands[band_num], "%02d", band_num+1); } // The data type returned is an ASF data type, e.g. BYTE or REAL32 etc // // The number of bands returned is based on the samples (values) per // pixel stored in the TIFF tags ...but some bands may be blank or ignored. // Later on, band-by-band statistics will determine if individual bands should // be ignored (not imported to an img file) and if the citation string contains // a band ID list, then any band marked with the word 'Empty' will also be ignored. // the number of bands will be updated to reflect these findings. // short sample_format; // TIFFTAG_SAMPLEFORMAT short bits_per_sample; // TIFFTAG_BITSPERSAMPLE short planar_config; // TIFFTAG_PLANARCONFIG int is_scanline_format; // False if tiled or strips > 1 TIFF file format int is_palette_color_tiff; ret = get_tiff_data_config(input_tiff, &sample_format, // TIFF type (uint, int, float) &bits_per_sample, // 8, 16, or 32 &planar_config, // Contiguous (RGB or RGBA) or separate (band sequential, not interlaced) &data_type, // ASF datatype, (BYTE, INTEGER16, INTEGER32, or REAL32 ...no complex &num_bands, // Initial number of bands &is_scanline_format, &is_palette_color_tiff, REPORT_LEVEL_WARNING); if (ret != 0) { char msg[1024]; tiff_type_t t; get_tiff_type(input_tiff, &t); sprintf(msg, "FOUND TIFF tag data as follows:\n" " Sample Format: %s\n" " Bits per Sample: %d\n" " Planar Configuration: %s\n" " Number of Bands: %d\n" " Format: %s\n", (sample_format == SAMPLEFORMAT_UINT) ? 
"Unsigned Integer" : (sample_format == SAMPLEFORMAT_INT) ? "Signed Integer" : (sample_format == SAMPLEFORMAT_IEEEFP) ? "Floating Point" : "Unknown or Unsupported", bits_per_sample, (planar_config == PLANARCONFIG_CONTIG) ? "Contiguous (chunky RGB or RGBA etc) / Interlaced" : (planar_config == PLANARCONFIG_SEPARATE) ? "Separate planes (band-sequential)" : "Unknown or unrecognized", num_bands, t.format == SCANLINE_TIFF ? "SCANLINE TIFF" : t.format == STRIP_TIFF ? "STRIP TIFF" : t.format == TILED_TIFF ? "TILED TIFF" : "UNKNOWN"); switch (t.format) { case STRIP_TIFF: sprintf(msg, "%s" " Rows per Strip: %d\n", msg, t.rowsPerStrip); break; case TILED_TIFF: sprintf(msg, "%s" " Tile Width: %d\n" " Tile Height: %d\n", msg, t.tileWidth, t.tileLength); break; case SCANLINE_TIFF: default: break; } asfPrintWarning(msg); asfPrintError(" Unsupported TIFF type found or required TIFF tags are missing\n" " in TIFF File \"%s\"\n\n" " TIFFs must contain the following:\n" " Sample format: Unsigned or signed integer or IEEE floating point data\n" " (ASF is not yet supporting TIFF files with complex number type data),\n" " Planar config: Contiguous (Greyscale, RGB, or RGBA) or separate planes (band-sequential.)\n" " Bits per sample: 8, 16, or 32\n" " Number of bands: 1 through %d bands allowed.\n" " Format: Scanline, strip, or tiled\n", inFileName, MAX_BANDS); } asfPrintStatus("\n Found %d-banded Generic GeoTIFF with %d-bit %s type data\n" " (Note: Empty or missing bands will be ignored)\n", num_bands, bits_per_sample, (sample_format == SAMPLEFORMAT_UINT) ? "Unsigned Integer" : (sample_format == SAMPLEFORMAT_INT) ? "Signed Integer" : (sample_format == SAMPLEFORMAT_IEEEFP) ? 
"Floating Point" : "Unknown or Unsupported"); char *citation = NULL; int citation_length; int typeSize; tagtype_t citation_type; citation_length = GTIFKeyInfo(input_gtif, GTCitationGeoKey, &typeSize, &citation_type); if (citation_length > 0) { citation = MALLOC ((citation_length) * typeSize); GTIFKeyGet (input_gtif, GTCitationGeoKey, citation, 0, citation_length); asfPrintStatus("\nCitation: %s\n\n", citation); } else { citation_length = GTIFKeyInfo(input_gtif, PCSCitationGeoKey, &typeSize, &citation_type); if (citation_length > 0) { citation = MALLOC ((citation_length) * typeSize); GTIFKeyGet (input_gtif, PCSCitationGeoKey, citation, 0, citation_length); asfPrintStatus("\nCitation: %s\n\n", citation); } else { asfPrintStatus("\nCitation: The GeoTIFF citation string is MISSING (Not req'd)\n\n"); } } // If this is a single-band TIFF with an embedded RGB colormap, then // grab it for the metadata and write it out as an ASF LUT file if (is_palette_color_tiff) { // Produce metadata char *look_up_table = NULL; unsigned short *red = NULL; unsigned short *green = NULL; unsigned short *blue = NULL; int i; int map_size = 1<<bits_per_sample; asfRequire(map_size > 0 && map_size <= 256, "Invalid colormap size\n"); asfPrintStatus("\nFound single-band TIFF with embedded RGB colormap\n\n"); meta_colormap *mc = meta_out->colormap = meta_colormap_init(); get_look_up_table_name(citation, &look_up_table); strcpy(mc->look_up_table, look_up_table ? look_up_table : MAGIC_UNSET_STRING); FREE(look_up_table); read_count = TIFFGetField(input_tiff, TIFFTAG_COLORMAP, &red, &green, &blue); if (!read_count) { asfPrintWarning("TIFF appears to be a palette-color TIFF, but the embedded\n" "color map (TIFFTAG_COLORMAP) appears to be missing. 
Ingest\n" "will continue, but as a non-RGB single-band greyscale image.\n"); FREE(mc->look_up_table); FREE(mc); } else { // Populate the RGB colormap char band_str[255]; strcpy(band_str, bands[0]); for (i=1; i<num_bands; i++) sprintf(band_str, ",%s", bands[i]); strcpy(mc->band_id, band_str); mc->num_elements = map_size; mc->rgb = (meta_rgb *)CALLOC(map_size, sizeof(meta_rgb)); for (i=0; i<map_size; i++) { mc->rgb[i].red = (unsigned char)((red[i]/(float)USHORT_MAX)*(float)MAX_RGB); mc->rgb[i].green = (unsigned char)((green[i]/(float)USHORT_MAX)*(float)MAX_RGB); mc->rgb[i].blue = (unsigned char)((blue[i]/(float)USHORT_MAX)*(float)MAX_RGB); } } // NOTE: Do NOT free the red/green/blue arrays ...this will result in a // glib double-free error when the TIFF file is closed. // Now that we have good metadata, produce the LUT char *lut_file = appendExt(inFileName, ".lut"); asfPrintStatus("\nSTORING TIFF file embedded color map in look up table file:\n %s\n", lut_file); FILE *lutFP = (FILE *)FOPEN(lut_file, "wt"); fprintf(lutFP, "# Look up table type: %s\n", mc->look_up_table); fprintf(lutFP, "# Originating source: %s\n", inFileName); fprintf(lutFP, "# Index Red Green Blue\n"); for (i=0; i<map_size; i++) { fprintf(lutFP, "%03d %03d %03d %03d\n", i, mc->rgb[i].red, mc->rgb[i].green, mc->rgb[i].blue); } fprintf(lutFP, "\n"); FCLOSE(lutFP); FREE(lut_file); } // Get the tie point which defines the mapping between raster // coordinate space and geographic coordinate space. Although // geotiff theoretically supports multiple tie points, we don't // (rationale: ArcView currently doesn't either, and multiple tie // points don't make sense with the pixel scale option, which we // need). // NOTE: Since neither ERDAS or ESRI store tie points in the .aux // file associated with their geotiffs, it is _required_ that they // are found in their tiff files. 
double *tie_point = NULL; (input_gtif->gt_methods.get)(input_gtif->gt_tif, GTIFF_TIEPOINTS, &count, &tie_point); if (count != 6) { asfPrintError ("GeoTIFF file does not contain tie points\n"); } // Get the scale factors which define the scale relationship between // raster pixels and geographic coordinate space. double *pixel_scale = NULL; (input_gtif->gt_methods.get)(input_gtif->gt_tif, GTIFF_PIXELSCALE, &count, &pixel_scale); if (count != 3) { asfPrintError ("GeoTIFF file does not contain pixel scale parameters\n"); } if (pixel_scale[0] <= 0.0 || pixel_scale[1] <= 0.0) { asfPrintError ("GeoTIFF file contains invalid pixel scale parameters\n"); } // CHECK TO SEE IF THE GEOTIFF CONTAINS USEFUL DATA: // If the tiff file contains geocoded information, then the model type // will be ModelTypeProjected. // FIXME: Geographic (lat/long) type geotiffs with decimal degrees are // supported, but arc-sec are not yet ... int geographic_geotiff, map_projected_geotiff, geocentric_geotiff; classify_geotiff(input_gtif, &model_type, &raster_type, &linear_units, &angular_units, &geographic_geotiff, &geocentric_geotiff, &map_projected_geotiff, &geotiff_data_exists); asfPrintStatus ("Input GeoTIFF key GTModelTypeGeoKey is %s\n", (model_type == ModelTypeGeographic) ? "ModelTypeGeographic" : (model_type == ModelTypeGeocentric) ? "ModelTypeGeocentric" : (model_type == ModelTypeProjected) ? "ModelTypeProjected" : "Unknown"); asfPrintStatus ("Input GeoTIFF key GTRasterTypeGeoKey is %s\n", (raster_type == RasterPixelIsArea) ? "RasterPixelIsArea" : "(Unsupported type)"); if (map_projected_geotiff) { asfPrintStatus ("Input GeoTIFF key ProjLinearUnitsGeoKey is %s\n", (linear_units == Linear_Meter) ? "Linear_Meter" : (linear_units == Linear_Foot) ? "Linear_Foot" : (linear_units == Linear_Foot_US_Survey) ? "Linear_Foot_US_Survey" : (linear_units == Linear_Foot_Modified_American) ? "Linear_Foot_Modified_American" : (linear_units == Linear_Foot_Clarke) ? 
"Linear_Foot_Clarke" : (linear_units == Linear_Foot_Indian) ? "Linear_Foot_Indian" : "(Unsupported type of linear units)"); } else if (geographic_geotiff) { asfPrintStatus ("Input GeoTIFF key GeogAngularUnitsGeoKey is %s\n", (angular_units == Angular_Arc_Second) ? "Angular_Arc_Second" : (angular_units == Angular_Degree) ? "Angular_Degree" : "(Unsupported type of angular units)"); } else { asfPrintError ("Cannot determine type of linear or angular units in GeoTIFF\n"); } /***** READ PROJECTION PARAMETERS FROM TIFF IF GEO DATA EXISTS *****/ /***** THEN READ THEM FROM THE METADATA (.AUX) FILE TO SUPERCEDE IF THEY EXIST *****/ /* */ /* */ // import_arcgis_geotiff() would not be called (see detect_geotiff_flavor()) // unless the model_type is either unknown or is ModelTypeProjected. If // ModelTypeProjected, then there are projection parameters inside the // GeoTIFF file. If not, then they must be parsed from the complementary // ArcGIS metadata (.aux) file // Read the model type from the GeoTIFF file ...expecting that it is // unknown, but could possibly be ModelTypeProjection // // Start of reading projection parameters from geotiff if (model_type == ModelTypeProjected && geotiff_data_exists) { char hemisphere; projection_type_t projection_type=UNKNOWN_PROJECTION; unsigned long pro_zone; // UTM zone (UTM only) short proj_coords_trans = UNKNOWN_PROJECTION_TYPE; short pcs; short geokey_datum; double false_easting = MAGIC_UNSET_DOUBLE; double false_northing = MAGIC_UNSET_DOUBLE; double lonOrigin = MAGIC_UNSET_DOUBLE; double latOrigin = MAGIC_UNSET_DOUBLE; double stdParallel1 = MAGIC_UNSET_DOUBLE; double stdParallel2 = MAGIC_UNSET_DOUBLE; double lonPole = MAGIC_UNSET_DOUBLE; //////// ALL PROJECTIONS ///////// // Set the projection block data that we know at this point if (tie_point[0] != 0 || tie_point[1] != 0 || tie_point[2] != 0) { // NOTE: To support tie points at other locations, or a set of other locations, // then things rapidly get more complex ...and a 
transformation must either be // derived or provided (and utilized etc). We're not at that point yet... // asfPrintError("Unsupported initial tie point type. Initial tie point must be for\n" "raster location (0,0) in the image.\n"); } mp->startX = tie_point[3]; mp->startY = tie_point[4]; if (pixel_scale[0] < 0 || pixel_scale[1] < 0) { asfPrintWarning("Unexpected negative pixel scale values found in GeoTIFF file.\n" "Continuing ingest, but defaulting perX to fabs(x pixel scale) and\n" "perY to (-1)*fabs(y pixel scale)... Results may vary."); } mp->perX = fabs(pixel_scale[0]); mp->perY = -(fabs(pixel_scale[1])); mp->height = 0.0; if (linear_units == Linear_Meter) { strcpy(mp->units, "meters"); } else if (linear_units == Linear_Foot || linear_units == Linear_Foot_US_Survey || linear_units == Linear_Foot_Modified_American || linear_units == Linear_Foot_Clarke || linear_units == Linear_Foot_Indian) strcpy(mp->units, "feet"); else { asfPrintError("Unsupported linear unit found in map-projected GeoTIFF. 
Only meters and feet are currently supported.\n"); } ///////// STANDARD UTM (PCS CODE) ////////// // Get datum and zone as appropriate read_count = GTIFKeyGet (input_gtif, ProjectedCSTypeGeoKey, &pcs, 0, 1); // Quick hack for Rick's State Plane data // Only supports State Plane if (read_count && pcs >= 26931 && pcs <=26940) { mp->type = STATE_PLANE; projection_type = STATE_PLANE; proj_coords_trans = CT_TransverseMercator; datum = mp->datum = NAD83_DATUM; mp->spheroid = GRS1980_SPHEROID; } if (!read_count) { // Check to see if this is a vintage ASF UTM geotiff (they only had the UTM // description in the UTM string rather than in the ProejctedCSTypeGeoKey) int sleepy; datum_type_t dopey; char sneezy; read_count = vintage_utm_citation_to_pcs(citation, &sleepy, &sneezy, &dopey, &pcs); } if (read_count == 1 && PCS_2_UTM(pcs, &hemisphere, &datum, &pro_zone)) { mp->type = UNIVERSAL_TRANSVERSE_MERCATOR; mp->hem = hemisphere; mp->param.utm.zone = pro_zone; mp->param.utm.false_easting = 500000.0; if (hemisphere == 'N') { mp->param.utm.false_northing = 0.0; } else { mp->param.utm.false_northing = 10000000.0; } mp->param.utm.lat0 = 0.0; mp->param.utm.lon0 = utm_zone_to_central_meridian(pro_zone); if (datum != UNKNOWN_DATUM) { mp->datum = datum; } else { asfPrintError("Unsupported or unknown datum found in GeoTIFF file.\n"); } char msg[256]; sprintf(msg,"UTM scale factor defaulting to %0.4lf\n", DEFAULT_UTM_SCALE_FACTOR); asfPrintStatus(msg); mp->param.utm.scale_factor = DEFAULT_UTM_SCALE_FACTOR; } ////////// ALL OTHER PROJECTION TYPES - INCLUDING GCS/USER-DEFINED UTMS ///////// else if (projection_type != STATE_PLANE) { // Hack !!!! // Not recognized as a supported UTM PCS or was a user-defined or unknown type of PCS... // // The ProjCoordTransGeoKey will be true if the PCS was user-defined or if the PCS was // not in the geotiff file... or so the standard says. 
If the ProjCoordTransGeoKey is // false, it means that an unsupported (by us) UTM or State Plane projection was // discovered (above.) All other projection types make use of the ProjCoordTransGeoKey // geokey. ////// GCS-CODE DEFINED UTM /////// // Check for a user-defined UTM projection (A valid UTM code may be in // ProjectionGeoKey, although this is not typical) read_count = GTIFKeyGet (input_gtif, ProjectionGeoKey, &pcs, 0, 0); if (read_count == 1 && PCS_2_UTM(pcs, &hemisphere, &datum, &pro_zone)) { mp->type = UNIVERSAL_TRANSVERSE_MERCATOR; mp->hem = hemisphere; mp->param.utm.zone = pro_zone; mp->param.utm.false_easting = 500000.0; if (hemisphere == 'N') { mp->param.utm.false_northing = 0.0; } else { mp->param.utm.false_northing = 10000000.0; } mp->param.utm.lat0 = utm_zone_to_central_meridian(pro_zone); mp->param.utm.lon0 = 0.0; if (datum != UNKNOWN_DATUM) { mp->datum = datum; } else if (pcs/100 == 160 || pcs/100 == 161) { // With user-defined UTMs (16001-16060, 16101-16160 in ProjectionGeoKey // the zone and hemisphere is defined, but not the datum... We should try // to determine the datum as follows: // // Read GeographicTypeGeoKey: // // GeographicTypeGeoKey, If the code is recognized, assign as appropriate. // If the code is 32676 (user-defined ...which also often // means "undefined" rather than "user defined") then // check to see if the datum is specifically defined // elsewhere: // - Check GeogGeodeticDatumGeoKey // - Check PCSCitationGeoKey, GeogCitationGeoKey, and // GTCitationGeoKey to see if it is textually described // - Check to see if semi-major and inv. 
flattening (etc) // is defined and then do a best-fit to determine a // ellipsoid // - Default to WGS-84 (if GeographicTypeGeoKey is // 160xx or 161xx format), else error out // //asfPrintError("Unsupported or unknown datum found in GeoTIFF file.\n");https://rt/Ticket/Display.html?id=7763 short gcs; read_count = GTIFKeyGet (input_gtif, GeographicTypeGeoKey, &gcs, 0, 1); if (read_count == 1) { switch(geokey_datum){ case GCS_WGS_84: case GCSE_WGS84: datum = WGS84_DATUM; break; case GCS_NAD27: datum = NAD27_DATUM; break; case GCS_NAD83: datum = NAD83_DATUM; break; case GCS_ED50: datum = ED50_DATUM; break; case GCS_SAD69: datum = SAD69_DATUM; break; default: datum = UNKNOWN_DATUM; break; } } if (datum == UNKNOWN_DATUM) { // The datum is not typically stored in GeogGeodeticDatumGeoKey, but some s/w // does put it there read_count = GTIFKeyGet (input_gtif, GeogGeodeticDatumGeoKey, &geokey_datum, 0, 1); if (read_count == 1) { switch(geokey_datum){ case Datum_WGS84: datum = WGS84_DATUM; break; case Datum_North_American_Datum_1927: datum = NAD27_DATUM; break; case Datum_North_American_Datum_1983: datum = NAD83_DATUM; break; case 6655: // ITRF97 datum = ITRF97_DATUM; break; case 6054: // HUGHES80 datum = HUGHES_DATUM; break; default: datum = UNKNOWN_DATUM; break; } } } if (datum == UNKNOWN_DATUM) { // Try citation strings to see if the datum was textually described char *citation = NULL; int citation_length; int typeSize; tagtype_t citation_type; citation_length = GTIFKeyInfo(input_gtif, GeogCitationGeoKey, &typeSize, &citation_type); if (citation_length > 0) { citation = MALLOC ((citation_length) * typeSize); GTIFKeyGet (input_gtif, GeogCitationGeoKey, citation, 0, citation_length); check_for_datum_in_string(citation, &datum); FREE(citation); } if (datum == UNKNOWN_DATUM) { citation_length = GTIFKeyInfo(input_gtif, GTCitationGeoKey, &typeSize, &citation_type); if (citation_length > 0) { citation = MALLOC ((citation_length) * typeSize); GTIFKeyGet (input_gtif, 
GTCitationGeoKey, citation, 0, citation_length); check_for_datum_in_string(citation, &datum); FREE(citation); } } if (datum == UNKNOWN_DATUM) { citation_length = GTIFKeyInfo(input_gtif, PCSCitationGeoKey, &typeSize, &citation_type); if (citation_length > 0) { citation = MALLOC ((citation_length) * typeSize); GTIFKeyGet (input_gtif, PCSCitationGeoKey, citation, 0, citation_length); check_for_datum_in_string(citation, &datum); FREE(citation); } } if (datum == UNKNOWN_DATUM) { spheroid_type_t spheroid; check_for_ellipse_definition_in_geotiff(input_gtif, &spheroid); switch (spheroid) { case BESSEL_SPHEROID: case CLARKE1866_SPHEROID: case CLARKE1880_SPHEROID: case GEM6_SPHEROID: case GEM10C_SPHEROID: case GRS1980_SPHEROID: case INTERNATIONAL1924_SPHEROID: case INTERNATIONAL1967_SPHEROID: case WGS72_SPHEROID: case WGS84_SPHEROID: case HUGHES_SPHEROID: case UNKNOWN_SPHEROID: default: datum = UNKNOWN_DATUM; break; } } if (datum == UNKNOWN_DATUM) { // If all else fails, make it a WGS-84 and spit out a warning datum = WGS84_DATUM; mp->datum = datum; asfPrintWarning("Could not determine datum type from GeoTIFF, but since this\n" "is a EPSG 160xx/161xx type UTM projection (WGS84 typ.),\n" "a WGS84 datum type is assumed.\n"); } } } char msg[256]; sprintf(msg,"UTM scale factor defaulting to %0.4lf\n", DEFAULT_UTM_SCALE_FACTOR); asfPrintStatus(msg); mp->param.utm.scale_factor = DEFAULT_UTM_SCALE_FACTOR; } /////// OTHER PROJECTION DEFINITIONS - INCLUDING USER-DEFINED UTMS/////// else { // Some other type of projection may exist, including a projection-coordinate-transformation // UTM (although that is not typical) // Get the projection coordinate transformation key (identifies the projection type) read_count = GTIFKeyGet (input_gtif, ProjCoordTransGeoKey, &proj_coords_trans, 0, 1); if (read_count != 1 || proj_coords_trans == UNKNOWN_PROJECTION_TYPE) { asfPrintWarning("Unable to determine type of projection coordinate system in GeoTIFF file\n"); } // Attempt to find a defined datum 
(the standard says to store it in GeographicTypeGeoKey) read_count = GTIFKeyGet (input_gtif, GeographicTypeGeoKey, &geokey_datum, 0, 1); if (read_count == 1) { switch(geokey_datum){ case GCS_WGS_84: case GCSE_WGS84: datum = WGS84_DATUM; break; case GCS_NAD27: datum = NAD27_DATUM; break; case GCS_NAD83: datum = NAD83_DATUM; break; case GCS_ED50: datum = ED50_DATUM; break; case GCS_SAD69: datum = SAD69_DATUM; break; default: datum = UNKNOWN_DATUM; break; } } if (datum == UNKNOWN_DATUM) { // The datum is not typically stored in GeogGeodeticDatumGeoKey, but some s/w // does put it there read_count = GTIFKeyGet (input_gtif, GeogGeodeticDatumGeoKey, &geokey_datum, 0, 1); if (read_count == 1) { switch(geokey_datum){ case Datum_WGS84: datum = WGS84_DATUM; break; case Datum_North_American_Datum_1927: datum = NAD27_DATUM; break; case Datum_North_American_Datum_1983: datum = NAD83_DATUM; break; case 6655: // ITRF97 datum = ITRF97_DATUM; break; case 6054: // HUGHES80 datum = HUGHES_DATUM; break; default: datum = UNKNOWN_DATUM; break; } } } // Hughes datum support ...The Hughes-1980 datum is user-defined and the // typically defined by the major and inv-flattening // FIXME: There are several ways of representing otherwise-undefined datum // datum types ...maybe consider supporting those? (Probably not...) // FIXME: Found out that there is an EPSG GCS code for NSIDC SSM/I polar // stereo ...for PS using Hughes, we should write this numeric value out // and avoid using a user-defined datum (technically there is no such thing as // a 'hughes datum' ...it's an earth-centered reference spheroid and the datum // is undetermined. Sigh... works exactly the same either way blah blah blah.) 
if (datum == UNKNOWN_DATUM) { short int ellipsoid_key; read_count = GTIFKeyGet(input_gtif, GeogEllipsoidGeoKey, &ellipsoid_key, 0, 1); if (read_count && ellipsoid_key == USER_DEFINED_KEY) { double semi_major = 0.0; double semi_minor = 0.0; double inv_flattening = 0.0; double hughes_semiminor = HUGHES_SEMIMAJOR * (1.0 - 1.0/HUGHES_INV_FLATTENING); read_count = GTIFKeyGet(input_gtif, GeogSemiMajorAxisGeoKey, &semi_major, 0, 1); read_count += GTIFKeyGet(input_gtif, GeogInvFlatteningGeoKey, &inv_flattening, 0, 1); read_count += GTIFKeyGet(input_gtif, GeogSemiMinorAxisGeoKey, &semi_minor, 0, 1); if (read_count >= 2 && semi_major != USER_DEFINED_KEY && inv_flattening != USER_DEFINED_KEY && FLOAT_COMPARE_TOLERANCE(semi_major, HUGHES_SEMIMAJOR, FLOAT_TOLERANCE) && FLOAT_COMPARE_TOLERANCE(inv_flattening, HUGHES_INV_FLATTENING, FLOAT_TOLERANCE)) { datum = HUGHES_DATUM; } else if (read_count >= 2 && semi_major != USER_DEFINED_KEY && semi_minor != USER_DEFINED_KEY && FLOAT_COMPARE_TOLERANCE(semi_major, HUGHES_SEMIMAJOR, FLOAT_TOLERANCE) && FLOAT_COMPARE_TOLERANCE(semi_minor, hughes_semiminor, FLOAT_TOLERANCE)) { datum = HUGHES_DATUM; } else if (read_count >= 2 && semi_minor != USER_DEFINED_KEY && inv_flattening != USER_DEFINED_KEY && FLOAT_COMPARE_TOLERANCE(semi_minor, hughes_semiminor, FLOAT_TOLERANCE) && FLOAT_COMPARE_TOLERANCE(inv_flattening, HUGHES_INV_FLATTENING, FLOAT_TOLERANCE)) { datum = HUGHES_DATUM; } else if (read_count >=2 && FLOAT_COMPARE_TOLERANCE(semi_minor, semi_major, FLOAT_TOLERANCE)) { mp->spheroid = SPHERE; mp->re_major = semi_major; mp->re_minor = semi_minor; datum = UNKNOWN_DATUM; } else { datum = UNKNOWN_DATUM; } } else { datum = UNKNOWN_DATUM; } } if (datum == UNKNOWN_DATUM && mp->spheroid != SPHERE) { asfPrintWarning("Unable to determine datum type from GeoTIFF file\n" "Defaulting to WGS-84 ...This may result in projection errors\n"); datum = WGS84_DATUM; } // Take whatever datum we have at this point mp->datum = datum; // Base on the type of 
projection coordinate transformation, e.g. type of projection, // retrieve the rest of the projection parameters projection_type = UNKNOWN_PROJECTION; scale_factor = DEFAULT_SCALE_FACTOR; switch(proj_coords_trans) { case CT_TransverseMercator: case CT_TransvMercator_Modified_Alaska: case CT_TransvMercator_SouthOriented: read_count = GTIFKeyGet (input_gtif, ProjFalseEastingGeoKey, &false_easting, 0, 1); if (read_count != 1) { asfPrintStatus("No false easting in ProjFalseEastingGeoKey ...OK for a UTM\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseNorthingGeoKey, &false_northing, 0, 1); if (read_count != 1) { asfPrintStatus("No false northing in ProjFalseNorthingGeoKey ...OK for a UTM\n"); } read_count = GTIFKeyGet (input_gtif, ProjNatOriginLongGeoKey, &lonOrigin, 0, 1); if (read_count != 1) { asfPrintStatus("No center longitude in ProjNatOriginLongGeoKey ...OK for a UTM\n"); } read_count = GTIFKeyGet (input_gtif, ProjNatOriginLatGeoKey, &latOrigin, 0, 1); if (read_count != 1) { asfPrintStatus("No center latitude in ProjNatOriginLatGeoKey ...OK for a UTM\n"); } else { latOrigin = 0.0; } read_count = GTIFKeyGet (input_gtif, ProjScaleAtNatOriginGeoKey, &scale_factor, 0, 1); if (read_count == 0) { scale_factor = DEFAULT_UTM_SCALE_FACTOR; char msg[256]; sprintf(msg,"UTM scale factor defaulting to %0.4lf ...OK for a UTM\n", scale_factor); asfPrintStatus(msg); } mp->type = UNIVERSAL_TRANSVERSE_MERCATOR; mp->hem = (false_northing == 0.0) ? 'N' : (false_northing == 10000000.0) ? 
'S' : '?'; mp->param.utm.zone = utm_zone(lonOrigin); mp->param.utm.false_easting = false_easting; mp->param.utm.false_northing = false_northing; mp->param.utm.lat0 = latOrigin; mp->param.utm.lon0 = lonOrigin; mp->param.utm.scale_factor = scale_factor; check_projection_parameters(mp); break; // Albers Conical Equal Area case IS tested case CT_AlbersEqualArea: read_count = GTIFKeyGet (input_gtif, ProjStdParallel1GeoKey, &stdParallel1, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine first standard parallel from GeoTIFF file\n" "using ProjStdParallel1GeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjStdParallel2GeoKey, &stdParallel2, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine second standard parallel from GeoTIFF file\n" "using ProjStdParallel2GeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseEastingGeoKey, &false_easting, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false easting from GeoTIFF file\n" "using ProjFalseEastingGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseNorthingGeoKey, &false_northing, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false northing from GeoTIFF file\n" "using ProjFalseNorthingGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjNatOriginLongGeoKey, &lonOrigin, 0, 1); if (read_count != 1) { asfPrintWarning("Unable to determine center longitude from GeoTIFF file\n" "using ProjNatOriginLongGeoKey. 
Trying ProjCenterLongGeoKey...\n"); read_count = GTIFKeyGet (input_gtif, ProjCenterLongGeoKey, &lonOrigin, 0, 1); if (read_count != 1) { asfPrintWarning("Unable to determine center longitude from GeoTIFF file\n" "using ProjCenterLongGeoKey as well...\n"); } else { asfPrintStatus("\nFound center longitude from ProjCenterLongGeoKey in GeoTIFF" "file...\n\n"); } } read_count = GTIFKeyGet (input_gtif, ProjNatOriginLatGeoKey, &latOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center latitude from GeoTIFF file\n" "using ProjNatOriginLatGeoKey\n"); } mp->type = ALBERS_EQUAL_AREA; mp->hem = (latOrigin > 0.0) ? 'N' : 'S'; mp->param.albers.std_parallel1 = stdParallel1; mp->param.albers.std_parallel2 = stdParallel2; mp->param.albers.center_meridian = lonOrigin; mp->param.albers.orig_latitude = latOrigin; mp->param.albers.false_easting = false_easting; mp->param.albers.false_northing = false_northing; check_projection_parameters(mp); break; // FIXME: The Lambert Conformal Conic 1-Std Parallel case is UNTESTED case CT_LambertConfConic_1SP: read_count = GTIFKeyGet (input_gtif, ProjFalseEastingGeoKey, &false_easting, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false easting from GeoTIFF file\n" "using ProjFalseEastingGeoKey. Assuming 0.0 meters and continuing...\n"); false_easting = 0.0; } read_count = GTIFKeyGet (input_gtif, ProjFalseNorthingGeoKey, &false_northing, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false northing from GeoTIFF file\n" "using ProjFalseNorthingGeoKey. 
Assuming 0.0 meters and continuing...\n"); false_northing = 0.0; } read_count = GTIFKeyGet (input_gtif, ProjNatOriginLongGeoKey, &lonOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center longitude from GeoTIFF file\n" "using ProjNatOriginLongGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjNatOriginLatGeoKey, &latOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center latitude from GeoTIFF file\n" "using ProjNatOriginLatGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjScaleAtNatOriginGeoKey, &scale_factor, 0, 1); if (read_count != 1) { scale_factor = DEFAULT_SCALE_FACTOR; char msg[256]; sprintf(msg, "Lambert Conformal Conic scale factor from ProjScaleAtNatOriginGeoKey not found in GeoTIFF ...defaulting to %0.4lf\n", scale_factor); asfPrintWarning(msg); } mp->type = LAMBERT_CONFORMAL_CONIC; mp->hem = (latOrigin > 0.0) ? 'N' : 'S'; mp->param.lamcc.plat1 = latOrigin; mp->param.lamcc.plat2 = latOrigin; mp->param.lamcc.lat0 = latOrigin; mp->param.lamcc.lon0 = lonOrigin; mp->param.lamcc.false_easting = false_easting; mp->param.lamcc.false_northing = false_northing; mp->param.lamcc.scale_factor = scale_factor; check_projection_parameters(mp); break; case CT_LambertConfConic_2SP: read_count = GTIFKeyGet (input_gtif, ProjStdParallel1GeoKey, &stdParallel1, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine first standard parallel from GeoTIFF file\n" "using ProjStdParallel1GeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjStdParallel2GeoKey, &stdParallel2, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine second standard parallel from GeoTIFF file\n" "using ProjStdParallel2GeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseEastingGeoKey, &false_easting, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false easting from GeoTIFF file\n" "using ProjFalseEastingGeoKey. 
Assuming 0.0 meters and continuing...\n"); false_easting = 0.0; } read_count = GTIFKeyGet (input_gtif, ProjFalseNorthingGeoKey, &false_northing, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false northing from GeoTIFF file\n" "using ProjFalseNorthingGeoKey. Assuming 0.0 meters and continuing...\n"); false_northing = 0.0; } read_count = GTIFKeyGet (input_gtif, ProjFalseOriginLongGeoKey, &lonOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center longitude from GeoTIFF file\n" "using ProjFalseOriginLongGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseOriginLatGeoKey, &latOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center latitude from GeoTIFF file\n" "using ProjFalseOriginLatGeoKey\n"); } mp->type = LAMBERT_CONFORMAL_CONIC; mp->hem = (latOrigin > 0.0) ? 'N' : 'S'; mp->param.lamcc.plat1 = stdParallel1; mp->param.lamcc.plat2 = stdParallel2; mp->param.lamcc.lat0 = latOrigin; mp->param.lamcc.lon0 = lonOrigin; mp->param.lamcc.false_easting = false_easting; mp->param.lamcc.false_northing = false_northing; mp->param.lamcc.scale_factor = scale_factor; check_projection_parameters(mp); break; case CT_PolarStereographic: read_count = GTIFKeyGet (input_gtif, ProjNatOriginLatGeoKey, &latOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center latitude from GeoTIFF file\n" "using ProjNatOriginLatGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjStraightVertPoleLongGeoKey, &lonPole, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine vertical pole longitude from GeoTIFF file\n" "using ProjStraightVertPoleLongGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseEastingGeoKey, &false_easting, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false easting from GeoTIFF file\n" "using ProjFalseEastingGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseNorthingGeoKey, &false_northing, 0, 1); if (read_count != 1) { 
asfPrintWarning( "Unable to determine false northing from GeoTIFF file\n" "using ProjFalseNorthingGeoKey\n"); } // NOTE: The scale_factor exists in the ProjScaleAtNatOriginGeoKey, but we do not // use it, e.g. it is not current written to the meta data file with meta_write(). mp->type = POLAR_STEREOGRAPHIC; mp->hem = (latOrigin > 0) ? 'N' : 'S'; mp->param.ps.slat = latOrigin; mp->param.ps.slon = lonPole; mp->param.ps.is_north_pole = (mp->hem == 'N') ? 1 : 0; mp->param.ps.false_easting = false_easting; mp->param.ps.false_northing = false_northing; check_projection_parameters(mp); break; case CT_LambertAzimEqualArea: read_count = GTIFKeyGet (input_gtif, ProjFalseEastingGeoKey, &false_easting, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false easting from GeoTIFF file\n" "using ProjFalseEastingGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseNorthingGeoKey, &false_northing, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false northing from GeoTIFF file\n" "using ProjFalseNorthingGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjCenterLongGeoKey, &lonOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center longitude from GeoTIFF file\n" "using ProjCenterLongGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjCenterLatGeoKey, &latOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center latitude from GeoTIFF file\n" "using ProjCenterLatGeoKey\n"); } mp->type = LAMBERT_AZIMUTHAL_EQUAL_AREA; mp->hem = (latOrigin > 0) ? 
'N' : 'S'; mp->param.lamaz.center_lon = lonOrigin; mp->param.lamaz.center_lat = latOrigin; mp->param.lamaz.false_easting = false_easting; mp->param.lamaz.false_northing = false_northing; check_projection_parameters(mp); break; case CT_Equirectangular: read_count = GTIFKeyGet (input_gtif, ProjNatOriginLatGeoKey, &latOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center latitude from GeoTIFF file\n" "using ProjNatOriginLatGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjNatOriginLongGeoKey, &lonOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center longitude from GeoTIFF file\n" "using ProjNatOriginLongGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseEastingGeoKey, &false_easting, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false easting from GeoTIFF file\n" "using ProjFalseEastingGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseNorthingGeoKey, &false_northing, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false northing from GeoTIFF file\n" "using ProjFalseNorthingGeoKey\n"); } mp->type = EQUI_RECTANGULAR; mp->hem = (latOrigin > 0.0) ? 
'N' : 'S'; mp->param.eqr.orig_latitude = latOrigin; mp->param.eqr.central_meridian = lonOrigin; mp->param.eqr.false_easting = false_easting; mp->param.eqr.false_northing = false_northing; check_projection_parameters(mp); break; case CT_Mercator: read_count = GTIFKeyGet (input_gtif, ProjNatOriginLatGeoKey, &latOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center latitude from GeoTIFF file\n" "using ProjNatOriginLatGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjNatOriginLongGeoKey, &lonOrigin, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine center longitude from GeoTIFF file\n" "using ProjNatOriginLongGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseEastingGeoKey, &false_easting, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false easting from GeoTIFF file\n" "using ProjFalseEastingGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseNorthingGeoKey, &false_northing, 0, 1); if (read_count != 1) { asfPrintWarning( "Unable to determine false northing from GeoTIFF file\n" "using ProjFalseNorthingGeoKey\n"); } // FIXME: convert scale factor into standard parallel mp->type = MERCATOR; mp->hem = (latOrigin > 0.0) ? 
'N' : 'S'; mp->param.mer.orig_latitude = latOrigin; mp->param.mer.central_meridian = lonOrigin; mp->param.mer.false_easting = false_easting; mp->param.mer.false_northing = false_northing; check_projection_parameters(mp); break; case CT_Sinusoidal: read_count = GTIFKeyGet (input_gtif, ProjCenterLongGeoKey, &lonOrigin, 0, 1); if (read_count != 1) { asfPrintWarning("Unable to determine center longitude from " "GeoTIFF file\nusing ProjCenterLongGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseEastingGeoKey, &false_easting, 0, 1); if (read_count != 1) { asfPrintWarning("Unable to determine false easting from GeoTIFF " "file\nusing ProjFalseEastingGeoKey\n"); } read_count = GTIFKeyGet (input_gtif, ProjFalseNorthingGeoKey, &false_northing, 0, 1); if (read_count != 1) { asfPrintWarning("Unable to determine false northing from GeoTIFF" " file\nusing ProjFalseNorthingGeoKey\n"); } mp->type = SINUSOIDAL; mp->hem = mg->center_latitude > 0.0 ? 'N' : 'S'; mp->param.sin.longitude_center = lonOrigin; mp->param.sin.false_easting = false_easting; mp->param.sin.false_northing = false_northing; mp->param.sin.sphere = mp->re_major; check_projection_parameters(mp); break; default: asfPrintWarning( "Unable to determine projection type from GeoTIFF file\n" "using ProjectedCSTypeGeoKey or ProjCoordTransGeoKey\n"); asfPrintWarning("Projection parameters missing in the GeoTIFF\n" "file. 
Projection parameters may be incomplete unless\n" "they are found in the associated .aux file (if it exists.)\n"); break; } } } // End of if UTM else OTHER projection type } // End of reading projection parameters from geotiff ...if it existed else if (model_type == ModelTypeGeographic && geotiff_data_exists) { // Set the projection block data that we know at this point if (tie_point[0] != 0 || tie_point[1] != 0 || tie_point[2] != 0) { // NOTE: To support tie points at other locations, or a set of other locations, // then things rapidly get more complex ...and a transformation must either be // derived or provided (and utilized etc). We're not at that point yet... // asfPrintError("Unsupported initial tie point type. Initial tie point must be for\n" "raster location (0,0) in the image.\n"); } short geographic_type; read_count = GTIFKeyGet (input_gtif, GeographicTypeGeoKey, &geographic_type, 0, 1); asfRequire (read_count == 1, "GTIFKeyGet failed.\n"); datum = UNKNOWN_DATUM; switch ( geographic_type ) { case GCS_WGS_84: datum = WGS84_DATUM; break; case GCS_NAD27: datum = NAD27_DATUM; break; case GCS_NAD83: datum = NAD83_DATUM; break; default: asfPrintError ("Unsupported GeographicTypeGeoKey value in GeoTIFF file"); break; } spheroid_type_t spheroid = datum_spheroid (datum); // NOTE: For geographic geotiffs, all angular units are converted to decimal // degrees upon ingest, hence the setting of mp->units to "degrees" here. // the angular_units variable contains, at this point, the type of unit that is // used within the geotiff itself, i.e. Angular_Arc_Second, Angular_Degree, etc. strcpy (mp->units, "degrees"); mp->type = LAT_LONG_PSEUDO_PROJECTION; mp->hem = mg->center_latitude > 0.0 ? 'N' : 'S'; mp->spheroid = spheroid; // These fields should be the same as the ones in the general block. 
mp->re_major = mg->re_major; mp->re_minor = mg->re_minor; mp->datum = datum; mp->height = 0.0; // Set to the mean from the statistics later (for DEMs) } else if (!geotiff_data_exists) { asfPrintWarning("Projection parameters missing in the GeoTIFF\n" "file. Projection parameters may be incomplete unless.\n" "they are found in the associated .aux file (if it exists.)\n"); } /*****************************************************/ /***** CHECK TO SEE IF THIS IS AN ARCGIS GEOTIFF *****/ /* */ if (model_type == ModelTypeProjected && isArcgisGeotiff(inFileName)) { // If the TIFF is an ArcGIS / IMAGINE GeoTIFF, then read the // projection parameters from the aux file (if it exists) asfPrintStatus("Checking ArcGIS GeoTIFF auxiliary file (.aux) for\n" "projection parameters...\n"); int ret = readArcgisAuxProjectionParameters(inFileName, mp); if (mp->datum != UNKNOWN_DATUM) { datum = mp->datum; } if (ret == 0) { asfPrintStatus("\nSUCCESS ...Found projection parameters in the .aux file.\n" "(These will supercede any found in the TIFF - which should have been\n" " the same anyway.)\n\n"); } } /* */ /*****************************************************/ asfPrintStatus("\nLoading input TIFF/GeoTIFF file into %d-banded %s image structure...\n\n", num_bands, (data_type == BYTE) ? "8-bit byte" : (data_type == INTEGER16) ? "16-bit integer" : (data_type == INTEGER32) ? "32-bit integer" : (data_type == REAL32) ? "32-bit float" : "unknown(?)"); // Get the raster width and height of the image. 
uint32 width; uint32 height; TIFFGetField(input_tiff, TIFFTAG_IMAGELENGTH, &height); TIFFGetField(input_tiff, TIFFTAG_IMAGEWIDTH, &width); if (height <= 0 || width <= 0) { asfPrintError("Invalid height and width parameters in TIFF file,\n" "Height = %ld, Width = %ld\n", height, width); } /***** FILL IN THE REST OF THE META DATA (Projection parms should already exist) *****/ /* */ char image_data_type[256]; mg->data_type = data_type; int is_usgs_seamless_geotiff = 0; if (angular_units == Angular_Degree && citation && strncmp(citation, "IMAGINE GeoTIFF Support", 23) == 0) { // This is a good guess since the only source of lat/long geotiffs that I know of // are the USGS Seamless Server geotiffs. Note that the image_data_type setting // will be overridden by the parameter list if the caller specified something. // // Note that even if this guess is wrong, it should still work fine for other // angular degree geotiffs except that the image_data_type and sensor string may // be misleading ...this won't affect processing by any of our tools. asfPrintStatus("\nGeoTIFF contains lat/long in decimal degrees. Assuming this is a\n" "USGS Seamless Server (or compatible) type of DEM GeoTIFF with pixel\n" "size of 30, 60, 90, or 190 meters, i.e. SRTM, NED, DTED, etcetera...\n"); strcpy(mg->sensor, "USGS Seamless data (e.g., NED, SRTM)"); strcpy(image_data_type, "DEM"); mg->image_data_type = DEM; is_usgs_seamless_geotiff = 1; } else if (angular_units == Angular_Degree || angular_units == Angular_Arc_Second) { // All other angular units asfPrintStatus("\nGeoTIFF contains lat/long in %s. Assuming this is a\n" "USGS Seamless Server (or compatible) type of DEM GeoTIFF with pixel\n" "size of 30, 60, 90, 190 meters, i.e. 
SRTM, NED, DTED, etcetera...\n", angular_units_to_string(angular_units)); strcpy(mg->sensor, "USGS Seamless data (e.g., NED, SRTM)"); is_usgs_seamless_geotiff = 1; // This will turn on conversion of pixel size from degrees to meters } else { strcpy(mg->sensor, "GEOTIFF"); strcpy(mg->sensor_name, "GEOTIFF"); } strcpy(mg->basename, inFileName); // Get the image data type from the variable arguments list char *pTmpChar=NULL; va_start(ap, ignore); // 'ignore' is the last argument before ", ..." pTmpChar = (char *)va_arg(ap, char *); if (pTmpChar != NULL && strlen(pTmpChar) >= 3 && (strncmp(uc(pTmpChar), "DEM", 3) == 0 || strncmp(uc(pTmpChar), "MASK", 4) == 0)) { strcpy(image_data_type, uc(pTmpChar)); } else { if (is_usgs_seamless_geotiff) { strcpy(image_data_type, "DEM"); } else if (geotiff_data_exists) { strcpy(image_data_type, "GEOCODED_IMAGE"); } else { strcpy(image_data_type, "IMAGE"); } } va_end(ap); if (strncmp(image_data_type, "DEM", 3) == 0) { mg->image_data_type = DEM; } else if (strncmp(image_data_type, "MASK", 4) == 0) { mg->image_data_type = MASK; } else if (strncmp(image_data_type, "AMPLITUDE_IMAGE", 15) == 0) { mg->image_data_type = AMPLITUDE_IMAGE; } else if (strncmp(image_data_type, "GEOCODED_IMAGE", 15) == 0) { mg->image_data_type = GEOCODED_IMAGE; } else if (strncmp(image_data_type, "IMAGE", 5) == 0) { mg->image_data_type = IMAGE; } mg->line_count = height; mg->sample_count = width; mg->start_line = 0; mg->start_sample = 0; float base_x_pixel_scale = pixel_scale[0]; float base_y_pixel_scale = pixel_scale[1]; if (is_usgs_seamless_geotiff) { int x_pixel_size_meters = MAGIC_UNSET_INT; int y_pixel_size_meters = MAGIC_UNSET_INT; // Convert angular units to decimal degrees if necessary switch(angular_units) { case Angular_Arc_Second: base_x_pixel_scale *= ARCSECONDS2DEGREES; base_y_pixel_scale *= ARCSECONDS2DEGREES; break; case Angular_Degree: default: break; } // Convert decimal degrees to (approximate) pixel resolution in meters // (per STRM & DTED 
standards) // 30m = 1 arcsec = 0.0002777777 degrees, // 60m = 2 arcsec = 0.0005555555 degrees, // 90m = 3 arcsec = 0.0008333333 degrees, // 180m = 6 arcsec = 0.0016666667 degrees, if (FLOAT_COMPARE_TOLERANCE(base_x_pixel_scale, 0.0002777, 0.000001)) { x_pixel_size_meters = 30.0; } else if (FLOAT_COMPARE_TOLERANCE(base_x_pixel_scale, 0.0005555, 0.000001)) { x_pixel_size_meters = 60.0; } else if (FLOAT_COMPARE_TOLERANCE(base_x_pixel_scale, 0.0008333, 0.000001)) { x_pixel_size_meters = 90.0; } else if (FLOAT_COMPARE_TOLERANCE(base_x_pixel_scale, 0.0016667, 0.000001)) { x_pixel_size_meters = 180.0; } else { // If not a standard size, then hack-calc it... // // We are supposed to put {x,y}_pixel_size in meters, so we need to convert // the pixel scale in degrees to meters ...and we don't have platform position // or height information! // // So, we are cheating a bit here, forcing the result to be to the nearest // 10m. This is ok since USGS DEMs are in 30, 60, or 90 meters. And if this // cheat is wrong ...it should still be ok since the one where accuracy is // important is the value in the projection block, this one is used by geocode // when deciding how large the pixels should be in the output. x_pixel_size_meters = 10*(int)(11131.95 * pixel_scale[0] + .5); // Sanity check on the pixel size cheat... 
if (x_pixel_size_meters != 30 && x_pixel_size_meters != 60 && x_pixel_size_meters != 90 && x_pixel_size_meters != 180) { asfPrintWarning("Unexpected x pixel size: %dm.\n" "USGS Seamless/NED/DTED data should be 30, 60, 90, or 180m\n", x_pixel_size_meters); } } if (FLOAT_COMPARE_TOLERANCE(base_y_pixel_scale, 0.0002777, 0.000001)) { y_pixel_size_meters = 30.0; } else if (FLOAT_COMPARE_TOLERANCE(base_y_pixel_scale, 0.0005555, 0.000001)) { y_pixel_size_meters = 60.0; } else if (FLOAT_COMPARE_TOLERANCE(base_y_pixel_scale, 0.0008333, 0.000001)) { y_pixel_size_meters = 90.0; } else if (FLOAT_COMPARE_TOLERANCE(base_y_pixel_scale, 0.0016667, 0.000001)) { y_pixel_size_meters = 180.0; } else { y_pixel_size_meters = 10*(int)(11131.95 * pixel_scale[1] + .5); if (y_pixel_size_meters != 30 && y_pixel_size_meters != 60 && y_pixel_size_meters != 90 && y_pixel_size_meters != 180) { asfPrintWarning("Unexpected y pixel size: %dm.\n" "USGS Seamless/NED/DTED data should be 30, 60, 90, or 180m\n", y_pixel_size_meters); } } mg->x_pixel_size = x_pixel_size_meters; mg->y_pixel_size = y_pixel_size_meters; } else if (linear_units == Linear_Foot || linear_units == Linear_Foot_US_Survey || linear_units == Linear_Foot_Modified_American || linear_units == Linear_Foot_Clarke || linear_units == Linear_Foot_Indian) { // Hack: The exact number for unit 'ft' needs to be extracted from the file base_x_pixel_scale *= 0.3048; base_y_pixel_scale *= 0.3048; mg->x_pixel_size = base_x_pixel_scale; mg->y_pixel_size = base_y_pixel_scale; asfPrintWarning("Units converted from feet to meters by adjusting the pixel size.\n" "Azimuth pixel size changed from %.3lf ft to %.3lf m.\n" "Range pixel size changed from %.3lf ft to %.3lf m.\n", pixel_scale[0], base_x_pixel_scale, pixel_scale[1], base_y_pixel_scale); } else { mg->x_pixel_size = pixel_scale[0]; mg->y_pixel_size = pixel_scale[1]; } // For now we are going to insist that the meters per pixel in the // X and Y directions are identical(ish). 
I believe asf_geocode at // least would work for non-square pixel dimensions, with the // caveats that output pixels would still be square, and would have // default size derived solely from the input pixel size if (fabs(mg->x_pixel_size - mg->y_pixel_size) > 0.0001) { char msg[256]; sprintf(msg, "Pixel size is (x,y): (%lf, %lf)\n", mg->x_pixel_size, mg->y_pixel_size); asfPrintStatus(msg); asfPrintWarning("Found non-square pixels: x versus y pixel size differs\n" "by more than 0.0001 <units>\n"); } // Image raster coordinates of tie point. double raster_tp_x = tie_point[0]; double raster_tp_y = tie_point[1]; // Note: [2] is zero for 2D space // Coordinates of tie point in projection space. // NOTE: These are called tp_lon and tp_lat ...but the tie points // will be in either linear units (meters typ.) *OR* lat/long depending // on what type of image data is in the file, e.g. map-projected geographic or // geocentric respectively (but we only support map-projected at this point.) double tp_lon = tie_point[3]; // x double tp_lat = tie_point[4]; // y, Note: [5] is zero for 2D space // Calculate center of image data ...using linear meters or decimal degrees double center_x = MAGIC_UNSET_DOUBLE; double center_y = MAGIC_UNSET_DOUBLE; if (linear_units == Linear_Meter || linear_units == Linear_Foot || linear_units == Linear_Foot_US_Survey || linear_units == Linear_Foot_Modified_American || linear_units == Linear_Foot_Clarke || linear_units == Linear_Foot_Indian) { // NOTE: center_x and center_y are in meters (map projection coordinates) // and are converted to lat/lon for center latitude/longitude below. Therefore, // since geographic geotiffs already contain angular measure, they don't need // a center_x, center_y calculated. // FIXME: Is tp_lon and tp_lat in degrees or meters for this case? 
center_x = (width / 2.0 - raster_tp_x) * base_x_pixel_scale + tp_lon; center_y = (height / 2.0 - raster_tp_y) * (-base_y_pixel_scale) + tp_lat; } // If the datum and/or spheroid are unknown at this point, then fill // them out, and the major/minor axis, as best we can. if (datum != UNKNOWN_DATUM && mp->spheroid == UNKNOWN_SPHEROID) { // Guess the spheroid from the type of datum (a fairly safe guess) mp->spheroid = datum_spheroid(mp->datum); } if (datum == UNKNOWN_DATUM && mp->spheroid != UNKNOWN_SPHEROID) { // Can't guess a datum, so leave it be datum = spheroid_datum(mp->spheroid); } if (datum == UNKNOWN_DATUM && mp->spheroid == UNKNOWN_SPHEROID && mp && mp->re_major != MAGIC_UNSET_DOUBLE && mp->re_minor != MAGIC_UNSET_DOUBLE) { // If neither the datum nor spheroid are known, try to derive them from // the axis lengths in the map projection record mp->spheroid = axis_to_spheroid(mp->re_major, mp->re_minor); datum = spheroid_datum(mp->spheroid); } if (mp->spheroid != UNKNOWN_SPHEROID) { spheroid_axes_lengths (mp->spheroid, &mg->re_major, &mg->re_minor); } if (isArcgisGeotiff(inFileName) && mp->re_major != MAGIC_UNSET_DOUBLE && mp->re_minor != MAGIC_UNSET_DOUBLE) { // The ArcGIS metadata reader sets the projection parms, not the general // block parms, so copy them over... mg->re_major = mp->re_major; mg->re_minor = mp->re_minor; } if (!isArcgisGeotiff(inFileName) && (mp->startX == MAGIC_UNSET_DOUBLE || mp->startY == MAGIC_UNSET_DOUBLE || mp->perX == MAGIC_UNSET_DOUBLE || mp->perY == MAGIC_UNSET_DOUBLE)) { mp->startX = (0.0 - raster_tp_x) * mg->x_pixel_size + tp_lon; mp->startY = (0.0 - raster_tp_y) * (-mg->y_pixel_size) + tp_lat; mp->perX = mg->x_pixel_size; mp->perY = -mg->y_pixel_size; } else if (is_usgs_seamless_geotiff) { if (linear_units == Linear_Meter) { // FIXME: Is tp_lon and tp_lat in degrees or meters for this case? 
mp->startX = (0.0 - raster_tp_x) * base_x_pixel_scale + tp_lon; mp->startY = (0.0 - raster_tp_y) * (-base_y_pixel_scale) + tp_lat; } else if (angular_units == Angular_Degree) { mp->startX = (0.0 - raster_tp_x) * base_x_pixel_scale + tp_lon; mp->startY = (0.0 - raster_tp_y) * (-base_y_pixel_scale) + tp_lat; } else if (angular_units == Angular_Arc_Second) { mp->startX = (0.0 - raster_tp_x) * (base_x_pixel_scale * ARCSECONDS2DEGREES) + tp_lon; mp->startY = (0.0 - raster_tp_y) * (-(base_y_pixel_scale * ARCSECONDS2DEGREES)) + tp_lat; } mp->perX = pixel_scale[0]; mp->perY = -pixel_scale[1]; } // These fields should be the same as the ones in the general block. mp->re_major = mg->re_major; mp->re_minor = mg->re_minor; // Fill out the number of bands and the band names strcpy(mg->bands, ""); mg->band_count = num_bands; int *empty = (int*)CALLOC(num_bands, sizeof(int)); // Defaults to 'no empty bands' char *band_str; band_str = (char*)MALLOC(100*sizeof(char)); // '100' is the array length of mg->bands (see asf_meta.h) ...yes, I know. int num_found_bands; char *tmp_citation = (citation != NULL) ? STRDUP(citation) : NULL; int is_asf_geotiff = 0; if (tmp_citation) is_asf_geotiff = strstr(tmp_citation, "Alaska Satellite Fac") ? 
1 : 0; get_bands_from_citation(&num_found_bands, &band_str, empty, tmp_citation, num_bands); meta_statistics *stats = NULL; double mask_value = MAGIC_UNSET_DOUBLE; if (!is_asf_geotiff) { asfPrintStatus("\nNo ASF-exported band names found in GeoTIFF citation tag.\n" "Band names will be assigned in numerical order.\n"); // Look for a non-standard GDAL tag that contains the no data value void *data; uint16 *counter; int ret = TIFFGetField(input_tiff, TIFFTAG_GDAL_NODATA, &counter, &data); if (ret) mg->no_data = atof((char *)data); else mg->no_data = MAGIC_UNSET_DOUBLE; } else { // This is an ASF GeoTIFF so we must check to see if any bands are empty (blank) // Since some blank-band GeoTIFFs exported by ASF tools do NOT have the list of // bands placed in the citation string, we will need to make a best-guess based // on band statistics... but only if the citation isn't cooperating. if (num_found_bands < 1) { asfPrintStatus("\nGathering image statistics (per available band)...\n"); switch(meta_out->general->data_type) { case BYTE: case INTEGER16: case INTEGER32: mask_value = UINT8_IMAGE_DEFAULT_MASK; break; case REAL32: mask_value = FLOAT_IMAGE_DEFAULT_MASK; break; default: mask_value = 0.0; break; } mg->no_data = mask_value; // If there are no band names in the citation, then collect stats and check for empty // bands that way stats = meta_statistics_init(num_bands); int is_dem = (mg->image_data_type == DEM) ? 1 : 0; if(!stats) asfPrintError("Out of memory. Cannot allocate statistics struct.\n"); int ii, nb; for (ii=0, nb=num_bands; ii<num_bands; ii++) { int ret; ret = tiff_image_band_statistics(input_tiff, meta_out, &stats->band_stats[ii], is_dem, num_bands, ii, bits_per_sample, sample_format, planar_config, 0, mask_value); if (ret != 0 || (stats->band_stats[ii].mean == stats->band_stats[ii].min && stats->band_stats[ii].mean == stats->band_stats[ii].max && stats->band_stats[ii].mean == stats->band_stats[ii].std_deviation)) { // Band data is blank, e.g. 
no variation ...all pixels the same asfPrintStatus("\nFound empty band (see statistics below):\n" " min = %f\n" " max = %f\n" " mean = %f\n" " sdev = %f\n\n", stats->band_stats[ii].min, stats->band_stats[ii].max, stats->band_stats[ii].mean, stats->band_stats[ii].std_deviation); ignore[ii] = 1; // EMPTY BAND FOUND nb--; } else { ignore[ii] = 0; asfPrintStatus("\nBand Statistics:\n" " min = %f\n" " max = %f\n" " mean = %f\n" " sdev = %f\n\n", stats->band_stats[ii].min, stats->band_stats[ii].max, stats->band_stats[ii].mean, stats->band_stats[ii].std_deviation); } } } } if (is_usgs_seamless_geotiff) { // USGS Seamless geotiffs are DEMs, which means they are one-banded and the mean // data value is the average height in the image ...we need to calculate the stats // in this case, so we can populate mp->mean properly. // NOTE: Even though USGS DEMs are one-banded, this code is written generically for // any number of bands in an arcsec or angular degrees lat/long geotiff asfPrintStatus("\nCalculating average height for USGS Seamless (SRTM, NED, etc) or DTED DEM...\n\n"); stats = meta_statistics_init(num_bands); int is_dem = (mg->image_data_type == DEM) ? 1 : 0; if(!stats) asfPrintError("Out of memory. Cannot allocate statistics struct.\n"); int ii, nb; int ret = 0; for (ii=0, nb=num_bands; ii<num_bands; ii++) { ret = tiff_image_band_statistics(input_tiff, meta_out, &stats->band_stats[ii], is_dem, num_bands, ii, bits_per_sample, sample_format, planar_config, 0, mask_value); asfPrintStatus("\nBand Statistics:\n" " min = %f\n" " max = %f\n" " mean = %f\n" " sdev = %f\n\n", stats->band_stats[ii].min, stats->band_stats[ii].max, stats->band_stats[ii].mean, stats->band_stats[ii].std_deviation); // Empty band? 
if (ret != 0) { asfPrintWarning("USGS Seamless (NED, SRTM, etc) or DTED DEM band %d appears to have no data.\n" "Setting the average height to 0.0m and continuing...\n", ii+1); mp->height = 0.0; ignore[ii] = 1; } else { mp->height = stats->band_stats[0].mean; ignore[ii] = 0; } } } if ( num_found_bands > 0 && strlen(band_str) > 0) { // If a valid list of bands were in the citation string, then let the empty[] array, // which indicates which bands in the TIFF were listed as 'empty' overrule the // ignore[] array since it was just a best-guess based on band statistics // // Note: The ignore[] array will be used when writing the binary file so that empty // bands in the TIFF won't be written to the output file int band_no, num_empty = 0; for (band_no=0; band_no<num_bands; band_no++) { ignore[band_no] = empty[band_no]; num_empty += ignore[band_no] ? 1 : 0; } // Note: mg->band_count is set to the number of found bands after the // binary file is written ...if you do it before, then put_band_float_line() // will fail. strcpy(mg->bands, band_str); mg->band_count -= num_empty; } else { // Use the default band names if none were found in the citation string // Note: For the case where there is no list of band names // in the citation string, we are either importing somebody // else's geotiff, or we are importing one of our older ones. // The only way, in that case, to know if a band is empty is // to rely on the band statistics from above. The results // of this analysis is stored in the 'ignore[<band_no>]' array. 
// Note: num_bands is from the samples per pixel TIFF tag and is // the maximum number of valid (non-ignored) bands in the file int band_no, tmp_num_bands = num_bands; for (band_no=0; band_no<tmp_num_bands; band_no++) { if (ignore[band_no]) { // Decrement the band count for each ignored band num_bands--; } else { // Band is not ignored, so give it a band name if (band_no == 0) { sprintf(mg->bands, "%s", bands[band_no]); } else { sprintf(mg->bands, "%s,%s", mg->bands, bands[band_no]); } } } mg->band_count = num_bands; } FREE(band_str); if (mg->band_count <= 0 || strlen(mg->bands) <= 0) { asfPrintError("GeoTIFF file must contain at least one non-empty color channel (band)\n"); } // Populate band stats if it makes sense if (((is_asf_geotiff && num_found_bands < 1) || is_usgs_seamless_geotiff) && stats) { // If this is an ASF GeoTIFF and no band names were found in the citation string, // then we HAD to have tried to identify blank bands with statistics ...if so, then // we may as well save the stats results in the metadata so some other processing // step can use them if it needs them (without having to recalculate them) // char **band_names=NULL; if (strlen(mg->bands) && strncmp(mg->bands, MAGIC_UNSET_STRING, strlen(MAGIC_UNSET_STRING)) != 0) { band_names = extract_band_names(mg->bands, mg->band_count); } int bn; meta_out->stats = meta_statistics_init(num_bands); meta_statistics *mst = meta_out->stats; if (mst) { int ii; for (ii=0, bn=0; ii<num_bands; ii++) { if (!ignore[ii]) { if (band_names && band_names[bn] != NULL) { strcpy(mst->band_stats[bn].band_id, band_names[bn]); } else { sprintf(mst->band_stats[bn].band_id, "%02d", bn + 1); } mst->band_stats[bn].min = stats->band_stats[ii].min; mst->band_stats[bn].max = stats->band_stats[ii].max; mst->band_stats[bn].mean = stats->band_stats[ii].mean; mst->band_stats[bn].rmse = meta_is_valid_double(stats->band_stats[ii].rmse) ? 
stats->band_stats[ii].rmse : stats->band_stats[ii].std_deviation; mst->band_stats[bn].std_deviation = stats->band_stats[ii].std_deviation; mst->band_stats[bn].mask = mask_value; bn++; } } } else asfPrintError("Out of memory. Cannot allocate statistics struct.\n"); } // Calculate the center latitude and longitude now that the projection // parameters are stored. double center_latitude; double center_longitude; double dummy_var; meta_projection proj; // Copy all fields just in case of future code rearrangements... if (!is_usgs_seamless_geotiff) { copy_proj_parms (&proj, mp); proj_to_latlon(&proj,center_x, center_y, 0.0, &center_latitude, &center_longitude, &dummy_var); mg->center_latitude = R2D*center_latitude; mg->center_longitude = R2D*center_longitude; } else { mg->center_latitude = (height / 2.0 - raster_tp_y) * mp->perY + tp_lat; mg->center_longitude = (width / 2.0 - raster_tp_x) * mp->perX + tp_lon; mp->hem = (mg->center_latitude > 0.0) ? 'N' : 'S'; } // Set up the location block if (is_usgs_seamless_geotiff) { ml->lon_start_near_range = mp->startX; ml->lat_start_near_range = mp->startY; ml->lon_start_far_range = mp->startX + mp->perX * width; ml->lat_start_far_range = mp->startY; ml->lon_end_near_range = mp->startX; ml->lat_end_near_range = mp->startY + mp->perY * height; ml->lon_end_far_range = mp->startX + mp->perX * width; ml->lat_end_far_range = mp->startY + mp->perY * height; } else { double lat, lon; proj_to_latlon(&proj, mp->startX, mp->startY, 0.0, &lat, &lon, &dummy_var); ml->lat_start_near_range = R2D*lat; ml->lon_start_near_range = R2D*lon; proj_to_latlon(&proj, mp->startX + mp->perX * width, mp->startY, 0.0, &lat, &lon, &dummy_var); ml->lat_start_far_range = R2D*lat; ml->lon_start_far_range = R2D*lon; proj_to_latlon(&proj, mp->startX, mp->startY + mp->perY * height, 0.0, &lat, &lon, &dummy_var); ml->lat_end_near_range = R2D*lat; ml->lon_end_near_range = R2D*lon; proj_to_latlon(&proj, mp->startX + mp->perX * width, mp->startY + mp->perY * height, 
0.0, &lat, &lon, &dummy_var); ml->lat_end_far_range = R2D*lat; ml->lon_end_far_range = R2D*lon; } // Clean up GTIFFree(input_gtif); XTIFFClose(input_tiff); if(stats)FREE(stats); FREE (tmp_citation); FREE (citation); FREE (tie_point); FREE (pixel_scale); for (band_num = 0; band_num < MAX_BANDS; band_num++) { FREE(bands[band_num]); } return meta_out; } // Checking routine for projection parameter input. void check_projection_parameters(meta_projection *mp) { project_parameters_t *pp = &mp->param; // FIXME: Hughes datum stuff commented out for now ...until Hughes is implemented in the trunk if (mp->datum == HUGHES_DATUM && mp->type != POLAR_STEREOGRAPHIC) { asfPrintError("Hughes ellipsoid is only supported for polar stereographic projections.\n"); } switch (mp->type) { case UNIVERSAL_TRANSVERSE_MERCATOR: // Tests for outside of allowed ranges errors: // // Valid UTM projections: // // WGS84 + zone 1 thru 60 + N or S hemisphere // NAD83 + zone 2 thru 23 + N hemisphere // NAD27 + zone 2 thru 22 + N hemisphere // if (!meta_is_valid_int(pp->utm.zone)) { asfPrintError("Invalid zone number found (%d).\n", pp->utm.zone); } if (!meta_is_valid_double(pp->utm.lat0) || pp->utm.lat0 != 0.0) { asfPrintWarning("Invalid Latitude of Origin found (%.4f).\n" "Setting Latitude of Origin to 0.0\n", pp->utm.lat0); pp->utm.lat0 = 0.0; } if (pp->utm.lon0 != utm_zone_to_central_meridian(pp->utm.zone)) { asfPrintWarning("Invalid Longitude of Origin (%.4f) found\n" "for the given zone (%d).\n" "Setting Longitude of Origin to %f for zone %d\n", utm_zone_to_central_meridian(pp->utm.zone), pp->utm.zone); pp->utm.lon0 = utm_zone_to_central_meridian(pp->utm.zone); } switch(mp->datum) { case NAD27_DATUM: if (pp->utm.zone < 2 || pp->utm.zone > 22) { asfPrintError("Zone '%d' outside the supported range (2 to 22) for NAD27...\n" " WGS 84, Zone 1 thru 60, Latitudes between -90 and +90\n" " NAD83, Zone 2 thru 23, Latitudes between 0 and +90\n" " NAD27, Zone 2 thru 22, Latitudes between 0 and +90\n\n", 
pp->utm.zone); } break; case NAD83_DATUM: if (pp->utm.zone < 2 || pp->utm.zone > 23) { asfPrintError("Zone '%d' outside the supported range (2 to 23) for NAD83...\n" " WGS 84, Zone 1 thru 60, Latitudes between -90 and +90\n" " NAD83, Zone 2 thru 23, Latitudes between 0 and +90\n" " NAD27, Zone 2 thru 22, Latitudes between 0 and +90\n\n", pp->utm.zone); } break; case WGS84_DATUM: if (pp->utm.zone < 1 || pp->utm.zone > 60) { asfPrintError("Zone '%d' outside the valid range of (1 to 60) for WGS-84\n", pp->utm.zone); } break; case ITRF97_DATUM: if (pp->utm.zone < 1 || pp->utm.zone > 60) { asfPrintError("Zone '%d' outside the valid range of (1 to 60) for ITRF-97\n", pp->utm.zone); } break; default: asfPrintError("Unrecognized or unsupported datum found in projection parameters.\n"); break; } if (!meta_is_valid_double(pp->utm.lon0) || pp->utm.lon0 < -180 || pp->utm.lon0 > 180) { asfPrintError("Longitude of Origin (%.4f) undefined or outside the defined range " "(-180 deg to 180 deg)\n", pp->utm.lon0); } if (!meta_is_valid_double(pp->utm.lat0) || pp->utm.lat0 != 0.0) { asfPrintError("Latitude of Origin (%.4f) undefined or invalid (should be 0.0)\n", pp->utm.lat0); } if (!meta_is_valid_double(pp->utm.scale_factor) || !FLOAT_EQUIVALENT(pp->utm.scale_factor, 0.9996)) { asfPrintError("Scale factor (%.4f) undefined or different from default value (0.9996)\n", pp->utm.scale_factor); } if (!meta_is_valid_double(pp->utm.false_easting) || !FLOAT_EQUIVALENT(pp->utm.false_easting, 500000)) { asfPrintError("False easting (%.1f) undefined or different from default value (500000)\n", pp->utm.false_easting); } if (mp->hem == 'N') { if (!meta_is_valid_double(pp->utm.false_northing) || !FLOAT_EQUIVALENT(pp->utm.false_northing, 0)) { asfPrintError("False northing (%.1f) undefined or different from default value (0)\n", pp->utm.false_northing); } } else { if (!meta_is_valid_double(pp->utm.false_northing) || !FLOAT_EQUIVALENT(pp->utm.false_northing, 10000000)) { asfPrintError("False northing 
(%.1f) undefined or different from default value (10000000)\n", pp->utm.false_northing); } } break; case POLAR_STEREOGRAPHIC: // Outside range tests if (!meta_is_valid_double(pp->ps.slat) || pp->ps.slat < -90 || pp->ps.slat > 90) { asfPrintError("Latitude of origin (%.4f) undefined or outside the defined range " "(-90 deg to 90 deg)\n", pp->ps.slat); } if (!meta_is_valid_double(pp->ps.slon) || pp->ps.slon < -180 || pp->ps.slon > 180) { asfPrintError("Central meridian (%.4f) undefined or outside the defined range " "(-180 deg to 180 deg)\n", pp->ps.slon); } // Distortion test - only areas with a latitude above 60 degrees North or // below -60 degrees South are permitted if (!meta_is_valid_int(pp->ps.is_north_pole) || (pp->ps.is_north_pole != 0 && pp->ps.is_north_pole != 1)) { asfPrintError("Invalid north pole flag (%s) found.\n", pp->ps.is_north_pole == 0 ? "SOUTH" : pp->ps.is_north_pole == 1 ? "NORTH" : "UNKNOWN"); } break; case ALBERS_EQUAL_AREA: // Outside range tests if (!meta_is_valid_double(pp->albers.std_parallel1) || pp->albers.std_parallel1 < -90 || pp->albers.std_parallel1 > 90) { asfPrintError("First standard parallel (%.4f) undefined or outside the defined range " "(-90 deg to 90 deg)\n", pp->albers.std_parallel1); } if (!meta_is_valid_double(pp->albers.std_parallel2) || pp->albers.std_parallel2 < -90 || pp->albers.std_parallel2 > 90) { asfPrintError("Second standard parallel (%.4f) undefined or outside the defined range " "(-90 deg to 90 deg)\n", pp->albers.std_parallel2); } if (!meta_is_valid_double(pp->albers.center_meridian) || pp->albers.center_meridian < -180 || pp->albers.center_meridian > 180) { asfPrintError("Central meridian (%.4f) undefined or outside the defined range " "(-180 deg to 180 deg)\n", pp->albers.center_meridian); } if (!meta_is_valid_double(pp->albers.orig_latitude) || pp->albers.orig_latitude < -90 || pp->albers.orig_latitude > 90) { asfPrintError("Latitude of origin (%.4f) undefined or outside the defined range " "(-90 deg to 90 
deg)\n", pp->albers.orig_latitude); } break; case LAMBERT_CONFORMAL_CONIC: // Outside range tests if (!meta_is_valid_double(pp->lamcc.plat1) || pp->lamcc.plat1 < -90 || pp->lamcc.plat1 > 90) { asfPrintError("First standard parallel (%.4f) undefined or outside the defined range " "(-90 deg to 90 deg)\n", pp->lamcc.plat1); } if (!meta_is_valid_double(pp->lamcc.plat2) || pp->lamcc.plat2 < -90 || pp->lamcc.plat2 > 90) { asfPrintError("Second standard parallel '%.4f' outside the defined range " "(-90 deg to 90 deg)\n", pp->lamcc.plat2); } if (!meta_is_valid_double(pp->lamcc.lon0) || pp->lamcc.lon0 < -180 || pp->lamcc.lon0 > 180) { asfPrintError("Central meridian '%.4f' outside the defined range " "(-180 deg to 180 deg)\n", pp->lamcc.lon0); } if (!meta_is_valid_double(pp->lamcc.lat0) || pp->lamcc.lat0 < -90 || pp->lamcc.lat0 > 90) { asfPrintError("Latitude of origin '%.4f' outside the defined range " "(-90 deg to 90 deg)\n", pp->lamcc.lat0); } break; case LAMBERT_AZIMUTHAL_EQUAL_AREA: // Outside range tests if (!meta_is_valid_double(pp->lamaz.center_lon) || pp->lamaz.center_lon < -180 || pp->lamaz.center_lon > 180) { asfPrintError("Central meridian '%.4f' outside the defined range " "(-180 deg to 180 deg)\n", pp->lamaz.center_lon); } if (!meta_is_valid_double(pp->lamaz.center_lat) || pp->lamaz.center_lat < -90 || pp->lamaz.center_lat > 90) { asfPrintError("Latitude of origin '%.4f' outside the defined range " "(-90 deg to 90 deg)\n", pp->lamaz.center_lat); } break; case SINUSOIDAL: if (!meta_is_valid_double(pp->sin.longitude_center) || pp->sin.longitude_center < -180 || pp->sin.longitude_center > 180) { asfPrintError("Longitude center '%.4f' outside the defined range " "(-180 deg to 180 deg)\n", pp->lamaz.center_lon); } break; default: break; } } int band_float_image_write(FloatImage *oim, meta_parameters *omd, const char *outBaseName, int num_bands, int *ignore) { char *outName; int row, col, band, offset; float *buf; buf = 
(float*)MALLOC(sizeof(float)*omd->general->sample_count);
  outName = (char*)MALLOC(sizeof(char)*strlen(outBaseName) + 5);
  strcpy(outName, outBaseName);
  append_ext_if_needed(outName, ".img", ".img");
  // Bands are stacked vertically in the FloatImage — one line_count-tall
  // slab per band; 'offset' converts a band-relative row into a FloatImage row.
  offset = omd->general->line_count;
  for (band=0; band < num_bands; band++) {
    if (num_bands > 1) {
      asfPrintStatus("Writing band %02d...\n", band+1);
    }
    else {
      asfPrintStatus("Writing binary image...\n");
    }
    // Append after the first band so all bands land in a single .img file.
    FILE *fp=(FILE*)FOPEN(outName, band > 0 ? "ab" : "wb");
    if (fp == NULL) return 1;
    if (!ignore[band]) {
      for (row=0; row < omd->general->line_count; row++) {
        asfLineMeter(row, omd->general->line_count);
        for (col=0; col < omd->general->sample_count; col++) {
          buf[col] = float_image_get_pixel(oim, col, row+(offset*band));
        }
        put_float_line(fp, omd, row, buf);
      }
    }
    else {
      // Band was flagged empty by the caller — skip it entirely.
      asfPrintStatus(" Empty band found ...ignored\n");
    }
    FCLOSE(fp);
  }
  FREE(buf);
  FREE(outName);
  return 0;
}

// Writes each non-ignored band of a byte image to a flat binary (.img) file
// as float data, band after band.  Returns 0 on success, 1 if the output
// file could not be opened.
int band_byte_image_write(UInt8Image *oim_b, meta_parameters *omd,
                          const char *outBaseName, int num_bands,
                          int *ignore)
{
  char *outName;
  int row, col, band, offset;
  float *buf;

  buf = (float*)MALLOC(sizeof(float)*omd->general->sample_count);
  outName = (char*)MALLOC(sizeof(char)*strlen(outBaseName) + 5);
  strcpy(outName, outBaseName);
  append_ext_if_needed(outName, ".img", ".img");
  // One line_count-tall slab per band (see band_float_image_write above).
  offset = omd->general->line_count;
  for (band=0; band < num_bands; band++) {
    if (num_bands > 1) {
      asfPrintStatus("Writing band %02d...\n", band+1);
    }
    else {
      asfPrintStatus("Writing binary image...\n");
    }
    FILE *fp=(FILE*)FOPEN(outName, band > 0 ? "ab" : "wb");
    if (fp == NULL) return 1;
    if (!ignore[band]) {
      for (row=0; row < omd->general->line_count; row++) {
        asfLineMeter(row, omd->general->line_count);
        for (col=0; col < omd->general->sample_count; col++) {
          int curr_row = row+(offset*band);
          buf[col] = (float)uint8_image_get_pixel(oim_b, col, curr_row); //row+(offset*band));
        }
        put_float_line(fp, omd, row, buf);
      }
    }
    else {
      asfPrintStatus(" Empty band found ...ignored\n");
    }
    FCLOSE(fp);
  }
  FREE(buf);
  FREE(outName);
  return 0;
}

// Computes min/max/mean/std_deviation for one band of a TIFF image,
// optionally ignoring a given mask value.  Returns 0 on success, 1 on
// failure (unreadable geometry, empty/constant band, out-of-range result).
int tiff_image_band_statistics (TIFF *tif, meta_parameters *omd,
                                meta_stats *stats,
                                int is_dem, int num_bands, int band_no,
                                short bits_per_sample, short sample_format,
                                short planar_config,
                                int use_mask_value, double mask_value)
{
  tiff_type_t tiffInfo;

  // Determine what type of TIFF this is (scanline/strip/tiled)
  get_tiff_type(tif, &tiffInfo);
  if (tiffInfo.imageCount > 1) {
    asfPrintWarning("Found multi-image TIFF file. Statistics will only be\n"
                    "calculated from the bands in the first image in the file\n");
  }
  if (tiffInfo.imageCount < 1) {
    asfPrintError ("TIFF file contains zero images\n");
  }
  if (tiffInfo.format != SCANLINE_TIFF &&
      tiffInfo.format != STRIP_TIFF &&
      tiffInfo.format != TILED_TIFF)
  {
    asfPrintError("Unrecognized TIFF type\n");
  }
  if (tiffInfo.volume_tiff) {
    asfPrintError("Multi-dimensional TIFF found ...only 2D TIFFs are supported.\n");
  }

  // Minimum and maximum sample values as integers.
  double fmin = FLT_MAX;
  double fmax = -FLT_MAX;
  double cs=0.0; // Current sample value
  stats->mean = 0.0;
  double s = 0.0;  // Running sum for the incremental variance recurrence.
  uint32 scanlineSize = 0;
  uint32 sample_count = 0; // Samples considered so far.
uint32 ii, jj; scanlineSize = TIFFScanlineSize(tif); if (scanlineSize <= 0) { return 1; } if (num_bands > 1 && planar_config != PLANARCONFIG_CONTIG && planar_config != PLANARCONFIG_SEPARATE) { return 1; } tdata_t *buf = _TIFFmalloc(scanlineSize); // If there is a mask value we are supposed to ignore, if ( use_mask_value ) { // iterate over all rows in the TIFF for ( ii = 0; ii < omd->general->line_count; ii++ ) { asfPercentMeter((double)ii/(double)omd->general->line_count); // Get a data line from the TIFF switch (tiffInfo.format) { case SCANLINE_TIFF: if (planar_config == PLANARCONFIG_CONTIG || num_bands == 1) { TIFFReadScanline(tif, buf, ii, 0); } else { // Planar configuration is band-sequential TIFFReadScanline(tif, buf, ii, band_no); } break; case STRIP_TIFF: ReadScanline_from_TIFF_Strip(tif, buf, ii, band_no); break; case TILED_TIFF: // if (planar_config == PLANARCONFIG_CONTIG || num_bands == 1) { // ReadScanline_from_TIFF_TileRow(tif, buf, ii, 0); // } // else { // Planar configuration is band-sequential ReadScanline_from_TIFF_TileRow(tif, buf, ii, band_no); // } break; default: asfPrintError("Invalid TIFF format found.\n"); break; } for (jj = 0 ; jj < omd->general->sample_count; jj++ ) { // iterate over each pixel sample in the scanline switch(bits_per_sample) { case 8: switch(sample_format) { case SAMPLEFORMAT_UINT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((uint8*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((uint8*)(buf))[jj]); } break; case SAMPLEFORMAT_INT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((int8*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((int8*)(buf))[jj]); // Current sample. 
} break; default: // There is no such thing as an IEEE 8-bit floating point asfPrintError("Unexpected data type in GeoTIFF ...Cannot calculate statistics.\n"); return 1; break; } if ( !isnan(mask_value) && (gsl_fcmp (cs, mask_value, 0.00000000001) == 0 ) ) { continue; } break; case 16: switch(sample_format) { case SAMPLEFORMAT_UINT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((uint16*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((uint16*)(buf))[jj]); // Current sample. } break; case SAMPLEFORMAT_INT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((int16*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((uint16*)(buf))[jj]); // Current sample. } break; default: // There is no such thing as an IEEE 16-bit floating point asfPrintError("Unexpected data type in TIFF/GeoTIFF ...Cannot calculate statistics.\n"); return 1; break; } if ( !isnan(mask_value) && (gsl_fcmp (cs, mask_value, 0.00000000001) == 0 ) ) { continue; } break; case 32: switch(sample_format) { case SAMPLEFORMAT_UINT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((uint32*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((uint32*)(buf))[jj]); // Current sample. } break; case SAMPLEFORMAT_INT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((long*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((long*)(buf))[jj]); // Current sample. } break; case SAMPLEFORMAT_IEEEFP: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((float*)(buf))[(jj*num_bands)+band_no]); // Current sample. 
} else { // Planar configuration is band-sequential or single-banded cs = (double)(((float*)(buf))[jj]); // Current sample. } if (is_dem && cs < -10e10) { // Bad value removal for DEMs (really an adjustment, not a removal) // -> This only applies to USGS Seamless DEMs and REAL32 data type <- cs = -999.0; } break; default: asfPrintError("Unexpected data type in GeoTIFF ...Cannot calculate statistics.\n"); return 1; break; } if ( !isnan(mask_value) && (gsl_fcmp (cs, mask_value, 0.00000000001) == 0 ) ) { continue; } break; } if ( G_UNLIKELY (cs < fmin) ) { fmin = cs; } if ( G_UNLIKELY (cs > fmax) ) { fmax = cs; } double old_mean = stats->mean; stats->mean += (cs - stats->mean) / (sample_count + 1); s += (cs - old_mean) * (cs - stats->mean); sample_count++; } } asfPercentMeter(1.0); } else { // There is no mask value to ignore, so we do the same as the // above loop, but without the possible continue statement. for ( ii = 0; ii < omd->general->line_count; ii++ ) { asfPercentMeter((double)ii/(double)omd->general->line_count); // Get a data line from the TIFF switch (tiffInfo.format) { case SCANLINE_TIFF: if (planar_config == PLANARCONFIG_CONTIG || num_bands == 1) { TIFFReadScanline(tif, buf, ii, 0); } else { // Planar configuration is band-sequential TIFFReadScanline(tif, buf, ii, band_no); } break; case STRIP_TIFF: ReadScanline_from_TIFF_Strip(tif, buf, ii, band_no); break; case TILED_TIFF: // Planar configuration is band-sequential ReadScanline_from_TIFF_TileRow(tif, buf, ii, band_no); break; default: asfPrintError("Invalid TIFF format found.\n"); break; } for (jj = 0 ; jj < omd->general->sample_count; jj++ ) { // iterate over each pixel sample in the scanline switch(bits_per_sample) { case 8: switch(sample_format) { case SAMPLEFORMAT_UINT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((uint8*)(buf))[(jj*num_bands)+band_no]); // Current sample. 
} else { // Planar configuration is band-sequential or single-banded cs = (double)(((uint8*)(buf))[jj]); } break; case SAMPLEFORMAT_INT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((int8*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((int8*)(buf))[jj]); // Current sample. } break; default: // There is no such thing as an IEEE 8-bit floating point asfPrintError("Unexpected data type in GeoTIFF ...Cannot calculate statistics.\n"); return 1; break; } break; case 16: switch(sample_format) { case SAMPLEFORMAT_UINT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((uint16*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((uint16*)(buf))[jj]); // Current sample. } break; case SAMPLEFORMAT_INT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((int16*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((uint16*)(buf))[jj]); // Current sample. } break; default: // There is no such thing as an IEEE 16-bit floating point asfPrintError("Unexpected data type in GeoTIFF ...Cannot calculate statistics.\n"); return 1; break; } break; case 32: switch(sample_format) { case SAMPLEFORMAT_UINT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((uint32*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((uint32*)(buf))[jj]); // Current sample. } break; case SAMPLEFORMAT_INT: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((long*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((long*)(buf))[jj]); // Current sample. 
} break; case SAMPLEFORMAT_IEEEFP: if (planar_config == PLANARCONFIG_CONTIG && num_bands > 1) { cs = (double)(((float*)(buf))[(jj*num_bands)+band_no]); // Current sample. } else { // Planar configuration is band-sequential or single-banded cs = (double)(((float*)(buf))[jj]); // Current sample. } if (is_dem && cs < -10e10) { // Bad value removal for DEMs (really an adjustment, not a removal) // -> This only applies to USGS Seamless DEMs and REAL32 data type <- cs = -999.0; } break; default: asfPrintError("Unexpected data type in GeoTIFF ...Cannot calculate statistics.\n"); return 1; break; } break; default: asfPrintError("Unexpected data type in GeoTIFF ...Cannot calculate statistics.\n"); return 1; break; } if ( G_UNLIKELY (cs < fmin) ) { fmin = cs; } if ( G_UNLIKELY (cs > fmax) ) { fmax = cs; } double old_mean = stats->mean; stats->mean += (cs - stats->mean) / (sample_count + 1); s += (cs - old_mean) * (cs - stats->mean); sample_count++; } } asfPercentMeter(1.0); } if (buf) _TIFFfree(buf); // Verify the new extrema have been found. //if (fmin == FLT_MAX || fmax == -FLT_MAX) if (gsl_fcmp (fmin, FLT_MAX, 0.00000000001) == 0 || gsl_fcmp (fmax, -FLT_MAX, 0.00000000001) == 0) return 1; stats->min = fmin; stats->max = fmax; stats->std_deviation = sqrt (s / (sample_count - 1)); // The new extrema had better be in the range supported range if (fabs(stats->mean) > FLT_MAX || fabs(stats->std_deviation) > FLT_MAX) return 1; return 0; } int geotiff_band_image_write(TIFF *tif, meta_parameters *omd, const char *outBaseName, int num_bands, int *ignore, short bits_per_sample, short sample_format, short planar_config) { char *outName; int num_ignored; uint32 row, col, band; float *buf; tsize_t scanlineSize; // Determine what type of TIFF this is (scanline/strip/tiled) tiff_type_t tiffInfo; get_tiff_type(tif, &tiffInfo); if (tiffInfo.imageCount > 1) { asfPrintWarning("Found multi-image TIFF file. 
Only the first image in the file\n" "will be exported.\n"); } if (tiffInfo.imageCount < 1) { asfPrintError ("TIFF file contains zero images\n"); } if (tiffInfo.format != SCANLINE_TIFF && tiffInfo.format != STRIP_TIFF && tiffInfo.format != TILED_TIFF) { asfPrintError("Unrecognized TIFF type\n"); } if (tiffInfo.volume_tiff) { asfPrintError("Multi-dimensional TIFF found ...only 2D TIFFs are supported.\n"); } buf = (float*)MALLOC(sizeof(float)*omd->general->sample_count); outName = (char*)MALLOC(sizeof(char)*strlen(outBaseName) + 5); strcpy(outName, outBaseName); append_ext_if_needed(outName, ".img", ".img"); if (num_bands > 1 && planar_config != PLANARCONFIG_CONTIG && planar_config != PLANARCONFIG_SEPARATE) { asfPrintError("Unexpected planar configuration found in TIFF file\n"); } scanlineSize = TIFFScanlineSize(tif); if (scanlineSize <= 0) { return 1; } tdata_t *tif_buf = _TIFFmalloc(scanlineSize); if (!tif_buf) { asfPrintError("Cannot allocate buffer for reading TIFF lines\n"); } for (band=0, num_ignored=0; band < num_bands; band++) { if (num_bands > 1) { asfPrintStatus("\nWriting band %02d...\n", band+1); } else { asfPrintStatus("\nWriting binary image...\n"); } FILE *fp=(FILE*)FOPEN(outName, band > 0 ? 
"ab" : "wb"); if (fp == NULL) return 1; if (!ignore[band]) { for (row=0; row < omd->general->line_count; row++) { asfLineMeter(row, omd->general->line_count); switch (tiffInfo.format) { case SCANLINE_TIFF: if (planar_config == PLANARCONFIG_CONTIG || num_bands == 1) { TIFFReadScanline(tif, tif_buf, row, 0); } else { // Planar configuration is band-sequential TIFFReadScanline(tif, tif_buf, row, band); } break; case STRIP_TIFF: ReadScanline_from_TIFF_Strip(tif, tif_buf, row, band); break; case TILED_TIFF: // Planar configuration is band-sequential ReadScanline_from_TIFF_TileRow(tif, tif_buf, row, band); break; default: asfPrintError("Invalid TIFF format found.\n"); break; } for (col=0; col < omd->general->sample_count; col++) { switch (bits_per_sample) { case 8: switch(sample_format) { case SAMPLEFORMAT_UINT: ((float*)buf)[col] = (float)(((uint8*)tif_buf)[col]); break; case SAMPLEFORMAT_INT: ((float*)buf)[col] = (float)(((int8*)tif_buf)[col]); break; default: // No such thing as an 8-bit IEEE float asfPrintError("Unexpected data type in TIFF file ...cannot write ASF-internal\n" "format file.\n"); break; } break; case 16: switch(sample_format) { case SAMPLEFORMAT_UINT: ((float*)buf)[col] = (float)(((uint16*)tif_buf)[col]); break; case SAMPLEFORMAT_INT: ((float*)buf)[col] = (float)(((int16*)tif_buf)[col]); break; default: // No such thing as an 16-bit IEEE float asfPrintError("Unexpected data type in TIFF file ...cannot write ASF-internal\n" "format file.\n"); break; } break; case 32: switch(sample_format) { case SAMPLEFORMAT_UINT: ((float*)buf)[col] = (float)(((uint32*)tif_buf)[col]); break; case SAMPLEFORMAT_INT: ((float*)buf)[col] = (float)(((long*)tif_buf)[col]); break; case SAMPLEFORMAT_IEEEFP: ((float*)buf)[col] = (float)(((float*)tif_buf)[col]); break; default: asfPrintError("Unexpected data type in TIFF file ...cannot write ASF-internal\n" "format file.\n"); break; } break; default: asfPrintError("Unexpected data type in TIFF file ...cannot write ASF-internal\n" 
"format file.\n"); break; } } put_band_float_line(fp, omd, band - num_ignored, (int)row, buf); } } else { asfPrintStatus(" Empty band found ...ignored\n"); num_ignored++; } FCLOSE(fp); } FREE(buf); FREE(outName); if (tif_buf) _TIFFfree(tif_buf); return 0; } void ReadScanline_from_TIFF_Strip(TIFF *tif, tdata_t buf, unsigned long row, int band) { int read_count; tiff_type_t t; tdata_t sbuf=NULL; tstrip_t strip; uint32 strip_row; // The row within the strip that contains the requested data row if (tif == NULL) { asfPrintError("TIFF file not open for read\n"); } get_tiff_type(tif, &t); uint32 strip_size = TIFFStripSize(tif); sbuf = _TIFFmalloc(strip_size); short planar_config; // TIFFTAG_PLANARCONFIG read_count = TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planar_config); if (read_count < 1) { asfPrintError("Cannot determine planar configuration from TIFF file.\n"); } short samples_per_pixel; read_count = TIFFGetField(tif, TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel); // Number of bands if (read_count < 1) { asfPrintError("Could not read the number of samples per pixel from TIFF file.\n"); } if (band < 0 || band > samples_per_pixel - 1) { asfPrintError("Invalid band number (%d). 
Band number should range from %d to %d.\n", 0, samples_per_pixel - 1); } uint32 height; read_count = TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &height); // Number of rows if (read_count < 1) { asfPrintError("Could not read the number of lines from TIFF file.\n"); } uint32 width; read_count = TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &width); // Number of pixels per row if (read_count < 1) { asfPrintError("Could not read the number of pixels per line from TIFF file.\n"); } short bits_per_sample; read_count = TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits_per_sample); if (read_count < 1) { asfPrintError("Could not read the bits per sample from TIFF file.\n"); } short sample_format; read_count = TIFFGetField(tif, TIFFTAG_SAMPLEFORMAT, &sample_format); // int or float, signed or unsigned if (read_count < 1) { switch(bits_per_sample) { case 8: sample_format = SAMPLEFORMAT_UINT; break; case 16: sample_format = SAMPLEFORMAT_INT; break; case 32: sample_format = SAMPLEFORMAT_IEEEFP; break; default: asfPrintError("Could not read the sample format (data type) from TIFF file.\n"); break; } } short orientation; read_count = TIFFGetField(tif, TIFFTAG_ORIENTATION, &orientation); // top-left, left-top, bot-right, etc if (read_count < 1) { orientation = ORIENTATION_TOPLEFT; read_count = 1; } if (read_count && orientation != ORIENTATION_TOPLEFT) { asfPrintError("Unsupported orientation found (%s)\n", orientation == ORIENTATION_TOPRIGHT ? "TOP RIGHT" : orientation == ORIENTATION_BOTRIGHT ? "BOTTOM RIGHT" : orientation == ORIENTATION_BOTLEFT ? "BOTTOM LEFT" : orientation == ORIENTATION_LEFTTOP ? "LEFT TOP" : orientation == ORIENTATION_RIGHTTOP ? "RIGHT TOP" : orientation == ORIENTATION_RIGHTBOT ? "RIGHT BOTTOM" : orientation == ORIENTATION_LEFTBOT ? "LEFT BOTTOM" : "UNKNOWN"); } // Check for valid row number if (row < 0 || row >= height) { asfPrintError("Invalid row number (%d) found. 
Valid range is 0 through %d\n", row, height - 1); } // Reading a contiguous RGB strip results in a strip (of several rows) with rgb data // in each row, but reading a strip from a file with separate color planes results in // a strip with just the one color in each strip (and row) strip = TIFFComputeStrip(tif, row, band); strip_row = row - (strip * t.rowsPerStrip); tsize_t stripSize = TIFFStripSize(tif); uint32 bytes_per_sample = (bits_per_sample / 8); // This returns a decoded strip which contains 1 or more rows. The index calculated // below needs to take the row into account ...the strip_row is the row within a strip // assuming the first row in a strip is '0'. tsize_t bytes_read = TIFFReadEncodedStrip(tif, strip, sbuf, (tsize_t) -1); if (read_count && bytes_read > 0) { uint32 col; uint32 idx = 0; for (col = 0; col < width && (idx * bytes_per_sample) < stripSize; col++) { // NOTE: t.scanlineSize is in bytes (not pixels) if (planar_config == PLANARCONFIG_SEPARATE) { idx = strip_row * (t.scanlineSize / bytes_per_sample) + col*samples_per_pixel; } else { // PLANARCONFIG_CONTIG idx = strip_row * (t.scanlineSize / bytes_per_sample) + col*samples_per_pixel + band; } if (idx * bytes_per_sample >= stripSize) continue; // Prevents over-run if last strip or scanline (within a strip) is not complete switch (bits_per_sample) { case 8: switch (sample_format) { case SAMPLEFORMAT_UINT: ((uint8*)buf)[col] = (uint8)(((uint8*)sbuf)[idx]); break; case SAMPLEFORMAT_INT: ((int8*)buf)[col] = (int8)(((int8*)sbuf)[idx]); break; default: asfPrintError("Unexpected data type in TIFF file\n"); break; } break; case 16: switch (sample_format) { case SAMPLEFORMAT_UINT: ((uint16*)buf)[col] = (uint16)(((uint16*)sbuf)[idx]); break; case SAMPLEFORMAT_INT: ((int16*)buf)[col] = (int16)(((int16*)sbuf)[idx]); break; default: asfPrintError("Unexpected data type in TIFF file\n"); break; } break; case 32: switch (sample_format) { case SAMPLEFORMAT_UINT: ((uint32*)buf)[col] = 
(uint32)(((uint32*)sbuf)[idx]); break; case SAMPLEFORMAT_INT: ((long*)buf)[col] = (long)(((long*)sbuf)[idx]); break; case SAMPLEFORMAT_IEEEFP: ((float*)buf)[col] = (float)(((float*)sbuf)[idx]); break; default: asfPrintError("Unexpected data type in TIFF file\n"); break; } break; default: asfPrintError("Usupported bits per sample found in TIFF file\n"); break; } } } if (sbuf) _TIFFfree(sbuf); } void ReadScanline_from_TIFF_TileRow(TIFF *tif, tdata_t buf, unsigned long row, int band) { int read_count; tiff_type_t t; tdata_t tbuf=NULL; if (tif == NULL) { asfPrintError("TIFF file not open for read\n"); } get_tiff_type(tif, &t); if (t.format != TILED_TIFF) { asfPrintError("Programmer error: ReadScanline_from_TIFF_TileRow() called when the TIFF file\n" "was not a tiled TIFF.\n"); } tsize_t tileSize = TIFFTileSize(tif); if (tileSize > 0) { tbuf = _TIFFmalloc(tileSize); if (tbuf == NULL) { asfPrintError("Unable to allocate tiled TIFF scanline buffer\n"); } } else { asfPrintError("Invalid TIFF tile size in tiled TIFF.\n"); } short planar_config; // TIFFTAG_PLANARCONFIG read_count = TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planar_config); if (read_count < 1) { asfPrintError("Cannot determine planar configuration from TIFF file.\n"); } short samples_per_pixel; read_count = TIFFGetField(tif, TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel); // Number of bands if (read_count < 1) { asfPrintError("Could not read the number of samples per pixel from TIFF file.\n"); } if (band < 0 || band > samples_per_pixel - 1) { asfPrintError("Invalid band number (%d). 
Band number should range from %d to %d.\n", 0, samples_per_pixel - 1); } uint32 height; read_count = TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &height); // Number of bands if (read_count < 1) { asfPrintError("Could not read the number of lines from TIFF file.\n"); } uint32 width; read_count = TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &width); // Number of bands if (read_count < 1) { asfPrintError("Could not read the number of pixels per line from TIFF file.\n"); } short bits_per_sample; read_count = TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits_per_sample); // Number of bands if (read_count < 1) { asfPrintError("Could not read the bits per sample from TIFF file.\n"); } short sample_format; read_count = TIFFGetField(tif, TIFFTAG_SAMPLEFORMAT, &sample_format); // Number of bands if (read_count < 1) { switch(bits_per_sample) { case 8: sample_format = SAMPLEFORMAT_UINT; break; case 16: sample_format = SAMPLEFORMAT_INT; break; case 32: sample_format = SAMPLEFORMAT_IEEEFP; break; default: asfPrintError("Could not read the sample format (data type) from TIFF file.\n"); break; } } short orientation; read_count = TIFFGetField(tif, TIFFTAG_ORIENTATION, &orientation); // top-left, left-top, bot-right, etc if (read_count < 1) { orientation = ORIENTATION_TOPLEFT; } if (read_count && orientation != ORIENTATION_TOPLEFT) { asfPrintError("Unsupported orientation found (%s)\n", orientation == ORIENTATION_TOPRIGHT ? "TOP RIGHT" : orientation == ORIENTATION_BOTRIGHT ? "BOTTOM RIGHT" : orientation == ORIENTATION_BOTLEFT ? "BOTTOM LEFT" : orientation == ORIENTATION_LEFTTOP ? "LEFT TOP" : orientation == ORIENTATION_RIGHTTOP ? "RIGHT TOP" : orientation == ORIENTATION_RIGHTBOT ? "RIGHT BOTTOM" : orientation == ORIENTATION_LEFTBOT ? "LEFT BOTTOM" : "UNKNOWN"); } // Check for valid row number if (row < 0 || row >= height) { asfPrintError("Invalid row number (%d) found. 
Valid range is 0 through %d\n", row, height - 1); } // Develop a buffer with a line of data from a single band in it // ttile_t tile; uint32 bytes_per_sample = bits_per_sample / 8; uint32 row_in_tile; if (width > 0 && height > 0 && samples_per_pixel > 0 && bits_per_sample % 8 == 0 && t.tileWidth > 0 && t.tileLength > 0) { uint32 tile_col; uint32 buf_col; uint32 bytes_read; for (tile_col = 0, buf_col = 0; tile_col < width; tile_col += t.tileWidth) { // NOTE: t.tileLength and t.tileWidth are in pixels (not bytes) // NOTE: TIFFReadTile() is a wrapper over TIFFComputeTile() and // TIFFReadEncodedTile() ...in other words, it automatically // takes into account whether the file has contigious (interlaced) // color bands or separate color planes, and automagically // decompresses the tile during the read. The return below, // is an uncompressed tile in raster format (row-order 2D array // in memory.) bytes_read = TIFFReadTile(tif, tbuf, tile_col, row, 0, band); uint32 num_preceding_tile_rows = floor(row / t.tileLength); row_in_tile = row - (num_preceding_tile_rows * t.tileLength); uint32 i; uint32 idx = 0; for (i = 0; i < t.tileWidth && buf_col < width && (idx * bytes_per_sample) < tileSize; i++) { if (planar_config == PLANARCONFIG_SEPARATE) { idx = row_in_tile * t.tileWidth + i; } else { // PLANARCONFIG_CONTIG idx = row_in_tile * (t.tileWidth * samples_per_pixel) + i * samples_per_pixel + band; } switch (bits_per_sample) { case 8: switch (sample_format) { case SAMPLEFORMAT_UINT: ((uint8*)buf)[buf_col] = ((uint8*)tbuf)[idx]; buf_col++; break; case SAMPLEFORMAT_INT: ((int8*)buf)[buf_col] = ((int8*)tbuf)[idx]; buf_col++; break; default: asfPrintError("Unexpected data type in TIFF file\n"); break; } break; case 16: switch (sample_format) { case SAMPLEFORMAT_UINT: ((uint16*)buf)[buf_col] = ((uint16*)tbuf)[idx]; buf_col++; break; case SAMPLEFORMAT_INT: ((int16*)buf)[buf_col] = ((int16*)tbuf)[idx]; buf_col++; break; default: asfPrintError("Unexpected data type in TIFF file\n"); 
break; } break; case 32: switch (sample_format) { case SAMPLEFORMAT_UINT: ((uint32*)buf)[buf_col] = ((uint32*)tbuf)[idx]; buf_col++; break; case SAMPLEFORMAT_INT: ((int32*)buf)[buf_col] = ((int32*)tbuf)[idx]; buf_col++; break; case SAMPLEFORMAT_IEEEFP: ((float*)buf)[buf_col] = ((float*)tbuf)[idx]; buf_col++; break; default: asfPrintError("Unexpected data type in TIFF file\n"); break; } break; default: asfPrintError("Usupported bits per sample found in TIFF file\n"); break; } } } } if (tbuf) _TIFFfree(tbuf); } int check_for_vintage_asf_utm_geotiff(const char *citation, int *geotiff_data_exists, short *model_type, short *raster_type, short *linear_units) { int ret=0; int zone=0, is_utm=0; datum_type_t datum=UNKNOWN_DATUM; char hem='\0'; if (citation && strstr(citation, "Alaska Satellite Facility")) { short pcs=0; is_utm = vintage_utm_citation_to_pcs(citation, &zone, &hem, &datum, &pcs); } if (is_utm && zone >=1 && zone <= 60 && (hem == 'N' || hem == 'S') && datum == WGS84_DATUM) { *model_type = ModelTypeProjected; *raster_type = RasterPixelIsArea; *linear_units = Linear_Meter; *geotiff_data_exists = 1; ret = 3; // As though the three geokeys were read successfully } return ret; } // Copied from libasf_import:keys.c ...Didn't want to introduce a dependency on // the export library or vice versa static int UTM_2_PCS(short *pcs, datum_type_t datum, unsigned long zone, char hem) { // The GeoTIFF standard defines the UTM zones numerically in a way that // let's us pick off the data mathematically (NNNzz where zz is the zone // number): // // For NAD83 datums, Zones 3N through 23N, NNN == 269 // For NAD27 datums, Zones 3N through 22N, NNN == 267 // For WGS72 datums, Zones 1N through 60N, NNN == 322 // For WGS72 datums, Zones 1S through 60S, NNN == 323 // For WGS84 datums, Zones 1N through 60N, NNN == 326 // For WGS84 datums, Zones 1S through 60S, NNN == 327 // For user-defined and unsupported UTM projections, NNN can be // a variety of other numbers (see the GeoTIFF 
Standard) // // NOTE: For NAD27 and NAD83, only the restricted range of zones // above is supported by the GeoTIFF standard. // // NOTE: For ALOS's ITRF97 datum, note that it is based on // WGS84 and subsituting WGS84 for ITRF97 because the GeoTIFF // standard does not contain a PCS for ITRF97 (or any ITRFxx) // will result in errors of less than one meter. So when // writing GeoTIFFs, we choose to use WGS84 when ITRF97 is // desired. // const short NNN_NAD27N = 267; const short NNN_NAD83N = 269; //const short NNN_WGS72N = 322; // Currently unsupported //const short NNN_WGS72S = 323; // Currently unsupported const short NNN_WGS84N = 326; const short NNN_WGS84S = 327; char uc_hem; int supportedUTM; int valid_Zone_and_Datum_and_Hemisphere; // Substitute WGS84 for ITRF97 per comment above if (datum == ITRF97_DATUM) { datum = WGS84_DATUM; } // Check for valid datum, hemisphere, and zone combination uc_hem = toupper(hem); valid_Zone_and_Datum_and_Hemisphere = ( (datum == NAD27_DATUM && uc_hem == 'N' && zone >= 3 && zone <= 22) || (datum == NAD83_DATUM && uc_hem == 'N' && zone >= 3 && zone <= 23) || (datum == WGS84_DATUM && zone >= 1 && zone <= 60) ) ? 
1 : 0; // Build the key for ProjectedCSTypeGeoKey, GCS_WGS84 etc if (valid_Zone_and_Datum_and_Hemisphere) { supportedUTM = 1; switch (datum) { case NAD27_DATUM: *pcs = (short)zone + NNN_NAD27N * 100; break; case NAD83_DATUM: *pcs = (short)zone + NNN_NAD83N * 100; break; case WGS84_DATUM: if (uc_hem == 'N') { *pcs = (short)zone + NNN_WGS84N * 100; } else { *pcs = (short)zone + NNN_WGS84S * 100; } break; default: supportedUTM = 0; *pcs = 0; break; } } else { supportedUTM = 0; *pcs = 0; } return supportedUTM; } // If the UTM description is in citation, then pick the data out and return it int vintage_utm_citation_to_pcs(const char *citation, int *zone, char *hem, datum_type_t *datum, short *pcs) { int is_utm=0; int found_zone=0, found_utm=0; *zone=0; *hem='\0'; *datum=UNKNOWN_DATUM; *pcs=0; if (citation && strstr(citation, "Alaska Satellite Facility")) { char *s = STRDUP(citation); char *tokp; tokp = strtok(s, " "); do { if (strncmp(uc(tokp),"UTM",3) == 0) { found_utm = 1; found_zone=0; } else if (strncmp(uc(tokp),"ZONE",1) == 0) { if (*zone == 0) found_zone=1; } else if (found_zone && isdigit((int)*tokp)) { *zone = (int)strtol(tokp,(char**)NULL,10); found_zone=0; } else if (strlen(tokp) == 1 && (*(uc(tokp)) == 'N' || *(uc(tokp)) == 'S')) { *hem = *(uc(tokp)) == 'N' ? 
'N' : 'S'; found_zone=0; } else if (strncmp(uc(tokp),"WGS84",5) == 0) { *datum = WGS84_DATUM; found_zone=0; } } while (tokp && (tokp = strtok(NULL, " "))); if (s) FREE (s); } if (found_utm && *zone >=1 && *zone <= 60 && (*hem == 'N' || *hem == 'S') && *datum == WGS84_DATUM) { is_utm=1; UTM_2_PCS(pcs, *datum, *zone, *hem); } else { is_utm = 0; *zone=0; *hem='\0'; *datum=UNKNOWN_DATUM; *pcs=0; } return is_utm; } void classify_geotiff(GTIF *input_gtif, short *model_type, short *raster_type, short *linear_units, short *angular_units, int *geographic_geotiff, int *geocentric_geotiff, int *map_projected_geotiff, int *geotiff_data_exists) { int read_count, vintage_asf_utm; char *citation = NULL; int citation_length; int typeSize; tagtype_t citation_type; ////// Defaults ////// *model_type = *raster_type = *linear_units = *angular_units = -1; // Invalid value *geographic_geotiff = *geocentric_geotiff = *map_projected_geotiff = *geotiff_data_exists = 0; // Fails //////////////////////////////////////////////////////////////////////////////////////// // Check for a vintage ASF type of geotiff (all projection info is in the citation, and the // normal projection geokeys are left unpopulated) citation_length = GTIFKeyInfo(input_gtif, GTCitationGeoKey, &typeSize, &citation_type); if (citation_length > 0) { citation = MALLOC ((citation_length) * typeSize); GTIFKeyGet (input_gtif, GTCitationGeoKey, citation, 0, citation_length); } else { citation_length = GTIFKeyInfo(input_gtif, PCSCitationGeoKey, &typeSize, &citation_type); if (citation_length > 0) { citation = MALLOC ((citation_length) * typeSize); GTIFKeyGet (input_gtif, PCSCitationGeoKey, citation, 0, citation_length); } } if (citation != NULL && strlen(citation) > 0) { vintage_asf_utm = check_for_vintage_asf_utm_geotiff(citation, geotiff_data_exists, model_type, raster_type, linear_units); if (vintage_asf_utm) { // Found a vintage ASF UTM geotiff *geographic_geotiff = *geocentric_geotiff = 0; *map_projected_geotiff = 
*geotiff_data_exists = 1; return; } } FREE(citation); //////////////////////////////////////////////////////////////////////////////////////// // Check for other types of geotiffs... // // Read the basic (normally required) classification parameters ...bail if we hit any // unsupported types int m = 0, r = 0, l = 0, a = 0; m = GTIFKeyGet (input_gtif, GTModelTypeGeoKey, model_type, 0, 1); if (m && *model_type == ModelTypeGeocentric) { asfPrintError("Geocentric (x, y, z) GeoTIFFs are unsupported (so far.)\n"); } if (m && *model_type != ModelTypeProjected && *model_type != ModelTypeGeographic) { asfPrintError("Unrecognized type of GeoTIFF encountered. Must be map-projected\n" "or geogaphic (lat/long)\n"); } r = GTIFKeyGet (input_gtif, GTRasterTypeGeoKey, raster_type, 0, 0); if (r && *raster_type != RasterPixelIsArea) { asfPrintWarning("GeoTIFFs with 'point' type raster pixels are unsupported (so far.)\nContinuing, however geolocations may be off by up to a pixel.\n"); } if (m && *model_type == ModelTypeProjected) { l = GTIFKeyGet (input_gtif, ProjLinearUnitsGeoKey, linear_units, 0, 1); } if (m && *model_type == ModelTypeGeographic) { a = GTIFKeyGet (input_gtif, GeogAngularUnitsGeoKey, angular_units, 0, 1); } if (a && *angular_units != Angular_Arc_Second && *angular_units != Angular_Degree) { // Temporarily choose not to support arcsec geotiffs ...needs more testing asfPrintError("Found a Geographic (lat/lon) GeoTIFF with an unsupported type of angular\n" "units (%s) in it.\n", angular_units_to_string(*angular_units)); } if (l && (*linear_units != Linear_Meter && *linear_units != Linear_Foot && *linear_units != Linear_Foot_US_Survey && *linear_units != Linear_Foot_Modified_American && *linear_units != Linear_Foot_Clarke && *linear_units != Linear_Foot_Indian)) { // Linear units was populated but wasn't a supported type... 
    asfPrintError("Found a map-projected GeoTIFF with an unsupported type of linear\n"
        "units (%s) in it.\n", linear_units_to_string(*linear_units));
  }
  // Each of m, r, l, a is 1 when its geokey was read; at most one of l/a can
  // be set, so read_count is the number of classification keys we have (0-3).
  read_count = m + r + l + a;

  //////////////////////////////////////////////////////////////////////////////////////////
  // Attempt to classify the geotiff as a geographic, geocentric, or map-projected geotiff
  // and try to fill in missing information if necessary

  // Case: 3 valid keys found
  if (read_count == 3) {
    // Check for map-projected geotiff
    if (m && *model_type == ModelTypeProjected) {
      ////////
      // GeoTIFF is map-projected
      if (!r || (r && *raster_type != RasterPixelIsArea && *raster_type != RasterPixelIsPoint)) {
        asfPrintWarning("Invalid raster type found.\n"
            "Guessing RasterPixelIsArea and continuing...\n");
        r = 1;
        *raster_type = RasterPixelIsArea;
      }
      if (r && *raster_type != RasterPixelIsArea) {
        asfPrintError("Only map-projected GeoTIFFs with pixels that represent area are supported.");
      }
      // Angular units on a projected model: drop them and assume meters
      if (a && !l) {
        asfPrintWarning("Invalid map-projected GeoTIFF found ...angular units set to %s and\n"
            "linear units were not set.  Guessing Linear_Meter units and continuing...\n",
            angular_units_to_string(*angular_units));
        l = 1;
        *linear_units = Linear_Meter;
        a = 0;
        *angular_units = -1;
      }
      if (l && (*linear_units == Linear_Meter ||
                *linear_units == Linear_Foot ||
                *linear_units == Linear_Foot_US_Survey ||
                *linear_units == Linear_Foot_Modified_American ||
                *linear_units == Linear_Foot_Clarke ||
                *linear_units == Linear_Foot_Indian)) {
        // Successful classification: map-projected
        *geographic_geotiff = *geocentric_geotiff = 0;
        *map_projected_geotiff = *geotiff_data_exists = 1;
        return;
      }
      else {
        asfPrintError("Only map-projected GeoTIFFs with linear meters or with a linear foot unit are supported.\n");
      }
    }
    else if (m && *model_type == ModelTypeGeographic) {
      ////////
      // GeoTIFF is geographic (lat/long, degrees or arc-seconds (typ))
      // Ignore *raster_type ...it might be set to 'area', but that would be meaningless
      // *raster_type has no meaning in a lat/long GeoTIFF
      // Linear units on a geographic model: drop them and assume degrees
      if (l && !a) {
        asfPrintWarning("Invalid Geographic (lat/lon) GeoTIFF found ...linear units set to %s and\n"
            "angular units were not set.  Guessing Angular_Degree units and continuing...\n",
            linear_units_to_string(*linear_units));
        a = 1;
        *angular_units = Angular_Degree;
        l = 0;
        *linear_units = -1;
      }
      if (a && (*angular_units == Angular_Degree || *angular_units == Angular_Arc_Second)) {
        // Successful classification: geographic
        *geographic_geotiff = *geotiff_data_exists = 1;
        *map_projected_geotiff = *geocentric_geotiff = 0;
        return;
      }
      else {
        asfPrintError("Found Geographic GeoTIFF with invalid or unsupported angular units (%s)\n"
            "Only geographic GeoTIFFs with angular degrees are supported.\n",
            angular_units_to_string(*angular_units));
      }
    }
    else {
      // Should not get here
      asfPrintError("Invalid or unsupported model type\n");
    }
  }
  // Case: 2 valid keys found, 1 key missing
  else if (read_count == 2) {
    // Only found 2 of 3 necessary parameters ...let's try to guess the 3rd
    if (*model_type != ModelTypeProjected && *model_type != ModelTypeGeographic) {
      // The model type is unknown, raster_type and linear_units are both known and
      // valid for their types
      if (*raster_type == RasterPixelIsArea && *linear_units == Linear_Meter) {
        // Guess map-projected
        asfPrintWarning("Missing model type definition in GeoTIFF.  GeoTIFF contains area-type\n"
            "pixels and linear meters ...guessing the GeoTIFF is map-projected and\n"
            "attempting to continue...\n");
        *model_type = ModelTypeProjected;
        *geographic_geotiff = *geocentric_geotiff = 0;
        *map_projected_geotiff = *geotiff_data_exists = 1;
        return;
      }
      else if (*angular_units == Angular_Degree || *angular_units == Angular_Arc_Second) {
        // Guess geographic
        asfPrintWarning("Missing model type definition in GeoTIFF.  GeoTIFF contains angular\n"
            "units ...guessing the GeoTIFF is geographic (lat/long) and\n"
            "attempting to continue...\n");
        *model_type = ModelTypeGeographic;
        *geographic_geotiff = *geotiff_data_exists = 1;
        *map_projected_geotiff = *geocentric_geotiff = 0;
        return;
      }
      else {
        asfPrintError("Found unsupported type of GeoTIFF or a GeoTIFF with too many missing keys.\n");
      }
    } // End of guessing because the ModelType was unknown - Check unknown raster_type case
    else if (*raster_type != RasterPixelIsArea && *raster_type != RasterPixelIsPoint) {
      // Raster type is missing ...let's take a guess.  Model type and linear
      // units are both known and valid for their types
      if (*model_type == ModelTypeProjected) {
        if (*linear_units != Linear_Meter) {
          asfPrintError("Only meters are supported for map-projected GeoTIFFs\n");
        }
        // Guess pixel type is area
        asfPrintWarning("Missing raster type in GeoTIFF, but since the GeoTIFF is map-projected,\n"
            "guessing RasterPixelIsArea and attempting to continue...\n");
        *raster_type = RasterPixelIsArea;
        *geographic_geotiff = *geocentric_geotiff = 0;
        *map_projected_geotiff = *geotiff_data_exists = 1;
        return;
      }
      else if (*model_type == ModelTypeGeographic) {
        // Guess pixel type is area
        if (*angular_units != Angular_Degree && *angular_units != Angular_Arc_Second) {
          asfPrintError("Only angular degrees are supported for geographic GeoTIFFs\n");
        }
        *geographic_geotiff = *geotiff_data_exists = 1;
        *map_projected_geotiff = *geocentric_geotiff = 0;
        return;
      }
      else {
        asfPrintError("Found geocentric (x, y, z) type of GeoTIFF ...currently unsupported.\n");
      }
    } // End of guessing because the RasterType was unknown
    else if (*linear_units != Linear_Meter && *angular_units != Angular_Degree && *angular_units != Angular_Arc_Second) {
      // Pixel unit type is missing ...let's take a guess.  Model type and raster type are
      // known and valid for their types
      if (*model_type == ModelTypeProjected) {
        if (*raster_type != RasterPixelIsArea) {
          asfPrintError("Map projected GeoTIFFs with pixels that represent something\n"
              "other than area (meters etc) are not supported.\n");
        }
        // Looks like a valid map projection.  Guess linear meters for the units
        asfPrintWarning("Missing linear units in GeoTIFF.  The GeoTIFF is map-projected and\n"
            "pixels represent area.  Guessing linear meters for the units and attempting\n"
            "to continue...\n");
        *linear_units = Linear_Meter;
        *angular_units = -1;
        *geographic_geotiff = *geocentric_geotiff = 0;
        *map_projected_geotiff = *geotiff_data_exists = 1;
        return;
      }
      else if (*model_type == ModelTypeGeographic) {
        // Looks like a valid geographic (lat/long) geotiff
        asfPrintWarning("Found geographic type GeoTIFF with missing linear units setting.\n"
            "Guessing angular degrees and attempting to continue...\n");
        *angular_units = Angular_Degree;
        *linear_units = -1;
        *geographic_geotiff = *geotiff_data_exists = 1;
        *map_projected_geotiff = *geocentric_geotiff = 0;
        return;
      }
      else {
        asfPrintError("Found geocentric (x, y, z) GeoTIFF... Geographic GeoTIFFs are\n"
            "unsupported at this time.\n");
      }
    }
  }
  // Case: 1 valid key found, 2 keys missing
  else if (read_count == 1) {
    // Only found 1 of 3 necessary parameters ...let's try to guess the other 2 (dangerous ground!)
    if (*model_type == ModelTypeProjected) {
      // Only the model type is known ...guess the rest
      asfPrintWarning("Both the raster type and linear units is missing in the GeoTIFF.  The model\n"
          "type is map-projected, so guessing that the raster type is RasterPixelIsArea and\n"
          "that the linear units are in meters ...attempting to continue\n");
      *raster_type = RasterPixelIsArea;
      *linear_units = Linear_Meter;
      *angular_units = -1;
      *geographic_geotiff = *geocentric_geotiff = 0;
      *map_projected_geotiff = *geotiff_data_exists = 1;
      return;
    }
    else if (*model_type == ModelTypeGeographic) {
      // Only the model type is known ...guess the rest
      asfPrintWarning("Both the raster type and linear units is missing in the GeoTIFF.  The model\n"
          "type is geographic (lat/long), so guessing that the angular units are in decimal\n"
          "degrees ...attempting to continue\n");
      *angular_units = Angular_Degree;
      *linear_units = -1;
      *geographic_geotiff = *geotiff_data_exists = 1;
      *map_projected_geotiff = *geocentric_geotiff = 0;
      return;
    }
    else if (*model_type == ModelTypeGeocentric) {
      asfPrintError("Geocentric (x, y, z) GeoTIFFs are not supported (yet.)\n");
    }
    else if (*raster_type == RasterPixelIsArea) {
      // Only the raster type is known ...guess the rest
      asfPrintWarning("Both the model type and linear units is missing in the GeoTIFF.  The raster\n"
          "type is RasterPixelIsArea, so guessing that the model type is map-projected and\n"
          "that the linear units are in meters ...attempting to continue\n");
      *model_type = ModelTypeProjected;
      *linear_units = Linear_Meter;
      *angular_units = -1;
      *geographic_geotiff = *geocentric_geotiff = 0;
      *map_projected_geotiff = *geotiff_data_exists = 1;
      return;
    }
    else if (*raster_type == RasterPixelIsPoint) {
      // Only the raster type is known, but cannot guess the rest... bail.
      asfPrintError("Found invalid or unsupported GeoTIFF.  Raster type is 'point' rather than\n"
          "area.  The model type (map projected, geographic, geocentric) is unknown.\n"
          "And the linear units are unknown.  Cannot guess what type of GeoTIFF this\n"
          "is.  Aborting.\n");
    }
    else if (*linear_units == Linear_Meter) {
      // Only linear units is known and it's meters.  Guess map projected and pixels are
      // area pixels.
      asfPrintWarning("Found GeoTIFF with undefined model and raster type.  Linear units\n"
          "is defined to be meters.  Guessing that the GeoTIFF is map-projected and\n"
          "that pixels represent area.  Attempting to continue...\n");
      *model_type = ModelTypeProjected;
      *raster_type = RasterPixelIsArea;
      *geographic_geotiff = *geocentric_geotiff = 0;
      *map_projected_geotiff = *geotiff_data_exists = 1;
      return;
    }
    else if (*angular_units == Angular_Degree) {
      // Only linear units is known and it's angular degrees.  Guess geographic and pixels
      // type is 'who cares'
      asfPrintWarning("Found GeoTIFF with undefined model and raster type.  Linear units\n"
          "is defined to be angular degrees.  Guessing that the GeoTIFF is geographic.\n"
          "Attempting to continue...\n");
      *model_type = ModelTypeGeographic;
      *geographic_geotiff = *geotiff_data_exists = 1;
      *map_projected_geotiff = *geocentric_geotiff = 0;
      return;
    }
    else if (*angular_units == Angular_Arc_Second) {
      // Same as the Angular_Degree branch above, for arc-seconds (the warning
      // text below says "degrees" -- copy/paste; the *logic* handles arcsec)
      asfPrintWarning("Found GeoTIFF with undefined model and raster type.  Linear units\n"
          "is defined to be angular degrees.  Guessing that the GeoTIFF is geographic.\n"
          "Attempting to continue...\n");
      *model_type = ModelTypeGeographic;
      *linear_units = -1;
      *geographic_geotiff = *geotiff_data_exists = 1;
      *map_projected_geotiff = *geocentric_geotiff = 0;
      return;
    }
    else {
      asfPrintError("Found unsupported or invalid GeoTIFF.  Model type and raster type\n"
          "is undefined, and linear units are either undefined or of an unsupported\n"
          "type.  Aborting...\n");
    }
  }
  // Case: No valid keys found
  else {
    // All classification parameters are missing!
*geographic_geotiff = *geocentric_geotiff = 0; *map_projected_geotiff = *geotiff_data_exists = 0; return; } } int check_for_datum_in_string(const char *citation, datum_type_t *datum) { int ret = 0; // not found return ret; } int check_for_ellipse_definition_in_geotiff(GTIF *input_gtif, spheroid_type_t *spheroid) { int ret = 0; // failure return ret; } char *angular_units_to_string(short angular_units) { return (angular_units == Angular_Radian) ? "Angular_Radian" : (angular_units == Angular_Degree) ? "Angular_Degree" : (angular_units == Angular_Arc_Minute) ? "Angular_Arc_Minute" : (angular_units == Angular_Arc_Second) ? "Angular_Arc_Second" : (angular_units == Angular_Grad) ? "Angular_Grad" : (angular_units == Angular_Gon) ? "Angular_Gon" : (angular_units == Angular_DMS) ? "Angular_DMS" : (angular_units == Angular_DMS_Hemisphere) ? "Angular_DMS_Hemisphere" : "Unrecognized unit"; } char *linear_units_to_string(short linear_units) { return (linear_units == Linear_Foot) ? "Linear_Foot" : (linear_units == Linear_Foot_US_Survey) ? "Linear_Foot_US_Survey" : (linear_units == Linear_Foot_Modified_American) ? "Linear_Foot_Modified_American" : (linear_units == Linear_Foot_Clarke) ? "Linear_Foot_Clarke" : (linear_units == Linear_Foot_Indian) ? "Linear_Foot_Indian" : (linear_units == Linear_Link) ? "Linear_Link" : (linear_units == Linear_Link_Benoit) ? "Linear_Link_Benoit" : (linear_units == Linear_Link_Sears) ? "Linear_Link_Sears" : (linear_units == Linear_Chain_Benoit) ? "Linear_Chain_Benoit" : (linear_units == Linear_Chain_Sears) ? "Linear_Chain_Sears" : (linear_units == Linear_Yard_Sears) ? "Linear_Yard_Sears" : (linear_units == Linear_Yard_Indian) ? "Linear_Yard_Indian" : (linear_units == Linear_Fathom) ? "Linear_Fathom" : (linear_units == Linear_Mile_International_Nautical) ? 
"Linear_Mile_International_Nautical" :
      "Unrecognized unit";
}

// Returns (via *look_up_table) a freshly MALLOC'd 256-byte string naming the
// look-up table to apply.  No table detection is implemented here, so the
// result is always "UNKNOWN"; 'citation' is accepted but not consulted.
// The caller owns (and must free) the returned buffer.
void get_look_up_table_name(char *citation, char **look_up_table)
{
  char *lut_name = (char *)MALLOC(256 * sizeof(char));
  strcpy(lut_name, "UNKNOWN");
  *look_up_table = lut_name;
}
{ "alphanum_fraction": 0.5794700347, "avg_line_length": 42.9460249934, "ext": "c", "hexsha": "004e54c3fad96da70085d111d6a24d639c924375", "lang": "C", "max_forks_count": 7, "max_forks_repo_forks_event_max_datetime": "2020-05-15T08:01:09.000Z", "max_forks_repo_forks_event_min_datetime": "2017-04-26T18:18:33.000Z", "max_forks_repo_head_hexsha": "c9065400a64c87be46418ab32e3a251ca2f55fd5", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "glshort/MapReady", "max_forks_repo_path": "src/libasf_import/import_generic_geotiff.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "c9065400a64c87be46418ab32e3a251ca2f55fd5", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "glshort/MapReady", "max_issues_repo_path": "src/libasf_import/import_generic_geotiff.c", "max_line_length": 159, "max_stars_count": 3, "max_stars_repo_head_hexsha": "c9065400a64c87be46418ab32e3a251ca2f55fd5", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "glshort/MapReady", "max_stars_repo_path": "src/libasf_import/import_generic_geotiff.c", "max_stars_repo_stars_event_max_datetime": "2021-07-28T01:51:22.000Z", "max_stars_repo_stars_event_min_datetime": "2017-12-31T05:33:28.000Z", "num_tokens": 41451, "size": 161520 }
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_sort.h>
#include <gsl/gsl_roots.h>
#include <gsl/gsl_sf_bessel.h>

#include "cosmocalc.h"
#include "weaklens.h"

// Lower comoving-distance cutoff for the line-of-sight integration.
// File-scope global because the GSL integrand callback below only receives a
// single void* params slot (already used for the lensPowerSpectra handle);
// set from lps->chiLim in comp_lens_power_spectrum() before each table build.
double chiLim;

static void comp_lens_power_spectrum(lensPowerSpectra lps);

// Evaluates the lensing (convergence) power spectrum at multipole 'ell' from
// a cached spline.  The spline is (re)built lazily on first use or whenever
// the cosmology (cosmoData.cosmoNum) or weak-lensing parameter set
// (wlData.wlNum) revision counters have changed since the last build.
double lens_power_spectrum(double ell, lensPowerSpectra lps)
{
  if(lps->initFlag == 1 || lps->currCosmoNum != cosmoData.cosmoNum || lps->currWLNum != wlData.wlNum)
    {
      lps->initFlag = 0;
      lps->currCosmoNum = cosmoData.cosmoNum;
      lps->currWLNum = wlData.wlNum;
      comp_lens_power_spectrum(lps);
    }

  // Table is stored in log-log space, so exp() the interpolated value
  return exp(gsl_spline_eval(lps->spline,log(ell),lps->accel));
}

// Thin indirection over the nonlinear matter power spectrum; gives this file
// a single point to swap in a different P(k) if ever needed.
double nonlinear_powspec_for_lens(double k, double a)
{
  return nonlinear_powspec(k,a);
}

// Lensing efficiency kernel for a source at comoving distance 'chis',
// evaluated at lens distance 'chi'.  Zero beyond the source plane.
// The (100/CSOL)^2 factor is presumably (H0/c)^2 in the code's h-units --
// TODO(review): confirm against cosmocalc.h conventions.
static double lenskern(double chi, double chis)
{
  if(chi > chis)
    return 0.0;
  else
    return 1.5*cosmoData.OmegaM*(100.0/CSOL)*(100.0/CSOL)/acomvdist(chi)*(chis-chi)/chis;
}

// Limber-approximation integrand for the cross power of the two source
// planes: product of the two kernels times P_nl(ell/chi) plus 'sn' (a
// constant added to the 3-D power -- appears to be a shot-noise term;
// note lps->sn must be set by the caller before use).
// Cut off below the global chiLim and at chi == 0 (kernel diverges there).
static double lenspk_integrand(double chi, void *p)
{
  lensPowerSpectra lps = (lensPowerSpectra) p;
  double sn = lps->sn;
  if(chi == 0.0 || chi < chiLim)
    return 0.0;
  else
    return lenskern(chi,lps->chis1)*lenskern(chi,lps->chis2)*(nonlinear_powspec_for_lens(lps->ell/chi,acomvdist(chi)) + sn);
}

// Builds the log(ell) -> log(P_kappa(ell)) lookup table by adaptive
// quadrature of lenspk_integrand over the line of sight, then (re)creates the
// Akima spline used by lens_power_spectrum().
static void comp_lens_power_spectrum(lensPowerSpectra lps)
{
#define WORKSPACE_NUM 100000
#define ABSERR 1e-12
#define RELERR 1e-12
#define TABLE_LENGTH 1000

  gsl_integration_workspace *workspace;
  gsl_function F;
  double result,abserr;
  double logltab[TABLE_LENGTH];
  double logpkltab[TABLE_LENGTH];
  double chimax;
  int i;

  //fill in bin information
  // Publish the cutoff to the file-scope global read by the integrand
  chiLim = lps->chiLim;
  // Integrate out to the farther of the two source planes
  if(lps->chis1 > lps->chis2)
    chimax = lps->chis1;
  else
    chimax = lps->chis2;

  fprintf(stderr,"doing lens pk - chiLim = %lf, chiMax = %lf\n",chiLim,chimax);

  //init
  workspace = gsl_integration_workspace_alloc((size_t) WORKSPACE_NUM);
  F.function = &lenspk_integrand;
  F.params = lps;

  //make table: TABLE_LENGTH ell values spaced evenly in log(ell)
  double lnlmin = log(wlData.lmin);
  double lnlmax = log(wlData.lmax);
  for(i=0;i<TABLE_LENGTH;++i)
    {
      logltab[i] = i*(lnlmax-lnlmin)/(TABLE_LENGTH-1) + lnlmin;
      // ell is passed to the integrand through the lps struct
      lps->ell = exp(logltab[i]);
      gsl_integration_qag(&F,0.0,chimax,ABSERR,RELERR,(size_t) WORKSPACE_NUM,GSL_INTEG_GAUSS51,workspace,&result,&abserr);
      // NOTE(review): log(result) yields NaN/-inf if the integral is <= 0
      logpkltab[i] = log(result);
    }

  //free
  gsl_integration_workspace_free(workspace);

  //init splines and accels (free any previous spline; reset or create accel)
  if(lps->spline != NULL)
    gsl_spline_free(lps->spline);
  lps->spline = gsl_spline_alloc(gsl_interp_akima,(size_t) (TABLE_LENGTH));
  gsl_spline_init(lps->spline,logltab,logpkltab,(size_t) (TABLE_LENGTH));
  if(lps->accel != NULL)
    gsl_interp_accel_reset(lps->accel);
  else
    lps->accel = gsl_interp_accel_alloc();

#undef TABLE_LENGTH
#undef ABSERR
#undef RELERR
#undef WORKSPACE_NUM
}

// Allocates a lensPowerSpectra handle for source redshifts zs1/zs2 and
// precomputes their comoving distances.  The spline is built lazily on first
// evaluation (initFlag == 1).
// NOTE(review): chiLim and sn are left uninitialized here -- callers must set
// lps->chiLim (and lps->sn) before the first lens_power_spectrum() call.
lensPowerSpectra init_lens_power_spectrum(double zs1, double zs2)
{
  lensPowerSpectra lps;

  lps = (lensPowerSpectra)malloc(sizeof(_lensPowerSpectra));
  assert(lps != NULL);

  lps->initFlag = 1;
  lps->zs1 = zs1;
  lps->zs2 = zs2;
  // comvdist takes the scale factor a = 1/(1+z)
  lps->chis1 = comvdist(1.0/(1.0 + zs1));
  lps->chis2 = comvdist(1.0/(1.0 + zs2));
  lps->spline = NULL;
  lps->accel = NULL;

  return lps;
}

// Releases the spline, accelerator, and the handle itself.
void free_lens_power_spectrum(lensPowerSpectra lps)
{
  if(lps->spline != NULL)
    gsl_spline_free(lps->spline);
  if(lps->accel != NULL)
    gsl_interp_accel_free(lps->accel);
  free(lps);
}

////////////////////////////////////////
// corr. funcs!
//////////////////////////////////////

/* Integrand for xi_plus(theta): ell/(2*pi) * P(ell) * J0(ell*theta).
 * theta appears to be in arcminutes, converted to radians via
 * /60/180*pi — TODO confirm against callers. */
static double lenscfp_integrand(double ell, void *p)
{
  lensCorrFunc lcf = (lensCorrFunc) p;
  return ell/2.0/M_PI*lens_power_spectrum(ell,lcf->lps)*gsl_sf_bessel_J0(ell*lcf->theta/60.0/180.0*M_PI);
}

/* Integrand for xi_minus(theta): same as above but with the 4th-order
 * Bessel function J4. */
static double lenscfm_integrand(double ell, void *p)
{
  lensCorrFunc lcf = (lensCorrFunc) p;
  return ell/2.0/M_PI*lens_power_spectrum(ell,lcf->lps)*gsl_sf_bessel_Jn(4,ell*lcf->theta/60.0/180.0*M_PI);
}

/* Tabulate log xi_plus and log xi_minus on a log-spaced theta grid in
 * [wlData.tmin, wlData.tmax] by QAG integration over ell in
 * [wlData.lmin, wlData.lmax], then (re)build the Akima splines and
 * accelerators stored on lcf.  Safe to call repeatedly on the same lcf. */
static void comp_lens_corr_funcs(lensCorrFunc lcf)
{
#define WORKSPACE_NUM 100000
#define ABSERR 1e-12
#define RELERR 1e-12
#define TABLE_LENGTH 1000

  gsl_integration_workspace *workspace;
  gsl_function F;
  double result,abserr;
  double logttab[TABLE_LENGTH];
  double logcfptab[TABLE_LENGTH];
  double logcfmtab[TABLE_LENGTH];
  int i;
  double lntmin;
  double lntmax;

  //init
  workspace = gsl_integration_workspace_alloc((size_t) WORKSPACE_NUM);
  F.params = lcf;
  lntmin = log(wlData.tmin);
  lntmax = log(wlData.tmax);

  //make tables: first pass fills xi_plus, second pass xi_minus
  F.function = &lenscfp_integrand;
  for(i=0;i<TABLE_LENGTH;++i)
    {
      logttab[i] = i*(lntmax-lntmin)/(TABLE_LENGTH-1) + lntmin;
      lcf->theta = exp(logttab[i]);  /* integrand reads theta from lcf */
      gsl_integration_qag(&F,wlData.lmin,wlData.lmax,ABSERR,RELERR,(size_t) WORKSPACE_NUM,GSL_INTEG_GAUSS51,workspace,&result,&abserr);
      logcfptab[i] = log(result);  /* NOTE(review): assumes result > 0 */
    }

  F.function = &lenscfm_integrand;
  for(i=0;i<TABLE_LENGTH;++i)
    {
      logttab[i] = i*(lntmax-lntmin)/(TABLE_LENGTH-1) + lntmin;
      lcf->theta = exp(logttab[i]);
      gsl_integration_qag(&F,wlData.lmin,wlData.lmax,ABSERR,RELERR,(size_t) WORKSPACE_NUM,GSL_INTEG_GAUSS51,workspace,&result,&abserr);
      logcfmtab[i] = log(result);
    }

  //free
  gsl_integration_workspace_free(workspace);

  //init splines and accels (replace any previous tables)
  if(lcf->splineP != NULL)
    gsl_spline_free(lcf->splineP);
  lcf->splineP = gsl_spline_alloc(gsl_interp_akima,(size_t) (TABLE_LENGTH));
  gsl_spline_init(lcf->splineP,logttab,logcfptab,(size_t) (TABLE_LENGTH));
  if(lcf->accelP != NULL)
    gsl_interp_accel_reset(lcf->accelP);
  else
    lcf->accelP = gsl_interp_accel_alloc();

  if(lcf->splineM != NULL)
    gsl_spline_free(lcf->splineM);
  lcf->splineM = gsl_spline_alloc(gsl_interp_akima,(size_t) (TABLE_LENGTH));
  gsl_spline_init(lcf->splineM,logttab,logcfmtab,(size_t) (TABLE_LENGTH));
  if(lcf->accelM != NULL)
    gsl_interp_accel_reset(lcf->accelM);
  else
    lcf->accelM = gsl_interp_accel_alloc();

#undef TABLE_LENGTH
#undef ABSERR
#undef RELERR
#undef WORKSPACE_NUM
}

/* Evaluate xi_minus(theta); lazily rebuilds the tables when the cached
 * cosmology or weak-lensing parameter set has changed. */
double lens_corr_func_minus(double theta, lensCorrFunc lcf)
{
  if(lcf->initFlag == 1 || lcf->currCosmoNum != cosmoData.cosmoNum || lcf->currWLNum != wlData.wlNum)
    {
      lcf->initFlag = 0;
      lcf->currCosmoNum = cosmoData.cosmoNum;
      lcf->currWLNum = wlData.wlNum;
      comp_lens_corr_funcs(lcf);
    }

  return exp(gsl_spline_eval(lcf->splineM,log(theta),lcf->accelM));
}

/* Evaluate xi_plus(theta); same lazy-rebuild behavior as above. */
double lens_corr_func_plus(double theta, lensCorrFunc lcf)
{
  if(lcf->initFlag == 1 || lcf->currCosmoNum != cosmoData.cosmoNum || lcf->currWLNum != wlData.wlNum)
    {
      lcf->initFlag = 0;
      lcf->currCosmoNum = cosmoData.cosmoNum;
      lcf->currWLNum = wlData.wlNum;
      comp_lens_corr_funcs(lcf);
    }

  return exp(gsl_spline_eval(lcf->splineP,log(theta),lcf->accelP));
}

/* Allocate a lensCorrFunc bound to an existing lensPowerSpectra (not
 * owned — caller must keep lps alive and free it separately).
 * Splines/accelerators are created lazily on first evaluation. */
lensCorrFunc init_lens_corr_func(lensPowerSpectra lps)
{
  lensCorrFunc lcf;

  lcf = (lensCorrFunc)malloc(sizeof(_lensCorrFunc));
  assert(lcf != NULL);

  lcf->initFlag = 1;  /* force table build on first use */
  lcf->lps = lps;
  lcf->splineM = NULL;
  lcf->accelM = NULL;
  lcf->splineP = NULL;
  lcf->accelP = NULL;

  return lcf;
}

/* Release a lensCorrFunc and its GSL splines/accelerators, if present.
 * Does NOT free the associated lensPowerSpectra. */
void free_lens_corr_func(lensCorrFunc lcf)
{
  if(lcf->splineM != NULL)
    gsl_spline_free(lcf->splineM);
  if(lcf->accelM != NULL)
    gsl_interp_accel_free(lcf->accelM);
  if(lcf->splineP != NULL)
    gsl_spline_free(lcf->splineP);
  if(lcf->accelP != NULL)
    gsl_interp_accel_free(lcf->accelP);
  free(lcf);
}
{ "alphanum_fraction": 0.684154883, "avg_line_length": 25.4781144781, "ext": "c", "hexsha": "44ff672b1b1a51d6b925b640610e2b0178488f6f", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2017-08-11T17:31:51.000Z", "max_forks_repo_forks_event_min_datetime": "2017-07-14T12:17:31.000Z", "max_forks_repo_head_hexsha": "aa7d7cb58f05a36d446e02b45a9117d93eb16556", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "beckermr/cosmocalc", "max_forks_repo_path": "src/weaklens.c", "max_issues_count": 1, "max_issues_repo_head_hexsha": "aa7d7cb58f05a36d446e02b45a9117d93eb16556", "max_issues_repo_issues_event_max_datetime": "2016-04-05T19:36:21.000Z", "max_issues_repo_issues_event_min_datetime": "2016-04-05T19:10:45.000Z", "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "beckermr/cosmocalc", "max_issues_repo_path": "src/weaklens.c", "max_line_length": 135, "max_stars_count": null, "max_stars_repo_head_hexsha": "aa7d7cb58f05a36d446e02b45a9117d93eb16556", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "beckermr/cosmocalc", "max_stars_repo_path": "src/weaklens.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2550, "size": 7567 }
/**
 *
 * @file qwrapper_slauum.c
 *
 * PLASMA core_blas quark wrapper
 * PLASMA is a software package provided by Univ. of Tennessee,
 * Univ. of California Berkeley and Univ. of Colorado Denver
 *
 * @version 2.6.0
 * @author Hatem Ltaief
 * @author Mathieu Faverge
 * @author Jakub Kurzak
 * @date 2010-11-15
 * @generated s Tue Jan 7 11:44:56 2014
 *
 **/
#include <lapacke.h>
#include "common.h"

/***************************************************************************//**
 *
 * Insert a CORE_slauum task into the QUARK runtime: compute the product
 * U*U' or L'*L of the triangular factor held in the nb-by-nb tile A.
 * uplo, n, and lda are passed by VALUE; A is tagged INOUT so QUARK can
 * track the data dependency on the tile.
 *
 **/
void QUARK_CORE_slauum(Quark *quark, Quark_Task_Flags *task_flags,
                       PLASMA_enum uplo, int n, int nb,
                       float *A, int lda)
{
    DAG_CORE_LAUUM;
    QUARK_Insert_Task(quark, CORE_slauum_quark, task_flags,
        sizeof(PLASMA_enum),                &uplo,      VALUE,
        sizeof(int),                        &n,         VALUE,
        sizeof(float)*nb*nb,                A,          INOUT,
        sizeof(int),                        &lda,       VALUE,
        0);
}

/***************************************************************************//**
 *
 * Task body executed by a QUARK worker: unpack the four arguments packed
 * by QUARK_CORE_slauum and forward them to the LAPACKE slauum kernel.
 *
 **/
#if defined(PLASMA_HAVE_WEAK)
#pragma weak CORE_slauum_quark = PCORE_slauum_quark
#define CORE_slauum_quark PCORE_slauum_quark
#endif
void CORE_slauum_quark(Quark *quark)
{
    PLASMA_enum uplo;
    int N;
    float *A;
    int LDA;

    quark_unpack_args_4(quark, uplo, N, A, LDA);
    LAPACKE_slauum_work(LAPACK_COL_MAJOR, lapack_const(uplo), N, A, LDA);
}
{ "alphanum_fraction": 0.5417827298, "avg_line_length": 27.0943396226, "ext": "c", "hexsha": "970139f3816b8c32908f773f0b05cbabba0b66cb", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "zhuangsc/Plasma-ompss1", "max_forks_repo_path": "core_blas-qwrapper/qwrapper_slauum.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "zhuangsc/Plasma-ompss1", "max_issues_repo_path": "core_blas-qwrapper/qwrapper_slauum.c", "max_line_length": 80, "max_stars_count": null, "max_stars_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "zhuangsc/Plasma-ompss1", "max_stars_repo_path": "core_blas-qwrapper/qwrapper_slauum.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 404, "size": 1436 }
#ifndef AMICI_MISC_H #define AMICI_MISC_H #include "amici/defines.h" #include <sunmatrix/sunmatrix_sparse.h> // SUNMatrixContent_Sparse #include <algorithm> #include <vector> #include <memory> #include <regex> #include <gsl/gsl-lite.hpp> namespace amici { /** * @brief creates a slice from existing data * * @param data to be sliced * @param index slice index * @param size slice size * @return span of the slice */ gsl::span<realtype> slice(std::vector<realtype> &data, int index, unsigned size); /** * @brief Remove parameter scaling according to the parameter scaling in pscale * * All vectors must be of same length. * * @param bufferScaled scaled parameters * @param pscale parameter scaling * @param bufferUnscaled unscaled parameters are written to the array */ void unscaleParameters(gsl::span<const realtype> bufferScaled, gsl::span<const ParameterScaling> pscale, gsl::span<realtype> bufferUnscaled); /** * @brief Remove parameter scaling according to `scaling` * * @param scaledParameter scaled parameter * @param scaling parameter scaling * * @return Unscaled parameter */ double getUnscaledParameter(double scaledParameter, ParameterScaling scaling); /** * @brief Apply parameter scaling according to `scaling` * @param unscaledParameter * @param scaling parameter scaling * @return Scaled parameter */ double getScaledParameter(double unscaledParameter, ParameterScaling scaling); /** * @brief Apply parameter scaling according to `scaling` * @param bufferUnscaled * @param pscale parameter scaling * @param bufferScaled destination */ void scaleParameters(gsl::span<const realtype> bufferUnscaled, gsl::span<const ParameterScaling> pscale, gsl::span<realtype> bufferScaled); /** * @brief Returns the current backtrace as std::string * @param maxFrames Number of frames to include * @return Backtrace */ std::string backtraceString(int maxFrames); /** * @brief Convert std::regex_constants::error_type to string * @param err_type error type * @return Error type as string */ 
std::string regexErrorToString(std::regex_constants::error_type err_type); /** * @brief Format printf-style arguments to std::string * @param fmt Format string * @param ap Argument list pointer * @return Formatted String */ std::string printfToString(const char *fmt, va_list ap); } // namespace amici #ifndef __cpp_lib_make_unique // custom make_unique while we are still using c++11 namespace std { template<typename T, typename... Args> std::unique_ptr<T> make_unique(Args&&... args) { return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); } } #endif #endif // AMICI_MISC_H
{ "alphanum_fraction": 0.7108390349, "avg_line_length": 25.018018018, "ext": "h", "hexsha": "6284a1d8f9d7a25740ce09447fb93600c590be2e", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a0407673453d6e18a9abec5b6f73758dd09f7aaf", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "paszkow/AMICI", "max_forks_repo_path": "include/amici/misc.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "a0407673453d6e18a9abec5b6f73758dd09f7aaf", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "paszkow/AMICI", "max_issues_repo_path": "include/amici/misc.h", "max_line_length": 80, "max_stars_count": null, "max_stars_repo_head_hexsha": "a0407673453d6e18a9abec5b6f73758dd09f7aaf", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "paszkow/AMICI", "max_stars_repo_path": "include/amici/misc.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 637, "size": 2777 }
#if !defined(FINDFILEEXT_H_INCLUDED)
#define FINDFILEEXT_H_INCLUDED

#include "FileEnumerator.h"
#include "Utils.h"
#include <filesystem>
#include <gsl/gsl>
#include <iosfwd>
#include <map>
#include <string>
#include <vector>

// Command-line tool driver: scans files (via FileEnumerator) and reports
// the file extensions found, optionally with per-extension counts or in
// wildcard form.  Construct from the argv span, then call run().
class FindFileExt
{
public:
	// Prints a usage message (prefixed with pMsg when non-null) to strm
	// and returns the process exit code.
	static int usage(::std::ostream& strm, const ::std::string& progName,
		const char* pMsg);

	// Parses the command-line arguments; presumably throws or reports via
	// usage() on bad input — TODO confirm in the implementation file.
	FindFileExt(::gsl::span<const char*const> args);
	// Executes the scan and prints the report; returns the exit code.
	int run();

	// Non-copyable and non-movable: owns enumeration state tied to the run.
	FindFileExt(const FindFileExt&) = delete;
	FindFileExt& operator=(const FindFileExt&) = delete;
	FindFileExt(FindFileExt&&) = delete;
	FindFileExt& operator=(FindFileExt&&) = delete;

// PRIVATE_EXCEPT_IN_TEST presumably expands to `private` in production
// builds and to `public` under test (defined in Utils.h) — TODO confirm.
PRIVATE_EXCEPT_IN_TEST:
	using Path = ::std::filesystem::path;
	using StrToCountMap = ::std::map< ::std::string, size_t >;
	using PathList = ::std::vector<Path>;

	// Walks the enumerated files, tallying extensions into m_extToCountMap
	// and extension-less paths into m_noExtList.
	void countFiles();
	// Emits one report line for a single extension->count entry.
	void reportExtension(const StrToCountMap::value_type& extToCountMapping);

	bool m_includeCounts;        // include per-extension counts in output
	bool m_outputAsWildcards;    // print extensions as wildcard patterns
	FileEnumerator m_fileEnumerator;
	StrToCountMap m_extToCountMap; // extension -> number of files seen
	PathList m_noExtList;          // files that have no extension
};

#endif // FINDFILEEXT_H_INCLUDED
{ "alphanum_fraction": 0.7451923077, "avg_line_length": 23.6363636364, "ext": "h", "hexsha": "4b10e98be75024e4c753159b7d1cd9a408e49d7b", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bd20d59d4d9fcc0d7d82a3031106d56ad10aad16", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "IanEmmons/CmdLineUtil", "max_forks_repo_path": "FindFileExt.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "bd20d59d4d9fcc0d7d82a3031106d56ad10aad16", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "IanEmmons/CmdLineUtil", "max_issues_repo_path": "FindFileExt.h", "max_line_length": 74, "max_stars_count": null, "max_stars_repo_head_hexsha": "bd20d59d4d9fcc0d7d82a3031106d56ad10aad16", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "IanEmmons/CmdLineUtil", "max_stars_repo_path": "FindFileExt.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 280, "size": 1040 }
/* min/convergence.c
 * 
 * Copyright (C) 1996, 1997, 1998, 1999, 2000 Brian Gough
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <config.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_min.h>

/* Convergence test for a bracketing interval [x_lower, x_upper]:
 * succeeds when the interval width falls below
 * epsabs + epsrel * s, where s is the magnitude of the endpoint
 * closest to zero when both bounds share a sign, and 0 otherwise
 * (so only the absolute tolerance applies to intervals containing 0).
 * Returns GSL_SUCCESS on convergence, GSL_CONTINUE otherwise, or an
 * error code for invalid tolerances/bounds. */
int
gsl_min_test_interval (double x_lower, double x_upper, double epsabs, double epsrel)
{
  double scale;
  double threshold;

  /* validate tolerances and ordering of the bracket */
  if (epsrel < 0.0)
    GSL_ERROR ("relative tolerance is negative", GSL_EBADTOL);

  if (epsabs < 0.0)
    GSL_ERROR ("absolute tolerance is negative", GSL_EBADTOL);

  if (x_lower > x_upper)
    GSL_ERROR ("lower bound larger than upper_bound", GSL_EINVAL);

  /* relative-error scale: smallest endpoint magnitude when the interval
     does not straddle (or touch) zero, else zero */
  if ((x_lower > 0 && x_upper > 0) || (x_lower < 0 && x_upper < 0))
    scale = GSL_MIN_DBL (fabs (x_lower), fabs (x_upper));
  else
    scale = 0;

  threshold = epsabs + epsrel * scale;

  return (fabs (x_upper - x_lower) < threshold) ? GSL_SUCCESS : GSL_CONTINUE;
}
{ "alphanum_fraction": 0.6928817451, "avg_line_length": 28.0967741935, "ext": "c", "hexsha": "8bb630d1b6a2618ed43e001f6fa5ad92d83d37d3", "lang": "C", "max_forks_count": 40, "max_forks_repo_forks_event_max_datetime": "2022-03-03T23:23:37.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-26T15:31:16.000Z", "max_forks_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "manggoguy/parsec-modified", "max_forks_repo_path": "pkgs/libs/gsl/src/min/convergence.c", "max_issues_count": 12, "max_issues_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_issues_repo_issues_event_max_datetime": "2022-03-13T03:54:24.000Z", "max_issues_repo_issues_event_min_datetime": "2020-12-15T08:30:19.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "manggoguy/parsec-modified", "max_issues_repo_path": "pkgs/libs/gsl/src/min/convergence.c", "max_line_length": 84, "max_stars_count": 64, "max_stars_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "manggoguy/parsec-modified", "max_stars_repo_path": "pkgs/libs/gsl/src/min/convergence.c", "max_stars_repo_stars_event_max_datetime": "2022-03-24T13:26:53.000Z", "max_stars_repo_stars_event_min_datetime": "2015-03-06T00:30:56.000Z", "num_tokens": 476, "size": 1742 }
// Copyright (C) 2021 Intel Corporation // SPDX-License-Identifier: Apache-2.0 #ifndef _HEBench_ClearText_EltMult_H_7e5fa8c2415240ea93eff148ed73539b #define _HEBench_ClearText_EltMult_H_7e5fa8c2415240ea93eff148ed73539b #include <gsl/gsl> #include "hebench/api_bridge/cpp/hebench.hpp" #include "clear_benchmark.h" template <class T> class EltMult_Benchmark : public ClearTextBenchmark { private: HEBERROR_DECLARE_CLASS_NAME(EltMult_Benchmark) public: EltMult_Benchmark(hebench::cpp::BaseEngine &engine, const hebench::APIBridge::BenchmarkDescriptor &bench_desc, const hebench::APIBridge::WorkloadParams &bench_params); ~EltMult_Benchmark() override; hebench::APIBridge::Handle encode(const hebench::APIBridge::DataPackCollection *p_parameters) override; void decode(hebench::APIBridge::Handle encoded_data, hebench::APIBridge::DataPackCollection *p_native) override; hebench::APIBridge::Handle load(const hebench::APIBridge::Handle *p_local_data, std::uint64_t count) override; void store(hebench::APIBridge::Handle remote_data, hebench::APIBridge::Handle *p_local_data, std::uint64_t count) override; hebench::APIBridge::Handle operate(hebench::APIBridge::Handle h_remote_packed, const hebench::APIBridge::ParameterIndexer *p_param_indexers) override; protected: std::uint64_t m_vector_size; private: static void eltMult(gsl::span<T> &result, const gsl::span<const T> &V0, const gsl::span<const T> &V1, std::size_t element_count); }; #include "inl/bench_eltmult.inl" #endif // defined _HEBench_ClearText_EltMult_H_7e5fa8c2415240ea93eff148ed73539b
{ "alphanum_fraction": 0.7309673726, "avg_line_length": 36.3958333333, "ext": "h", "hexsha": "a290ca217f158c4c63f547e80bb806d7537ed47a", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-11-05T18:01:48.000Z", "max_forks_repo_forks_event_min_datetime": "2021-11-05T18:01:48.000Z", "max_forks_repo_head_hexsha": "83e4398d9271f3e077bb4dfc0a8fb04ce36e23f6", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "hebench/backend-cpu-cleartext", "max_forks_repo_path": "benchmarks/Vector/EltwiseMult/include/bench_eltmult.h", "max_issues_count": 2, "max_issues_repo_head_hexsha": "83e4398d9271f3e077bb4dfc0a8fb04ce36e23f6", "max_issues_repo_issues_event_max_datetime": "2021-12-16T23:37:53.000Z", "max_issues_repo_issues_event_min_datetime": "2021-12-06T19:37:42.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "hebench/backend-cpu-cleartext", "max_issues_repo_path": "benchmarks/Vector/EltwiseMult/include/bench_eltmult.h", "max_line_length": 116, "max_stars_count": 1, "max_stars_repo_head_hexsha": "83e4398d9271f3e077bb4dfc0a8fb04ce36e23f6", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "hebench/backend-cpu-cleartext", "max_stars_repo_path": "benchmarks/Vector/EltwiseMult/include/bench_eltmult.h", "max_stars_repo_stars_event_max_datetime": "2022-02-28T17:57:32.000Z", "max_stars_repo_stars_event_min_datetime": "2022-02-28T17:57:32.000Z", "num_tokens": 464, "size": 1747 }
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_spline.h> int i; int max_lines=30; double xi, yi, x[30], y[30]; const char* getfield(char* line, int num) { const char* tok; for (tok = strtok(line, ","); tok && *tok; tok = strtok(NULL, ";\n")) { if (!--num) return tok; } return NULL; } void read_csv(char* filename, int max_lines){ FILE * f = fopen(filename, "r"); char line[1024]; int l = 0; fgets(line, 1024, f); while(fgets(line, 1024, f)){ char* tmp = strdup(line); double y_tmp = atof(getfield(tmp, 2)); double x_tmp = atof(getfield(tmp, 1)); if(l < max_lines){ x[l] = x_tmp; y[l] = y_tmp; } l++; free(tmp); } } int main (void) { read_csv("secret_fun.csv", max_lines); // printf ("#m=0,S=17\n"); // // for (i = 0; i < max_lines; i++) // { // // x[i] = i + 0.5 * sin (i); // // y[i] = i + cos (i * i); // printf ("%f %f \n", x[i], y[i]); // } // printf ("#m=1,S=0\n"); // { gsl_interp_accel *acc = gsl_interp_accel_alloc (); gsl_spline *spline = gsl_spline_alloc (gsl_interp_cspline, 30); gsl_spline_init (spline, x, y, 30); printf("x,y\n" ); for (xi = x[0]; xi < x[29]; xi += 0.01) { printf("%g, ", xi); yi = gsl_spline_eval (spline, xi, acc); printf ("%g\n", yi); } gsl_spline_free (spline); gsl_interp_accel_free (acc); } return 0; }
{ "alphanum_fraction": 0.5086483024, "avg_line_length": 19.2716049383, "ext": "c", "hexsha": "e18bc7ffab8b8790362d71dfaf013a342a0072d0", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2019-05-29T10:19:18.000Z", "max_forks_repo_forks_event_min_datetime": "2019-04-10T09:35:51.000Z", "max_forks_repo_head_hexsha": "d2a492d210d3acfa1246de82c4a9b818956b4271", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "mprzewie/MOwNiT_2", "max_forks_repo_path": "lab5/interp.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "d2a492d210d3acfa1246de82c4a9b818956b4271", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "mprzewie/MOwNiT_2", "max_issues_repo_path": "lab5/interp.c", "max_line_length": 50, "max_stars_count": null, "max_stars_repo_head_hexsha": "d2a492d210d3acfa1246de82c4a9b818956b4271", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "mprzewie/MOwNiT_2", "max_stars_repo_path": "lab5/interp.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 521, "size": 1561 }
#include "../EngineTest.h"

#include <Catch2>
#include <gsl/gsl_math.h>

// Regression tests for the engine's `skew` function: each case feeds a
// fixed argument list through the expression evaluator and checks the
// sample skewness against a precomputed value (requireIsEqual presumably
// compares with a floating-point tolerance — see EngineTest.h).
TEST_CASE("Skew Function Evaluation Tests", "[skew]") {
    // Disabled: zero-argument error handling not asserted yet.
    // SECTION("Empty Test"){
    //     requireIsEqual("skew()", "Insufficient Number of Arguments for Function: skew");
    // }
    SECTION("`skew` Test 1"){
        requireIsEqual("skew(-7.48, 1.3, -6.68, 7.61, -3.75)", 0.4478899957);
    }
    SECTION("`skew` Test 2"){
        requireIsEqual("skew(-9.79, 6.39, -2.51, 5.03, -7.83, -5.82, 5.14)", 0.0115605537);
    }
    SECTION("`skew` Test 3"){
        requireIsEqual("skew(-8.75, -8.67, 6.72, -4.01, -0.14, -2.32, 7.62, -7.36, 9.19)", 0.2380296624);
    }
    SECTION("`skew` Test 4"){
        requireIsEqual("skew(8.28, -5.77, -5.76)", 0.3848995209);
    }
    SECTION("`skew` Test 5"){
        requireIsEqual("skew(-7.03, 4.48, -2.21, 6.56, -3.27, 6.23, -3.57, 9.99, 0.48)", 0.0727451398);
    }
    SECTION("`skew` Test 6"){
        requireIsEqual("skew(-5.52, -5.18, -0.97, -0.07)", 0.0281579987);
    }
    SECTION("`skew` Test 7"){
        requireIsEqual("skew(5.24, -9.46, 1.35, -5.8, 4.4, 3.6)", -0.5210503622);
    }
    SECTION("`skew` Test 8"){
        requireIsEqual("skew(4.94, -2.29, 7.81, 8.51, -5.59, -6.55, 5.24)", -0.2249073199);
    }
    SECTION("`skew` Test 9"){
        requireIsEqual("skew(-3.73, -9.06, -4.36, 6.63, -4.59)", 0.6907748203);
    }
    SECTION("`skew` Test 10"){
        requireIsEqual("skew(-5.79, -1.84, -6.12, -5.07, -2.07, -3.48, 9.85, -6.41)", 1.4695482561);
    }
}
{ "alphanum_fraction": 0.5317460317, "avg_line_length": 27.4909090909, "ext": "h", "hexsha": "91410d124d2f74df2573a9d47464f55d8a0f8a1e", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "33cede17001e0a7038f99ea40dd6f9e433cf6454", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "antoniojkim/CalcPlusPlus", "max_forks_repo_path": "Tests/Tests/StatisticsTests/skewTests.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "33cede17001e0a7038f99ea40dd6f9e433cf6454", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "antoniojkim/CalcPlusPlus", "max_issues_repo_path": "Tests/Tests/StatisticsTests/skewTests.h", "max_line_length": 105, "max_stars_count": null, "max_stars_repo_head_hexsha": "33cede17001e0a7038f99ea40dd6f9e433cf6454", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "antoniojkim/CalcPlusPlus", "max_stars_repo_path": "Tests/Tests/StatisticsTests/skewTests.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 652, "size": 1512 }
#pragma once #include <gsl/span> namespace CesiumUtility { /** * @brief This function converts between span types. This function * has the same rules with C++ reintepret_cast * https://en.cppreference.com/w/cpp/language/reinterpret_cast. So please use it * carefully */ template <typename To, typename From> gsl::span<To> reintepretCastSpan(const gsl::span<From>& from) noexcept { return gsl::span<To>( reinterpret_cast<To*>(from.data()), from.size() * sizeof(From) / sizeof(To)); } } // namespace CesiumUtility
{ "alphanum_fraction": 0.712945591, "avg_line_length": 28.0526315789, "ext": "h", "hexsha": "84fddffb03ba72e236a0aef4ffec7fb324f54c43", "lang": "C", "max_forks_count": 66, "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:38:41.000Z", "max_forks_repo_forks_event_min_datetime": "2021-03-30T15:14:32.000Z", "max_forks_repo_head_hexsha": "9493b9baebea601bd00d8139f2000e41ba4505ef", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "yieryi/cesium-native", "max_forks_repo_path": "CesiumUtility/include/CesiumUtility/SpanHelper.h", "max_issues_count": 256, "max_issues_repo_head_hexsha": "9493b9baebea601bd00d8139f2000e41ba4505ef", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:44:21.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-30T18:12:28.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "yieryi/cesium-native", "max_issues_repo_path": "CesiumUtility/include/CesiumUtility/SpanHelper.h", "max_line_length": 80, "max_stars_count": 154, "max_stars_repo_head_hexsha": "9493b9baebea601bd00d8139f2000e41ba4505ef", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "yieryi/cesium-native", "max_stars_repo_path": "CesiumUtility/include/CesiumUtility/SpanHelper.h", "max_stars_repo_stars_event_max_datetime": "2022-03-30T00:01:43.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-30T14:08:39.000Z", "num_tokens": 134, "size": 533 }
#pragma once
#include "iparamlist.h"
#include "optioninfo.h"
#include "configaccess.h"
#include "format.h"
#include "streamreader.h"
#include <sfun/string_utils.h>
#include <cmdlime/errors.h>
#include <cmdlime/customnames.h>
#include <gsl/gsl>
#include <vector>
#include <sstream>
#include <functional>
#include <memory>

namespace cmdlime::detail{
namespace str = sfun::string_utils;

// Command-line "parameter list" option: a repeatable/comma-separated
// parameter whose values accumulate into a caller-owned std::vector<T>
// accessed through a getter functor.  Implements the IParamList reader
// interface used by the config parser.
template <typename T>
class ParamList : public IParamList{
public:
    // name/shortName/type feed the OptionInfo used for help output; the
    // getter returns a reference to the destination vector on each call.
    ParamList(std::string name,
              std::string shortName,
              std::string type,
              std::function<std::vector<T>&()> paramListGetter)
        : info_(std::move(name), std::move(shortName), std::move(type))
        , paramListGetter_(std::move(paramListGetter))
    {
    }

    // Records a default value; also marks the option as having a value so
    // the parser treats it as satisfied when absent from the command line.
    void setDefaultValue(const std::vector<T>& value)
    {
        hasValue_ = true;
        defaultValue_ = value;
    }

    OptionInfo& info() override
    {
        return info_;
    }

    const OptionInfo& info() const override
    {
        return info_;
    }

private:
    // Parses one occurrence of the option.  The first successful read
    // clears any default contents of the destination vector; the data is
    // comma-split and each part is stream-read into a new element.
    // Returns false if any part fails to parse.
    bool read(const std::string& data) override
    {
        if (!isDefaultValueOverwritten_){
            paramListGetter_().clear();
            isDefaultValueOverwritten_ = true;
        }

        const auto dataParts = str::split(data, ",");
        for (const auto& part : dataParts){
            auto stream = std::stringstream{part};
            paramListGetter_().emplace_back();
            if (!readFromStream(stream, paramListGetter_().back()))
                return false;
        }
        hasValue_ = true;
        return true;
    }

    bool hasValue() const override
    {
        return hasValue_;
    }

    // A param list is optional iff a default value was registered.
    bool isOptional() const override
    {
        return defaultValue_.has_value();
    }

    // Renders the default value as "{a, b, c}" for help text; empty string
    // when no default was set.
    std::string defaultValue() const override
    {
        if (!defaultValue_.has_value())
            return {};
        auto stream = std::stringstream{};
        stream << "{";
        auto firstVal = true;
        for (auto& val : defaultValue_.value()){
            if (firstVal)
                stream << val;
            else
                stream << ", " << val;
            firstVal = false;
        }
        stream << "}";
        return stream.str();
    }

private:
    OptionInfo info_;
    std::function<std::vector<T>&()> paramListGetter_; // accessor for the caller's vector
    bool hasValue_ = false;                            // set once any value (or default) exists
    std::optional<std::vector<T>> defaultValue_;
    bool isDefaultValueOverwritten_ = false;           // true after the first command-line read
};

// std::string specialization: parts are stored verbatim, skipping the
// stream round-trip (which would stop at whitespace).  Note it does NOT
// clear a default value first — presumably intentional, but differs from
// the primary template; confirm against the parser's expectations.
template <>
inline bool ParamList<std::string>::read(const std::string& data)
{
    const auto dataParts = str::split(data, ",");
    for (const auto& part : dataParts){
        paramListGetter_().push_back(part);
    }
    hasValue_ = true;
    return true;
}

// Fluent builder used by the config macros: collects customizations via
// operator<< and registers the finished ParamList with the config when
// implicitly converted to std::vector<T> (the member's initializer).
template<typename T, typename TConfig>
class ParamListCreator{
    using NameProvider = typename Format<ConfigAccess<TConfig>::format()>::nameProvider;

public:
    ParamListCreator(TConfig& cfg,
                     const std::string& varName,
                     const std::string& type,
                     std::function<std::vector<T>&()> paramListGetter)
        : cfg_(cfg)
    {
        Expects(!varName.empty());
        Expects(!type.empty());
        // Derive display names for the current command-line format.
        paramList_ = std::make_unique<ParamList<T>>(NameProvider::name(varName),
                                                    NameProvider::shortName(varName),
                                                    NameProvider::valueName(type),
                                                    std::move(paramListGetter));
    }

    // Appends help/description text.
    ParamListCreator<T, TConfig>& operator<<(const std::string& info)
    {
        paramList_->info().addDescription(info);
        return *this;
    }

    // Overrides the long option name.
    ParamListCreator<T, TConfig>& operator<<(const Name& customName)
    {
        paramList_->info().resetName(customName.value());
        return *this;
    }

    // Overrides the short option name (only in formats that support one).
    ParamListCreator<T, TConfig>& operator<<(const ShortName& customName)
    {
        static_assert(Format<ConfigAccess<TConfig>::format()>::shortNamesEnabled,
                      "Current command line format doesn't support short names");
        paramList_->info().resetShortName(customName.value());
        return *this;
    }

    // Removes the auto-generated short name.
    ParamListCreator<T, TConfig>& operator<<(const WithoutShortName&)
    {
        static_assert(Format<ConfigAccess<TConfig>::format()>::shortNamesEnabled,
                      "Current command line format doesn't support short names");
        paramList_->info().resetShortName({});
        return *this;
    }

    // Overrides the value placeholder shown in help output.
    ParamListCreator<T, TConfig>& operator<<(const ValueName& valueName)
    {
        paramList_->info().resetValueName(valueName.value());
        return *this;
    }

    // Sets the default value, making the parameter optional.
    ParamListCreator<T, TConfig>& operator()(std::vector<T> defaultValue = {})
    {
        defaultValue_ = std::move(defaultValue);
        paramList_->setDefaultValue(defaultValue_);
        return *this;
    }

    // Finalizes registration and yields the initial (default) value that
    // the config member is constructed from.
    operator std::vector<T>()
    {
        ConfigAccess<TConfig>{cfg_}.addParamList(std::move(paramList_));
        return defaultValue_;
    }

private:
    std::unique_ptr<ParamList<T>> paramList_;
    std::vector<T> defaultValue_;
    TConfig& cfg_;
};

// Convenience factory that deduces the creator's template arguments.
template <typename T, typename TConfig>
ParamListCreator<T, TConfig> makeParamListCreator(TConfig& cfg,
                                                  const std::string& varName,
                                                  const std::string& type,
                                                  std::function<std::vector<T>&()> paramListGetter)
{
    return ParamListCreator<T, TConfig>{cfg, varName, type, std::move(paramListGetter)};
}

}
{ "alphanum_fraction": 0.5785406183, "avg_line_length": 28.5333333333, "ext": "h", "hexsha": "5dfe515bd3006a285e5a9faac7f66c7cc2155be3", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-05-22T00:36:08.000Z", "max_forks_repo_forks_event_min_datetime": "2021-05-22T00:36:08.000Z", "max_forks_repo_head_hexsha": "0058bffd31fd2a46374fd44c6730c2356bbaab43", "max_forks_repo_licenses": [ "MS-PL" ], "max_forks_repo_name": "GerHobbelt/hypertextcpp", "max_forks_repo_path": "thirdparty/cmdlime/include/cmdlime/detail/paramlist.h", "max_issues_count": 6, "max_issues_repo_head_hexsha": "0058bffd31fd2a46374fd44c6730c2356bbaab43", "max_issues_repo_issues_event_max_datetime": "2021-12-21T08:13:28.000Z", "max_issues_repo_issues_event_min_datetime": "2021-05-20T22:04:52.000Z", "max_issues_repo_licenses": [ "MS-PL" ], "max_issues_repo_name": "GerHobbelt/hypertextcpp", "max_issues_repo_path": "thirdparty/cmdlime/include/cmdlime/detail/paramlist.h", "max_line_length": 99, "max_stars_count": 77, "max_stars_repo_head_hexsha": "0058bffd31fd2a46374fd44c6730c2356bbaab43", "max_stars_repo_licenses": [ "MS-PL" ], "max_stars_repo_name": "GerHobbelt/hypertextcpp", "max_stars_repo_path": "thirdparty/cmdlime/include/cmdlime/detail/paramlist.h", "max_stars_repo_stars_event_max_datetime": "2022-02-13T21:37:54.000Z", "max_stars_repo_stars_event_min_datetime": "2021-05-20T18:05:54.000Z", "num_tokens": 1202, "size": 5564 }
#ifndef BASIC_RJMCMC_H
#define BASIC_RJMCMC_H

#include <gsl/gsl_randist.h>
#include <gsl/gsl_rng.h>
#include "basic_particle.h"
#include "basic_changepoint.h"
#include "multiple_processes_regime.h"

// Reversible-jump MCMC driver for multiple-changepoint inference.  The sampler
// runs in three stages that share this class: a "basic" stage over changepoint
// positions only, a "binary" stage, and a "full" stage that additionally
// samples regime assignments per process.
class rj{
public:
    // Constructor for normal RJMCMC.  [start_time, end_time] delimit the data,
    // p/var_p parameterise the changepoint prior, separators split the data
    // into independent traces, and pm_ptr supplies the likelihood model.
    rj(const unsigned long int & start_time = 0,
       const unsigned long int & end_time = 1,
       const double & p = 1,
       const double & var_p = 0,
       const unsigned long int & burnin = 0,
       const unsigned long int & iterations = 1,
       const unsigned long int & thinning = 1,
       const unsigned long int & number_of_association_matrix_bins = 0,
       const vector< unsigned long int > & starting_changepoints = vector< unsigned long int >(0),
       const vector< unsigned long int > & separators = vector< unsigned long int >(0),
       const vector< unsigned long int > & trace_lengths = vector< unsigned long int >(0),
       const unsigned int & seed = 1,
       mult_process * pm_ptr = NULL,
       const unsigned long int & intercept = 0,
       const unsigned long int & diff = 1);

    // MCMC schedule setters for the binary and full stages.
    void set_binary_burnin_iterations_thinning( const unsigned long int & binary_burnin, const unsigned long int & binary_iterations, const unsigned long int & binary_thinning ) {m_binary_burnin = binary_burnin; m_binary_iterations = binary_iterations; m_binary_thinning = binary_thinning; }
    void set_full_burnin_iterations_thinning( const unsigned long int & full_burnin, const unsigned long int & full_iterations, const unsigned long int & full_thinning ) {m_full_burnin = full_burnin; m_full_iterations = full_iterations; m_full_thinning = full_thinning; }

    // Enable sample recording per stage; each also resets that stage's MAP
    // log-posterior tracker to effectively minus infinity.
    void record_basic_samples(const bool & record = false, const unsigned int & number_of_bins = 100) { m_recording_basic_samples = record; m_number_of_changepoint_bins = number_of_bins; m_basic_MAP_log_posterior = -1e300; }
    void record_binary_samples(const bool & record = false, const unsigned int & number_of_bins = 100) { m_recording_binary_samples = record; m_number_of_changepoint_bins = number_of_bins; m_binary_MAP_log_posterior = -1e300; }
    // Full-stage recording additionally seeds the MAP particle from the current
    // particle and zeroes all per-move proposal/acceptance counters.
    void record_full_samples(const bool & record = false, const string & data_file = "", const unsigned int & number_of_bins = 100) { m_recording_full_samples = record; m_data_file = data_file; m_number_of_changepoint_bins = number_of_bins; m_full_MAP_log_posterior = m_particle.get_full_log_posterior(); m_full_MAP_particle = m_particle; m_full_MAP_dimension = m_particle.get_dimension(); m_recorded_full_birth_proposals = 0; m_recorded_full_birth_acceptances = 0; m_recorded_full_death_proposals = 0; m_recorded_full_death_acceptances = 0; m_recorded_full_move_proposals = 0; m_recorded_full_move_acceptances = 0; m_recorded_full_resample_proposals = 0; m_recorded_full_resample_acceptances = 0; m_recorded_full_unobserveds_proposals = 0; m_recorded_full_unobserveds_acceptances = 0;}

    // Likelihood / prior evaluation helpers.
    double calculate_total_basic_log_likelihood();
    double calculate_full_log_acceptance_probability(const double & u1);
    // NOTE(review): 'process' is declared double but is used as an index in the
    // definitions below — TODO confirm intent.  The -1 defaults on the unsigned
    // reference params wrap to numeric_limits<unsigned int>::max(), which is
    // exactly the sentinel value the implementations compare against.
    double calculate_process_full_log_I_prior(const double & process, const unsigned int & adding_index = -1, const unsigned int & proposed_regime = -1);
    double calculate_process_full_log_I_prior_without_cp(const double & process, const unsigned int & removing_cp, const unsigned int & proposed_regime = -1);
    double calculate_full_log_I_prior(const vector< unsigned int > & regime_vector, const size_t & number_of_regimes);
    vector< vector< double > > generate_regime_transition_matrix(const vector< unsigned int > & regime_vector, const size_t & number_of_regimes);
    double calculate_log_of_sum_Bq_ratio(const vector< double > & new_I_priors, const double & old_I_prior);

    // Basic-stage move setup (birth/death/move), MH bookkeeping and main loop.
    void adding_basic_changepoint_setup(const unsigned int & trace_index);
    void removing_basic_changepoint_setup(const unsigned int & trace_index);
    void moving_basic_changepoint_setup(const unsigned int & trace_index);
    void basic_acceptance_procedure( const double & u1);
    void basic_recording_procedure();
    void run_basic_simulation();

    // Output writers: each dumps one recorded summary to the named file.
    void write_basic_MAP_changepoints_to_file(const string & basic_MAP_cps_Filename);
    void write_binary_MAP_changepoints_to_file(const string & binary_MAP_cps_Filename);
    void write_full_MAP_changepoints_to_file(const string & full_MAP_cps_Filename);
    void write_basic_dimension_distribution_to_file(const string & basic_dimension_distribution_Filename);
    void write_binary_dimension_distribution_to_file(const string & binary_dimension_distribution_Filename);
    void write_full_dimension_distribution_to_file(const string & full_dimension_distribution_Filename);
    void write_full_effective_dimension_distribution_to_file(const string & full_effective_dimension_distribution_Filename);
    void write_basic_changepoints_distribution_to_file(const string & basic_changepoints_distribution_Filename);
    void write_binary_changepoints_distribution_to_file(const string & binary_changepoints_distribution_Filename);
    void write_full_changepoints_distribution_to_file(const string & full_changepoints_distribution_Filename, const unsigned long int number_of_iterations);
    void write_number_of_regimes_to_file(const string & number_of_regimes_Filename);
    void write_number_of_observed_regimes_to_file(const string & number_of_regimes_Filename);
    void write_basic_log_posterior_trace_to_file(const string & basic_log_posterior_trace_Filename);
    void write_binary_log_posterior_trace_to_file(const string & binary_log_posterior_trace_Filename);
    void write_full_log_posterior_trace_to_file(const string & full_log_posterior_trace_Filename);
    void write_basic_dimension_trace_to_file(const string & dimension_trace_Filename);
    void write_dimension_trace_to_file(const string & dimension_trace_Filename);
    void write_number_of_regimes_trace_to_file(const string & number_of_regimes_trace_Filename);
    void write_full_acceptance_probabilities_to_file(const string & acceptance_probabilities_Filename);
    void write_similarity_matrix_to_file(const string & similarity_matrix_Filename);
    void write_min_proportion_similarity_matrix_to_file(const string & min_proportion_similarity_matrix_Filename);
    void write_similarity_matrices_to_file(const string &
similarity_matrices_Filename);
    void write_min_proportion_similarity_matrices_to_file(const string & min_proportion_similarity_matrices_Filename);
    void write_association_matrix_to_file(const string & association_matrix_Filename);

    // Stage transitions and binary-stage machinery.
    void convert_basic_particle_to_binary_particle(const double & beta_alpha);
    void set_binary_marked_vectors();
    double calculate_total_binary_log_likelihood();
    void adding_binary_changepoint_setup(const unsigned int & trace_index);
    void removing_binary_changepoint_setup(const unsigned int & trace_index);
    void moving_binary_changepoint_setup(const unsigned int & trace_index);
    void resampling_binary_changepoint_setup(const unsigned int & trace_index);
    void binary_acceptance_procedure(const double & u1);
    void binary_recording_procedure();
    void run_binary_simulation();

    // Full-stage machinery.
    void convert_binary_particle_to_full_particle(const double & dirichlet_alpha, const double & rho);
    void set_full_marked_vectors();// const size_t & number_of_processes, const vector< vector< vector< double > > > & sufficient_statistics, const vector< vector< double > > & number_of_observations);
    void check_total_full_log_likelihood(particle & P);
    void check_adding_changepoint(const unsigned int & add_cp_index);
    void calculate_vector_descending_order(const size_t & length, vector< double > A, vector< unsigned int > & order);
    void adding_full_changepoint_setup(const unsigned int & trace_index);
    void removing_full_changepoint_setup(const unsigned int & trace_index);
    void moving_full_changepoint_setup(const unsigned int & trace_index);
    void resampling_full_changepoint_setup(const unsigned int & trace_index);
    void altering_unobserved_regimes_setup();
    //void remove_unobserved_regimes_setup();
    void full_acceptance_procedure(const double & u1);
    void full_recording_procedure();
    void update_full_MAP();
    void run_full_simulation();

    // Accessor for the current sampler state (returns a copy).
    particle get_particle() const {return m_particle;}

protected:
    // Per-stage recording flags and optional data file name.
    bool m_recording_basic_samples;
    bool m_recording_binary_samples;
    bool m_recording_full_samples;
    bool m_recording_association_matrix;
    string m_data_file;

    // Recorded posterior summaries: dimension traces, changepoint histograms,
    // regime counts, per-move acceptance tallies and similarity matrices.
    vector< unsigned int > m_recorded_basic_dimensions;
    vector< unsigned int > m_recorded_binary_dimensions;
    vector< unsigned int > m_recorded_full_dimensions;
    vector< unsigned int > m_recorded_full_effective_dimensions;
    vector< unsigned long int > m_recorded_basic_changepoints;
    vector< unsigned long int > m_recorded_binary_changepoints;
    vector< unsigned long int > m_recorded_full_changepoints;
    vector< vector< size_t > > m_recorded_number_of_regimes;
    double m_recorded_full_birth_acceptances;
    double m_recorded_full_birth_proposals;
    double m_recorded_full_death_acceptances;
    double m_recorded_full_death_proposals;
    double m_recorded_full_move_acceptances;
    double m_recorded_full_move_proposals;
    double m_recorded_full_resample_acceptances;
    double m_recorded_full_resample_proposals;
    double m_recorded_full_unobserveds_acceptances;
    double m_recorded_full_unobserveds_proposals;
    vector< vector< unsigned int > > m_recorded_number_of_observed_regimes;
    unsigned long int m_number_of_changepoint_bins;
    vector< double > m_recorded_basic_log_posteriors;
    vector< double > m_recorded_binary_log_posteriors;
    vector< double > m_recorded_full_log_posteriors;
    vector< vector< double > > m_recorded_similarity_matrix;
    vector< vector< double > > m_recorded_min_proportion_similarity_matrix;
    vector< vector< vector< double > > > m_recorded_similarity_matrices;
    vector< vector< vector< double > > > m_recorded_min_proportion_similarity_matrices;
    vector< vector< vector< double > > > m_association_matrices; // for each process, for each pair of points, calculate the number of times that they fall in the same regime.
    // Observation counts per trace and MAP (maximum a posteriori) state.
    vector< unsigned long int > m_observations_in_each_trace;
    particle m_basic_MAP_particle;
    particle m_binary_MAP_particle;
    particle m_full_MAP_particle;
    double m_basic_MAP_log_posterior;
    double m_binary_MAP_log_posterior;
    double m_full_MAP_log_posterior;
    unsigned int m_basic_MAP_dimension;
    unsigned int m_binary_MAP_dimension;
    unsigned int m_full_MAP_dimension;

    // Data window, changepoint prior and per-stage MCMC schedules.
    unsigned long int m_start;
    unsigned long int m_end;
    double m_p;
    double m_var_p;
    unsigned long int m_basic_burnin;
    unsigned long int m_basic_iterations;
    unsigned long int m_basic_thinning;
    unsigned long int m_binary_burnin;
    unsigned long int m_binary_iterations;
    unsigned long int m_binary_thinning;
    unsigned long int m_full_burnin;
    unsigned long int m_full_iterations;
    unsigned long int m_full_thinning;
    unsigned long int m_number_of_association_matrix_bins;
    vector< unsigned long int > m_separators;
    unsigned long int m_diff;
    size_t m_number_of_traces;
    unsigned int m_seed;
    mult_process * m_pm_ptr;
    size_t m_number_of_processes;
    unsigned long int m_intercept;

    // Current sampler state plus scratch used while evaluating one MH move.
    particle m_particle;
    unsigned int m_dimension;
    changepoint m_end_changepoint;
    // Cumulative probability thresholds selecting the move type
    // (birth / death / move / resample / alter-unobserved / remove-unobserved).
    double m_b_k;
    double m_d_k;
    double m_m_k;
    double m_r_k;
    double m_au_k;
    double m_ru_k;
    unsigned int m_h;  // index of the changepoint chosen for a death/move
    double m_log_proposal_ratio;
    double m_log_likelihood_ratio;
    double m_log_k_prior_ratio;
    double m_log_full_I_prior_ratio;
    double m_log_regimes_prior_ratio;
    double m_log_acceptance_prob;
    double m_right_log_likelihood;
    double m_left_log_likelihood;
    int m_binary_left_index;
    unsigned int m_binary_right_index;
    int m_full_left_index; //needed? delete if not used
    unsigned int m_full_right_index; //needed? delete if not used

    // Binary/full-stage scratch: per-regime likelihood pieces and the B, q and
    // Bq matrices (and their reverse-move counterparts) built by the setups.
    vector< double > m_binary_right_log_likelihood;
    vector< double > m_binary_left_log_likelihood;
    vector< double > m_binary_merged_log_likelihood;
    vector< double > m_binary_left_log_likelihood_reverse;
    vector< double > m_binary_right_log_likelihood_reverse;
    vector< vector< double > > m_log_B;
    vector< vector< double > > m_log_B_reverse;
    vector< vector< double > > m_log_q;
    vector< vector< double > > m_log_q_reverse;
    vector< vector< double > > m_log_Bq;
    vector< vector< double > > m_log_Bq_reverse;
    vector< vector< unsigned int > > m_log_Bq_descending_order;
    vector< vector< unsigned int > > m_log_Bq_reverse_descending_order;
    vector< vector< double > > m_left_sufficient_statistics; // needed?
    vector< vector< double > > m_left_sufficient_statistics_reverse; // needed?
    vector< vector< double > > m_right_sufficient_statistics;
    vector< vector< double > > m_right_sufficient_statistics_reverse;
    vector< vector< double > > m_middle_sufficient_statistics;
    vector< double > m_previous_log_likelihoods_without_right_sufficient_statistics;
    vector< vector< double > > m_log_likelihoods_with_right_sufficient_statistics;
    vector< double > m_previous_log_likelihoods_with_right_sufficient_statistics_reverse;
    vector< double > m_actual_log_likelihoods_without_right_sufficient_statistics_reverse;
    vector< double > m_previous_log_likelihoods_without_right_sufficient_statistics_reverse;
    vector< vector< double > > m_log_likelihoods_with_right_sufficient_statistics_reverse;
    vector< double > m_previous_log_likelihoods_with_right_sufficient_statistics;
    vector< double > m_actual_log_likelihoods_without_right_sufficient_statistics;
    vector< double > m_previous_log_likelihoods_without_middle_sufficient_statistics;
    vector< double > m_previous_log_likelihoods_with_middle_sufficient_statistics;
    vector< double > m_actual_log_likelihoods_with_middle_sufficient_statistics;
    vector< double > m_actual_log_likelihoods_without_middle_sufficient_statistics;
    vector< int > m_altering_unobserved_regimes;
    vector< bool > m_removing_unobserved_regimes;
    vector< double > m_log_of_sum_Bq;
    vector< double > m_log_of_sum_Bq_reverse;
    double m_merged_log_likelihood;
    unsigned long int m_new_changepoint_position;
    bool m_tau_h_greater_than_tau_h_prime;
    changepoint m_adding_changepoint;

    // GSL random number generator (allocated in the ctor; seeded with m_seed).
    const gsl_rng_type * r_type;
    gsl_rng * r;
};

// Constructor: caches the data window/prior parameters, computes per-trace
// observation counts, builds the initial particle with its separator and
// starting changepoints, and initialises the GSL RNG.
// NOTE(review): the member-initializer list order differs from the declaration
// order above; members are initialized in declaration order (-Wreorder).  Each
// initializer only reads constructor parameters, so behaviour is unaffected.
rj::rj(const unsigned long int & start_time, const unsigned long int & end_time, const double & p, const double & var_p, const unsigned long int & basic_burnin, const unsigned long int & basic_iterations, const unsigned long int & basic_thinning, const unsigned long int & number_of_association_matrix_bins, const vector< unsigned long int > & starting_changepoints, const vector< unsigned long int > & separators, const vector< unsigned long int > & trace_lengths, const unsigned int & seed, mult_process * pm_ptr, const unsigned long int & intercept, const unsigned long int & diff):m_start(start_time), m_end(end_time), m_p(p), m_var_p(var_p), m_basic_burnin(basic_burnin), m_basic_iterations(basic_iterations), m_basic_thinning(basic_thinning), m_recording_association_matrix(number_of_association_matrix_bins > 0), m_number_of_association_matrix_bins(number_of_association_matrix_bins), m_separators(separators), m_number_of_traces(separators.size() + 1), m_seed(seed), m_pm_ptr(pm_ptr), m_intercept(intercept), m_diff(diff) {
    changepoint intercept_changepoint(m_intercept);
    intercept_changepoint.set_log_likelihood(m_pm_ptr->calculate_log_likelihood(m_intercept, m_end + 1));
    // calculate the number of observations in each trace
    if (trace_lengths.size() > 0) {
        m_observations_in_each_trace = trace_lengths;
        if (trace_lengths.size() != separators.size() + 1) {
            cerr << "sizes of the number of separators and the trace lengths don't match" << endl;
        }
    } else {
        m_observations_in_each_trace = vector< unsigned long int >(separators.size() + 1, 0);
        if (separators.size() > 0) {
            // first observation is at time 1 and separators[0] gives the time
            // of the last observation in the first trace
            m_observations_in_each_trace[0] = (separators[0] - 1 - m_start) * m_diff;
            for (unsigned int trace_idx = 1; trace_idx < separators.size(); trace_idx++) {
                m_observations_in_each_trace[trace_idx] = (separators[trace_idx] - separators[trace_idx - 1] - 1) * m_diff;
            }
            m_observations_in_each_trace[separators.size()] = (end_time - separators[separators.size() - 1]) * m_diff;
        } else {
            m_observations_in_each_trace[0] = (end_time - m_start) * m_diff;
        }
    }
    m_number_of_processes = m_pm_ptr->get_number_of_processes();
    // create the association matrix for each process if required
    if (m_recording_association_matrix) {
        m_association_matrices = vector< vector< vector< double > > >(m_number_of_processes, vector< vector< double > >(m_number_of_association_matrix_bins, vector< double >(m_number_of_association_matrix_bins, 0)));
    }
    // create the particle
    m_particle = particle(m_start, m_end, m_separators, intercept_changepoint, m_p, m_var_p);
    // add separator changepoints
    if (m_number_of_traces > 1) {
        m_particle.get_changepoint(-1).set_log_likelihood(m_pm_ptr->calculate_log_likelihood(m_intercept, m_separators[0]));
        changepoint new_separator_changepoint(m_separators[0]);
        if (m_number_of_traces == 2) {
            new_separator_changepoint.set_log_likelihood(m_pm_ptr->calculate_log_likelihood(m_separators[0], m_end + 1));
            m_particle.add_separator_changepoint(new_separator_changepoint, 0);
        } else {
            new_separator_changepoint.set_log_likelihood(m_pm_ptr->calculate_log_likelihood(m_separators[0], m_separators[1]));
            m_particle.add_separator_changepoint(new_separator_changepoint, 0);
            // interior separators: likelihood spans up to the next separator
            for (unsigned int separator_index = 1; separator_index < m_separators.size() - 1; separator_index++) {
                changepoint new_separator_changepoint(m_separators[separator_index]);
                new_separator_changepoint.set_log_likelihood(m_pm_ptr->calculate_log_likelihood(m_separators[separator_index], m_separators[separator_index + 1]));
                m_particle.add_separator_changepoint(new_separator_changepoint, separator_index);
            }
            // final separator: likelihood spans to the end of the data
            changepoint
new_separator_changepoint(m_separators[m_separators.size() - 1]);
            new_separator_changepoint.set_log_likelihood(m_pm_ptr->calculate_log_likelihood(m_separators[m_separators.size() - 1], m_end + 1));
            m_particle.add_separator_changepoint(new_separator_changepoint, static_cast< unsigned int >(m_separators.size() - 1));
        }
    }
    // add starting changepoints
    if (0 < starting_changepoints.size()) {
        for (unsigned int ncp = 0; ncp < starting_changepoints.size(); ncp++) {
            if (m_particle.does_changepoint_exist_in_particle(starting_changepoints[ncp])) {
                cerr << "changepoint already exists in the particle" << endl;
            }
            m_new_changepoint_position = starting_changepoints[ncp];
            changepoint new_changepoint(m_new_changepoint_position);
            unsigned int add_cp_index = m_particle.get_add_changepoint_index();
            // likelihood of the segment to the left of the new changepoint
            m_left_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(add_cp_index - 1).get_position(), m_new_changepoint_position);
            // likelihood of the segment to the right (to the end of the data
            // when the new changepoint becomes the last one)
            if (add_cp_index == m_particle.get_dimension()) {
                m_right_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_new_changepoint_position, m_end + 1);
            } else {
                m_right_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_new_changepoint_position, m_particle.get_changepoint(add_cp_index).get_position());
            }
            m_log_likelihood_ratio = m_left_log_likelihood + m_right_log_likelihood - m_particle.get_changepoint(add_cp_index - 1).get_log_likelihood();
            m_log_k_prior_ratio = m_particle.calculate_and_get_add_cp_k_prior_ratio();
            m_particle.increase_log_likelihood(m_log_likelihood_ratio);
            m_particle.increase_log_k_prior(m_log_k_prior_ratio);
            new_changepoint.set_log_likelihood(m_right_log_likelihood);
            m_particle.add_changepoint(m_particle.get_add_changepoint_index(), new_changepoint);
            m_particle.get_changepoint(m_particle.get_add_changepoint_index() - 1).set_log_likelihood(m_left_log_likelihood);
        }
    }
    m_end_changepoint = changepoint(m_end + 1);
    m_particle.set_log_likelihood(calculate_total_basic_log_likelihood());
    m_particle.calculate_and_set_log_k_prior();
    // set up the GSL random number generator
    gsl_rng_env_setup();
    r_type = gsl_rng_default;
    r = gsl_rng_alloc(r_type);
    gsl_rng_set(r, seed);
}

// Recomputes the total log-likelihood from scratch by summing each segment,
// warning on cerr whenever a recomputed segment disagrees with the cached
// per-changepoint value by more than 0.001.
// used to check that calculations are correct
double rj::calculate_total_basic_log_likelihood() {
    if (m_particle.get_dimension() == 0) {
        return m_pm_ptr->calculate_log_likelihood(m_intercept, m_end + 1);
    }
    // segment before the first changepoint
    if (0.001 < abs(m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(-1).get_position(), m_particle.get_changepoint(0).get_position()) - m_particle.get_changepoint(-1).get_log_likelihood())) {
        cerr << "actual - recorded = " << m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(-1).get_position(), m_particle.get_changepoint(0).get_position()) - m_particle.get_changepoint(-1).get_log_likelihood() << endl;
    }
    double total_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(-1).get_position(), m_particle.get_changepoint(0).get_position());
    double cp_index_log_likelihood;
    // interior segments
    for (unsigned int cp_index = 0; cp_index < m_particle.get_dimension() - 1; cp_index++) {
        cp_index_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(cp_index).get_position(), m_particle.get_changepoint(cp_index + 1).get_position());
        if (0.001 < abs(cp_index_log_likelihood - m_particle.get_changepoint(cp_index).get_log_likelihood())) {
            cerr << "actual - recorded = " << cp_index_log_likelihood - m_particle.get_changepoint(cp_index).get_log_likelihood() << endl;
        }
        total_log_likelihood += cp_index_log_likelihood;
    }
    // final segment, up to the end of the data
    total_log_likelihood += m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(m_particle.get_dimension() - 1).get_position(), m_end + 1);
    if (0.001 < abs(m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(m_particle.get_dimension() - 1).get_position(), m_end + 1) - m_particle.get_changepoint(m_particle.get_dimension() - 1).get_log_likelihood())) {
        cerr << "actual - recorded = " << m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(m_particle.get_dimension() - 1).get_position(), m_end + 1) - m_particle.get_changepoint(m_particle.get_dimension() - 1).get_log_likelihood() << endl;
    }
    return total_log_likelihood;
}

// calculate the log acceptance probability of the MH move that is being
// proposed (can be greater than 0, not doing min(1, x) bit); u1 selects the
// move type via the cumulative thresholds m_b_k < m_d_k < m_m_k < ...
double rj::calculate_full_log_acceptance_probability(const double & u1) {
    double full_log_acceptance_probability = 0;
    if (u1 < m_b_k) {
        // birth move: prior ratio for k -> k+1 changepoints ...
        full_log_acceptance_probability += log(m_p) - log(1 - m_p);
        full_log_acceptance_probability += log(static_cast< double >(m_end - m_dimension)) - log(static_cast< double >(m_dimension + 1));
        // ... with a proposal correction when starting from zero changepoints
        full_log_acceptance_probability += m_dimension == 0 ? -log(4.0) : 0.0;
        double old_I_prior_total = 0;
        for (unsigned int i = 0; i < m_number_of_processes; i++) {
            double old_I_prior = calculate_process_full_log_I_prior(i);
            old_I_prior_total += old_I_prior;
            size_t number_of_regimes = m_particle.get_number_of_regimes(i);
            // candidate regime assignments for the new changepoint, including
            // a brand-new regime (weighted by 1 - rho)
            vector< double > new_I_priors = vector< double >(number_of_regimes + 1);
            for (unsigned int reg = 0; reg < number_of_regimes; reg++) {
                new_I_priors[reg] = calculate_process_full_log_I_prior(i, m_particle.get_add_changepoint_index(), reg);
                // an unobserved highest regime cannot be proposed directly
                if ((reg == number_of_regimes - 1) && (m_particle.is_regime_unobserved(i, number_of_regimes - 1))) {
                    new_I_priors[reg] = -1e300;
                }
            }
            new_I_priors[number_of_regimes] = calculate_process_full_log_I_prior(i, m_particle.get_add_changepoint_index(), static_cast< unsigned int >(number_of_regimes)) + log(1 - m_particle.get_rho());
            full_log_acceptance_probability += calculate_log_of_sum_Bq_ratio(new_I_priors, old_I_prior);
        }
        // check if the old_I_prior_total equals the recorded value of full_I_prior
        //cout << "full I prior comparison " << old_I_prior_total - m_particle.get_log_full_I_prior() << endl;;
        //if (old_I_prior_total - m_particle.get_log_full_I_prior() > 0.0000001 || old_I_prior_total - m_particle.get_log_full_I_prior() < -0.0000001) {
        //    cout << "DD" << endl;
        //}
    } else if (u1 < m_d_k) {
// death move: mirror image of the birth ratios above
        full_log_acceptance_probability += log(1 - m_p) - log(m_p);
        full_log_acceptance_probability += log(static_cast< double >(m_dimension)) - log(static_cast< double >(m_end - m_dimension + 1));
        full_log_acceptance_probability += m_dimension == 1 ? log(4.0) : 0;
        for (unsigned int i = 0; i < m_number_of_processes; i++) {
            double old_I_prior = calculate_process_full_log_I_prior_without_cp(i, m_h);
            // drop one regime if removing changepoint m_h would leave the
            // highest regime unobserved
            size_t number_of_regimes = m_particle.get_number_of_regimes(i) - (m_particle.removing_full_changepoint_leaves_highest_regime_unobserved(i, m_particle.get_full_I_i_j(m_h, i)) ? 1 : 0);
            vector< double > new_I_priors = vector< double >(number_of_regimes + 1);
            for (unsigned int reg = 0; reg < number_of_regimes; reg++) {
                new_I_priors[reg] = calculate_process_full_log_I_prior_without_cp(i, m_h, reg);
                if ((reg == number_of_regimes - 1) && (m_particle.is_regime_unobserved(i, number_of_regimes - 1))) {
                    new_I_priors[reg] = -1e300;
                }
            }
            new_I_priors[number_of_regimes] = calculate_process_full_log_I_prior_without_cp(i, m_h, static_cast< unsigned int >(number_of_regimes)) + log(1 - m_particle.get_rho());
            // reverse of the birth correction, hence subtracted
            full_log_acceptance_probability -= calculate_log_of_sum_Bq_ratio(new_I_priors, old_I_prior);
        }
    } else if (u1 < m_m_k) {
        // move: forward and reverse corrections
        for (unsigned int i = 0; i < m_number_of_processes; i++) {
            double old_I_prior = calculate_process_full_log_I_prior_without_cp(i, m_h);
            size_t number_of_regimes = m_particle.get_number_of_regimes(i) - (m_particle.removing_full_changepoint_leaves_highest_regime_unobserved(i, m_particle.get_full_I_i_j(m_h, i)) ? 1 : 0);
            vector< double > new_I_priors = vector< double >(number_of_regimes + 1);
            for (unsigned int reg = 0; reg < number_of_regimes; reg++) {
                new_I_priors[reg] = calculate_process_full_log_I_prior_without_cp(i, m_h, reg);
                if ((reg == number_of_regimes - 1) && (m_particle.is_regime_unobserved(i, number_of_regimes - 1))) {
                    new_I_priors[reg] = -1e300;
                }
            }
            new_I_priors[number_of_regimes] = calculate_process_full_log_I_prior_without_cp(i, m_h, static_cast< unsigned int >(number_of_regimes)) + log(1 - m_particle.get_rho());
            // NOTE(review): this adds and subtracts the identical expression,
            // so the contribution is always exactly 0 — looks like one side
            // was meant to use the proposed (moved) state.  TODO confirm.
            full_log_acceptance_probability += calculate_log_of_sum_Bq_ratio(new_I_priors, old_I_prior) - calculate_log_of_sum_Bq_ratio(new_I_priors, old_I_prior);
        }
    } else if (u1 < m_r_k) {
        // resample move: symmetric proposal, no correction
        full_log_acceptance_probability += 0;
    } else if (u1 < m_au_k) {
        // alter-unobserved-regimes move: no correction
        full_log_acceptance_probability += 0;
    }
    return full_log_acceptance_probability;
}

// Log prior of process's regime-assignment vector I, optionally with a new
// changepoint (assigned proposed_regime) inserted at adding_index.  The
// defaulted arguments arrive as numeric_limits<unsigned int>::max() ("none").
double rj::calculate_process_full_log_I_prior(const double & process, const unsigned int & adding_index, const unsigned int & proposed_regime) {
    vector< unsigned int > regime_vector = vector< unsigned int >(0);
    for (unsigned int cp_idx = 0; cp_idx < m_dimension; cp_idx++) {
        regime_vector.push_back(m_particle.get_full_I_i_j(cp_idx, process));
    }
    size_t number_of_regimes = m_particle.get_number_of_regimes(process);
    // if we are calculating the full_log_I_prior for the process with a new regime added then add the new regime in
    if (adding_index != numeric_limits< unsigned int >::max()) {
        regime_vector.insert(regime_vector.begin() + adding_index, proposed_regime);
        if (proposed_regime == number_of_regimes) {
            number_of_regimes++;
        }
    }
    return calculate_full_log_I_prior(regime_vector, number_of_regimes);
}

// As above, but with changepoint m_h removed (and optionally replaced by
// proposed_regime in its slot); adjusts the regime count if the removal
// leaves the highest regime unobserved.
double rj::calculate_process_full_log_I_prior_without_cp(const double & process, const unsigned int & removing_index, const unsigned int & proposed_regime) {
    vector< unsigned int > regime_vector = vector< unsigned int >(0);
    for (unsigned int cp_idx = 0; cp_idx < m_h; cp_idx++) {
        regime_vector.push_back(m_particle.get_full_I_i_j(cp_idx, process));
    }
    size_t number_of_regimes = m_particle.get_number_of_regimes(process);
    if (m_particle.removing_full_changepoint_leaves_highest_regime_unobserved(process, m_particle.get_full_I_i_j(m_h, process))) {
        number_of_regimes--;
    }
    if (proposed_regime != numeric_limits< unsigned int >::max()) {
        regime_vector.push_back(proposed_regime);
        if (proposed_regime == number_of_regimes) {
            number_of_regimes++;
        }
    }
    for (unsigned int cp_idx = m_h + 1; cp_idx < m_dimension; cp_idx++) {
        regime_vector.push_back(m_particle.get_full_I_i_j(cp_idx, process));
    }
    return calculate_full_log_I_prior(regime_vector, number_of_regimes);
}

// Dirichlet-multinomial log prior of a regime sequence: lngamma terms over the
// regime transition counts with concentration delta per transition.
double rj::calculate_full_log_I_prior(const vector< unsigned int > & regime_vector, const size_t & number_of_regimes) {
    vector< vector< double > > regime_transition_mat = generate_regime_transition_matrix(regime_vector, number_of_regimes);
    double num_reg = static_cast< double >(number_of_regimes), delta = m_particle.get_dirichlet_alpha();
    // normalising constant of the Dirichlet prior, once per regime row
    double full_log_I_prior = num_reg * (gsl_sf_lngamma(num_reg * delta) - num_reg * gsl_sf_lngamma(delta));
    for (unsigned int reg_0 = 0; reg_0 < number_of_regimes; reg_0++) {
        double iota_reg_0 = 0;  // total transitions out of regime reg_0
        for (unsigned int reg_1 = 0; reg_1 < number_of_regimes; reg_1++) {
            double iota_reg_0_reg_1 = regime_transition_mat[reg_0][reg_1];
            full_log_I_prior += gsl_sf_lngamma(delta + iota_reg_0_reg_1);
            iota_reg_0 += iota_reg_0_reg_1;
        }
        full_log_I_prior -= gsl_sf_lngamma(num_reg * delta + iota_reg_0);
    }
    return full_log_I_prior;
}

// Counts regime-to-regime transitions along regime_vector (the chain is taken
// to start from regime 0, hence the extra initial increment).
vector< vector< double > > rj::generate_regime_transition_matrix(const vector< unsigned int > & regime_vector, const size_t & number_of_regimes) {
    vector< vector< double > > regime_transition_matrix = vector< vector< double > >(number_of_regimes, vector< double >(number_of_regimes, 0.0));
    if (regime_vector.size() > 0) {
        regime_transition_matrix[0][regime_vector[0]]++;
        for (unsigned int idx = 0; idx < regime_vector.size() - 1; idx++) {
            regime_transition_matrix[regime_vector[idx]][regime_vector[idx + 1]]++;
        }
    }
    return
regime_transition_matrix; } double rj::calculate_log_of_sum_Bq_ratio(const vector< double > & new_I_priors, const double & old_I_prior) { vector< double > a = new_I_priors; double log_of_sum_Bq = 0, max_a = a[0], temp_sum = 0; // subtract old_I_prior from each element and find the largest element for (unsigned int i = 0; i < a.size(); i++) { if (a[i] > max_a) { max_a = a[i]; } } log_of_sum_Bq += max_a - old_I_prior; for (unsigned int i = 0; i < a.size(); i++) { temp_sum += exp(a[i] - max_a); } log_of_sum_Bq += log(temp_sum); return log_of_sum_Bq; } void rj::adding_basic_changepoint_setup(const unsigned int & trace_index) { unsigned long int lower_position_bound = ((trace_index == 0) ? 0 : m_separators[trace_index - 1]); unsigned long int upper_position_bound = ((trace_index == m_number_of_traces - 1) ? m_end + 1 : m_separators[trace_index]); m_new_changepoint_position = gsl_rng_uniform_int(r, upper_position_bound - lower_position_bound - 1) + lower_position_bound + 1; while (m_particle.does_changepoint_exist_in_particle(m_new_changepoint_position)) { m_new_changepoint_position = gsl_rng_uniform_int(r, upper_position_bound - lower_position_bound - 1) + lower_position_bound + 1; } m_log_proposal_ratio = m_particle.calculate_and_get_add_cp_proposal_ratio(upper_position_bound - lower_position_bound - 1, trace_index, true); m_adding_changepoint = changepoint(m_new_changepoint_position); unsigned int add_cp_index = m_particle.get_add_changepoint_index(); m_left_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(add_cp_index - 1).get_position(), m_new_changepoint_position); if (add_cp_index == m_dimension) { m_right_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_new_changepoint_position, m_end + 1); } else { m_right_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_new_changepoint_position, m_particle.get_changepoint(add_cp_index).get_position()); } m_log_likelihood_ratio = m_left_log_likelihood + m_right_log_likelihood - 
m_particle.get_changepoint(add_cp_index - 1).get_log_likelihood(); m_log_k_prior_ratio = m_particle.calculate_and_get_add_cp_k_prior_ratio(); m_log_acceptance_prob = m_log_proposal_ratio + m_log_k_prior_ratio + m_log_likelihood_ratio; } void rj::removing_basic_changepoint_setup(const unsigned int & trace_index) { int lower_index_bound = ((trace_index == 0) ? -1 : static_cast< int >(m_particle.get_separator_index(trace_index - 1))); unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index); m_h = static_cast< unsigned int >(gsl_rng_uniform_int(r, trace_dimension) + lower_index_bound + 1); m_log_proposal_ratio = m_particle.calculate_and_get_remove_cp_proposal_ratio(trace_dimension, trace_index, true); if (m_h == m_dimension - 1) { m_merged_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(m_h - 1).get_position(), m_end + 1); } else { m_merged_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_particle.get_changepoint(m_h - 1).get_position(), m_particle.get_changepoint(m_h + 1).get_position()); } m_log_likelihood_ratio = m_merged_log_likelihood - m_particle.get_changepoint(m_h - 1).get_log_likelihood() - m_particle.get_changepoint(m_h).get_log_likelihood(); m_log_k_prior_ratio = m_particle.calculate_and_get_remove_cp_k_prior_ratio(); m_log_acceptance_prob = m_log_proposal_ratio + m_log_k_prior_ratio + m_log_likelihood_ratio; } void rj::moving_basic_changepoint_setup(const unsigned int & trace_index) { // choose which changepoint to move (we know that there is at least one that can be moved from our definitions of b_k, d_k, etc.) int lower_index_bound = ((trace_index == 0) ? 
// (continuation of rj::moving_basic_changepoint_setup — the ?: selects the
// index of the changepoint immediately before this trace)
-1 : static_cast< int >(m_particle.get_separator_index(trace_index - 1)));
    unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
    // uniformly pick one of this trace's changepoints to move
    m_h = static_cast< unsigned int >(gsl_rng_uniform_int(r, trace_dimension) + lower_index_bound + 1);
    // the move is constrained to the open interval between the two neighbouring changepoints
    unsigned long int tau_h_minus_1 = m_particle.get_changepoint(m_h - 1).get_position();
    unsigned long int tau_h_plus_1;
    if (m_h == m_dimension - 1) {
        tau_h_plus_1 = m_end + 1;
    } else {
        tau_h_plus_1 = m_particle.get_changepoint(m_h + 1).get_position();
    }
    m_new_changepoint_position = gsl_rng_uniform_int(r, tau_h_plus_1 - tau_h_minus_1 - 1) + tau_h_minus_1 + 1;
    if (tau_h_plus_1 - tau_h_minus_1 > 2) { //if there is somewhere to move the changepoint to. E.g. 16, 17, 18 move 17 - there is nowhere to move it to!
        // resample until the proposed position is unoccupied (ignoring changepoint m_h itself)
        while(m_particle.does_changepoint_exist_in_particle(m_new_changepoint_position, m_h, m_h)) {
            m_new_changepoint_position = gsl_rng_uniform_int(r, tau_h_plus_1 - tau_h_minus_1 - 1) + tau_h_minus_1 + 1;
        }
    }
    // a move is a symmetric, dimension-preserving proposal, so both ratios are zero
    m_log_proposal_ratio = 0;
    m_log_k_prior_ratio = 0;
    m_left_log_likelihood = m_pm_ptr->calculate_log_likelihood(tau_h_minus_1, m_new_changepoint_position);
    m_right_log_likelihood = m_pm_ptr->calculate_log_likelihood(m_new_changepoint_position, tau_h_plus_1);
    m_log_likelihood_ratio = m_left_log_likelihood + m_right_log_likelihood - m_particle.get_changepoint(m_h - 1).get_log_likelihood() - m_particle.get_changepoint(m_h).get_log_likelihood();
    m_log_acceptance_prob = m_log_likelihood_ratio;
}

// Commit the proposal selected by u1 (birth if u1 < m_b_k, death if u1 < m_d_k,
// move if u1 < m_m_k) to m_particle, reusing the quantities cached by the
// corresponding *_setup call.
void rj::basic_acceptance_procedure(const double & u1) {
    m_particle.increase_log_likelihood(m_log_likelihood_ratio);
    m_particle.increase_log_k_prior(m_log_k_prior_ratio);
    if (u1 < m_b_k) {
        // birth: insert the new changepoint and refresh the likelihoods on both sides
        m_adding_changepoint.set_log_likelihood(m_right_log_likelihood);
        m_particle.add_changepoint(m_particle.get_add_changepoint_index(), m_adding_changepoint);
        m_particle.get_changepoint(m_particle.get_add_changepoint_index() - 1).set_log_likelihood(m_left_log_likelihood);
    } else if (u1 < m_d_k) {
        // death: delete changepoint m_h and give the merged segment its likelihood
        m_particle.remove_changepoint(m_h);
        m_particle.get_changepoint(m_h - 1).set_log_likelihood(m_merged_log_likelihood);
    } else if (u1 < m_m_k) {
        // move: shift changepoint m_h and refresh the two affected segment likelihoods
        m_particle.move_changepoint(m_h, m_new_changepoint_position, m_left_log_likelihood, m_right_log_likelihood);
    }
}

// Record one (thinned) sample of the basic sampler: the dimension, an
// accumulated changepoint-position histogram, and the log posterior.
void rj::basic_recording_procedure() {
    m_recorded_basic_dimensions.push_back(m_dimension);
    vector< unsigned long int > changepoint_hist = m_particle.calculate_and_get_changepoint_histogram(m_number_of_changepoint_bins);
    size_t size_of_recorded_changepoints = m_recorded_basic_changepoints.size();
    if (size_of_recorded_changepoints > 0) { //have we started recording changepoint histograms, or is this the first time?
        for (unsigned long int i = 0; i < size_of_recorded_changepoints; i++) {
            m_recorded_basic_changepoints[i] += changepoint_hist[i];
        }
    } else {
        m_recorded_basic_changepoints = changepoint_hist;
    }
    m_recorded_basic_log_posteriors.push_back(m_particle.get_basic_log_posterior());
}

// Run the basic (unmarked) changepoint RJMCMC: m_basic_burnin burn-in sweeps
// followed by m_basic_iterations sampling sweeps with optional MAP tracking
// and thinned recording.
void rj::run_basic_simulation(){
    m_particle.print_likelihood();
    cout << "the posterior is " << m_particle.get_basic_log_posterior() << endl << endl;
    //if (abs(m_particle.calculate_and_get_basic_log_posterior(m_number_of_processes) - m_particle.get_basic_log_posterior()) > 0.0001) {
    //    cout << m_particle.calculate_and_get_basic_log_posterior(m_number_of_processes) - m_particle.get_basic_log_posterior() << endl;
    //}
    cout << "dimension: " << m_particle.get_dimension() << endl;
    double accepted_adds = 0, accepted_deaths = 0, accepted_moves = 0;
    double attempted_adds = 0, attempted_deaths = 0, attempted_moves = 0;
    for (unsigned long int iteration = 0; iteration < m_basic_burnin; iteration++) {
        m_dimension = m_particle.get_dimension();
        unsigned int trace_index = static_cast< unsigned int >(gsl_rng_uniform_int(r, m_number_of_traces));
        unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
        // with no changepoints in the trace only a birth can be proposed (m_b_k = 1)
        if (trace_dimension > 0) {
            m_b_k = 1.0 / 3.0, m_d_k = 2.0 / 3.0, m_m_k = 1.0;
        } else {
            m_b_k = 1.0;
        }
        /*if (iteration == 100 || iteration == 250 || iteration == 500 || 
iteration == 750 || iteration == 1000 || iteration == 10000) {
            cout << "iteration: " << iteration << endl;
            cout << "dimension: " << m_dimension << endl;
            cout << "birth: " << accepted_adds << " / " << attempted_adds << " = " << accepted_adds / attempted_adds << endl;
            cout << "death: " << accepted_deaths << " / " << attempted_deaths << " = " << accepted_deaths / attempted_deaths << endl;
            cout << "move: " << accepted_moves << " / " << attempted_moves << " = " << accepted_moves / attempted_moves << endl;
        }*/
        //m_particle.check_separator_changepoints();
        // choose the proposal type (birth / death / move) and set up its ratios
        double u1 = gsl_ran_flat(r, 0, 1);
        if (u1 < m_b_k) {
            //birth
            adding_basic_changepoint_setup(trace_index);
            attempted_adds += 1;
        } else if (u1 < m_d_k) {
            //death
            removing_basic_changepoint_setup(trace_index);
            attempted_deaths += 1;
        } else if (u1 < m_m_k) {
            //move
            moving_basic_changepoint_setup(trace_index);
            attempted_moves += 1;
        }
        // Metropolis-Hastings accept/reject, done in log space
        if (m_log_acceptance_prob > 0 || (log(gsl_ran_flat(r, 0, 1)) < m_log_acceptance_prob)) {
            basic_acceptance_procedure(u1);
            if (u1 < m_b_k) {
                //birth
                accepted_adds += 1;
            } else if (u1 < m_d_k) {
                //death
                accepted_deaths += 1;
            } else if (u1 < m_m_k) {
                //move
                accepted_moves += 1;
            }
        }
        //if (abs(m_particle.calculate_and_get_basic_log_posterior(m_number_of_processes) - m_particle.get_basic_log_posterior()) > 0.0001) {
        //    cout << iteration << '\t' << m_particle.calculate_and_get_basic_log_posterior(m_number_of_processes) - m_particle.get_basic_log_posterior() << endl;
        //}
    }
    // sampling sweeps: same proposal mechanics, but now track the MAP particle
    // and record thinned samples
    for (unsigned long int iteration = 0; iteration < m_basic_iterations; iteration++) {
        m_dimension = m_particle.get_dimension();
        unsigned int trace_index = static_cast< unsigned int >(gsl_rng_uniform_int(r, m_number_of_traces));
        unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
        if (trace_dimension > 0) {
            m_b_k = 1.0 / 3.0, m_d_k = 2.0 / 3.0, m_m_k = 1.0;
        } else {
            m_b_k = 1.0;
        }
        double u1 = gsl_ran_flat(r, 0, 1);
        if (u1 < m_b_k) {
            //birth
            adding_basic_changepoint_setup(trace_index);
        } else if (u1 < m_d_k) {
            //death
            removing_basic_changepoint_setup(trace_index);
        } else if (u1 < m_m_k) {
            //move
            moving_basic_changepoint_setup(trace_index);
        }
        if (m_log_acceptance_prob > 0 || (log(gsl_ran_flat(r, 0, 1)) < m_log_acceptance_prob)) {
            basic_acceptance_procedure(u1);
        }
        // keep the maximum-a-posteriori particle seen so far
        if (m_recording_basic_samples) {
            double log_posterior = m_particle.get_basic_log_posterior();
            if (m_basic_MAP_log_posterior < log_posterior) {
                m_basic_MAP_particle = m_particle;
                m_basic_MAP_dimension = m_particle.get_dimension();
                m_basic_MAP_log_posterior = log_posterior;
            }
        }
        if (m_recording_basic_samples && (iteration % m_basic_thinning == 0)) { //store sample
            basic_recording_procedure();
        }
    }
    m_dimension = m_particle.get_dimension();
    cout << "ending basic changepoint stage" << endl;
    m_particle.print_likelihood();
    cout << "the posterior is " << m_particle.get_basic_log_posterior() << endl << endl;
}

// Write the basic-stage MAP changepoint positions, one per line.
void rj::write_basic_MAP_changepoints_to_file(const string & MAP_cps_Filename){
    ofstream OutputStream(MAP_cps_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (unsigned int cp_index = 0; cp_index < m_basic_MAP_dimension; cp_index++) {
        OutputStream << m_basic_MAP_particle.get_changepoint(cp_index).get_position() << "\n";
    }
    OutputStream.close();
}

// Write the binary-stage MAP changepoints: scaled position followed by the
// binary indicator for every process, tab separated, one row per changepoint.
void rj::write_binary_MAP_changepoints_to_file(const string & MAP_cps_Filename){
    ofstream OutputStream(MAP_cps_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (unsigned int cp_index = 0; cp_index < m_binary_MAP_dimension; cp_index++) {
        OutputStream << m_binary_MAP_particle.get_changepoint(cp_index).get_position() * m_pm_ptr->get_diff() << "\t";
        for (unsigned int process = 0; process < m_number_of_processes; process++) {
            OutputStream << m_binary_MAP_particle.get_binary_I_i_j(cp_index, process) << "\t";
        }
        OutputStream << endl;
    }
    OutputStream.close();
}

// Write the full-stage MAP: log posterior, dimension, then one row per
// changepoint (scaled position + per-process marks).
void rj::write_full_MAP_changepoints_to_file(const string & MAP_cps_Filename) {
    ofstream OutputStream(MAP_cps_Filename, ios::out);
OutputStream.precision(10);
    OutputStream << m_full_MAP_log_posterior << endl;
    OutputStream << m_full_MAP_dimension << endl;
    for (unsigned int cp_index = 0; cp_index < m_full_MAP_dimension; cp_index++) {
        OutputStream << m_full_MAP_particle.get_changepoint(cp_index).get_position() * m_pm_ptr->get_diff() << "\t";
        for (unsigned int process = 0; process < m_number_of_processes; process++) {
            OutputStream << m_full_MAP_particle.get_full_I_i_j(cp_index, process) << "\t";
        }
        OutputStream << endl;
    }
    OutputStream.close();
}

// Posterior distribution of the basic-stage model dimension (number of
// changepoints), written as "k <tab> probability" rows.
void rj::write_basic_dimension_distribution_to_file( const string & dimension_distribution_Filename ){
    ofstream OutputStream( dimension_distribution_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    // the largest recorded dimension determines the histogram length
    unsigned long int max_dim = 0;
    for (size_t i = 0; i < m_recorded_basic_dimensions.size(); i++) {
        if (max_dim < m_recorded_basic_dimensions[i]){
            max_dim = m_recorded_basic_dimensions[i];
        }
    }
    // histogram of dimension counts
    vector< double > dimension_counts(max_dim + 1, 0.0);
    for (size_t i = 0; i < m_recorded_basic_dimensions.size(); i++) {
        dimension_counts[m_recorded_basic_dimensions[i]]++;
    }
    // normalise the counts to probabilities
    for (size_t j = 0; j <= max_dim; j++) {
        dimension_counts[j] /= static_cast< double >(m_recorded_basic_dimensions.size());
    }
    for (unsigned int k = 0; k <= max_dim; k++) {
        OutputStream << k << "\t" << dimension_counts[k] << "\n";
    }
    OutputStream.close();
}

// Same layout as above, for the binary-stage dimension samples.
void rj::write_binary_dimension_distribution_to_file(const string & dimension_distribution_Filename) {
    ofstream OutputStream( dimension_distribution_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    // calculate the maximum observed dimension
    unsigned long int max_dim = 0;
    for (size_t i = 0; i < m_recorded_binary_dimensions.size(); i++) {
        if (max_dim < m_recorded_binary_dimensions[i]) {
            max_dim = m_recorded_binary_dimensions[i];
        }
    }
    // calculate a histogram of dimension counts
    vector< double > dimension_counts(max_dim + 1, 0.0);
    for (size_t i = 0; i < m_recorded_binary_dimensions.size(); i++) {
        dimension_counts[m_recorded_binary_dimensions[i]]++;
    }
    // convert from a frequency to a probability
    for (size_t j = 0; j <= max_dim; j++) {
        dimension_counts[j] /= static_cast< double >(m_recorded_binary_dimensions.size());
    }
    for (unsigned int k = 0; k <= max_dim; k++) {
        OutputStream << k << "\t" << dimension_counts[k] << "\n";
    }
    OutputStream.close();
}

// Same layout as above, for the full-stage dimension samples.
void rj::write_full_dimension_distribution_to_file(const string & dimension_distribution_Filename){
    ofstream OutputStream(dimension_distribution_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    // calculate what the largest observed dimension was
    unsigned long int max_dim = 0;
    for (size_t i = 0; i < m_recorded_full_dimensions.size(); i++){
        if (max_dim < m_recorded_full_dimensions[i]){
            max_dim = m_recorded_full_dimensions[i];
        }
    }
    // create a histogram of dimension counts
    vector< double > dimension_counts(max_dim + 1, 0.0);
    for (unsigned int i = 0; i < m_recorded_full_dimensions.size(); i++){
        dimension_counts[m_recorded_full_dimensions[i]]++;
    }
    // convert the counts from a frequency to a probability
    for (size_t j = 0; j <= max_dim; j++){
        dimension_counts[j] /= static_cast< double >(m_recorded_full_dimensions.size());
    }
    for( unsigned int k = 0; k <= max_dim; k++ ){
        OutputStream << k << "\t" << dimension_counts[k] << "\n";
    }
    OutputStream.close();
}

// Distribution of the full-stage *effective* dimension samples.
void rj::write_full_effective_dimension_distribution_to_file(const string & effective_dimension_distribution_Filename){
    ofstream OutputStream(effective_dimension_distribution_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    // calculate what the largest observed dimension was
    unsigned long int max_dim = 0;
    for (size_t i = 0; i < m_recorded_full_effective_dimensions.size(); i++){
        if (max_dim < m_recorded_full_effective_dimensions[i]){
            max_dim = m_recorded_full_effective_dimensions[i];
        }
    }
    // create a histogram of dimension counts
    vector< double > dimension_counts(max_dim + 1, 0.0);
    for (unsigned int i = 0; i < m_recorded_full_effective_dimensions.size(); i++){
        dimension_counts[m_recorded_full_effective_dimensions[i]]++;
    }
    // convert the counts from a frequency to a probability
    for (size_t j = 0; j <= max_dim; j++){
        dimension_counts[j] /= static_cast< double >(m_recorded_full_effective_dimensions.size());
    }
    for( unsigned int k = 0; k <= max_dim; k++ ){
        OutputStream << k << "\t" << dimension_counts[k] << "\n";
    }
    OutputStream.close();
}

// Averaged changepoint-position histogram over the recorded basic samples.
// NOTE(review): the stream is not explicitly closed here; the ofstream
// destructor closes it at scope exit.
void rj::write_basic_changepoints_distribution_to_file(const string & changepoints_distribution_Filename){
    ofstream OutputStream( changepoints_distribution_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (size_t i = 0; i < m_recorded_basic_changepoints.size(); i++){
        OutputStream << static_cast< double >(m_recorded_basic_changepoints[i]) / static_cast< double >(m_basic_iterations / m_basic_thinning) << "\n";
    }
}

// Per-process averaged changepoint histograms for the binary stage, one row
// per process; the flat record vector is indexed as process * bins + bin.
void rj::write_binary_changepoints_distribution_to_file(const string & changepoints_distribution_Filename){
    ofstream OutputStream( changepoints_distribution_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    if (m_recorded_binary_changepoints.size() != m_number_of_processes * m_number_of_changepoint_bins){
        cerr << "don't match";
    }
    for (unsigned int j = 0; j < m_number_of_processes; j++ ){
        for (unsigned int index = 0; index < m_number_of_changepoint_bins; index++) {
            OutputStream << static_cast< double >(m_recorded_binary_changepoints[m_number_of_changepoint_bins * j + index]) / static_cast< double >(m_binary_iterations / m_binary_thinning) << "\t";
        }
        OutputStream << endl;
    }
}

// Per-process averaged changepoint histograms for the full stage; the number
// of recording iterations is supplied by the caller.
void rj::write_full_changepoints_distribution_to_file(const string & changepoints_distribution_Filename, const unsigned long int number_of_iterations){
    ofstream OutputStream(changepoints_distribution_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(5);
    if 
(m_recorded_full_changepoints.size() != m_number_of_processes * m_number_of_changepoint_bins){
        cerr << "don't match";
    }
    for (unsigned int j = 0; j < m_number_of_processes; j++ ){
        for (unsigned int index = 0; index < m_number_of_changepoint_bins; index++) {
            OutputStream << static_cast< double >(m_recorded_full_changepoints[m_number_of_changepoint_bins * j + index]) / static_cast< double >(number_of_iterations / m_full_thinning) << "\t";
        }
        OutputStream << endl;
    }
}

// Posterior distribution of the number of regimes for each process, written
// as "process <tab> k <tab> probability" rows (k starts at 1 here).
void rj::write_number_of_regimes_to_file(const string & number_of_regimes_Filename) {
    ofstream OutputStream(number_of_regimes_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (unsigned int proc = 0; proc < m_number_of_processes; proc++) {
        // calculate the largest recorded number of regimes for this process
        unsigned long int max_r = 1;
        for (size_t i = 0; i < m_recorded_number_of_regimes.size(); i++){
            if (max_r < m_recorded_number_of_regimes[i][proc]){
                max_r = m_recorded_number_of_regimes[i][proc];
            }
        }
        // create a histogram of regime counts
        vector< double > regime_counts(max_r + 1, 0.0);
        for (unsigned int i = 0; i < m_recorded_number_of_regimes.size(); i++){
            // NOTE(review): the subscript is cast to double and implicitly
            // converted back to a size type — the cast looks redundant
            regime_counts[static_cast< double >(m_recorded_number_of_regimes[i][proc])]++;
        }
        // convert the counts from a frequency to a probability
        // NOTE(review): normalises by m_recorded_full_dimensions.size() rather
        // than m_recorded_number_of_regimes.size() — presumably both record
        // vectors grow in lock-step; verify
        for (size_t j = 0; j <= max_r; j++){
            regime_counts[j] /= static_cast< double >(m_recorded_full_dimensions.size());
        }
        for( unsigned int k = 1; k <= max_r; k++ ){
            OutputStream << proc << "\t" << k << "\t" << regime_counts[k] << "\n";
        }
    }
    OutputStream.close();
}

// As above, but for the number of *observed* regimes (k starts at 0 here).
void rj::write_number_of_observed_regimes_to_file(const string & number_of_observed_regimes_Filename) {
    ofstream OutputStream(number_of_observed_regimes_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (unsigned int proc = 0; proc < m_number_of_processes; proc++) {
        // calculate the largest recorded number of regimes for this process
        unsigned long int max_r = 1;
        for (size_t i = 0; i < m_recorded_number_of_observed_regimes.size(); i++){
            if (max_r < m_recorded_number_of_observed_regimes[i][proc]){
                max_r = m_recorded_number_of_observed_regimes[i][proc];
            }
        }
        // create a histogram of regime counts
        vector< double > regime_counts(max_r + 1, 0.0);
        for (unsigned int i = 0; i < m_recorded_number_of_observed_regimes.size(); i++){
            regime_counts[static_cast< double >(m_recorded_number_of_observed_regimes[i][proc])]++;
        }
        // convert the counts from a frequency to a probability
        // NOTE(review): same normalisation caveat as write_number_of_regimes_to_file
        for (size_t j = 0; j <= max_r; j++){
            regime_counts[j] /= static_cast< double >(m_recorded_full_dimensions.size());
        }
        for( unsigned int k = 0; k <= max_r; k++ ){
            OutputStream << proc << "\t" << k << "\t" << regime_counts[k] << "\n";
        }
    }
    OutputStream.close();
}

// Raw trace of the basic-stage log posterior, one value per recorded sample.
void rj::write_basic_log_posterior_trace_to_file(const string & log_posterior_trace_Filename){
    ofstream OutputStream(log_posterior_trace_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (size_t i = 0; i < m_recorded_basic_log_posteriors.size(); i++ ){
        OutputStream << m_recorded_basic_log_posteriors[i] << "\n";
    }
}

// Raw trace of the binary-stage log posterior.
void rj::write_binary_log_posterior_trace_to_file(const string & log_posterior_trace_Filename){
    ofstream OutputStream(log_posterior_trace_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (size_t i = 0; i < m_recorded_binary_log_posteriors.size(); i++ ){
        OutputStream << m_recorded_binary_log_posteriors[i] << "\n";
    }
}

// Raw trace of the full-stage log posterior.
void rj::write_full_log_posterior_trace_to_file(const string & log_posterior_trace_Filename){
    ofstream OutputStream(log_posterior_trace_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (size_t i = 0; i < m_recorded_full_log_posteriors.size(); i++ ){
        OutputStream << m_recorded_full_log_posteriors[i] << "\n";
    }
}

// Raw trace of the basic-stage dimension samples.
void rj::write_basic_dimension_trace_to_file(const string & dimension_trace_Filename) {
    ofstream OutputStream(dimension_trace_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (size_t i = 0; i < m_recorded_basic_dimensions.size(); i++ ){
        OutputStream << m_recorded_basic_dimensions[i] << "\n";
    }
}

// Raw trace of the full-stage dimension samples.
void rj::write_dimension_trace_to_file(const string & dimension_trace_Filename) {
    ofstream OutputStream(dimension_trace_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (size_t i = 0; i < m_recorded_full_dimensions.size(); i++ ){
        OutputStream << m_recorded_full_dimensions[i] << "\n";
    }
}

// Trace of the per-process number of regimes, one comma-separated row per
// recorded sample.
void rj::write_number_of_regimes_trace_to_file(const string & number_of_regimes_trace_Filename) {
    ofstream OutputStream(number_of_regimes_trace_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    for (size_t i = 0; i < m_recorded_number_of_regimes.size(); i++ ) {
        for (size_t j = 0; j < m_recorded_number_of_regimes[i].size() - 1; j++) {
            OutputStream << m_recorded_number_of_regimes[i][j] << ",";
        }
        OutputStream << m_recorded_number_of_regimes[i][m_recorded_number_of_regimes[i].size() - 1] << endl;
    }
}

// Summary of full-stage acceptance rates, one labelled line per move type.
// The counters are doubles, so the first << prints the acceptance ratio and
// the casts print the raw integer counts.
void rj::write_full_acceptance_probabilities_to_file(const string & acceptance_probabilities_Filename) {
    ofstream OutputStream(acceptance_probabilities_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(10);
    OutputStream << "full birth acceptance probability" << "\t" << m_recorded_full_birth_acceptances / m_recorded_full_birth_proposals << " = " << static_cast< unsigned int >(m_recorded_full_birth_acceptances) << " / " << static_cast< unsigned int >(m_recorded_full_birth_proposals) << endl;
    OutputStream << "full death acceptance probability" << "\t" << m_recorded_full_death_acceptances / m_recorded_full_death_proposals << " = " << static_cast< unsigned int >(m_recorded_full_death_acceptances) << " / " << static_cast< unsigned int >(m_recorded_full_death_proposals) << endl;
    OutputStream << "full move acceptance probability" << "\t" << m_recorded_full_move_acceptances / 
m_recorded_full_move_proposals << " = " << static_cast< unsigned int >(m_recorded_full_move_acceptances) << " / " << static_cast< unsigned int >(m_recorded_full_move_proposals) << endl;
    OutputStream << "full resample acceptance probability" << "\t" << m_recorded_full_resample_acceptances / m_recorded_full_resample_proposals << " = " << static_cast< unsigned int >(m_recorded_full_resample_acceptances) << " / " << static_cast< unsigned int >(m_recorded_full_resample_proposals) << endl;
    OutputStream << "full alter unobserveds acceptance probability" << "\t" << m_recorded_full_unobserveds_acceptances / m_recorded_full_unobserveds_proposals << " = " << static_cast< unsigned int >(m_recorded_full_unobserveds_acceptances) << " / " << static_cast< unsigned int >(m_recorded_full_unobserveds_proposals) << endl;
    // the overall rate pools every proposal type
    OutputStream << "full overall acceptance probability" << "\t" << (m_recorded_full_birth_acceptances + m_recorded_full_death_acceptances + m_recorded_full_move_acceptances + m_recorded_full_resample_acceptances + m_recorded_full_unobserveds_acceptances) / (m_recorded_full_birth_proposals + m_recorded_full_death_proposals + m_recorded_full_move_proposals + m_recorded_full_resample_proposals + m_recorded_full_unobserveds_proposals) << " = " << static_cast< unsigned int >(m_recorded_full_birth_acceptances + m_recorded_full_death_acceptances + m_recorded_full_move_acceptances + m_recorded_full_resample_acceptances + m_recorded_full_unobserveds_acceptances) << " / " << static_cast< unsigned int >(m_recorded_full_birth_proposals + m_recorded_full_death_proposals + m_recorded_full_move_proposals + m_recorded_full_resample_proposals + m_recorded_full_unobserveds_proposals);
}

// Trace-by-trace similarity matrix averaged over the recording iterations;
// off-diagonal entries are also normalised by the two traces' observation
// counts.
void rj::write_similarity_matrix_to_file(const string & similarity_matrix_Filename) {
    ofstream OutputStream(similarity_matrix_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(5);
    if (m_recorded_similarity_matrix.size() != m_number_of_traces) {
        cerr << "similarity matrix size doesn't match";
    }
    for (unsigned int trace_0 = 0; trace_0 < m_number_of_traces; trace_0++) {
        for (unsigned int trace_1 = 0; trace_1 < m_number_of_traces; trace_1++) {
            if (trace_0 == trace_1) {
                // a trace is always perfectly similar to itself
                OutputStream << 1 << '\t';
            } else {
                OutputStream << m_recorded_similarity_matrix[trace_0][trace_1] / (static_cast< double >(m_full_iterations / m_full_thinning) * static_cast< double >(m_observations_in_each_trace[trace_0] + m_observations_in_each_trace[trace_1])) << '\t';
            }
        }
        OutputStream << endl;
    }
}

// Same layout, for the min-proportion similarity statistic (already a
// proportion, so only divided by the number of recordings).
void rj::write_min_proportion_similarity_matrix_to_file(const string & min_proportion_similarity_matrix_Filename) {
    ofstream OutputStream(min_proportion_similarity_matrix_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(5);
    if (m_recorded_min_proportion_similarity_matrix.size() != m_number_of_traces) {
        cerr << "similarity matrix size doesn't match";
    }
    for (unsigned int trace_0 = 0; trace_0 < m_number_of_traces; trace_0++) {
        for (unsigned int trace_1 = 0; trace_1 < m_number_of_traces; trace_1++) {
            if (trace_0 == trace_1) {
                OutputStream << 1 << '\t';
            } else {
                OutputStream << m_recorded_min_proportion_similarity_matrix[trace_0][trace_1] / static_cast< double >(m_full_iterations / m_full_thinning) << '\t';
            }
        }
        OutputStream << endl;
    }
}

// Per-iteration similarity values for every pair of traces (upper triangle):
// one column per pair, one row per recorded iteration.
void rj::write_similarity_matrices_to_file(const string & similarity_matrices_Filename) {
    ofstream OutputStream(similarity_matrices_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(5);
    // start by making a header for the file, detailing which pair of traces we are recording the similarity of
    for (unsigned int trace_0 = 0; trace_0 < m_number_of_traces; trace_0++) {
        for (unsigned int trace_1 = trace_0 + 1; trace_1 < m_number_of_traces; trace_1++) {
            if (trace_0 == m_number_of_traces - 2 && trace_1 == m_number_of_traces - 1) {
                // final pair of the header row: end the line
                OutputStream << "traces_" << trace_0 << "_" << trace_1 << endl;
            } else {
                OutputStream << "traces_" << trace_0 << "_" << trace_1 << '\t';
            }
        }
    }
    for (unsigned iter = 0; iter < m_recorded_similarity_matrices.size(); iter++) {
        for (unsigned int trace_0 = 0; trace_0 < m_number_of_traces; trace_0++) {
            for (unsigned int trace_1 = trace_0 + 1; trace_1 < m_number_of_traces; trace_1++) {
                if (trace_0 == m_number_of_traces - 2 && trace_1 == m_number_of_traces - 1) {
                    OutputStream << m_recorded_similarity_matrices[iter][trace_0][trace_1] / static_cast< double >(m_observations_in_each_trace[trace_0] + m_observations_in_each_trace[trace_1]) << endl;
                } else {
                    OutputStream << m_recorded_similarity_matrices[iter][trace_0][trace_1] / static_cast< double >(m_observations_in_each_trace[trace_0] + m_observations_in_each_trace[trace_1]) << '\t';
                }
            }
        }
    }
}

// As above, for the per-iteration min-proportion similarity values.
void rj::write_min_proportion_similarity_matrices_to_file(const string & min_proportion_similarity_matrices_Filename) {
    ofstream OutputStream(min_proportion_similarity_matrices_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(5);
    // start by making a header for the file, detailing which pair of traces we are recording the similarity of
    for (unsigned int trace_0 = 0; trace_0 < m_number_of_traces; trace_0++) {
        for (unsigned int trace_1 = trace_0 + 1; trace_1 < m_number_of_traces; trace_1++) {
            if (trace_0 == m_number_of_traces - 2 && trace_1 == m_number_of_traces - 1) {
                OutputStream << "traces_" << trace_0 << "_" << trace_1 << endl;
            } else {
                OutputStream << "traces_" << trace_0 << "_" << trace_1 << '\t';
            }
        }
    }
    // NOTE(review): the loop bound uses m_recorded_similarity_matrices.size()
    // while the body reads m_recorded_min_proportion_similarity_matrices —
    // presumably the two are recorded together and have equal size; verify
    for (unsigned iter = 0; iter < m_recorded_similarity_matrices.size(); iter++) {
        for (unsigned int trace_0 = 0; trace_0 < m_number_of_traces; trace_0++) {
            for (unsigned int trace_1 = trace_0 + 1; trace_1 < m_number_of_traces; trace_1++) {
                if (trace_0 == m_number_of_traces - 2 && trace_1 == m_number_of_traces - 1) {
                    OutputStream << m_recorded_min_proportion_similarity_matrices[iter][trace_0][trace_1] << endl;
                } else {
                    OutputStream << m_recorded_min_proportion_similarity_matrices[iter][trace_0][trace_1] << '\t';
                }
            }
        }
    }
}

// Long-format dump of the per-process association matrices: rows of
// "x <tab> y <tab> similarity <tab> process", mirroring every off-diagonal
// entry so both (x, y) and (y, x) appear.
void rj::write_association_matrix_to_file(const string & association_matrix_Filename) {
    ofstream OutputStream(association_matrix_Filename, ios::out);
    OutputStream << setiosflags(ios::fixed);
    OutputStream.precision(5);
    // divide the values in the matrices by the number of records made
    double num_recordings = static_cast< double >(m_full_iterations / m_full_thinning);
    OutputStream << "x\ty\tSimilarity\tProcess";
    for (unsigned int process = 0; process < m_number_of_processes; process++) {
        for (unsigned long int ind_0 = 0; ind_0 < m_number_of_association_matrix_bins; ind_0++) {
            // diagonal entry
            OutputStream << endl << ind_0 * m_end / m_number_of_association_matrix_bins << "\t" << ind_0 * m_end / m_number_of_association_matrix_bins << "\t" << m_association_matrices[process][ind_0][ind_0] / num_recordings << "\t" << process;
            for (unsigned long int ind_1 = ind_0 + 1; ind_1 < m_number_of_association_matrix_bins; ind_1++) {
                // symmetric off-diagonal entries, written in both orders
                OutputStream << endl << ind_0 * m_end / m_number_of_association_matrix_bins << "\t" << ind_1 * m_end / m_number_of_association_matrix_bins << "\t" << m_association_matrices[process][ind_0][ind_1] / num_recordings << "\t" << process;
                OutputStream << endl << ind_1 * m_end / m_number_of_association_matrix_bins << "\t" << ind_0 * m_end / m_number_of_association_matrix_bins << "\t" << m_association_matrices[process][ind_0][ind_1] / num_recordings << "\t" << process;
            }
        }
    }
}

// work out the likelihoods for each segment for each process. Create the binaries and set the value of alpha for binary marked vector sampling
void rj::convert_basic_particle_to_binary_particle(const double & beta_alpha) {
    /*m_number_of_processes = m_pm_ptr->get_number_of_processes();
    vector< vector< double > > log_likelihoods;
    for (unsigned int proc = 0; proc < m_number_of_processes; proc++) {
        log_likelihoods.push_back(vector< double >(m_dimension + 1));
        if (m_dimension > 0) {
            log_likelihoods[proc][0] = m_pm_ptr->calculate_log_likelihood(proc, m_particle.get_changepoint(-1).get_position(), m_particle.get_changepoint(0).get_position());
            for (unsigned int i = 0; i < m_dimension - 1; i++) {
                log_likelihoods[proc][i + 1] = m_pm_ptr->calculate_log_likelihood(proc, m_particle.get_changepoint(i).get_position(), m_particle.get_changepoint(i + 1).get_position());
            }
            log_likelihoods[proc][m_dimension] = m_pm_ptr->calculate_log_likelihood(proc, m_particle.get_changepoint(m_dimension - 1).get_position(), m_end + 1);
        } else {
            log_likelihoods[proc][0] = m_pm_ptr->calculate_log_likelihood(proc, m_particle.get_changepoint(-1).get_position(), m_end + 1);
        }
    }*/
    m_number_of_processes = m_pm_ptr->get_number_of_processes();
    // reset the binary-stage state before sampling the marked vectors
    m_particle.initiate_binaries(m_number_of_processes);
    m_particle.set_log_binary_I_prior(0);
    m_particle.set_log_likelihood(0);
    m_particle.set_beta_alpha(beta_alpha);
    set_binary_marked_vectors();//m_number_of_processes, log_likelihoods);
    m_particle.calculate_and_set_log_binary_I_prior(m_number_of_processes);
    //cout << calculate_total_binary_log_likelihood() << endl;
    m_binary_left_log_likelihood = vector< double >(m_number_of_processes);
    m_binary_right_log_likelihood = vector< double >(m_number_of_processes);
}

// For each process, walk the changepoints left to right and sample a binary
// marked vector: at each changepoint either start a new binary segment or
// merge with the previous one, with odds driven by the likelihood ratio and
// the I-prior ratio.
void rj::set_binary_marked_vectors() {
    m_particle.set_all_binary_marked_vectors_equal_to_0_vectors(m_number_of_processes);
    for (unsigned int j = 0; j < m_number_of_processes; j++) {
        unsigned int trace_index = 0;
        // start with cp_index = -1
        int cp_index = -1;
        unsigned long int left_cp_position = 
m_particle.get_changepoint(cp_index).get_position(); unsigned long int right_cp_position; if (m_particle.get_dimension() == 0) { right_cp_position = m_end + 1; } else { right_cp_position = m_particle.get_changepoint(cp_index + 1).get_position(); } // we get the sufficient statistics and number of observations for all the processes here when we are only interested in the statistics for process j, but don't want to prematurely optimise here double log_likelihood = m_pm_ptr->calculate_log_likelihood(j, left_cp_position, right_cp_position); /*vector< vector< double > > stats_1(m_number_of_processes); vector< vector< double > > stats_2(m_number_of_processes); m_pm_ptr->get_cumulative_sufficient_data(left_cp_position, stats_1); m_pm_ptr->get_cumulative_sufficient_data(right_cp_position, stats_2); for (size_t i = 0; i < stats_1.size(); i++) { stats_2[j][i] -= stats_1[j][i]; } double number_of_observations; m_pm_ptr->get_number_of_observations(stats_2[j], number_of_observations); double log_likelihood = m_pm_ptr->calculate_log_likelihood(j, stats_2[j]);*/ double log_prior = m_particle.calculate_and_get_adding_binary_log_I_prior_ratio(j, true, trace_index, cp_index)[1]; // add this binary to the particle m_particle.increase_log_likelihood(log_likelihood); m_particle.increase_log_binary_I_prior(log_prior); m_particle.add_new_binary(j, -1, log_likelihood); m_particle.increase_log_binary_I_prior(log_prior); for (int cp_index = 0; cp_index < m_particle.get_dimension(); cp_index++) { unsigned long int left_cp_position = m_particle.get_changepoint(cp_index).get_position(); unsigned long int right_cp_position; if (cp_index == m_particle.get_dimension() - 1) { right_cp_position = m_end + 1; } else { right_cp_position = m_particle.get_changepoint(cp_index + 1).get_position(); } bool new_trace = false; if (m_particle.is_changepoint_index_separator_index(cp_index)) { trace_index++; new_trace = true; } // we get the sufficient statistics and number of observations for all the processes 
here when we are only interested in the statistics for process j, but don't want to prematurely optimise here double right_log_likelihood = m_pm_ptr->calculate_log_likelihood(j, left_cp_position, right_cp_position); /*vector< vector< double > > stats_1(m_number_of_processes); vector< vector< double > > stats_2(m_number_of_processes); m_pm_ptr->get_cumulative_sufficient_data(left_cp_position, stats_1); m_pm_ptr->get_cumulative_sufficient_data(right_cp_position, stats_2); for (size_t i = 0; i < stats_1.size(); i++) { stats_2[j][i] -= stats_1[j][i]; } double right_log_likelihood = m_pm_ptr->calculate_log_likelihood(j, stats_2[j]);*/ unsigned long int previous_binary_left_position = m_particle.get_changepoint(m_particle.get_binary_left_index(j, cp_index)).get_position(); double combined_log_likelihood = m_pm_ptr->calculate_log_likelihood(j, previous_binary_left_position, right_cp_position); /*vector< vector< double > > stats_3(m_number_of_processes); vector< vector< double > > stats_4(m_number_of_processes); m_pm_ptr->get_cumulative_sufficient_data(previous_binary_left_position, stats_3); m_pm_ptr->get_cumulative_sufficient_data(right_cp_position, stats_4); for (size_t i = 0; i < stats_1.size(); i++) { stats_4[j][i] -= stats_3[j][i]; } double combined_log_likelihood = m_pm_ptr->calculate_log_likelihood(j, stats_4[j]);*/ vector< double > log_B(2); vector< double > log_q(2); log_B[0] = combined_log_likelihood - m_particle.get_binary_log_likelihood(j, cp_index); log_B[1] = right_log_likelihood; log_q = m_particle.calculate_and_get_adding_binary_log_I_prior_ratio(j, new_trace, trace_index, cp_index); vector< double > log_Bq(2); // create log_Bq's log_Bq[0] = log_B[0] + log_q[0]; log_Bq[1] = log_B[1] + log_q[1]; double u3 = gsl_ran_flat(r, 0, 1); bool accept_cp = log_Bq[1] > log_Bq[0] + log(u3 / (1 - u3)); if (accept_cp) { m_particle.increase_log_likelihood(log_B[1]); m_particle.increase_log_binary_I_prior(log_q[1]); } else { m_particle.increase_log_likelihood(log_B[0]); 
m_particle.increase_log_binary_I_prior(log_q[0]);
        }
        // commit the decision: either start a new binary segment at this changepoint
        // (it is "effective" for process j) or extend the previous binary across it
        if (accept_cp) {
            m_particle.add_new_binary(j, cp_index, right_log_likelihood);
        } else {
            m_particle.add_to_binary(j, cp_index, combined_log_likelihood);
        }
    }
}
// add a joke binary at the end of the binaries (sentinel so every real binary has a right neighbour)
m_particle.add_end_binaries(m_number_of_processes);
}

// uses knowledge of the binaries to calculate the log likelihood for the whole particle.
// Sums, per process, the stored segment likelihoods between consecutive binary left positions;
// falls back to the whole-data likelihood when the particle has no changepoints.
double rj::calculate_total_binary_log_likelihood() {
    double total_log_likelihood = 0;
    if (m_particle.get_dimension() == 0) {
        // if there are no changepoints in the model, the likelihood is given by looking at the whole data
        total_log_likelihood += m_pm_ptr->calculate_log_likelihood(m_intercept, m_end + 1);
    }
    for (unsigned int j = 0; j < m_number_of_processes; j++) {
        // calculate the sum of the log likelihood for each segment of process j
        vector< unsigned long int > binary_left_positions = m_particle.get_vector_of_binary_left_positions(j);
        size_t number_of_left_positions = binary_left_positions.size();
        // NOTE(review): if number_of_left_positions could ever be 0 the "- 1" would wrap
        // (size_t); presumably the end-binary sentinel guarantees at least one entry — TODO confirm
        for (unsigned int i = 0; i < number_of_left_positions - 1; i++) {
            double binary_log_likelihood = m_pm_ptr->calculate_log_likelihood(j, binary_left_positions[i], binary_left_positions[i + 1]);
            total_log_likelihood += binary_log_likelihood;
        }
    }
    return total_log_likelihood;
}

// Prepares a birth (add-changepoint) proposal for the given trace: samples a fresh
// changepoint position, then fills the member state (m_log_B, m_log_q, m_log_Bq,
// m_log_acceptance_prob, ...) that binary_acceptance_procedure consumes if accepted.
void rj::adding_binary_changepoint_setup(const unsigned int & trace_index) {
    // positions for this trace are bounded by the neighbouring separators (or the data ends)
    unsigned long int lower_position_bound = ((trace_index == 0) ? 0 : m_separators[trace_index - 1]);
    unsigned long int upper_position_bound = ((trace_index == m_number_of_traces - 1) ? m_end + 1 : m_separators[trace_index]);
    // sample uniformly in (lower, upper), rejecting positions already occupied by a changepoint
    m_new_changepoint_position = gsl_rng_uniform_int(r, upper_position_bound - lower_position_bound - 1) + lower_position_bound + 1;
    while (m_particle.does_changepoint_exist_in_particle(m_new_changepoint_position)) {
        m_new_changepoint_position = gsl_rng_uniform_int(r, upper_position_bound - lower_position_bound - 1) + lower_position_bound + 1;
    }
    m_log_proposal_ratio = m_particle.calculate_and_get_add_cp_proposal_ratio(upper_position_bound - lower_position_bound - 1, trace_index, false);
    m_adding_changepoint = changepoint(m_new_changepoint_position);
    unsigned int add_cp_index = m_particle.get_add_changepoint_index();
    // m_log_B contains the log of the Bayes factor L(D1)L(D2) / L(D) for adding the new changepoint to process j
    m_log_B = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_binary_left_log_likelihood = vector< double >(m_number_of_processes, 0);
    m_binary_right_log_likelihood = vector< double >(m_number_of_processes, 0);
    for (unsigned int j = 0; j < m_number_of_processes; j++) {
        // find the index of the cp which begins the binary containing m_tau[cp_index - 1]
        m_binary_left_index = m_particle.get_binary_left_index(j, add_cp_index);
        // calculate the likelihood in the interval [m_tau[m_binary_left_index], new_cp)
        m_binary_left_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_new_changepoint_position);
        // find the index of the cp which begins the binary after the one containing m_tau[cp_index - 1]
        m_binary_right_index = m_particle.get_binary_right_index(j, add_cp_index);
        // calculate the likelihood in the interval [new_cp, m_right_index)
        if (m_binary_right_index == m_dimension) {
            m_binary_right_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_new_changepoint_position, m_end + 1);
        } else {
            m_binary_right_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_new_changepoint_position, m_particle.get_changepoint(m_binary_right_index).get_position());
        }
        // calculate the log Bayes factor for adding an effective cp to this process
        m_log_B[j][1] = m_binary_left_log_likelihood[j] + m_binary_right_log_likelihood[j] - m_particle.get_binary_log_likelihood(j, add_cp_index);
    }
    m_log_q = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    for (unsigned int j = 0; j < m_number_of_processes; j++){
        // calculate the log I prior ratio for adding the changepoint and it being ineffective or effective
        m_log_q[j] = m_particle.calculate_and_get_binary_log_I_prior_add_ratio(j);
    }
    // set m_log_Bq = m_log_B + m_log_q
    m_log_Bq = m_log_B;
    for (unsigned int j = 0; j < m_number_of_processes; j++){
        m_log_Bq[j][0] += m_log_q[j][0];
        m_log_Bq[j][1] += m_log_q[j][1];
    }
    // calculate how this move will affect the k prior
    m_log_k_prior_ratio = m_particle.calculate_and_get_add_cp_k_prior_ratio();
    // calculate the proposal ratio for this move (i.e. choosing a birth move and new_position vs choosing a death move and which to kill)
    m_log_acceptance_prob = m_log_k_prior_ratio + m_log_proposal_ratio;
    // calculate log of sum Bq, accumulated per process with the numerically stable
    // log-sum-exp form: log(a + b) = max + log(1 + exp(min - max))
    for (unsigned int j = 0; j < m_number_of_processes; j++){
        if (m_log_Bq[j][0] > m_log_Bq[j][1]){
            m_log_acceptance_prob += m_log_Bq[j][0] + log(1 + exp(m_log_Bq[j][1] - m_log_Bq[j][0]));
        } else {
            m_log_acceptance_prob += m_log_Bq[j][1] + log(1 + exp(m_log_Bq[j][0] - m_log_Bq[j][1]));
        }
    }
}

// Prepares a death (remove-changepoint) proposal for the given trace.
void rj::removing_binary_changepoint_setup(const unsigned int & trace_index) {
    int lower_index_bound = ((trace_index == 0) ?
        // index of the last changepoint belonging to the previous trace (-1 for the first trace)
        -1 : static_cast< int >(m_particle.get_separator_index(trace_index - 1)));
    unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
    // uniformly choose which changepoint of this trace to remove
    m_h = static_cast< unsigned int >(gsl_rng_uniform_int(r, trace_dimension) + lower_index_bound + 1);
    m_log_proposal_ratio = m_particle.calculate_and_get_remove_cp_proposal_ratio(trace_dimension, trace_index, false);
    m_log_B = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_binary_left_log_likelihood = vector< double >(m_number_of_processes, 0);
    m_binary_right_log_likelihood = vector< double >(m_number_of_processes, 0);
    m_binary_merged_log_likelihood = vector< double >(m_number_of_processes, 0);
    for (unsigned int j = 0; j < m_number_of_processes; j++) {
        // find the left index of the binary that contains m_tau[m_h - 1]
        m_binary_left_index = m_particle.get_binary_left_index(j, m_h);
        // find the left index of the binary after the one that contains m_tau[m_h]
        m_binary_right_index = m_particle.get_binary_right_index(j, m_h + 1);
        if (m_particle.get_binary_left_index(j, m_h + 1) == m_h) { // if we are removing an effective changepoint for process j
            // find the log likelihood for the binary containing cp m_(h-1)
            m_binary_left_log_likelihood[j] = m_particle.get_binary_log_likelihood(j, m_h);
            // find the log likelihood for the binary starting at cp m_h
            m_binary_right_log_likelihood[j] = m_particle.get_binary_log_likelihood(j, m_h + 1);
            // calculate the log likelihood of the interval [m_binary_left_index, m_binary_right_index)
            if (m_binary_right_index == m_dimension) {
                m_binary_merged_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_end + 1);
            } else {
                m_binary_merged_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_particle.get_changepoint(m_binary_right_index).get_position());
            }
        } else { // we are not removing an effective changepoint for process j
            // calculate the log likelihood from the cp to the left of changepoint m_h to cp m_h
            m_binary_left_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_particle.get_changepoint(m_h).get_position());
            // calculate the log likelihood from m_h to the first effective cp after m_h for process j
            if (m_h == m_dimension - 1){
                m_binary_right_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_h).get_position(), m_end + 1);
            } else {
                if (m_binary_right_index == m_dimension) {
                    m_binary_right_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_h).get_position(), m_end + 1);
                } else {
                    m_binary_right_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_h).get_position(), m_particle.get_changepoint(m_binary_right_index).get_position());
                }
            }
            // find the log likelihood of the binary that contains cp m_h - 1
            m_binary_merged_log_likelihood[j] = m_particle.get_binary_log_likelihood(j, m_h);
        }
        // log Bayes factor of the split configuration vs the merged one
        m_log_B[j][1] = m_binary_left_log_likelihood[j] + m_binary_right_log_likelihood[j] - m_binary_merged_log_likelihood[j];
    }
    m_log_q = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    for (unsigned int j = 0; j < m_number_of_processes; j++){
        if (m_particle.get_binary_left_index(j, m_h + 1) == m_h) { //if I_{h,j} == 1
            m_log_q[j] = m_particle.calculate_and_get_binary_log_I_prior_remove_ratio(j, true);
        } else {
            m_log_q[j] = m_particle.calculate_and_get_binary_log_I_prior_remove_ratio(j, false);
        }
    }
    // set m_log_Bq = m_log_B + m_log_q
    m_log_Bq = m_log_B;
    for (unsigned int j = 0; j < m_number_of_processes; j++){
        m_log_Bq[j][0] += m_log_q[j][0];
        m_log_Bq[j][1] += m_log_q[j][1];
    }
    m_log_k_prior_ratio = m_particle.calculate_and_get_remove_cp_k_prior_ratio();
    m_log_acceptance_prob = m_log_proposal_ratio + m_log_k_prior_ratio;
    // subtract log of sum Bq (death is the reverse of birth, hence -= instead of +=),
    // using the stable log-sum-exp form
    for (unsigned int j = 0; j < m_number_of_processes; j++){
        if (m_log_Bq[j][0] > m_log_Bq[j][1]){
            m_log_acceptance_prob -= m_log_Bq[j][0] + log(1 + exp(m_log_Bq[j][1] - m_log_Bq[j][0]));
        } else {
            m_log_acceptance_prob -= m_log_Bq[j][1] + log(1 + exp(m_log_Bq[j][0] - m_log_Bq[j][1]));
        }
    }
}

// Prepares a move proposal: relocates an existing changepoint within its trace,
// filling both the forward (m_log_B/m_log_q) and reverse (m_log_B_reverse/
// m_log_q_reverse) quantities needed for the acceptance ratio.
void rj::moving_binary_changepoint_setup(const unsigned int & trace_index) {
    // choose which changepoint to move (we know that there is at least one that can be moved from our definitions of b_k, d_k, etc.)
    int lower_index_bound = ((trace_index == 0) ? -1 : static_cast< int >(m_particle.get_separator_index(trace_index - 1)));
    unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
    m_h = static_cast< unsigned int >(gsl_rng_uniform_int(r, trace_dimension) + lower_index_bound + 1);
    // find the positions of the changepoints before and after m_tau[h] (so that we can sample the position of the new changepoint between them)
    unsigned long int tau_h_minus_1 = m_particle.get_changepoint(m_h - 1).get_position();
    unsigned long int tau_h_plus_1;
    if (m_h == m_dimension - 1){
        tau_h_plus_1 = m_end + 1;
    } else {
        tau_h_plus_1 = m_particle.get_changepoint(m_h + 1).get_position();
    }
    // sample the new changepoint position (and don't let m_tau[h] stay in the same position)
    m_new_changepoint_position = gsl_rng_uniform_int(r, tau_h_plus_1 - tau_h_minus_1 - 1) + tau_h_minus_1 + 1;
    if (tau_h_plus_1 - tau_h_minus_1 > 2) {
        // only re-draw when more than one candidate position exists, otherwise this would loop forever
        while (m_particle.does_changepoint_exist_in_particle(m_new_changepoint_position, m_h, m_h)){
            m_new_changepoint_position = gsl_rng_uniform_int(r, tau_h_plus_1 - tau_h_minus_1 - 1) + tau_h_minus_1 + 1;
        }
    }
    m_log_B = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_B_reverse = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_q = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_q_reverse = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_Bq = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_Bq_reverse = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_binary_left_log_likelihood = vector< double >(m_number_of_processes, 0);
    m_binary_right_log_likelihood = vector< double >(m_number_of_processes, 0);
    m_binary_merged_log_likelihood = vector< double >(m_number_of_processes, 0);
    m_binary_left_log_likelihood_reverse = vector< double >(m_number_of_processes, 0);
    m_binary_right_log_likelihood_reverse = vector< double >(m_number_of_processes, 0);
    for (unsigned int j = 0; j < m_number_of_processes; j++){
        // find the first effective changepoint to the left of m_tau[m_h]
        m_binary_left_index = m_particle.get_binary_left_index(j, m_h);
        // find the first effective changepoint to the right of m_tau[m_h]
        m_binary_right_index = m_particle.get_binary_right_index(j, m_h + 1);
        // calculate the log likelihood from m_binary_left_index to the new cp position
        m_binary_left_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_new_changepoint_position);
        // calculate the log likelihood from the new cp position to m_binary_right_index
        if (m_binary_right_index == m_dimension){
            m_binary_right_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_new_changepoint_position, m_end + 1);
        } else {
            m_binary_right_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_new_changepoint_position, m_particle.get_changepoint(m_binary_right_index).get_position());
        }
        if (m_particle.get_binary_left_index(j, m_h + 1) == m_h) { // if we are moving an effective changepoint
            // need to calculate the merged log likelihood manually as it is not stored in any binary
            if (m_binary_right_index == m_dimension) {
                m_binary_merged_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_end + 1);
            } else {
                m_binary_merged_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_particle.get_changepoint(m_binary_right_index).get_position());
            }
        } else { // if we are not moving an effective changepoint
            // merged log likelihood is stored in the binary that contains m_tau[m_h - 1]
            m_binary_merged_log_likelihood[j] = m_particle.get_binary_log_likelihood(j, m_h);
        }
        // forward log Bayes factor for the moved position
        m_log_B[j][1] = m_binary_left_log_likelihood[j] + m_binary_right_log_likelihood[j] - m_binary_merged_log_likelihood[j];
        if (m_particle.get_binary_left_index(j, m_h + 1) == m_h) { // we are moving an effective changepoint for process j
            // find the log likelihood from m_binary_left_index to the changepoint we are moving
            m_binary_left_log_likelihood_reverse[j] = m_particle.get_binary_log_likelihood(j, m_h);
            // find the log likelihood from m_h to m_binary_right_index
            m_binary_right_log_likelihood_reverse[j] = m_particle.get_binary_log_likelihood(j, m_h + 1);
        } else { // we are not removing an effective changepoint for process j
            // calculate the log likelihood from m_binary_left_index to the changepoint we are moving
            m_binary_left_log_likelihood_reverse[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_particle.get_changepoint(m_h).get_position());
            // calculate the log likelihood from the changepoint we are moving to m_binary_right_index
            if (m_binary_right_index == m_dimension) {
                m_binary_right_log_likelihood_reverse[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_h).get_position(), m_end + 1);
            } else {
                m_binary_right_log_likelihood_reverse[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_h).get_position(), m_particle.get_changepoint(m_binary_right_index).get_position());
            }
        }
        // reverse log Bayes factor (for moving the cp back from the proposed position)
        m_log_B_reverse[j][1] = m_binary_left_log_likelihood_reverse[j] + m_binary_right_log_likelihood_reverse[j] - m_binary_merged_log_likelihood[j];
        // calculate m_log_q
        if (m_particle.get_binary_left_index(j, m_h + 1) == m_h) { //if I_{h,j} == 1
            m_log_q[j] = m_log_q_reverse[j] = m_particle.calculate_and_get_binary_log_I_prior_move_ratio(j, true);
        } else {
            m_log_q[j] = m_log_q_reverse[j] = m_particle.calculate_and_get_binary_log_I_prior_move_ratio(j, false);
        }
        // m_log_Bq = m_log_B + m_log_q
        m_log_Bq[j][0] = m_log_B[j][0] + m_log_q[j][0];
        m_log_Bq[j][1] = m_log_B[j][1] + m_log_q[j][1];
        // m_log_Bq_reverse = m_log_B_reverse + m_log_q_reverse
        m_log_Bq_reverse[j][0] = m_log_B_reverse[j][0] + m_log_q_reverse[j][0];
        m_log_Bq_reverse[j][1] = m_log_B_reverse[j][1] + m_log_q_reverse[j][1];
    }
    // these are zero because move proposals are symmetric and we are not proposing an increase to the number of changepoints
    m_log_proposal_ratio = 0;
    m_log_k_prior_ratio = 0;
    m_log_acceptance_prob = 0;
    // calculate log of sum of Bq and Bq_reverse (add forward, subtract reverse),
    // each via the stable log-sum-exp form
    for (unsigned int j = 0; j < m_number_of_processes; j++){
        if (m_log_Bq[j][0] > m_log_Bq[j][1]){
            m_log_acceptance_prob += m_log_Bq[j][0] + log(1 + exp(m_log_Bq[j][1] - m_log_Bq[j][0]));
        } else {
            m_log_acceptance_prob += m_log_Bq[j][1] + log(1 + exp(m_log_Bq[j][0] - m_log_Bq[j][1]));
        }
        if (m_log_Bq_reverse[j][0] > m_log_Bq_reverse[j][1]) {
            m_log_acceptance_prob -= m_log_Bq_reverse[j][0] + log(1 + exp(m_log_Bq_reverse[j][1] - m_log_Bq_reverse[j][0]));
        } else {
            m_log_acceptance_prob -= m_log_Bq_reverse[j][1] + log(1 + exp(m_log_Bq_reverse[j][0] - m_log_Bq_reverse[j][1]));
        }
    }
}

// Prepares a resample proposal: keeps changepoint positions fixed but re-draws the
// effectiveness indicators I_{h,j}; forward and reverse quantities are identical.
void rj::resampling_binary_changepoint_setup(const unsigned int & trace_index) {
    // choose the changepoint index to resample
    int lower_index_bound = ((trace_index == 0) ?
        // index of the last changepoint of the previous trace (-1 for the first trace)
        -1 : static_cast< int >(m_particle.get_separator_index(trace_index - 1)));
    unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
    m_h = static_cast< unsigned int >(gsl_rng_uniform_int(r, trace_dimension) + lower_index_bound + 1);
    m_log_B = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_B_reverse = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_q = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_q_reverse = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_Bq = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_log_Bq_reverse = vector< vector< double > >(m_number_of_processes, vector< double >(2, 0));
    m_binary_left_log_likelihood = vector< double >(m_number_of_processes, 0);
    m_binary_right_log_likelihood = vector< double >(m_number_of_processes, 0);
    m_binary_merged_log_likelihood = vector< double >(m_number_of_processes, 0);
    for (unsigned int j = 0; j < m_number_of_processes; j++){
        // find the first index i to the left of m_h s.t. I_{i,j} = 1
        m_binary_left_index = m_particle.get_binary_left_index(j, m_h);
        // find the first index i to the right of m_h s.t. I_{i,j} = 1
        m_binary_right_index = m_particle.get_binary_right_index(j, m_h + 1);
        if (m_particle.get_binary_left_index(j, m_h + 1) == m_h) { // if we are resampling an effective changepoint for process j
            // find the log likelihood from m_binary_left_index to the changepoint we are moving
            m_binary_left_log_likelihood[j] = m_particle.get_binary_log_likelihood(j, m_h);
            // find the log likelihood from m_h to m_binary_right_index
            m_binary_right_log_likelihood[j] = m_particle.get_binary_log_likelihood(j, m_h + 1);
            // calculate the merged log likelihood
            if (m_binary_right_index == m_dimension) {
                m_binary_merged_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_end + 1);
            } else {
                m_binary_merged_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_particle.get_changepoint(m_binary_right_index).get_position());
            }
        } else { // if we are resampling an ineffective changepoint for process j
            // calculate the likelihood from the left index to changepoint m_h
            m_binary_left_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_particle.get_changepoint(m_h).get_position());
            // calculate the log likelihood in the interval to the right of changepoint m_h
            if (m_binary_right_index == m_dimension){
                m_binary_right_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_h).get_position(), m_end + 1);
            } else {
                m_binary_right_log_likelihood[j] = m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_h).get_position(), m_particle.get_changepoint(m_binary_right_index).get_position());
            }
            // the merged likelihood is stored in the binary that contains m_h
            m_binary_merged_log_likelihood[j] = m_particle.get_binary_log_likelihood(j, m_h);
        }
        // recover the likelihood from the left index to the right index (it is given in a binary object);
        // positions are unchanged, so the forward and reverse Bayes factors coincide
        m_log_B[j][1] = m_log_B_reverse[j][1] = m_binary_left_log_likelihood[j] + m_binary_right_log_likelihood[j] - m_binary_merged_log_likelihood[j];
        //cout << m_pm_ptr->calculate_log_likelihood(j, m_particle.get_changepoint(m_binary_left_index).get_position(), m_particle.get_changepoint(m_binary_right_index).get_position()) << endl;
        //cout << m_particle.get_binary_left_index(j, m_h + 1) << "\t" << m_h << endl;
        if (m_particle.get_binary_left_index(j, m_h + 1) == m_h){ // if I_{h,j} == 1
            m_log_q[j] = m_log_q_reverse[j] = m_particle.calculate_and_get_binary_log_I_prior_move_ratio(j, true);
        } else {
            m_log_q[j] = m_log_q_reverse[j] = m_particle.calculate_and_get_binary_log_I_prior_move_ratio(j, false);
        }
        m_log_Bq[j][0] = m_log_Bq_reverse[j][0] = m_log_B[j][0] + m_log_q[j][0];
        m_log_Bq[j][1] = m_log_Bq_reverse[j][1] = m_log_B[j][1] + m_log_q[j][1];
    }
    // proposal ratio is symmetric
    m_log_proposal_ratio = 0;
    // not adding or removing changepoints
    m_log_k_prior_ratio = 0;
    // move is guaranteed to be accepted
    m_log_acceptance_prob = 0;
}

// Applies an accepted proposal to the particle. u1 is the SAME uniform draw used to
// select the move type in run_binary_simulation, so the branch taken here matches
// the setup routine that populated the member state (m_log_B, m_log_q, m_log_Bq, ...).
void rj::binary_acceptance_procedure(const double & u1){
    m_particle.increase_log_k_prior(m_log_k_prior_ratio);
    if (u1 < m_b_k){ // birth
        vector< bool > accept_cp;
        for (unsigned int j = 0; j < m_number_of_processes; j++){
            // per-process Bernoulli draw: effective with probability Bq[1] / (Bq[0] + Bq[1]),
            // implemented in log space via the logistic transform of u3
            double u3 = gsl_ran_flat(r, 0, 1);
            accept_cp.push_back(m_log_Bq[j][1] > m_log_Bq[j][0] + log(u3 / (1 - u3)));
            if (accept_cp[j]) {
                m_particle.increase_log_likelihood(m_log_B[j][1]);
                m_particle.increase_log_binary_I_prior(m_log_q[j][1]);
            } else {
                m_particle.increase_log_binary_I_prior(m_log_q[j][0]);
            }
        }
        m_particle.add_binary_changepoint(m_particle.get_add_changepoint_index(), m_adding_changepoint, accept_cp, m_binary_left_log_likelihood, m_binary_right_log_likelihood);
    } else if (u1 < m_d_k){ // death
        vector< bool > remove_effective_cp;
        for (unsigned int j = 0; j < m_number_of_processes; j++){
            if (m_particle.get_binary_left_index(j, m_h + 1) == m_h){ //if I_{h,j} == 1
                remove_effective_cp.push_back(true);
                m_particle.increase_log_likelihood(-m_log_B[j][1]);
                m_particle.increase_log_binary_I_prior(-m_log_q[j][1]);
            } else {
                remove_effective_cp.push_back(false);
                m_particle.increase_log_binary_I_prior(-m_log_q[j][0]);
            }
        }
        m_particle.remove_binary_changepoint(m_h, remove_effective_cp, m_binary_merged_log_likelihood);
    } else if (u1 < m_m_k){ // move
        vector< bool > accept_cp;
        vector< bool > remove_effective_cp;
        for (unsigned int j = 0; j < m_number_of_processes; j++){
            // re-draw effectiveness at the new position; remove_effective_cp records
            // whether the cp was effective at its old position
            double u3 = gsl_ran_flat(r, 0, 1);
            accept_cp.push_back(m_log_Bq[j][1] > m_log_Bq[j][0] + log(u3 / (1 - u3)));
            remove_effective_cp.push_back(m_particle.get_binary_left_index(j, m_h + 1) == m_h);
            if (accept_cp[j]) {
                if (remove_effective_cp[j]) {
                    // effective -> effective: likelihood swaps old split for new split
                    m_particle.increase_log_likelihood(m_log_B[j][1] - m_log_B_reverse[j][1]);
                } else {
                    // ineffective -> effective
                    m_particle.increase_log_likelihood(m_log_B[j][1]);
                    m_particle.increase_log_binary_I_prior(m_log_q[j][1] - m_log_q_reverse[j][0]);
                }
            } else {
                if (remove_effective_cp[j]) {
                    // effective -> ineffective
                    m_particle.increase_log_likelihood(- m_log_B_reverse[j][1]);
                    m_particle.increase_log_binary_I_prior(m_log_q[j][0] - m_log_q_reverse[j][1]);
                }
                // ineffective -> ineffective requires no update (q and q_reverse are equal for moves)
            }
        }
        m_particle.move_binary_changepoint(m_h, m_new_changepoint_position, remove_effective_cp, accept_cp, m_binary_left_log_likelihood, m_binary_right_log_likelihood, m_binary_merged_log_likelihood);
    } else if (u1 < m_r_k){ // resample
        vector< bool > accept_cp;
        vector< bool > remove_effective_cp;
        for (unsigned int j = 0; j < m_number_of_processes; j++){
            double u3 = gsl_ran_flat(r, 0, 1);
            accept_cp.push_back(m_log_Bq[j][1] > m_log_Bq[j][0] + log(u3 / (1 - u3)));
            remove_effective_cp.push_back(m_particle.get_binary_left_index(j, m_h + 1) == m_h);
            // only transitions that flip the indicator change the posterior terms
            if (accept_cp[j] && !remove_effective_cp[j]) {
                m_particle.increase_log_likelihood(m_log_B[j][1] - m_log_B_reverse[j][0]);
                m_particle.increase_log_binary_I_prior(m_log_q[j][1] - m_log_q_reverse[j][0]);
            } else if (!accept_cp[j] && remove_effective_cp[j]) {
                m_particle.increase_log_likelihood(m_log_B[j][0] - m_log_B_reverse[j][1]);
                m_particle.increase_log_binary_I_prior(m_log_q[j][0] - m_log_q_reverse[j][1]);
            }
        }
        m_particle.resample_binary_changepoint(m_h, remove_effective_cp, accept_cp, m_binary_left_log_likelihood, m_binary_right_log_likelihood, m_binary_merged_log_likelihood);
    }
}

// Records one posterior sample: dimension, changepoint histogram, log posterior,
// and updates the running MAP (maximum a posteriori) particle.
void rj::binary_recording_procedure(){
    m_recorded_binary_dimensions.push_back(m_dimension);
    vector< unsigned long int > changepoint_hist = m_particle.calculate_and_get_binary_changepoint_histogram(m_number_of_changepoint_bins, m_number_of_processes);
    size_t size_of_recorded_changepoints = m_recorded_binary_changepoints.size();
    if (size_of_recorded_changepoints > 0){ //have we started recording changepoint histograms, or is this the first time?
        for (unsigned long int i = 0; i < size_of_recorded_changepoints; i++){
            m_recorded_binary_changepoints[i] += changepoint_hist[i];
        }
    } else {
        m_recorded_binary_changepoints = changepoint_hist;
    }
    double log_posterior = m_particle.get_binary_log_posterior();
    m_recorded_binary_log_posteriors.push_back(log_posterior);
    if (m_binary_MAP_log_posterior < log_posterior){
        // new best particle seen so far — keep a copy as the MAP estimate
        m_binary_MAP_particle = m_particle;
        m_binary_MAP_dimension = m_binary_MAP_particle.get_dimension();
        m_binary_MAP_log_posterior = log_posterior;
    }
}

// Main RJ-MCMC driver for the binary changepoint stage: a burn-in loop followed by
// a sampling loop. Each iteration picks a trace, picks a move type by u1 against the
// cumulative move probabilities (b_k birth, d_k death, m_k move, r_k resample), runs
// the matching setup, and applies it via a Metropolis-Hastings accept step.
void rj::run_binary_simulation(){
    cout << "starting binary changepoint stage" << endl;
    m_particle.print_likelihood();
    cout << "the posterior is " << m_particle.get_binary_log_posterior() << endl << endl;
    for (unsigned long int iteration = 0; iteration < m_binary_burnin; iteration++) {
        m_dimension = m_particle.get_dimension();
        unsigned int trace_index = static_cast< unsigned int >(gsl_rng_uniform_int(r, m_number_of_traces));
        unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
        if (trace_dimension > 0) {
            // all four move types available with equal probability
            m_b_k = 1.0 / 4.0, m_d_k = 2.0 / 4.0, m_m_k = 3.0 / 4.0, m_r_k = 1.0;
        } else {
            // nothing to remove/move/resample in an empty trace: birth only
            m_b_k = 1.0;
        }
        //m_particle.check_separator_changepoints();
        //m_particle.print_likelihood();
        //cout << iteration << endl;
        double u1 = gsl_ran_flat( r, 0, 1 );
        if (u1 < m_b_k) { //birth
            adding_binary_changepoint_setup(trace_index);
        } else if (u1 <
m_d_k) { //death removing_binary_changepoint_setup(trace_index); } else if (u1 < m_m_k) { //move moving_binary_changepoint_setup(trace_index); } else if (u1 < m_r_k) { //resample marked vector resampling_binary_changepoint_setup(trace_index); } if (m_log_acceptance_prob >= 0 || (log(gsl_ran_flat(r, 0, 1)) < m_log_acceptance_prob)) { binary_acceptance_procedure(u1); } //if (m_particle.calculate_and_get_binary_log_posterior(m_number_of_processes) - m_particle.get_binary_log_posterior() > 0.000001 || m_particle.calculate_and_get_binary_log_posterior(m_number_of_processes) - m_particle.get_binary_log_posterior() < -0.000001) { // cout << iteration << '\t' << m_particle.calculate_and_get_binary_log_posterior(m_number_of_processes) - m_particle.get_binary_log_posterior() << endl; //} } for (unsigned long int iteration = 0; iteration < m_binary_iterations; iteration++) { m_dimension = m_particle.get_dimension(); unsigned int trace_index = static_cast< unsigned int >(gsl_rng_uniform_int(r, m_number_of_traces)); unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index); if (trace_dimension > 0) { m_b_k = 1.0 / 4.0, m_d_k = 2.0 / 4.0, m_m_k = 3.0 / 4.0, m_r_k = 1.0; } else { m_b_k = 1.0; } double u1 = gsl_ran_flat(r, 0, 1); if (u1 < m_b_k) { //birth adding_binary_changepoint_setup(trace_index); } else if (u1 < m_d_k) { //death removing_binary_changepoint_setup(trace_index); } else if (u1 < m_m_k) { //move moving_binary_changepoint_setup(trace_index); } else if (u1 < m_r_k) { //resample resampling_binary_changepoint_setup(trace_index); } if (m_log_acceptance_prob > 0 || (log(gsl_ran_flat(r, 0, 1)) < m_log_acceptance_prob)) { binary_acceptance_procedure(u1); } if (m_recording_binary_samples && (iteration % m_binary_thinning == 0)) { //store sample binary_recording_procedure(); } } cout << "ending binary changepoint stage" << endl; m_particle.print_likelihood(); cout << "the posterior is " << m_particle.get_binary_log_posterior() << endl << endl; } void 
rj::convert_binary_particle_to_full_particle(const double & dirichlet_alpha, const double & rho) {
    // Turn the binary-marked particle into a "full" particle: create the regime
    // bookkeeping vectors, reset the stored log posterior components, and then
    // rebuild the full marked vectors / priors from scratch.
    m_particle.initiate_regime_vectors(m_number_of_processes);
    /*vector< vector< vector< double > > > sufficient_stats(m_dimension + 2, vector< vector< double > >(m_number_of_processes));
    vector< vector< double > > number_of_observations(m_dimension + 2, vector< double >(m_number_of_processes));
    for (int change = 0; change < m_dimension + 1; change++) {
        m_pm_ptr->get_cumulative_sufficient_data(m_particle.get_changepoint(change - 1).get_position(), sufficient_stats[change]);
        m_pm_ptr->get_number_of_observations(sufficient_stats[change], number_of_observations[change]);
    }
    m_pm_ptr->get_cumulative_sufficient_data(m_end + 1, sufficient_stats[m_dimension + 1]);
    m_pm_ptr->get_number_of_observations(sufficient_stats[m_dimension + 1], number_of_observations[m_dimension + 1]);*/
    // Zero the running totals; they are re-accumulated by set_full_marked_vectors()
    // and then overwritten wholesale by the calculate_and_get_* calls below.
    m_particle.set_log_likelihood(0);
    m_particle.set_log_full_I_prior(0);
    m_particle.set_log_regimes_prior(static_cast< double >(m_number_of_processes) * (log(rho) - log(1 - rho)));
    m_particle.set_log_full_separators_prior(0);
    m_particle.set_dirichlet_alpha(dirichlet_alpha);
    m_particle.set_rho(rho);
    set_full_marked_vectors();// m_number_of_processes, sufficient_stats, number_of_observations);
    //check_total_full_log_likelihood(m_particle);
    // Recompute the prior components from the now-complete full marked vectors.
    m_particle.set_log_full_I_prior(m_particle.calculate_and_get_log_full_I_prior(m_number_of_processes));
    m_particle.set_log_regimes_prior(m_particle.calculate_and_get_log_regimes_prior(m_number_of_processes));
    m_particle.set_log_full_separators_prior(m_particle.calculate_and_get_log_full_separators_prior(m_number_of_processes));
    //m_particle.check_full_log_posterior();
}

// Build the full marked vectors for every process: for each binary segment,
// sequentially sample (via the log-Bq weights) which regime the segment joins,
// or whether it starts a new regime, accumulating log likelihood and I-prior.
// NOTE(review): this draws from the RNG (gsl_ran_flat), so any reordering of
// the per-segment work changes the sampled particle.
void rj::set_full_marked_vectors() {//const size_t & number_of_processes), const vector< vector< vector< double > > > & sufficient_statistics, const vector< vector< double > > & number_of_observations) {
    // make sure that the full marked vectors exist for each changepoint so that they can be changed as we go along
    m_particle.set_all_full_marked_vectors_equal_to_binary_marked_vectors(m_number_of_processes);
    for (unsigned int j = 0; j < m_number_of_processes; j++) {
        vector< unsigned long int > binary_left_changepoint_positions = m_particle.get_vector_of_binary_left_positions(j);
        vector< int > binary_left_changepoint_indices = m_particle.get_vector_of_binary_left_indices(j);
        size_t number_of_binaries = m_particle.get_number_of_binaries(j);
        unsigned int trace_index = 0;
        // start with binary_index = 0: the first segment always founds regime 0.
        unsigned int binary_index = 0;
        unsigned long int left_cp_position = binary_left_changepoint_positions[binary_index];
        //cout << "left cp " << left_cp_position << endl;
        unsigned long int right_cp_position = binary_left_changepoint_positions[binary_index + 1];
        //cout << "right cp " << right_cp_position << endl;
        // we get the sufficient statistics and number of observations for all the processes here when we are only interested in the statistics for process j, but don't want to prematurely optimise here
        vector< vector< double > > stats_1(m_number_of_processes);
        vector< vector< double > > stats_2(m_number_of_processes);
        m_pm_ptr->get_cumulative_sufficient_data(left_cp_position, stats_1);
        m_pm_ptr->get_cumulative_sufficient_data(right_cp_position, stats_2);
        // Segment statistics = cumulative(right) - cumulative(left).
        for (size_t i = 0; i < stats_1[j].size(); i++) {
            stats_2[j][i] -= stats_1[j][i];
        }
        double number_of_observations;
        m_pm_ptr->get_number_of_observations(j, stats_2[j], number_of_observations);
        //cout << "number of observations: " << number_of_observations << endl;
        double log_likelihood = m_pm_ptr->calculate_log_likelihood(j, stats_2[j]);
        double log_prior = m_particle.calculate_and_get_full_adding_binary_log_I_prior_ratio(j, 0, binary_left_changepoint_indices[binary_index + 1] - binary_left_changepoint_indices[binary_index], true, trace_index, -1);
        vector< unsigned int > transitions = vector< unsigned int >(0);
        // set the regime index to be 0
        unsigned int regime_index = 0;
        vector< unsigned int > transitions_histogram = vector< unsigned int >(1);
        vector< int > right_changepoint_indices;
        for (unsigned int i = binary_left_changepoint_indices[binary_index] + 1; i < binary_left_changepoint_indices[binary_index + 1]; i++) {
            transitions.push_back(regime_index);
            transitions_histogram[regime_index]++;
            right_changepoint_indices.push_back(i);
        }
        // NOTE(review): transitions holds unsigned int, so push_back(-1) stores
        // UINT_MAX — presumably an end-of-segment sentinel; confirm readers
        // treat it as such.
        transitions.push_back(-1);
        right_changepoint_indices.push_back(binary_left_changepoint_indices[binary_index + 1]);
        // add this regime to the particle
        m_particle.add_new_regime(j, right_changepoint_indices, transitions, transitions_histogram, stats_2[j], number_of_observations, log_likelihood, m_number_of_traces, true, trace_index, -1);
        m_particle.increase_log_likelihood(log_likelihood);
        m_particle.increase_log_full_I_prior(log_prior, true, j, trace_index, true);
        //m_particle.check_transitions_out();
        //cout << m_particle.calculate_and_get_log_full_I_prior(j+1) - m_particle.get_log_full_I_prior() << endl;
        // Remaining segments: sample which regime each joins.
        // NOTE(review): this loop variable shadows the outer binary_index above.
        for (unsigned int binary_index = 1; binary_index < number_of_binaries - 1; binary_index++) {
            unsigned long int left_cp_position = binary_left_changepoint_positions[binary_index];
            unsigned long int right_cp_position = binary_left_changepoint_positions[binary_index + 1];
            int left_cp_index = binary_left_changepoint_indices[binary_index];
            int right_cp_index = binary_left_changepoint_indices[binary_index + 1];
            bool new_trace = false;
            if (m_particle.is_changepoint_index_separator_index(left_cp_index)) {
                trace_index++;
                new_trace = true;
            }
            unsigned int previous_regime = m_particle.get_previous_regime(left_cp_index, j);
            // we get the sufficient statistics and number of observations for all the processes here when we are only interested in the statistics for process j, but don't want to prematurely optimise here
            vector< vector< double > > stats_1(m_number_of_processes);
            vector< vector< double > > stats_2(m_number_of_processes);
            m_pm_ptr->get_cumulative_sufficient_data(left_cp_position, stats_1);
            m_pm_ptr->get_cumulative_sufficient_data(right_cp_position, stats_2);
            for (size_t i = 0; i < stats_1[j].size(); i++) {
                stats_2[j][i] -= stats_1[j][i];
            }
            double number_of_observations;
            m_pm_ptr->get_number_of_observations(j, stats_2[j], number_of_observations);
            double log_likelihood = m_pm_ptr->calculate_log_likelihood(j, stats_2[j]);
            size_t number_of_regimes = m_particle.get_number_of_regimes(j);
            // Index k < number_of_regimes = join regime k; index number_of_regimes = open a new regime.
            vector< double > log_B(number_of_regimes + 1, 0);
            vector< double > log_likelihoods_with_right_sufficient_statistics(number_of_regimes + 1, 0);
            vector< double > log_q(number_of_regimes + 1, 0);
            // now choose the regime to which we will add this binary
            for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
                vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime);
                //cout << "reg suff stat: " << regime_sufficient_stats[1] << endl;
                // calculate the sufficient statistics for the regime if the right sufficient statistics are added to it
                vector< double > regime_sufficient_stats_with_right_sufficient_statistics = regime_sufficient_stats;
                for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) {
                    regime_sufficient_stats_with_right_sufficient_statistics[index] += stats_2[j][index];
                }
                double regime_log_likelihood = m_particle.get_regime_log_likelihood(j, regime);
                //cout << "regime log likelihood: " << regime_log_likelihood << endl;
                double regime_log_likelihood_with_sufficient_stats = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics);
                //cout << "regime log likelihood with sufficient stats: " << regime_log_likelihood_with_sufficient_stats << endl;
                // log Bayes factor of merging this segment into `regime`.
                log_B[regime] = regime_log_likelihood_with_sufficient_stats - regime_log_likelihood;
                log_likelihoods_with_right_sufficient_statistics[regime] = regime_log_likelihood_with_sufficient_stats;
                log_q[regime] = m_particle.calculate_and_get_full_adding_binary_log_I_prior_ratio(j, regime, right_cp_index - left_cp_index, new_trace, trace_index, previous_regime);
            }
            // also propose adding a new regime
            double regime_log_likelihood_with_sufficient_stats = log_likelihood;
            log_B[number_of_regimes] = regime_log_likelihood_with_sufficient_stats;
            log_likelihoods_with_right_sufficient_statistics[number_of_regimes] = regime_log_likelihood_with_sufficient_stats;
            log_q[number_of_regimes] = m_particle.calculate_and_get_full_adding_binary_log_I_prior_ratio(j, static_cast< unsigned int >(number_of_regimes), right_cp_index - left_cp_index, new_trace, trace_index, previous_regime);
            vector< double > log_Bq(number_of_regimes + 1);
            // create log_Bq's
            for (unsigned int regime = 0; regime < number_of_regimes + 1; regime++) {
                log_Bq[regime] = log_B[regime] + log_q[regime];
            }
            // calculate the ordering of the elements of m_log_Bq so that we can use fancy log and exp tricks to calculate the sum of the log Bq values.
            vector< unsigned int > log_Bq_descending_order(number_of_regimes + 1, 0);
            for (unsigned int i = 1; i < number_of_regimes + 1; i++) {
                log_Bq_descending_order[i] = i;
            }
            calculate_vector_descending_order(number_of_regimes + 1, log_Bq, log_Bq_descending_order);
            // log-sum-exp anchored at the largest term for numerical stability.
            double log_of_sum_Bq = log_Bq[log_Bq_descending_order[0]];
            double temp_sum = 1;
            double largest_Bq = log_Bq[log_Bq_descending_order[0]];
            for (unsigned int index = 1; index < number_of_regimes + 1; index++){
                temp_sum += exp(log_Bq[log_Bq_descending_order[index]] - largest_Bq);
            }
            log_of_sum_Bq += log(temp_sum);
            // choose to which regime we will add this binary
            temp_sum = 0;
            double regime_chooser = log(gsl_ran_flat(r, 0, 1)) + log_of_sum_Bq;
            unsigned int index = 0;
            bool chosen = false;
            // Inverse-CDF sampling over the (descending-ordered) Bq weights.
            do {
                temp_sum += exp(log_Bq[log_Bq_descending_order[index]] - log_Bq[log_Bq_descending_order[0]]);
                chosen = regime_chooser <= log_Bq[log_Bq_descending_order[0]] + log(temp_sum);
                index++;
            } while(!chosen);
            unsigned int new_regime = log_Bq_descending_order[index - 1];
            m_particle.increase_log_likelihood(log_B[new_regime]);
            //cout << "log_B[new_regime]: " << log_B[new_regime] << endl;
            bool adding_new_regime = new_regime == number_of_regimes;
            // if a new regime is added, the (1-rho) in m_log_q[j][new_regimes[j]] will be added to m_log_regime_prior and taken away from m_log_full_I_prior
            m_particle.increase_log_full_I_prior(log_q[new_regime], adding_new_regime, j, trace_index, new_trace);
            vector< unsigned int > transitions = vector< unsigned int >(0);
            // set the regime index to be the new regime number
            unsigned int regime_index = new_regime;
            vector< unsigned int > transitions_histogram = vector< unsigned int >(number_of_regimes + (adding_new_regime ? 1 : 0));
            vector< int > right_changepoint_indices(0);
            for (int i = left_cp_index + 1; i < right_cp_index; i++) {
                transitions.push_back(regime_index);
                transitions_histogram[regime_index]++;
                right_changepoint_indices.push_back(i);
            }
            transitions.push_back(-1);
            right_changepoint_indices.push_back(right_cp_index);
            if (adding_new_regime) {
                m_particle.add_new_regime(j, right_changepoint_indices, transitions, transitions_histogram, stats_2[j], number_of_observations, log_likelihood, m_number_of_traces, new_trace, trace_index, previous_regime);
            } else {
                m_particle.add_binary_to_regime(j, regime_index, right_changepoint_indices, transitions, transitions_histogram, stats_2[j], number_of_observations, log_likelihoods_with_right_sufficient_statistics[regime_index], m_number_of_traces, new_trace, trace_index, previous_regime);
            }
            //m_particle.check_transitions_out();
            //cout << m_particle.calculate_and_get_log_full_I_prior(j+1) - m_particle.get_log_full_I_prior() << endl;
        }
    }
    // set the number of unobserved regimes for each process to be 0
    m_particle.set_all_regimes_to_be_observed(m_number_of_processes);
}

// calculate the full log likelihood from the changepoint positions and compare
// it against the per-regime and total log likelihoods cached in the particle,
// printing a warning on mismatch (debug/consistency check only).
void rj::check_total_full_log_likelihood(particle & P) {
    double log_likelihood = 0;
    for (unsigned int process = 0; process < m_number_of_processes; process++) {
        size_t number_of_regimes = P.get_number_of_regimes(process);
        for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
            double
regime_log_likelihood = 0;
            vector< int > regime_right_changepoint_indices = P.get_right_changepoint_indices(process, regime);
            vector< vector< double > > regime_sufficient_stats(m_number_of_processes);
            // First interval of the regime: seed regime_sufficient_stats with
            // cumulative(right) and subtract cumulative(left) below.
            if (0 < regime_right_changepoint_indices.size()) {
                unsigned long int left_cp_position = P.get_changepoint(regime_right_changepoint_indices[0] - 1).get_position();
                unsigned long int right_cp_position;
                if (regime_right_changepoint_indices[0] == P.get_dimension()) {
                    right_cp_position = m_end + 1;
                } else {
                    right_cp_position = P.get_changepoint(regime_right_changepoint_indices[0]).get_position();
                }
                vector< vector< double > > stats_1(m_number_of_processes);
                m_pm_ptr->get_cumulative_sufficient_data(left_cp_position, stats_1);
                m_pm_ptr->get_cumulative_sufficient_data(right_cp_position, regime_sufficient_stats);
                for (size_t i = 0; i < stats_1[process].size(); i++) {
                    regime_sufficient_stats[process][i] -= stats_1[process][i];
                }
            }
            for (unsigned int index = 1; index < regime_right_changepoint_indices.size(); index++) {
                // get sufficient stats for the interval from CP regime_right_changepoint_indices[index] - 1 to CP regime_right_changepoint_indices[index]
                unsigned long int left_cp_position = P.get_changepoint(regime_right_changepoint_indices[index] - 1).get_position();
                unsigned long int right_cp_position;
                if (regime_right_changepoint_indices[index] == P.get_dimension()) {
                    right_cp_position = m_end + 1;
                } else {
                    right_cp_position = P.get_changepoint(regime_right_changepoint_indices[index]).get_position();
                }
                vector< vector< double > > stats_1(m_number_of_processes);
                vector< vector< double > > stats_2(m_number_of_processes);
                m_pm_ptr->get_cumulative_sufficient_data(left_cp_position, stats_1);
                m_pm_ptr->get_cumulative_sufficient_data(right_cp_position, stats_2);
                // Accumulate this interval's statistics into the regime total.
                for (size_t i = 0; i < stats_1[process].size(); i++) {
                    stats_2[process][i] -= stats_1[process][i];
                    regime_sufficient_stats[process][i] += stats_2[process][i];
                }
            }
            if (0 < regime_right_changepoint_indices.size()) {
                regime_log_likelihood = m_pm_ptr->calculate_log_likelihood(process, regime_sufficient_stats[process]);
            } // else leave it equal to 0 because this regime is unobserved
            // check that the regime_log_likelihood we have calculated equals the log likelihood stored in the regime
            // NOTE(review): `abs` on a double resolves to std::abs(double) only
            // if <cmath>'s overloads are in scope; otherwise this truncates to
            // int — verify the file's includes.
            if (abs(regime_log_likelihood - P.get_regime_log_likelihood(process, regime)) > 0.00001) {
                cout << "log likelihood doesn't match" << endl;
            }
            log_likelihood += regime_log_likelihood;
        }
    }
    if (abs(log_likelihood - P.get_log_likelihood()) > 0.000001) {
        cout << "total log likelihood doesn't match" << endl;
    }
}

/*void rj::check_adding_changepoint(const unsigned int & add_cp_index) {
    // cycing through different regimes to add to the particle, check if the log likelihood ratio is equal to what m_log_B implies it would be and m_log_q values are right as well
    // work out which process has the most regimes
    for (unsigned int process = 0; process < m_number_of_processes; process++) {
        size_t number_of_regimes = m_particle.get_number_of_regimes(process);
        for (unsigned int regime = 0; regime < number_of_regimes + 1; regime++) {
            // create a copy of the particle
            particle P = m_particle;
            // for log likelihood need to have the changepoint added and the right_changepoint indices for the regime we are adding to and the previous one need to be amended
            changepoint Frederick(m_new_changepoint_position);
            P.add_full_changepoint(add_cp_index, Frederick, <#const vector<unsigned int> &new_regimes#>, <#const vector<vector<double> > &sufficient_statistics#>, <#const vector<vector<double> > &log_likelihoods#>, <#const vector<double> &previous_log_likelihoods#>, <#const vector<double> &number_of_observations#>)
            if (m_particle.is_changepoint_index_separator_index(add_cp_index - 1)) {
                // if the previous
            }
            // for log_full_I_prior need to alter the right transitions histogram for the regime we are adding to and the previous regime.
        }
    }
}*/

// A and order both have size length.
// order contains 0, 1, ..., length - 1, and A is a copy of a (usually) unordered vector. order will be set to be the descending
// ordering of A, so order[0] will give the index of A that is largest. The ordering will not detect ties and will simply order
// whichever is first in the vector as the largest if there is a tie.
// Implementation: insertion sort on the value copy A, mirroring every move in `order`.
// NOTE(review): `j < length` compares signed int against size_t, and i/j are
// ints — fine for the small lengths used here, but would misbehave for lengths
// above INT_MAX; confirm lengths stay small.
void rj::calculate_vector_descending_order(const size_t & length, vector< double > A, vector< unsigned int > & order) {
    int i, j;
    unsigned int b;
    double a;
    for (j = 1; j < length; j++) {
        a = A[j];
        b = order[j];
        i = j - 1;
        // Shift smaller elements right until a's descending slot is found.
        while (i >= 0 && A[i] < a) {
            A[i + 1] = A[i];
            order[i + 1] = order[i];
            i--;
        }
        A[i + 1] = a;
        order[i + 1] = b;
    }
}

// Prepare all quantities for an add-changepoint RJ move within the given trace:
// draws a new (unoccupied) changepoint position, then for every process builds
// the per-regime log Bayes factors (m_log_B), I-prior ratios (m_log_q), their
// sums (m_log_Bq) with descending order and log-sum, and accumulates the move's
// log acceptance probability.
void rj::adding_full_changepoint_setup(const unsigned int & trace_index) {
    unsigned long int lower_position_bound = ((trace_index == 0) ? 0 : m_separators[trace_index - 1]);
    unsigned long int upper_position_bound = ((trace_index == m_number_of_traces - 1) ? m_end + 1 : m_separators[trace_index]);
    // Draw a uniform position strictly inside the trace; redraw until it does
    // not collide with an existing changepoint.
    m_new_changepoint_position = gsl_rng_uniform_int(r, upper_position_bound - lower_position_bound - 1) + lower_position_bound + 1;
    while (m_particle.does_changepoint_exist_in_particle(m_new_changepoint_position)) {
        m_new_changepoint_position = gsl_rng_uniform_int(r, upper_position_bound - lower_position_bound - 1) + lower_position_bound + 1;
    }
    m_log_proposal_ratio = m_particle.calculate_and_get_add_cp_proposal_ratio(upper_position_bound - lower_position_bound - 1, trace_index, false);
    m_adding_changepoint = changepoint(m_new_changepoint_position);
    unsigned int add_cp_index = m_particle.get_add_changepoint_index();
    // calculate how this move will affect the k prior
    m_log_k_prior_ratio = m_particle.calculate_and_get_add_cp_k_prior_ratio();
    m_log_acceptance_prob = m_log_k_prior_ratio + m_log_proposal_ratio;
    unsigned long int right_cp_position;
    if (add_cp_index == m_dimension) {
        right_cp_position = m_end + 1;
    } else {
        right_cp_position = m_particle.get_changepoint(add_cp_index).get_position();
    }
    unsigned long int left_cp_position = m_particle.get_changepoint(add_cp_index - 1).get_position();
    // m_log_B contains the log of the Bayes factors for adding a changepoint with each regime, so m_log_B[i] = log_Bayes_factor_if_I_h_j_prime_equals_i
    m_log_B = vector< vector< double > >(0);
    // m_log_q contains the log of the I prior ratio for adding a changepoint with each regime, so m_log_q[i] = log_I_prior_ratio_if_I_h_j_prime_equals_i
    m_log_q = vector< vector< double > >(0);
    // m_log_Bq contains the log of (the I prior ratio times the likelihood)
    m_log_Bq = vector< vector< double > >(0);
    // m_log_Bq_descending_order gives the order of the elements of m_log_Bq. So m_log_Bq_descending_order[j][0] gives the index of the largest element in m_log_Bq[j], etc
    m_log_Bq_descending_order = vector< vector< unsigned int > >(0);
    // m_log_of_sum_Bq contains the log of the sum of the Bq values for each process (useful for calculating which regime to choose and for acceptance probability calculations)
    m_log_of_sum_Bq = vector< double >(m_number_of_processes, 0);
    // m_previous_log_likelihoods_without_right_sufficient_statistics gives (for each process) the likelihood for the previous regime without the right sufficient stats
    m_previous_log_likelihoods_without_right_sufficient_statistics = vector< double >(m_number_of_processes, 0);
    // m_log_likelihoods_with_right_sufficient_statistics gives (for each process) the likelihood for each regime if the right sufficient stats are added to it. Equals 0 for the previous regime for each process, as the 'adding a changepoint' procedure knows that it shouldn't be setting new log likelihoods if new regime == previous regime
    m_log_likelihoods_with_right_sufficient_statistics = vector< vector< double > >(0);
    // m_left_sufficient_statistics holds the sufficient statistics for the interval from (the changepoint prior to m_new_changepoint_position) to m_new_changepoint_position for process j.
    // the assignment here looks wrong, but it will have the cumulative data up to left_cp_position subtracted later
    m_left_sufficient_statistics = vector< vector< double > >(m_number_of_processes);
    m_pm_ptr->get_cumulative_sufficient_data(m_new_changepoint_position, m_left_sufficient_statistics);
    // m_right_sufficient_statistics holds the sufficient statistics for the interval from m_new_changepoint_position to (the changepoint after m_new_changepoint_position) for process j
    // the assignment here looks wrong, but it will have the cumulative data up to m_new_cp_position subtracted later
    m_right_sufficient_statistics = vector< vector< double > >(m_number_of_processes);
    m_pm_ptr->get_cumulative_sufficient_data(right_cp_position, m_right_sufficient_statistics);
    vector< vector< double > > sufficient_statistics_up_to_left_cp_position(m_number_of_processes);
    m_pm_ptr->get_cumulative_sufficient_data(left_cp_position, sufficient_statistics_up_to_left_cp_position);
    for (unsigned int j = 0; j < m_number_of_processes; j++) {
        size_t number_of_regimes = m_particle.get_number_of_regimes(j);
        m_log_B.push_back(vector< double >(number_of_regimes + 1, 0));
        m_log_q.push_back(vector< double >(number_of_regimes + 1, 0));
        m_log_likelihoods_with_right_sufficient_statistics.push_back(vector< double >(number_of_regimes + 1, 0));
        for (unsigned int index = 0; index < m_right_sufficient_statistics[j].size(); index++) {
            // this is correct, as m_left_sufficient_statistics currently holds the information up to the new changepoint position
            m_right_sufficient_statistics[j][index] -= m_left_sufficient_statistics[j][index];
            // we now make m_left_sufficient_statistics correct
            m_left_sufficient_statistics[j][index] -= sufficient_statistics_up_to_left_cp_position[j][index];
        }
        // get the sufficient statistics for the regime that affects the changepoint prior to the new changepoint
        unsigned int previous_regime = m_particle.get_previous_regime(add_cp_index, j);
        vector< double > previous_regime_sufficient_stats = m_particle.get_sufficient_statistics(j, previous_regime);
        // calculate the likelihood for the regime that affects the changepoint prior to the new changepoint
        double previous_regime_log_likelihood = m_particle.get_regime_log_likelihood(j, previous_regime);
        // calculate the likelihood for the previous regime with the right sufficient statistics removed
        vector< double > previous_regime_sufficient_statistics_without_right_sufficient_statistics = previous_regime_sufficient_stats;
        for (unsigned int index = 0; index < previous_regime_sufficient_statistics_without_right_sufficient_statistics.size(); index++) {
            previous_regime_sufficient_statistics_without_right_sufficient_statistics[index] -= m_right_sufficient_statistics[j][index];
        }
        double previous_regime_log_likelihood_without_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_without_right_sufficient_statistics);
        m_previous_log_likelihoods_without_right_sufficient_statistics[j] = previous_regime_log_likelihood_without_right_sufficient_statistics;
        // for each regime calculate the Bayes factor for assigning this regime to the new changepoint.
        for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
            if (regime != previous_regime) { // check if there is anything to calculate. If the regime we are considering for the new changepoint is the same as the previous one, then the Bayes factor is 1.
                vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime);
                // calculate the sufficient statistics for the regime if the right sufficient statistics are added to it
                vector< double > regime_sufficient_stats_with_right_sufficient_statistics = m_particle.get_sufficient_statistics(j, regime);
                for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) {
                    regime_sufficient_stats_with_right_sufficient_statistics[index] += m_right_sufficient_statistics[j][index];
                }
                double regime_log_likelihood_with_sufficient_stats = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics);
                m_log_B[j][regime] = previous_regime_log_likelihood_without_right_sufficient_statistics + regime_log_likelihood_with_sufficient_stats - previous_regime_log_likelihood - m_particle.get_regime_log_likelihood(j, regime);
                m_log_likelihoods_with_right_sufficient_statistics[j][regime] = regime_log_likelihood_with_sufficient_stats;
            }
        }
        // also propose adding a new regime
        double regime_log_likelihood_with_sufficient_stats = m_pm_ptr->calculate_log_likelihood(j, m_right_sufficient_statistics[j]);
        m_log_B[j][number_of_regimes] = previous_regime_log_likelihood_without_right_sufficient_statistics + regime_log_likelihood_with_sufficient_stats - previous_regime_log_likelihood;
        m_log_likelihoods_with_right_sufficient_statistics[j][number_of_regimes] = regime_log_likelihood_with_sufficient_stats;
        // for each regime calculate marked vector prior ratio
        if (add_cp_index == m_dimension || m_particle.is_changepoint_index_separator_index(add_cp_index)) {
            for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
                m_log_q[j][regime] = m_particle.full_log_I_prior_add_ratio(add_cp_index, false, j, previous_regime, regime);
            }
            m_log_q[j][number_of_regimes] = m_particle.full_log_I_prior_add_ratio(add_cp_index, true, j, previous_regime, static_cast< unsigned int >(number_of_regimes));
        } else {
            unsigned int subsequent_regime = m_particle.get_previous_regime(add_cp_index + 1, j);
            for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
                m_log_q[j][regime] = m_particle.full_log_I_prior_add_ratio(add_cp_index, false, j, previous_regime, regime, subsequent_regime);
            }
            m_log_q[j][number_of_regimes] = m_particle.full_log_I_prior_add_ratio(add_cp_index, true, j, previous_regime, static_cast< unsigned int >(number_of_regimes), subsequent_regime);
        }
        //check_adding_changepoint(add_cp_index);
        // set m_log_Bq = m_log_B + m_log_q
        m_log_Bq.push_back(vector< double >(number_of_regimes + 1, 0));
        for (unsigned int regime = 0; regime < number_of_regimes + 1; regime++){
            m_log_Bq[j][regime] = m_log_B[j][regime] + m_log_q[j][regime];
        }
        // check if regime r_j is unobserved: if so, the Bq term will equal 0 as there would be no reverse move.
        if (m_particle.is_regime_unobserved(j, number_of_regimes - 1)) {
            m_log_Bq[j][number_of_regimes - 1] = -1e300; // effectively log(0): never chosen
        }
        // calculate the ordering of the elements of m_log_Bq so that we can use fancy log and exp tricks to calculate the sum of the log Bq values.
        m_log_Bq_descending_order.push_back(vector< unsigned int >(number_of_regimes + 1, 0));
        for (unsigned int i = 1; i < number_of_regimes + 1; i++) {
            m_log_Bq_descending_order[j][i] = i;
        }
        calculate_vector_descending_order(number_of_regimes + 1, m_log_Bq[j], m_log_Bq_descending_order[j]);
        // calculate log of sum Bq (log-sum-exp anchored at the largest term)
        double largest_Bq = m_log_Bq[j][m_log_Bq_descending_order[j][0]];
        m_log_of_sum_Bq[j] = m_log_Bq[j][m_log_Bq_descending_order[j][0]];
        double temp_sum = 1;
        for (unsigned int index = 1; index < number_of_regimes + 1; index++){
            temp_sum += exp(m_log_Bq[j][m_log_Bq_descending_order[j][index]] - largest_Bq);
        }
        m_log_of_sum_Bq[j] += log(temp_sum);
        m_log_acceptance_prob += m_log_of_sum_Bq[j];
    }
}

// Prepare all quantities for a remove-changepoint RJ move within the given trace.
void rj::removing_full_changepoint_setup(const unsigned int & trace_index) {
    int lower_index_bound = ((trace_index == 0) ?
-1 : static_cast< int >(m_particle.get_separator_index(trace_index - 1))); unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index); m_h = static_cast< unsigned int >(gsl_rng_uniform_int(r, trace_dimension) + lower_index_bound + 1); m_log_proposal_ratio = m_particle.calculate_and_get_remove_cp_proposal_ratio(trace_dimension, trace_index, false); m_log_k_prior_ratio = m_particle.calculate_and_get_remove_cp_k_prior_ratio(); m_log_acceptance_prob = m_log_k_prior_ratio + m_log_proposal_ratio; // m_log_B_reverse[i] contains log Bayes factor if I_h_j_prime = i vs no changepoint here. m_log_q_reverse is the log I ratio for the same setting. m_log_Bq_reverse is their sum m_log_B_reverse = vector< vector< double > >(0); m_log_q_reverse = vector< vector< double > >(0); m_log_Bq_reverse = vector< vector< double > >(0); // m_log_Bq_reverse_descending_order[j][0] gives the index of the largest element in m_log_Bq_reverse, m_log_of_sum_Bq_reverse[j] gives the log of the sum of the Bq_reverse[j] m_log_Bq_reverse_descending_order = vector< vector< unsigned int > >(0); m_log_of_sum_Bq_reverse = vector< double >(m_number_of_processes, 0); //m_previous_log_likelihoods_with_right_sufficient_statistics_reverse gives (for each process) the likelihood for the previous regime with the reverse right sufficient stats m_previous_log_likelihoods_with_right_sufficient_statistics_reverse = vector< double >(m_number_of_processes, 0); // m_actual_log_likelihoods_without_right_sufficient_statistics_reverse gives (for each process) the likelihood for the removed regime if the right sufficient stats are removed from it. 
Equals 0 for the previous regime for each process, as the 'removing a changepoint' procedure knows that it shouldn't be setting new log likelihoods if new regime == previous regime m_actual_log_likelihoods_without_right_sufficient_statistics_reverse = vector< double >(m_number_of_processes, 0); // m_right_sufficient_statistics_reverse holds the sufficient statistics for the interval from m_h to m_h+1 for process j // the assignment here looks wrong, but it will have the cumulative data up to m_h subtracted later m_right_sufficient_statistics_reverse = vector< vector< double > >(m_number_of_processes); // calculate how this move will affect the k prior if (m_h == m_dimension - 1) { m_pm_ptr->get_cumulative_sufficient_data(m_end + 1, m_right_sufficient_statistics_reverse); } else { m_pm_ptr->get_cumulative_sufficient_data(m_particle.get_changepoint(m_h + 1).get_position(), m_right_sufficient_statistics_reverse); } m_left_sufficient_statistics_reverse = vector< vector< double > >(m_number_of_processes); // Is this needed? m_pm_ptr->get_cumulative_sufficient_data(m_particle.get_changepoint(m_h).get_position(), m_left_sufficient_statistics_reverse); vector< vector< double > > sufficient_statistics_up_to_left_cp_position(m_number_of_processes); m_pm_ptr->get_cumulative_sufficient_data(m_particle.get_changepoint(m_h - 1).get_position(), sufficient_statistics_up_to_left_cp_position); m_removing_unobserved_regimes = vector< bool >(m_number_of_processes, false); for (unsigned int j = 0; j < m_number_of_processes; j++) { size_t number_of_regimes = m_particle.get_number_of_regimes(j); // get the actual regime for changepoint m_h unsigned int actual_regime = m_particle.get_previous_regime(m_h + 1, j); // if we are removing a whole regime from the process then we need to subtract 1 from the number of regimes. // if deleting tau_h means that regime r_j becomes unobserved then the number of regimes is reduced by 1. 
if (m_particle.removing_full_changepoint_leaves_highest_regime_unobserved(j, actual_regime)) { number_of_regimes--; m_removing_unobserved_regimes[j] = true; } m_log_B_reverse.push_back(vector< double >(number_of_regimes + 1, 0)); m_log_q_reverse.push_back(vector< double >(number_of_regimes + 1, 0)); for (unsigned int index = 0; index < m_right_sufficient_statistics_reverse[j].size(); index++) { // this is correct, as m_left_sufficient_statistics_reverse currently holds the information up to m_h m_right_sufficient_statistics_reverse[j][index] -= m_left_sufficient_statistics_reverse[j][index]; // we now make m_left_sufficient_statistics_reverse correct m_left_sufficient_statistics_reverse[j][index] -= sufficient_statistics_up_to_left_cp_position[j][index]; } // get the sufficient statistics for the regime that affects the changepoint prior to the new changepoint unsigned int previous_regime = m_particle.get_previous_regime(m_h, j); vector< double > previous_regime_sufficient_stats = m_particle.get_sufficient_statistics(j, previous_regime); // calculate the likelihood for the regime that affects the changepoint prior to the new changepoint double previous_regime_log_likelihood = m_particle.get_regime_log_likelihood(j, previous_regime); // calculate the likelihood for the previous regime with the right sufficient statistics removed vector< double > previous_regime_sufficient_statistics_with_right_sufficient_statistics = previous_regime_sufficient_stats; for (unsigned int index = 0; index < previous_regime_sufficient_statistics_with_right_sufficient_statistics.size(); index++) { previous_regime_sufficient_statistics_with_right_sufficient_statistics[index] += m_right_sufficient_statistics_reverse[j][index]; } double previous_regime_log_likelihood_with_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_with_right_sufficient_statistics); m_previous_log_likelihoods_with_right_sufficient_statistics_reverse[j] = 
previous_regime_log_likelihood_with_right_sufficient_statistics; // for each regime calculate the reverse Bayes factor for assigning this regime to the new changepoint. for (unsigned int regime = 0; regime < number_of_regimes; regime++) { if (previous_regime == actual_regime) { if (regime == previous_regime) { m_log_B_reverse[j][regime] = 0; } else { vector< double > previous_regime_sufficient_statistics_without_right_sufficient_statistics = previous_regime_sufficient_stats; for (unsigned int index = 0; index < previous_regime_sufficient_statistics_with_right_sufficient_statistics.size(); index++) { previous_regime_sufficient_statistics_without_right_sufficient_statistics[index] -= m_right_sufficient_statistics_reverse[j][index]; } double previous_regime_log_likelihood_without_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_without_right_sufficient_statistics); vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime); // calculate the sufficient statistics for the regime if the right sufficient statistics are added to it vector< double > regime_sufficient_stats_with_right_sufficient_statistics = m_particle.get_sufficient_statistics(j, regime); for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) { regime_sufficient_stats_with_right_sufficient_statistics[index] += m_right_sufficient_statistics_reverse[j][index]; } m_log_B_reverse[j][regime] = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics) + previous_regime_log_likelihood_without_right_sufficient_statistics - m_particle.get_regime_log_likelihood(j, regime) - previous_regime_log_likelihood; } } else { if (regime == previous_regime) { m_log_B_reverse[j][regime] = 0; } else { if (regime == actual_regime) { vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime); // calculate the sufficient statistics for the regime if the 
right sufficient statistics are added to it vector< double > regime_sufficient_stats_without_right_sufficient_statistics = m_particle.get_sufficient_statistics(j, regime); for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) { regime_sufficient_stats_without_right_sufficient_statistics[index] -= m_right_sufficient_statistics_reverse[j][index]; } double regime_log_likelihood_without_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_without_right_sufficient_statistics); m_actual_log_likelihoods_without_right_sufficient_statistics_reverse[j] = regime_log_likelihood_without_right_sufficient_statistics; m_log_B_reverse[j][regime] = m_particle.get_regime_log_likelihood(j, regime) + previous_regime_log_likelihood - regime_log_likelihood_without_right_sufficient_statistics - previous_regime_log_likelihood_with_right_sufficient_statistics; } else { vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime); // calculate the sufficient statistics for the regime if the right sufficient statistics are added to it vector< double > regime_sufficient_stats_with_right_sufficient_statistics = m_particle.get_sufficient_statistics(j, regime); for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) { regime_sufficient_stats_with_right_sufficient_statistics[index] += m_right_sufficient_statistics_reverse[j][index]; } m_log_B_reverse[j][regime] = m_pm_ptr->calculate_log_likelihood(j,regime_sufficient_stats_with_right_sufficient_statistics) + previous_regime_log_likelihood - previous_regime_log_likelihood_with_right_sufficient_statistics - m_particle.get_regime_log_likelihood(j, regime); } } } } // also propose adding a new regime if (previous_regime == actual_regime) { // calculate the likelihood for the previous regime with right interval removed vector< double > previous_regime_sufficient_statistics_without_right_sufficient_statistics = 
previous_regime_sufficient_stats; for (unsigned int index = 0; index < previous_regime_sufficient_statistics_with_right_sufficient_statistics.size(); index++) { previous_regime_sufficient_statistics_without_right_sufficient_statistics[index] -= m_right_sufficient_statistics_reverse[j][index]; } double previous_regime_log_likelihood_without_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_without_right_sufficient_statistics); m_log_B_reverse[j][number_of_regimes] = previous_regime_log_likelihood_without_right_sufficient_statistics + m_pm_ptr->calculate_log_likelihood(j, m_right_sufficient_statistics_reverse[j]) - previous_regime_log_likelihood; } else { m_log_B_reverse[j][number_of_regimes] = previous_regime_log_likelihood + m_pm_ptr->calculate_log_likelihood(j, m_right_sufficient_statistics_reverse[j]) - previous_regime_log_likelihood_with_right_sufficient_statistics; } // for each regime calculate marked vector prior ratio if (m_h == m_dimension - 1 || m_particle.is_changepoint_index_separator_index(m_h + 1)) { for (unsigned int regime = 0; regime < number_of_regimes; regime++) { m_log_q_reverse[j][regime] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, regime, false, actual_regime, m_removing_unobserved_regimes[j]); } m_log_q_reverse[j][number_of_regimes] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, static_cast< unsigned int >(number_of_regimes), true, actual_regime, m_removing_unobserved_regimes[j]); } else { unsigned int subsequent_regime = m_particle.get_previous_regime(m_h + 2, j); for (unsigned int regime = 0; regime < number_of_regimes; regime++) { m_log_q_reverse[j][regime] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, regime, false, actual_regime, m_removing_unobserved_regimes[j], subsequent_regime); } m_log_q_reverse[j][number_of_regimes] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, static_cast< unsigned int 
>(number_of_regimes), true, actual_regime, m_removing_unobserved_regimes[j], subsequent_regime); } // calculate the trace index m_particle.is_changepoint_index_separator_index(m_h); // only running this to set the trace_index. If not run, m_trace_index may give the trace index of the next trace because is_changepoint_index_separator_index(index + 1) has just been run // set m_log_Bq = m_log_B + m_log_q m_log_Bq_reverse.push_back(vector< double >(number_of_regimes + 1, 0)); for (unsigned int regime = 0; regime < number_of_regimes + 1; regime++){ m_log_Bq_reverse[j][regime] = m_log_B_reverse[j][regime] + m_log_q_reverse[j][regime]; } // check if regime number_of_regimes - 1 is unobserved. If removing tau_h reduces the number of regimes by 1, this will already be accounted for if (m_particle.is_regime_unobserved(j, number_of_regimes - 1)) { m_log_Bq_reverse[j][number_of_regimes - 1] = -1e300; } // initialise m_log_Bq_reverse_descending_order and then calculate the ordering of the elements of m_log_Bq so that we can use fancy log and exp tricks to calculate the sum of the log Bq values. 
m_log_Bq_reverse_descending_order.push_back(vector< unsigned int >(number_of_regimes + 1, 0));
        for (unsigned int i = 1; i < number_of_regimes + 1; i++) {
            m_log_Bq_reverse_descending_order[j][i] = i;
        }
        calculate_vector_descending_order(number_of_regimes + 1, m_log_Bq_reverse[j], m_log_Bq_reverse_descending_order[j]);
        // calculate log of sum Bq
        // log-sum-exp trick: subtract the largest term before exponentiating to avoid underflow
        double largest_Bq_reverse = m_log_Bq_reverse[j][m_log_Bq_reverse_descending_order[j][0]];
        double temp_sum = 1;
        for (unsigned int index = 1; index < number_of_regimes + 1; index++){
            temp_sum += exp(m_log_Bq_reverse[j][m_log_Bq_reverse_descending_order[j][index]] - largest_Bq_reverse);
        }
        m_log_acceptance_prob -= largest_Bq_reverse + log(temp_sum);
    }
}

/// Sets up a "move changepoint" RJMCMC proposal: draws a changepoint index m_h
/// uniformly from the given trace, samples a new position for it strictly between
/// its neighbouring changepoints, and for every process j fills the per-regime
/// Bayes-factor tables (m_log_B / m_log_B_reverse), marked-vector prior ratios
/// (m_log_q / m_log_q_reverse) and their sums (m_log_Bq / m_log_Bq_reverse) for
/// both the forward move and its reverse, accumulating the log-sum-exp totals
/// into m_log_acceptance_prob.
/// @param trace_index  index of the trace whose changepoint is being moved
void rj::moving_full_changepoint_setup(const unsigned int & trace_index) {
    // choose which changepoint to move (we know that there is at least one that can be moved from our definitions of b_k, d_k, etc.)
    int lower_index_bound = ((trace_index == 0) ? -1 : static_cast< int >(m_particle.get_separator_index(trace_index - 1)));
    unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
    m_h = static_cast< unsigned int >(gsl_rng_uniform_int(r, trace_dimension) + lower_index_bound + 1);
    m_log_proposal_ratio = 0;
    m_log_acceptance_prob = 0;
    // find the indices of the changepoints before and after m_tau[h] (so that we can sample the position of the new changepoint between them)
    unsigned long int tau_h_minus_1 = m_particle.get_changepoint(m_h - 1).get_position();
    unsigned long int tau_h_plus_1;
    if (m_h == m_dimension - 1){
        tau_h_plus_1 = m_end + 1;
    }
    else {
        tau_h_plus_1 = m_particle.get_changepoint(m_h + 1).get_position();
    }
    // sample the new changepoint position (and don't let m_tau[h] stay in the same position)
    m_new_changepoint_position = gsl_rng_uniform_int(r, tau_h_plus_1 - tau_h_minus_1 - 1) + tau_h_minus_1 + 1;
    if (tau_h_plus_1 - tau_h_minus_1 > 2) {
        // rejection-sample until the proposed position is unoccupied (m_h itself is excluded from the check)
        while (m_particle.does_changepoint_exist_in_particle(m_new_changepoint_position, m_h, m_h)){
            m_new_changepoint_position = gsl_rng_uniform_int(r, tau_h_plus_1 - tau_h_minus_1 - 1) + tau_h_minus_1 + 1;
        }
    }
    m_log_k_prior_ratio = 0;
    m_adding_changepoint = changepoint(m_new_changepoint_position);
    // m_log_B_reverse[i] contains log Bayes factor if I_h_j_prime = i vs no changepoint here. m_log_q_reverse is the log I ratio for the same setting. m_log_Bq_reverse is their sum
    // m_log_B[i] contains log Bayes factor for adding a changepoint at m_new_changepoint_position with regime vs no changepoint there or at m_h.
    m_log_B = vector< vector< double > >(0);
    m_log_B_reverse = vector< vector< double > >(0);
    m_log_q = vector< vector< double > >(0);
    m_log_q_reverse = vector< vector< double > >(0);
    m_log_Bq = vector< vector< double > >(0);
    m_log_Bq_reverse = vector< vector< double > >(0);
    // m_log_Bq_reverse_descending_order[j][0] gives the index of the largest element in m_log_Bq_reverse, m_log_of_sum_Bq_reverse[j] gives the log of the sum of the Bq_reverse[j], same for m_log_Bq
    m_log_Bq_descending_order = vector< vector< unsigned int > >(0);
    m_log_Bq_reverse_descending_order = vector< vector< unsigned int > >(0);
    m_log_of_sum_Bq = vector< double >(m_number_of_processes, 0);
    m_log_of_sum_Bq_reverse = vector< double >(m_number_of_processes, 0);
    // per-process scratch caches of likelihood terms, filled inside the j-loop below
    m_previous_log_likelihoods_without_right_sufficient_statistics = vector< double >(m_number_of_processes, 0);
    m_log_likelihoods_with_right_sufficient_statistics = vector< vector< double > >(0);
    m_previous_log_likelihoods_with_right_sufficient_statistics = vector< double >(m_number_of_processes, 0);
    m_previous_log_likelihoods_with_right_sufficient_statistics_reverse = vector< double >(m_number_of_processes, 0);
    m_actual_log_likelihoods_without_right_sufficient_statistics = vector< double >(m_number_of_processes, 0);
    m_actual_log_likelihoods_without_right_sufficient_statistics_reverse = vector< double >(m_number_of_processes, 0);
    m_previous_log_likelihoods_without_middle_sufficient_statistics = vector< double >(m_number_of_processes, 0);
    m_previous_log_likelihoods_with_middle_sufficient_statistics = vector< double >(m_number_of_processes, 0);
    m_actual_log_likelihoods_with_middle_sufficient_statistics = vector< double >(m_number_of_processes, 0);
    m_actual_log_likelihoods_without_middle_sufficient_statistics = vector< double >(m_number_of_processes, 0);
    // m_right_sufficient_statistics_reverse holds the sufficient statistics for the interval from m_h to m_h+1 for process j
    // the assignment here looks wrong, but it will have the cumulative data up to m_h subtracted later
    m_right_sufficient_statistics = vector< vector< double > >(m_number_of_processes);
    m_right_sufficient_statistics_reverse = vector< vector< double > >(m_number_of_processes);
    if (m_h == m_dimension - 1) {
        m_pm_ptr->get_cumulative_sufficient_data(tau_h_plus_1, m_right_sufficient_statistics);
        m_pm_ptr->get_cumulative_sufficient_data(tau_h_plus_1, m_right_sufficient_statistics_reverse);
    }
    else {
        // NOTE(review): both branches are identical — tau_h_plus_1 already encodes the
        // m_h == m_dimension - 1 case above, so this if/else is presumably redundant; verify
        m_pm_ptr->get_cumulative_sufficient_data(tau_h_plus_1, m_right_sufficient_statistics);
        m_pm_ptr->get_cumulative_sufficient_data(tau_h_plus_1, m_right_sufficient_statistics_reverse);
    }
    unsigned long int tau_h = m_particle.get_changepoint(m_h).get_position();
    m_tau_h_greater_than_tau_h_prime = tau_h > m_new_changepoint_position;
    // the "middle" interval spans between the old position tau_h and the proposed position, whichever order they fall in
    m_middle_sufficient_statistics = vector< vector< double > >(m_number_of_processes);
    vector< vector< double > > sufficient_statistics_for_middle_sufficient_statistics(m_number_of_processes);
    if (m_tau_h_greater_than_tau_h_prime) {
        m_pm_ptr->get_cumulative_sufficient_data(tau_h, m_middle_sufficient_statistics);
        m_pm_ptr->get_cumulative_sufficient_data(m_new_changepoint_position, sufficient_statistics_for_middle_sufficient_statistics);
    }
    else {
        m_pm_ptr->get_cumulative_sufficient_data(m_new_changepoint_position, m_middle_sufficient_statistics);
        m_pm_ptr->get_cumulative_sufficient_data(tau_h, sufficient_statistics_for_middle_sufficient_statistics);
    }
    m_left_sufficient_statistics = vector< vector< double > >(m_number_of_processes);
    m_left_sufficient_statistics_reverse = vector< vector< double > >(m_number_of_processes);
    m_pm_ptr->get_cumulative_sufficient_data(m_new_changepoint_position, m_left_sufficient_statistics);
    m_pm_ptr->get_cumulative_sufficient_data(m_particle.get_changepoint(m_h).get_position(), m_left_sufficient_statistics_reverse);
    vector< vector< double > > sufficient_statistics_up_to_left_cp_position(m_number_of_processes);
    m_pm_ptr->get_cumulative_sufficient_data(m_particle.get_changepoint(m_h - 1).get_position(), sufficient_statistics_up_to_left_cp_position);
    m_removing_unobserved_regimes = vector< bool >(m_number_of_processes, false);
    for (unsigned int j = 0; j < m_number_of_processes; j++) {
        size_t number_of_regimes = m_particle.get_number_of_regimes(j);
        // get the actual regime for changepoint m_h
        unsigned int actual_regime = m_particle.get_previous_regime(m_h + 1, j);
        // if we are removing a whole regime from the process then we need to subtract 1 from the number of regimes.
        // if deleting tau_h means that regime r_j becomes unobserved then the number of regimes is reduced by 1.
        if (m_particle.removing_full_changepoint_leaves_highest_regime_unobserved(j, actual_regime)) {
            number_of_regimes--;
            m_removing_unobserved_regimes[j] = true;
        }
        m_log_B_reverse.push_back(vector< double >(number_of_regimes + 1, 0));
        m_log_q_reverse.push_back(vector< double >(number_of_regimes + 1, 0));
        m_log_B.push_back(vector< double >(number_of_regimes + 1, 0));
        m_log_q.push_back(vector< double >(number_of_regimes + 1, 0));
        m_log_likelihoods_with_right_sufficient_statistics.push_back(vector< double >(number_of_regimes + 1, 0));
        for (unsigned int index = 0; index < m_right_sufficient_statistics_reverse[j].size(); index++) {
            // this is correct, as m_left_sufficient_statistics_reverse currently holds the information up to m_h
            m_right_sufficient_statistics[j][index] -= m_left_sufficient_statistics[j][index];
            m_right_sufficient_statistics_reverse[j][index] -= m_left_sufficient_statistics_reverse[j][index];
            // we now make m_left_sufficient_statistics_reverse correct
            m_left_sufficient_statistics[j][index] -= sufficient_statistics_up_to_left_cp_position[j][index];
            m_left_sufficient_statistics_reverse[j][index] -= sufficient_statistics_up_to_left_cp_position[j][index];
            // also make m_middle_sufficient_statistics correct
            m_middle_sufficient_statistics[j][index] -= sufficient_statistics_for_middle_sufficient_statistics[j][index];
        }
        // get the sufficient statistics for the regime that affects the changepoint prior to the new changepoint
        unsigned int previous_regime = m_particle.get_previous_regime(m_h, j);
        vector< double > previous_regime_sufficient_stats = m_particle.get_sufficient_statistics(j, previous_regime);
        // calculate the likelihood for the regime that affects the changepoint prior to the new changepoint
        double previous_regime_log_likelihood = m_particle.get_regime_log_likelihood(j, previous_regime);
        // calculate the likelihood for the previous and actual regime with the right sufficient statistics removed etc
        vector< double > previous_regime_sufficient_statistics_without_right_sufficient_statistics = previous_regime_sufficient_stats;
        vector< double > previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse = previous_regime_sufficient_stats;
        vector< double > previous_regime_sufficient_statistics_with_right_sufficient_statistics_reverse = previous_regime_sufficient_stats;
        vector< double > previous_regime_sufficient_statistics_with_middle_sufficient_statistics = previous_regime_sufficient_stats;
        vector< double > previous_regime_sufficient_statistics_without_middle_sufficient_statistics = previous_regime_sufficient_stats;
        for (unsigned int index = 0; index < previous_regime_sufficient_statistics_without_right_sufficient_statistics.size(); index++) {
            if (previous_regime == actual_regime) {
                previous_regime_sufficient_statistics_without_right_sufficient_statistics[index] -= m_right_sufficient_statistics[j][index];
                previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse[index] -= m_right_sufficient_statistics_reverse[j][index];
            }
            else {
                // NOTE(review): the commented-out line below was left by a previous author with "delete?" — confirm it is dead before removing
                //previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse[index] -= m_right_sufficient_statistics_reverse[j][index]; delete?
                previous_regime_sufficient_statistics_with_right_sufficient_statistics_reverse[index] += m_right_sufficient_statistics_reverse[j][index];
                if (m_tau_h_greater_than_tau_h_prime) {
                    previous_regime_sufficient_statistics_without_middle_sufficient_statistics[index] -= m_middle_sufficient_statistics[j][index];
                }
                else {
                    previous_regime_sufficient_statistics_with_middle_sufficient_statistics[index] += m_middle_sufficient_statistics[j][index];
                }
            }
        }
        // likelihood terms are only computed for the branch that will actually consume them; the rest stay 0
        double previous_regime_log_likelihood_without_right_sufficient_statistics = 0, previous_regime_log_likelihood_without_right_sufficient_statistics_reverse = 0, previous_regime_log_likelihood_with_right_sufficient_statistics_reverse = 0, previous_regime_log_likelihood_with_middle_sufficient_statistics = 0, previous_regime_log_likelihood_without_middle_sufficient_statistics = 0;
        if (previous_regime == actual_regime) {
            previous_regime_log_likelihood_without_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_without_right_sufficient_statistics);
            previous_regime_log_likelihood_without_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse);
        }
        else {
            previous_regime_log_likelihood_with_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_with_right_sufficient_statistics_reverse);
            if (m_tau_h_greater_than_tau_h_prime) {
                previous_regime_log_likelihood_without_middle_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_without_middle_sufficient_statistics);
            }
            else {
                previous_regime_log_likelihood_with_middle_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_with_middle_sufficient_statistics);
            }
        }
        vector< double > actual_regime_sufficient_statistics = vector< double >(0);
        vector< double > actual_regime_sufficient_statistics_without_right_sufficient_statistics_reverse = vector< double >(0);
        vector< double > actual_regime_sufficient_statistics_with_middle_sufficient_statistics = vector< double >(0);
        vector< double > actual_regime_sufficient_statistics_without_middle_sufficient_statistics = vector< double >(0);
        double actual_regime_log_likelihood = 0, actual_regime_log_likelihood_without_right_sufficient_statistics_reverse = 0, actual_regime_log_likelihood_with_middle_sufficient_statistics = 0, actual_regime_log_likelihood_without_middle_sufficient_statistics = 0;
        if (previous_regime != actual_regime) {
            actual_regime_sufficient_statistics = m_particle.get_sufficient_statistics(j, actual_regime);
            actual_regime_sufficient_statistics_without_right_sufficient_statistics_reverse = actual_regime_sufficient_statistics;
            if (m_tau_h_greater_than_tau_h_prime) {
                actual_regime_sufficient_statistics_with_middle_sufficient_statistics = actual_regime_sufficient_statistics;
            }
            else {
                actual_regime_sufficient_statistics_without_middle_sufficient_statistics = actual_regime_sufficient_statistics;
            }
            for (unsigned int index = 0; index < actual_regime_sufficient_statistics.size(); index++) {
                actual_regime_sufficient_statistics_without_right_sufficient_statistics_reverse[index] -= m_right_sufficient_statistics_reverse[j][index];
                if (m_tau_h_greater_than_tau_h_prime) {
                    actual_regime_sufficient_statistics_with_middle_sufficient_statistics[index] += m_middle_sufficient_statistics[j][index];
                }
                else {
                    actual_regime_sufficient_statistics_without_middle_sufficient_statistics[index] -= m_middle_sufficient_statistics[j][index];
                }
            }
            actual_regime_log_likelihood = m_pm_ptr->calculate_log_likelihood(j, actual_regime_sufficient_statistics);
            actual_regime_log_likelihood_without_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, actual_regime_sufficient_statistics_without_right_sufficient_statistics_reverse);
            if (m_tau_h_greater_than_tau_h_prime) {
                actual_regime_log_likelihood_with_middle_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, actual_regime_sufficient_statistics_with_middle_sufficient_statistics);
            }
            else {
                actual_regime_log_likelihood_without_middle_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, actual_regime_sufficient_statistics_without_middle_sufficient_statistics);
            }
        }
        // for each regime calculate the reverse Bayes factor for assigning this regime to the new changepoint.
        for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
            if (previous_regime == actual_regime) {
                m_previous_log_likelihoods_without_right_sufficient_statistics[j] = previous_regime_log_likelihood_without_right_sufficient_statistics;
                if (regime == previous_regime) {
                    m_log_B[j][regime] = 0;
                    m_log_B_reverse[j][regime] = 0;
                }
                else {
                    vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime);
                    // calculate the sufficient statistics for the regime if the right sufficient statistics are added to it
                    vector< double > regime_sufficient_stats_with_right_sufficient_statistics = regime_sufficient_stats;
                    vector< double > regime_sufficient_stats_with_right_sufficient_statistics_reverse = regime_sufficient_stats;
                    for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) {
                        regime_sufficient_stats_with_right_sufficient_statistics[index] += m_right_sufficient_statistics[j][index];
                        regime_sufficient_stats_with_right_sufficient_statistics_reverse[index] += m_right_sufficient_statistics_reverse[j][index];
                    }
                    double log_likelihoods_with_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics);
                    m_log_B[j][regime] = log_likelihoods_with_right_sufficient_statistics + previous_regime_log_likelihood_without_right_sufficient_statistics - m_particle.get_regime_log_likelihood(j, regime) - previous_regime_log_likelihood;
                    m_log_B_reverse[j][regime] = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics_reverse) + previous_regime_log_likelihood_without_right_sufficient_statistics_reverse - m_particle.get_regime_log_likelihood(j, regime) - previous_regime_log_likelihood;
                    m_log_likelihoods_with_right_sufficient_statistics[j][regime] = log_likelihoods_with_right_sufficient_statistics;
                }
            }
            else {
                if (regime == previous_regime) {
                    m_log_B[j][regime] = 0;
                    m_log_B_reverse[j][regime] = 0;
                    m_previous_log_likelihoods_with_right_sufficient_statistics_reverse[j] = previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                    m_actual_log_likelihoods_without_right_sufficient_statistics_reverse[j] = actual_regime_log_likelihood_without_right_sufficient_statistics_reverse;
                }
                else {
                    if (regime == actual_regime) {
                        if (m_tau_h_greater_than_tau_h_prime) {
                            m_log_B[j][regime] = actual_regime_log_likelihood_with_middle_sufficient_statistics + previous_regime_log_likelihood_without_middle_sufficient_statistics - actual_regime_log_likelihood_without_right_sufficient_statistics_reverse - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                            m_log_B_reverse[j][regime] = actual_regime_log_likelihood + previous_regime_log_likelihood - actual_regime_log_likelihood_without_right_sufficient_statistics_reverse - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                            m_previous_log_likelihoods_without_middle_sufficient_statistics[j] = previous_regime_log_likelihood_without_middle_sufficient_statistics;
                            m_actual_log_likelihoods_with_middle_sufficient_statistics[j] = actual_regime_log_likelihood_with_middle_sufficient_statistics;
                        }
                        else {
                            m_log_B[j][regime] = actual_regime_log_likelihood_without_middle_sufficient_statistics + previous_regime_log_likelihood_with_middle_sufficient_statistics - actual_regime_log_likelihood_without_right_sufficient_statistics_reverse - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                            m_log_B_reverse[j][regime] = actual_regime_log_likelihood + previous_regime_log_likelihood - actual_regime_log_likelihood_without_right_sufficient_statistics_reverse - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                            m_previous_log_likelihoods_with_middle_sufficient_statistics[j] = previous_regime_log_likelihood_with_middle_sufficient_statistics;
                            m_actual_log_likelihoods_without_middle_sufficient_statistics[j] = actual_regime_log_likelihood_without_middle_sufficient_statistics;
                        }
                    }
                    else {
                        vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime);
                        vector< double > regime_sufficient_stats_with_right_sufficient_statistics = regime_sufficient_stats;
                        vector< double > regime_sufficient_stats_with_right_sufficient_statistics_reverse = regime_sufficient_stats;
                        for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) {
                            regime_sufficient_stats_with_right_sufficient_statistics[index] += m_right_sufficient_statistics[j][index];
                            regime_sufficient_stats_with_right_sufficient_statistics_reverse[index] += m_right_sufficient_statistics_reverse[j][index];
                        }
                        double regime_log_likelihood_with_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics);
                        double regime_log_likelihood_with_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics_reverse);
                        m_log_likelihoods_with_right_sufficient_statistics[j][regime] = regime_log_likelihood_with_right_sufficient_statistics;
                        m_actual_log_likelihoods_without_right_sufficient_statistics_reverse[j] = actual_regime_log_likelihood_without_right_sufficient_statistics_reverse;
                        if (m_tau_h_greater_than_tau_h_prime) {
                            m_log_B[j][regime] = regime_log_likelihood_with_right_sufficient_statistics + previous_regime_log_likelihood_without_middle_sufficient_statistics - m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats) - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                            m_log_B_reverse[j][regime] = regime_log_likelihood_with_right_sufficient_statistics_reverse + previous_regime_log_likelihood - m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats) - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                            m_previous_log_likelihoods_without_middle_sufficient_statistics[j] = previous_regime_log_likelihood_without_middle_sufficient_statistics;
                        }
                        else {
                            m_log_B[j][regime] = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics) + previous_regime_log_likelihood_with_middle_sufficient_statistics - m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats) - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                            m_log_B_reverse[j][regime] = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics_reverse) + previous_regime_log_likelihood - m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats) - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                            m_previous_log_likelihoods_with_middle_sufficient_statistics[j] = previous_regime_log_likelihood_with_middle_sufficient_statistics;
                        }
                    }
                }
            }
        }
        // also propose making a new regime
        if (previous_regime == actual_regime) {
            vector< double > regime_sufficient_stats_with_right_sufficient_statistics = m_right_sufficient_statistics[j];
            vector< double > regime_sufficient_stats_with_right_sufficient_statistics_reverse = m_right_sufficient_statistics_reverse[j];
            double log_likelihoods_with_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics);
            m_log_B[j][number_of_regimes] = log_likelihoods_with_right_sufficient_statistics + previous_regime_log_likelihood_without_right_sufficient_statistics - previous_regime_log_likelihood;
            // NOTE(review): previous_regime_log_likelihood_with_right_sufficient_statistics_reverse is still 0 in this
            // branch (it is only computed when previous_regime != actual_regime); the analogous reverse-only setup
            // earlier in this file uses ..._without_right_sufficient_statistics_reverse here — verify this is intended
            m_log_B_reverse[j][number_of_regimes] = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics_reverse) + previous_regime_log_likelihood_with_right_sufficient_statistics_reverse - previous_regime_log_likelihood;
            m_log_likelihoods_with_right_sufficient_statistics[j][number_of_regimes] = log_likelihoods_with_right_sufficient_statistics;
        }
        else {
            vector< double > regime_sufficient_stats_with_right_sufficient_statistics = m_right_sufficient_statistics[j];
            vector< double > regime_sufficient_stats_with_right_sufficient_statistics_reverse = m_right_sufficient_statistics_reverse[j];
            double regime_log_likelihood_with_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics);
            double regime_log_likelihood_with_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics_reverse);
            m_log_likelihoods_with_right_sufficient_statistics[j][number_of_regimes] = regime_log_likelihood_with_right_sufficient_statistics;
            m_actual_log_likelihoods_without_right_sufficient_statistics_reverse[j] = actual_regime_log_likelihood_without_right_sufficient_statistics_reverse;
            if (m_tau_h_greater_than_tau_h_prime) {
                m_log_B[j][number_of_regimes] = regime_log_likelihood_with_right_sufficient_statistics + previous_regime_log_likelihood_without_middle_sufficient_statistics - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                m_log_B_reverse[j][number_of_regimes] = regime_log_likelihood_with_right_sufficient_statistics_reverse + previous_regime_log_likelihood - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                m_previous_log_likelihoods_without_middle_sufficient_statistics[j] = previous_regime_log_likelihood_without_middle_sufficient_statistics;
                if (actual_regime == number_of_regimes) {
                    // because we have removed a regime and are now proposing adding it back in
                    m_actual_log_likelihoods_with_middle_sufficient_statistics[j] = actual_regime_log_likelihood_with_middle_sufficient_statistics;
                }
            }
            else {
                m_log_B[j][number_of_regimes] = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics) + previous_regime_log_likelihood_with_middle_sufficient_statistics - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                m_log_B_reverse[j][number_of_regimes] = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics_reverse) + previous_regime_log_likelihood - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                m_previous_log_likelihoods_with_middle_sufficient_statistics[j] = previous_regime_log_likelihood_with_middle_sufficient_statistics;
                if (actual_regime == number_of_regimes) {
                    // because we have removed a regime and are now proposing adding it back in
                    m_actual_log_likelihoods_without_middle_sufficient_statistics[j] = actual_regime_log_likelihood_without_middle_sufficient_statistics;
                }
            }
        }
        // for each regime calculate the marked vector (I) prior ratio; forward and reverse get the same value here
        if (m_h == m_dimension - 1 || m_particle.is_changepoint_index_separator_index(m_h + 1)) {
            for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
                m_log_q[j][regime] = m_log_q_reverse[j][regime] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, regime, false, actual_regime, m_removing_unobserved_regimes[j]);
            }
            m_log_q[j][number_of_regimes] = m_log_q_reverse[j][number_of_regimes] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, static_cast< unsigned int >(number_of_regimes), true, actual_regime, m_removing_unobserved_regimes[j]);
        }
        else {
            unsigned int subsequent_regime = m_particle.get_previous_regime(m_h + 2, j);
            for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
                m_log_q[j][regime] = m_log_q_reverse[j][regime] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, regime, false, actual_regime, m_removing_unobserved_regimes[j], subsequent_regime);
            }
            m_log_q[j][number_of_regimes] = m_log_q_reverse[j][number_of_regimes] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, static_cast<unsigned int>(number_of_regimes), true, actual_regime, m_removing_unobserved_regimes[j], subsequent_regime);
        }
        // set m_log_Bq = m_log_B + m_log_q
        m_log_Bq.push_back(vector< double >(number_of_regimes + 1, 0));
        m_log_Bq_reverse.push_back(vector< double >(number_of_regimes + 1, 0));
        for (unsigned int regime = 0; regime < number_of_regimes + 1; regime++){
            m_log_Bq[j][regime] = m_log_B[j][regime] + m_log_q[j][regime];
            m_log_Bq_reverse[j][regime] = m_log_B_reverse[j][regime] + m_log_q_reverse[j][regime];
        }
        // check if regime number_of_regimes - 1 is unobserved. If removing tau_h reduces the number of regimes by 1, this will already be accounted for
        if (m_particle.is_regime_unobserved(j, number_of_regimes - 1)) {
            // -1e300 acts as effectively log(0): this option can never be selected
            m_log_Bq[j][number_of_regimes - 1] = -1e300;
            m_log_Bq_reverse[j][number_of_regimes - 1] = -1e300;
        }
        // initialise m_log_Bq_reverse_descending_order and then calculate the ordering of the elements of m_log_Bq so that we can use fancy log and exp tricks to calculate the sum of the log Bq values.
        m_log_Bq_descending_order.push_back(vector< unsigned int >(number_of_regimes + 1, 0));
        m_log_Bq_reverse_descending_order.push_back(vector< unsigned int >(number_of_regimes + 1, 0));
        for (unsigned int i = 1; i < number_of_regimes + 1; i++) {
            m_log_Bq_descending_order[j][i] = i;
            m_log_Bq_reverse_descending_order[j][i] = i;
        }
        calculate_vector_descending_order(number_of_regimes + 1, m_log_Bq[j], m_log_Bq_descending_order[j]);
        calculate_vector_descending_order(number_of_regimes + 1, m_log_Bq_reverse[j], m_log_Bq_reverse_descending_order[j]);
        // calculate log of sum Bq (log-sum-exp with the largest term factored out for numerical stability)
        double largest_Bq = m_log_Bq[j][m_log_Bq_descending_order[j][0]];
        double largest_Bq_reverse = m_log_Bq_reverse[j][m_log_Bq_reverse_descending_order[j][0]];
        double temp_sum = 1;
        double temp_sum_reverse = 1;
        for (unsigned int index = 1; index < number_of_regimes + 1; index++){
            temp_sum += exp(m_log_Bq[j][m_log_Bq_descending_order[j][index]] - largest_Bq);
            temp_sum_reverse += exp(m_log_Bq_reverse[j][m_log_Bq_reverse_descending_order[j][index]] - largest_Bq_reverse);
        }
        m_log_of_sum_Bq[j] = largest_Bq + log(temp_sum);
        m_log_acceptance_prob += largest_Bq - largest_Bq_reverse + log(temp_sum) - log(temp_sum_reverse);
    }
    // set the trace index
    m_particle.is_changepoint_index_separator_index(m_h); // need to run this because we ran is_changepoint_index_separator_index(m_h + 1); before and if m_h + 1 is a separator it will set the trace index to be one too high
}

void rj::resampling_full_changepoint_setup(const unsigned int & trace_index) {
    // choose which changepoint to resample
    // choose the changepoint index to resample
    int lower_index_bound = ((trace_index == 0) ?
-1 : static_cast< int >(m_particle.get_separator_index(trace_index - 1)));
    unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
    // tau_h drawn uniformly from the changepoints belonging to this trace
    m_h = static_cast< unsigned int >(gsl_rng_uniform_int(r, trace_dimension) + lower_index_bound + 1);
    m_log_proposal_ratio = 0;
    m_log_k_prior_ratio = 0;
    m_log_acceptance_prob = 0;
    // m_log_B_reverse[i] contains log Bayes factor if I_h_j_prime = i vs no changepoint here. m_log_q_reverse is the log I ratio for the same setting. m_log_Bq_reverse is their sum
    m_log_B_reverse = vector< vector< double > >(0);
    m_log_q_reverse = vector< vector< double > >(0);
    m_log_Bq_reverse = vector< vector< double > >(0);
    m_log_likelihoods_with_right_sufficient_statistics_reverse = vector< vector< double > >(0);
    // m_log_Bq_reverse_descending_order[j][0] gives the index of the largest element in m_log_Bq_reverse, m_log_of_sum_Bq_reverse[j] gives the log of the sum of the Bq_reverse[j]
    m_log_Bq_reverse_descending_order = vector< vector< unsigned int > >(0);
    m_log_of_sum_Bq_reverse = vector< double >(m_number_of_processes, 0);
    m_actual_log_likelihoods_without_right_sufficient_statistics_reverse = vector< double >(m_number_of_processes, 0);
    // m_right_sufficient_statistics_reverse holds the sufficient statistics for the interval from m_h to m_h+1 for process j
    // the assignment here looks wrong, but it will have the cumulative data up to m_h subtracted later
    m_right_sufficient_statistics_reverse = vector< vector< double > >(m_number_of_processes);
    // fetch cumulative data up to the changepoint after tau_h (or to the end of the data if tau_h is the last one)
    if (m_h == m_dimension - 1) {
        m_pm_ptr->get_cumulative_sufficient_data(m_end + 1, m_right_sufficient_statistics_reverse);
    }
    else {
        m_pm_ptr->get_cumulative_sufficient_data(m_particle.get_changepoint(m_h + 1).get_position(), m_right_sufficient_statistics_reverse);
    }
    m_left_sufficient_statistics_reverse = vector< vector< double > >(m_number_of_processes);
    m_pm_ptr->get_cumulative_sufficient_data(m_particle.get_changepoint(m_h).get_position(), m_left_sufficient_statistics_reverse);
    vector< vector< double > > sufficient_statistics_up_to_left_cp_position(m_number_of_processes);
    // NOTE(review): if m_h can be 0 here, m_h - 1 underflows as an unsigned argument — presumably
    // get_changepoint accepts -1 / wraps to a sentinel changepoint; confirm against its declaration.
    m_pm_ptr->get_cumulative_sufficient_data(m_particle.get_changepoint(m_h - 1).get_position(), sufficient_statistics_up_to_left_cp_position);
    m_removing_unobserved_regimes = vector< bool >(m_number_of_processes, false);
    for (unsigned int j = 0; j < m_number_of_processes; j++) {
        size_t number_of_regimes = m_particle.get_number_of_regimes(j);
        // get the actual regime for changepoint m_h
        unsigned int actual_regime = m_particle.get_previous_regime(m_h + 1, j);
        // if we are removing a whole regime from the process then we need to subtract 1 from the number of regimes.
        // if deleting tau_h means that regime r_j becomes unobserved then the number of regimes is reduced by 1.
        if (m_particle.removing_full_changepoint_leaves_highest_regime_unobserved(j, actual_regime)) {
            number_of_regimes--;
            m_removing_unobserved_regimes[j] = true;
        }
        m_log_B_reverse.push_back(vector< double >(number_of_regimes + 1, 0));
        m_log_q_reverse.push_back(vector< double >(number_of_regimes + 1, 0));
        m_log_likelihoods_with_right_sufficient_statistics_reverse.push_back(vector< double >(number_of_regimes + 1, 0));
        for (unsigned int index = 0; index < m_right_sufficient_statistics_reverse[j].size(); index++) {
            // this is correct, as m_left_sufficient_statistics_reverse currently holds the information up to m_h
            m_right_sufficient_statistics_reverse[j][index] -= m_left_sufficient_statistics_reverse[j][index];
            // we now make m_left_sufficient_statistics_reverse correct
            m_left_sufficient_statistics_reverse[j][index] -= sufficient_statistics_up_to_left_cp_position[j][index];
        }
        // get the sufficient statistics for the regime that affects the changepoint prior to the new changepoint
        unsigned int previous_regime = m_particle.get_previous_regime(m_h, j);
        vector< double > previous_regime_sufficient_stats = m_particle.get_sufficient_statistics(j, previous_regime);
        // calculate the likelihood for the regime that affects the changepoint prior to the new changepoint
        double previous_regime_log_likelihood = m_particle.get_regime_log_likelihood(j, previous_regime);
        // sufficient statistics for the previous regime with the right interval's statistics added
        // (in the reverse move the previous regime absorbs the interval to the right of tau_h)
        vector< double > previous_regime_sufficient_statistics_with_right_sufficient_statistics_reverse = previous_regime_sufficient_stats;
        for (unsigned int index = 0; index < previous_regime_sufficient_statistics_with_right_sufficient_statistics_reverse.size(); index++) {
            previous_regime_sufficient_statistics_with_right_sufficient_statistics_reverse[index] += m_right_sufficient_statistics_reverse[j][index];
        }
        double previous_regime_log_likelihood_with_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_with_right_sufficient_statistics_reverse);
        // likelihood of the currently-assigned regime with the right interval's statistics removed
        vector< double > actual_regime_sufficient_statistics_without_right_sufficient_statistics_reverse = m_particle.get_sufficient_statistics(j, actual_regime);
        for (unsigned int index = 0; index < actual_regime_sufficient_statistics_without_right_sufficient_statistics_reverse.size(); index++) {
            actual_regime_sufficient_statistics_without_right_sufficient_statistics_reverse[index] -= m_right_sufficient_statistics_reverse[j][index];
        }
        m_actual_log_likelihoods_without_right_sufficient_statistics_reverse[j] = m_pm_ptr->calculate_log_likelihood(j, actual_regime_sufficient_statistics_without_right_sufficient_statistics_reverse);
        // for each regime calculate the reverse Bayes factor for assigning this regime to the new changepoint.
        for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
            if (previous_regime == actual_regime) {
                if (regime == previous_regime) {
                    // same regime on both sides: no change in likelihood
                    m_log_B_reverse[j][regime] = 0;
                }
                else {
                    vector< double > previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse = previous_regime_sufficient_stats;
                    for (unsigned int index = 0; index < previous_regime_sufficient_statistics_with_right_sufficient_statistics_reverse.size(); index++) {
                        previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse[index] -= m_right_sufficient_statistics_reverse[j][index];
                    }
                    double previous_regime_log_likelihood_without_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse);
                    vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime);
                    // calculate the sufficient statistics for the regime if the right sufficient statistics are added to it
                    vector< double > regime_sufficient_stats_with_right_sufficient_statistics_reverse = m_particle.get_sufficient_statistics(j, regime);
                    for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) {
                        regime_sufficient_stats_with_right_sufficient_statistics_reverse[index] += m_right_sufficient_statistics_reverse[j][index];
                    }
                    double regime_log_likelihood_with_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics_reverse);
                    m_log_B_reverse[j][regime] = regime_log_likelihood_with_right_sufficient_statistics_reverse + previous_regime_log_likelihood_without_right_sufficient_statistics_reverse - m_particle.get_regime_log_likelihood(j, regime) - previous_regime_log_likelihood;
                    m_log_likelihoods_with_right_sufficient_statistics_reverse[j][regime] = regime_log_likelihood_with_right_sufficient_statistics_reverse;
                }
            }
            else {
                if (regime == previous_regime) {
                    m_log_B_reverse[j][regime] = 0;
                    m_log_likelihoods_with_right_sufficient_statistics_reverse[j][regime] = previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                }
                else {
                    if (regime == actual_regime) {
                        vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime);
                        // calculate the sufficient statistics for the regime if the right sufficient statistics are removed from it
                        vector< double > regime_sufficient_stats_without_right_sufficient_statistics = m_particle.get_sufficient_statistics(j, regime);
                        for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) {
                            regime_sufficient_stats_without_right_sufficient_statistics[index] -= m_right_sufficient_statistics_reverse[j][index];
                        }
                        m_log_B_reverse[j][regime] = m_particle.get_regime_log_likelihood(j, regime) + previous_regime_log_likelihood - m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_without_right_sufficient_statistics) - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
                    }
                    else {
                        vector< double > regime_sufficient_stats = m_particle.get_sufficient_statistics(j, regime);
                        // calculate the sufficient statistics for the regime if the right sufficient statistics are added to it
                        vector< double > regime_sufficient_stats_with_right_sufficient_statistics_reverse = m_particle.get_sufficient_statistics(j, regime);
                        for (unsigned int index = 0; index < regime_sufficient_stats.size(); index++) {
                            regime_sufficient_stats_with_right_sufficient_statistics_reverse[index] += m_right_sufficient_statistics_reverse[j][index];
                        }
                        double regime_log_likelihood_with_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, regime_sufficient_stats_with_right_sufficient_statistics_reverse);
                        m_log_B_reverse[j][regime] = regime_log_likelihood_with_right_sufficient_statistics_reverse + previous_regime_log_likelihood - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse - m_particle.get_regime_log_likelihood(j, regime);
                        m_log_likelihoods_with_right_sufficient_statistics_reverse[j][regime] = regime_log_likelihood_with_right_sufficient_statistics_reverse;
                    }
                }
            }
        }
        // also propose adding a new regime
        if (previous_regime == actual_regime) {
            // calculate the likelihood for the previous regime with right interval removed
            vector< double > previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse = previous_regime_sufficient_stats;
            for (unsigned int index = 0; index < previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse.size(); index++) {
                previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse[index] -= m_right_sufficient_statistics_reverse[j][index];
            }
            double previous_regime_log_likelihood_without_right_sufficient_statistics = m_pm_ptr->calculate_log_likelihood(j, previous_regime_sufficient_statistics_without_right_sufficient_statistics_reverse);
            double regime_log_likelihood_with_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, m_right_sufficient_statistics_reverse[j]);
            m_log_B_reverse[j][number_of_regimes] = previous_regime_log_likelihood_without_right_sufficient_statistics + regime_log_likelihood_with_right_sufficient_statistics_reverse - previous_regime_log_likelihood;
            m_log_likelihoods_with_right_sufficient_statistics_reverse[j][number_of_regimes] = regime_log_likelihood_with_right_sufficient_statistics_reverse;
        }
        else {
            double regime_log_likelihood_with_right_sufficient_statistics_reverse = m_pm_ptr->calculate_log_likelihood(j, m_right_sufficient_statistics_reverse[j]);
            m_log_B_reverse[j][number_of_regimes] = previous_regime_log_likelihood + regime_log_likelihood_with_right_sufficient_statistics_reverse - previous_regime_log_likelihood_with_right_sufficient_statistics_reverse;
            m_log_likelihoods_with_right_sufficient_statistics_reverse[j][number_of_regimes] = regime_log_likelihood_with_right_sufficient_statistics_reverse;
        }
        // prior ratios: separator changepoints use the separator-specific prior, others use the marked-vector prior
        if (m_particle.is_changepoint_index_separator_index(m_h))
        {
            if (m_particle.is_changepoint_index_separator_index(m_h + 1) || m_h == m_dimension - 1) {
                m_log_q_reverse[j][number_of_regimes] = m_particle.log_resampling_separator_changepoint_prior_ratio(j, true, true, m_removing_unobserved_regimes[j], actual_regime);
            }
            else {
                unsigned int following_regime = m_particle.get_previous_regime(m_h + 2, j);
                m_log_q_reverse[j][number_of_regimes] = m_particle.log_resampling_separator_changepoint_prior_ratio(j, false, true, m_removing_unobserved_regimes[j], actual_regime, following_regime);
                for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
                    m_log_q_reverse[j][regime] = m_particle.log_resampling_separator_changepoint_prior_ratio(j, false, false, m_removing_unobserved_regimes[j], actual_regime, following_regime, regime);
                }
            }
        }
        else {
            // for each regime calculate marked vector prior ratio
            if (m_h == m_dimension - 1 || m_particle.is_changepoint_index_separator_index(m_h + 1)) {
                for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
                    m_log_q_reverse[j][regime] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, regime, false, actual_regime, m_removing_unobserved_regimes[j]);
                }
                m_log_q_reverse[j][number_of_regimes] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, static_cast< unsigned int >(number_of_regimes), true, actual_regime, m_removing_unobserved_regimes[j]);
            }
            else {
                unsigned int subsequent_regime = m_particle.get_previous_regime(m_h + 2, j);
                for (unsigned int regime = 0; regime < number_of_regimes; regime++) {
                    m_log_q_reverse[j][regime] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, regime, false, actual_regime, m_removing_unobserved_regimes[j], subsequent_regime);
                }
                m_log_q_reverse[j][number_of_regimes] = m_particle.full_log_I_prior_remove_ratio(m_h, j, previous_regime, static_cast< unsigned int >(number_of_regimes), true, actual_regime, m_removing_unobserved_regimes[j], subsequent_regime);
            }
        }
        // set m_log_Bq = m_log_B + m_log_q
        m_log_Bq_reverse.push_back(vector< double >(number_of_regimes + 1, 0));
        for (unsigned int regime = 0; regime < number_of_regimes + 1; regime++){
            m_log_Bq_reverse[j][regime] = m_log_B_reverse[j][regime] + m_log_q_reverse[j][regime];
        }
        // check if regime number_of_regimes - 1 is unobserved. If removing tau_h reduces the number of regimes by 1, this will already be accounted for
        if (m_particle.is_regime_unobserved(j, number_of_regimes - 1)) {
            m_log_Bq_reverse[j][number_of_regimes - 1] = -1e300;
        }
        // initialise m_log_Bq_reverse_descending_order and then calculate the ordering of the elements of m_log_Bq so that we can use fancy log and exp tricks to calculate the sum of the log Bq values.
        m_log_Bq_reverse_descending_order.push_back(vector< unsigned int >(number_of_regimes + 1, 0));
        for (unsigned int i = 1; i < number_of_regimes + 1; i++) {
            m_log_Bq_reverse_descending_order[j][i] = i;
        }
        calculate_vector_descending_order(number_of_regimes + 1, m_log_Bq_reverse[j], m_log_Bq_reverse_descending_order[j]);
        // calculate log of sum Bq (log-sum-exp anchored at the largest element)
        double largest_Bq_reverse = m_log_Bq_reverse[j][m_log_Bq_reverse_descending_order[j][0]];
        double temp_sum = 1;
        for (unsigned int index = 1; index < number_of_regimes + 1; index++){
            temp_sum += exp(m_log_Bq_reverse[j][m_log_Bq_reverse_descending_order[j][index]] - largest_Bq_reverse);
        }
        m_log_of_sum_Bq_reverse[j] = largest_Bq_reverse + log(temp_sum);
    }
}

// Set up an "alter unobserved regimes" proposal: for each process, independently propose adding or
// removing one unobserved regime, accepting or rejecting each per-process proposal inline.
void rj::altering_unobserved_regimes_setup() {
    m_log_acceptance_prob = 0;
    m_log_k_prior_ratio = 0;
    m_log_regimes_prior_ratio = 0;
    m_log_full_I_prior_ratio = 0;
    // +1 = add an unobserved regime, -1 = remove one, 0 = no change for that process
    m_altering_unobserved_regimes = vector< int >(m_number_of_processes, 0);
    double alpha, beta, u, log_acceptance_prob, log_full_I_prior_ratio, log_regimes_prior_ratio;
    for (unsigned int process = 0; process < m_number_of_processes; process++) {
        // removal is only possible when an unobserved regime exists; otherwise always propose an add
        if (m_particle.get_number_of_unobserved_regimes()[process] > 0) {
            alpha = 1.0 / 2.0, beta = 1.0;
        }
        else {
            alpha = 1.0, beta = 1.0;
        }
        u = gsl_ran_flat(r, 0, 1);
        double r_j = static_cast< double
>(m_particle.get_number_of_regimes(process));
        double r_tilde_j = static_cast< double >(m_particle.get_number_of_unobserved_regimes()[process]);
        if (u < alpha) {
            // propose adding an unobserved regime: proposal ratio + prior ratios folded into one acceptance test
            log_acceptance_prob = log(0.5) - log(alpha) + log(r_j) - log(r_tilde_j + 1.0);
            log_full_I_prior_ratio = m_particle.calculate_and_get_add_unobserved_regimes_full_I_prior_ratio(process);
            log_regimes_prior_ratio = log(1.0 - m_particle.get_rho());
            log_acceptance_prob += log_full_I_prior_ratio + log_regimes_prior_ratio;
            if (log_acceptance_prob > 0 || (log(gsl_ran_flat(r, 0, 1)) < log_acceptance_prob)) {
                m_altering_unobserved_regimes[process] = 1;
                m_log_full_I_prior_ratio += log_full_I_prior_ratio;
                m_log_regimes_prior_ratio += log_regimes_prior_ratio;
            }
        }
        else if (u < beta) {
            // propose removing an unobserved regime; the log(2.0) term corrects the proposal
            // probability when the last unobserved regime is being removed
            log_acceptance_prob = log(0.5) - log(beta - alpha) + (m_particle.get_number_of_unobserved_regimes()[process] == 1 ? log(2.0) : 0.0) + log(r_tilde_j) - log(r_j - 1.0);
            log_full_I_prior_ratio = m_particle.calculate_and_get_remove_unobserved_regimes_full_I_prior_ratio(process);
            log_regimes_prior_ratio = -log(1 - m_particle.get_rho());
            log_acceptance_prob += log_full_I_prior_ratio + log_regimes_prior_ratio;
            if (log_acceptance_prob > 0 || (log(gsl_ran_flat(r, 0, 1)) < log_acceptance_prob)) {
                m_altering_unobserved_regimes[process] = -1;
                m_log_full_I_prior_ratio += log_full_I_prior_ratio;
                m_log_regimes_prior_ratio += log_regimes_prior_ratio;
            }
        }
    }
}

// Apply the accepted proposal to the particle. u1 is the same uniform draw that selected the move type,
// so the branch taken here matches the setup routine that was run (birth / death / move / resample / alter-unobserved).
void rj::full_acceptance_procedure(const double & u1){
    m_particle.increase_log_k_prior(m_log_k_prior_ratio);
    if (u1 < m_b_k) {
        // birth: choose the new regimes for this new changepoint by inverse-CDF sampling over the
        // Bq values, visited in descending order so the cumulative sum converges quickly
        vector< unsigned int > new_regimes(m_number_of_processes, 0);
        double temp_sum, regime_chooser;
        unsigned int index;
        bool chosen;
        for (unsigned int j = 0; j < m_number_of_processes; j++) {
            temp_sum = 0;
            regime_chooser = log(gsl_ran_flat(r, 0, 1)) + m_log_of_sum_Bq[j];
            index = 0;
            chosen = false;
            do {
                temp_sum += exp(m_log_Bq[j][m_log_Bq_descending_order[j][index]] - m_log_Bq[j][m_log_Bq_descending_order[j][0]]);
                chosen = regime_chooser <= m_log_Bq[j][m_log_Bq_descending_order[j][0]] + log(temp_sum);
                index++;
            } while(!chosen);
            new_regimes[j] = m_log_Bq_descending_order[j][index - 1];
            m_particle.increase_log_likelihood(m_log_B[j][new_regimes[j]]);
            bool adding_new_regime = new_regimes[j] == m_log_q[j].size() - 1;
            // if a new regime is added, the (1-rho) in m_log_q[j][new_regimes[j]] will be added to m_log_regime_prior and taken away from m_log_full_I_prior
            m_particle.increase_log_full_I_prior(m_log_q[j][new_regimes[j]], adding_new_regime, j);
        }
        vector< double > right_number_of_observations = vector< double >(m_number_of_processes);
        m_pm_ptr->get_number_of_observations(m_right_sufficient_statistics, right_number_of_observations);
        m_particle.add_full_changepoint(m_particle.get_add_changepoint_index(), m_adding_changepoint, new_regimes, m_right_sufficient_statistics, m_log_likelihoods_with_right_sufficient_statistics, m_previous_log_likelihoods_without_right_sufficient_statistics, right_number_of_observations);
    }
    else if (u1 < m_d_k) {
        // death: remove tau_h, folding its right interval back into the preceding regime
        for (unsigned int j = 0; j < m_number_of_processes; j++) {
            unsigned int actual_regime = m_particle.get_previous_regime(m_h + 1, j);
            m_particle.increase_log_likelihood(-m_log_B_reverse[j][actual_regime]);
            m_particle.decrease_log_full_I_prior(m_log_q_reverse[j][actual_regime], m_removing_unobserved_regimes[j], false, j);
        }
        vector< double > right_number_of_observations_reverse = vector< double >(m_number_of_processes);
        m_pm_ptr->get_number_of_observations(m_right_sufficient_statistics_reverse, right_number_of_observations_reverse);
        m_particle.remove_full_changepoint(m_h, m_right_sufficient_statistics_reverse, m_actual_log_likelihoods_without_right_sufficient_statistics_reverse, m_previous_log_likelihoods_with_right_sufficient_statistics_reverse, right_number_of_observations_reverse, m_removing_unobserved_regimes);
    }
    else if (u1 < m_m_k) {
        // move: choose the new regimes for the relocated changepoint (same inverse-CDF scheme as birth)
        vector< unsigned int > new_regimes(m_number_of_processes, 0);
        double
temp_sum, regime_chooser;
        unsigned int index;
        bool chosen;
        for (unsigned int j = 0; j < m_number_of_processes; j++) {
            temp_sum = 0;
            regime_chooser = log(gsl_ran_flat(r, 0, 1)) + m_log_of_sum_Bq[j];
            index = 0;
            chosen = false;
            do {
                temp_sum += exp(m_log_Bq[j][m_log_Bq_descending_order[j][index]] - m_log_Bq[j][m_log_Bq_descending_order[j][0]]);
                chosen = regime_chooser <= m_log_Bq[j][m_log_Bq_descending_order[j][0]] + log(temp_sum);
                index++;
            } while(!chosen);
            new_regimes[j] = m_log_Bq_descending_order[j][index - 1];
            unsigned int actual_regime = m_particle.get_previous_regime(m_h + 1, j);
            // net likelihood change: new assignment in, old assignment out
            m_particle.increase_log_likelihood(m_log_B[j][new_regimes[j]] - m_log_B_reverse[j][actual_regime]);
            bool adding_new_regime = new_regimes[j] == m_log_q[j].size() - 1;
            // if a new regime is added, the (1-rho) in m_log_q[j][new_regimes[j]] will be added to m_log_regime_prior and taken away from m_log_full_I_prior
            m_particle.decrease_log_full_I_prior(-m_log_q[j][new_regimes[j]] + m_log_q_reverse[j][actual_regime], m_removing_unobserved_regimes[j], adding_new_regime, j);
        }
        vector< double > right_number_of_observations = vector< double >(m_number_of_processes);
        m_pm_ptr->get_number_of_observations(m_right_sufficient_statistics, right_number_of_observations);
        vector< double > right_number_of_observations_reverse = vector< double >(m_number_of_processes);
        m_pm_ptr->get_number_of_observations(m_right_sufficient_statistics_reverse, right_number_of_observations_reverse);
        vector< double > middle_number_of_observations = vector< double >(m_number_of_processes);
        m_pm_ptr->get_number_of_observations(m_middle_sufficient_statistics, middle_number_of_observations);
        m_particle.move_full_changepoint(m_h, m_adding_changepoint, new_regimes, m_tau_h_greater_than_tau_h_prime, m_right_sufficient_statistics, right_number_of_observations, m_right_sufficient_statistics_reverse, right_number_of_observations_reverse, m_middle_sufficient_statistics, middle_number_of_observations,m_previous_log_likelihoods_without_right_sufficient_statistics, m_previous_log_likelihoods_with_right_sufficient_statistics_reverse, m_previous_log_likelihoods_with_middle_sufficient_statistics, m_previous_log_likelihoods_without_middle_sufficient_statistics, m_log_likelihoods_with_right_sufficient_statistics, m_actual_log_likelihoods_with_middle_sufficient_statistics, m_actual_log_likelihoods_without_middle_sufficient_statistics, m_actual_log_likelihoods_without_right_sufficient_statistics_reverse, m_removing_unobserved_regimes);
    }
    else if (u1 < m_r_k){
        // resample marked vector: re-draw the regime assignments of tau_h using the reverse Bq values
        vector< unsigned int > new_regimes(m_number_of_processes, 0);
        double temp_sum, regime_chooser;
        unsigned int index;
        bool chosen;
        for (unsigned int j = 0; j < m_number_of_processes; j++) {
            temp_sum = 0;
            regime_chooser = log(gsl_ran_flat(r, 0, 1)) + m_log_of_sum_Bq_reverse[j];
            index = 0;
            chosen = false;
            do {
                temp_sum += exp(m_log_Bq_reverse[j][m_log_Bq_reverse_descending_order[j][index]] - m_log_Bq_reverse[j][m_log_Bq_reverse_descending_order[j][0]]);
                chosen = regime_chooser <= m_log_Bq_reverse[j][m_log_Bq_reverse_descending_order[j][0]] + log(temp_sum);
                index++;
            } while(!chosen);
            new_regimes[j] = m_log_Bq_reverse_descending_order[j][index - 1];
            unsigned int actual_regime = m_particle.get_previous_regime(m_h + 1, j);
            m_particle.increase_log_likelihood(m_log_B_reverse[j][new_regimes[j]] - m_log_B_reverse[j][actual_regime]);
            bool adding_new_regime = new_regimes[j] == m_log_q_reverse[j].size() - 1;
            // if a new regime is added, the (1-rho) in m_log_q[j][new_regimes[j]] will be added to m_log_regime_prior and taken away from m_log_full_I_prior
            m_particle.decrease_log_full_I_prior(-m_log_q_reverse[j][new_regimes[j]] + m_log_q_reverse[j][actual_regime], m_removing_unobserved_regimes[j], adding_new_regime, j);
        }
        vector< double > right_number_of_observations_reverse = vector< double >(m_number_of_processes);
        m_pm_ptr->get_number_of_observations(m_right_sufficient_statistics_reverse, right_number_of_observations_reverse);
        m_particle.resample_full_changepoint(m_h, new_regimes, m_right_sufficient_statistics_reverse, right_number_of_observations_reverse, m_log_likelihoods_with_right_sufficient_statistics_reverse, m_actual_log_likelihoods_without_right_sufficient_statistics_reverse, m_removing_unobserved_regimes);
    }
    else if (u1 < m_au_k) {
        // alter unobserved regimes: apply the per-process add/remove decisions made during setup
        m_particle.increase_log_full_I_prior_unobserved(m_log_full_I_prior_ratio, m_altering_unobserved_regimes);
        m_particle.increase_log_regimes_prior(m_log_regimes_prior_ratio);
        m_particle.alter_unobserved_regimes(m_altering_unobserved_regimes, m_number_of_processes);
    }
}

// Record summary statistics of the current particle (dimension, regime counts, similarity matrices,
// association matrices, log posterior) for post-processing.
void rj::full_recording_procedure() {
    m_recorded_full_dimensions.push_back(m_particle.get_dimension());
    //calculate the effective dimension (i.e. don't count changepoints where none of the processes change regime at that changepoint.
    m_recorded_full_effective_dimensions.push_back(m_particle.calculate_and_get_full_effective_dimension());
    vector< unsigned long int > changepoint_hist = m_particle.calculate_and_get_full_changepoint_histogram(m_number_of_changepoint_bins, m_number_of_processes);
    size_t size_of_recorded_changepoints = m_recorded_full_changepoints.size();
    if (size_of_recorded_changepoints > 0){
        //have we started recording changepoint histograms, or is this the first time?
        for (unsigned long int i = 0; i < size_of_recorded_changepoints; i++){
            m_recorded_full_changepoints[i] += changepoint_hist[i];
        }
    }
    else {
        m_recorded_full_changepoints = changepoint_hist;
    }
    // per-process regime counts at this sample
    vector< size_t > number_of_regimes(m_number_of_processes, 0);
    for (unsigned int proc = 0; proc < m_number_of_processes; proc++) {
        number_of_regimes[proc] = m_particle.get_number_of_regimes(proc);
    }
    m_recorded_number_of_regimes.push_back(number_of_regimes);
    vector< unsigned int > number_of_observed_regimes(m_number_of_processes, 0);
    for (unsigned int proc = 0; proc < m_number_of_processes; proc++) {
        number_of_observed_regimes[proc] = m_particle.get_number_of_observed_regimes(proc);
    }
    m_recorded_number_of_observed_regimes.push_back(number_of_observed_regimes);
    if (m_number_of_traces > 0) {
        // running totals plus a per-sample snapshot of both similarity matrices
        if (m_recorded_similarity_matrix.size() > 0) {
            m_particle.calculate_and_add_similarity_matrices(m_recorded_similarity_matrix, m_number_of_processes);
            m_particle.calculate_and_add_min_proportion_similarity_matrices(m_recorded_min_proportion_similarity_matrix, m_number_of_processes, m_observations_in_each_trace);
            m_recorded_similarity_matrices.push_back(vector< vector< double > >(m_number_of_traces, vector< double >(m_number_of_traces, 0.0)));
            m_recorded_min_proportion_similarity_matrices.push_back(vector< vector< double > >(m_number_of_traces, vector< double >(m_number_of_traces, 0.0)));
            m_particle.calculate_and_add_similarity_matrices(m_recorded_similarity_matrices.back(), m_number_of_processes);
            m_particle.calculate_and_add_min_proportion_similarity_matrices(m_recorded_min_proportion_similarity_matrices.back(), m_number_of_processes, m_observations_in_each_trace);
        }
        else {
            // first recorded sample: allocate the accumulators before adding to them
            m_recorded_similarity_matrix = vector< vector< double > >(m_number_of_traces, vector< double >(m_number_of_traces, 0.0));
            m_recorded_min_proportion_similarity_matrix = vector< vector< double > >(m_number_of_traces, vector< double >(m_number_of_traces, 0.0));
            m_recorded_similarity_matrices = vector< vector< vector< double > > >(1, vector< vector< double > >(m_number_of_traces, vector< double >(m_number_of_traces, 0.0)));
            m_recorded_min_proportion_similarity_matrices = vector< vector< vector< double > > >(1, vector< vector< double > >(m_number_of_traces, vector< double >(m_number_of_traces, 0.0)));
            m_particle.calculate_and_add_similarity_matrices(m_recorded_similarity_matrix, m_number_of_processes);
            m_particle.calculate_and_add_min_proportion_similarity_matrices(m_recorded_min_proportion_similarity_matrix, m_number_of_processes, m_observations_in_each_trace);
            m_particle.calculate_and_add_similarity_matrices(m_recorded_similarity_matrices.back(), m_number_of_processes);
            m_particle.calculate_and_add_min_proportion_similarity_matrices(m_recorded_min_proportion_similarity_matrices.back(), m_number_of_processes, m_observations_in_each_trace);
        }
    }
    if (m_recording_association_matrix) {
        for (unsigned int process = 0; process < m_number_of_processes; process++) {
            m_particle.add_to_association_matrix(m_association_matrices[process], process);
        }
    }
    double log_posterior = m_particle.get_full_log_posterior();
    m_recorded_full_log_posteriors.push_back(log_posterior);
}

// Keep the best (maximum a posteriori) particle seen so far.
void rj::update_full_MAP() {
    double log_posterior = m_particle.get_full_log_posterior();
    if (m_full_MAP_log_posterior < log_posterior){
        m_full_MAP_particle = m_particle;
        m_full_MAP_dimension = m_full_MAP_particle.get_dimension();
        m_full_MAP_log_posterior = log_posterior;
    }
}

// Main driver for the full-changepoint stage: a burn-in loop followed by the recorded sampling loop.
// Each iteration picks a trace, picks a move type (birth/death/move/resample/alter-unobserved) from
// cumulative thresholds m_b_k..m_au_k, runs its setup, and applies a Metropolis-Hastings accept step.
void rj::run_full_simulation(){
    cout << "starting full changepoint stage" << endl;
    m_particle.print_likelihood();
    cout << "the posterior is " << m_particle.get_full_log_posterior() << endl << endl;
    // burnin
    for (unsigned long int iteration = 0; iteration < m_full_burnin; iteration++) {
        m_dimension = m_particle.get_dimension();
        unsigned int trace_index = static_cast< unsigned int >(gsl_rng_uniform_int(r, m_number_of_traces));
        unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
        if (trace_dimension > 0) {
            // cumulative move-type probabilities; with no changepoints in the trace only birth is possible
            m_b_k = 1.0 / 4.0, m_d_k = 2.0 / 4.0, m_m_k = 2.75 / 4.0, m_r_k = 3.5 / 4.0, m_au_k = 4.0 / 4.0;
        }
        else {
            m_b_k = 1.0;
        }
        double u1 = gsl_ran_flat(r, 0, 1);
        if (u1 < m_b_k) {
            // birth
            adding_full_changepoint_setup(trace_index);
        }
        else if (u1 < m_d_k) {
            // death
            removing_full_changepoint_setup(trace_index);
        }
        else if (u1 < m_m_k) {
            // move
            moving_full_changepoint_setup(trace_index);
        }
        else if (u1 < m_r_k) {
            // resample marked vector
            resampling_full_changepoint_setup(trace_index);
        }
        else if (u1 < m_au_k) {
            // add unobserved regimes
            altering_unobserved_regimes_setup();
        }
        // NOTE(review): burn-in accepts on >= 0 whereas the recorded loop below uses > 0 — presumably
        // harmless (equality has probability ~0) but confirm the asymmetry is intentional.
        if (m_log_acceptance_prob >= 0 || (log(gsl_ran_flat(r, 0, 1)) < m_log_acceptance_prob)){
            full_acceptance_procedure(u1);
        }
        //m_particle.check_observations_in_traces(m_end + 1);
        //check_total_full_log_likelihood(m_particle);
        //m_particle.check_full_log_posterior();
        /*if (iteration % 1000 == 0 && (m_particle.calculate_and_get_full_log_posterior(m_number_of_processes) - m_particle.get_full_log_posterior() > 0.000001 || m_particle.calculate_and_get_full_log_posterior(m_number_of_processes) - m_particle.get_full_log_posterior() < -0.000001)) { cout << iteration << '\t' << m_particle.calculate_and_get_full_log_posterior(m_number_of_processes) - m_particle.get_full_log_posterior() << endl; }*/
    }
    // recorded sampling loop: same move mechanics as burn-in, plus proposal/acceptance counters,
    // MAP tracking, and thinned sample recording
    for (unsigned long int iteration = 0; iteration < m_full_iterations; iteration++) {
        m_dimension = m_particle.get_dimension();
        unsigned int trace_index = static_cast< unsigned int >(gsl_rng_uniform_int(r, m_number_of_traces));
        unsigned int trace_dimension = m_particle.calculate_trace_dimension(trace_index);
        if (trace_dimension > 0) {
            m_b_k = 1.0 / 4.0, m_d_k = 2.0 / 4.0, m_m_k = 2.75 / 4.0, m_r_k = 3.5 / 4.0, m_au_k = 4.0 / 4.0;
        }
        else {
            m_b_k = 1.0;
        }
        double u1 = gsl_ran_flat(r, 0, 1);
        if (u1 < m_b_k) {
            // birth
            adding_full_changepoint_setup(trace_index);
            m_recorded_full_birth_proposals++;
        }
        else if (u1 < m_d_k) {
            // death
            removing_full_changepoint_setup(trace_index);
            m_recorded_full_death_proposals++;
        }
        else if (u1 < m_m_k) {
            // move
            moving_full_changepoint_setup(trace_index);
            m_recorded_full_move_proposals++;
        }
        else if (u1 < m_r_k) {
            // resample marked vector
            resampling_full_changepoint_setup(trace_index);
            m_recorded_full_resample_proposals++;
        }
        else if (u1 < m_au_k) {
            // add unobserved regimes
            altering_unobserved_regimes_setup();
            m_recorded_full_unobserveds_proposals++;
        }
        if (m_log_acceptance_prob > 0 || (log(gsl_ran_flat(r, 0, 1)) < m_log_acceptance_prob)){
            full_acceptance_procedure(u1);
            // check if this alters the best-seen set of change points and regimes
            update_full_MAP();
            if (u1 < m_b_k) {
                // birth
                m_recorded_full_birth_acceptances++;
            }
            else if (u1 < m_d_k) {
                // death
                m_recorded_full_death_acceptances++;
            }
            else if (u1 < m_m_k) {
                // move
                m_recorded_full_move_acceptances++;
            }
            else if (u1 < m_r_k) {
                // resample marked vector
                m_recorded_full_resample_acceptances++;
            }
            else if (u1 < m_au_k) {
                // add unobserved regimes
                m_recorded_full_unobserveds_acceptances++;
            }
        }
        //check_total_full_log_likelihood(m_particle);
        //m_particle.check_full_log_posterior();
        if (m_recording_full_samples && (iteration % m_full_thinning == 0)) {
            //store sample
            full_recording_procedure();
            /* CODE IF YOU WANT TO REGULARLY WRITE OUTPUT TO FILE if (CONDITION ON ITERATION) { string MAP_cps_Filename = m_data_file + "_full_MAP_CPs.txt"; rjobject.write_full_MAP_changepoints_to_file(MAP_cps_Filename); string dimension_distribution_Filename = m_data_file + "_full_dimension_distribution.txt"; rjobject.write_full_dimension_distribution_to_file(dimension_distribution_Filename); string changepoints_distribution_Filename = m_data_file + "_full_changepoints_distribution.txt"; rjobject.write_full_changepoints_distribution_to_file(changepoints_distribution_Filename, iteration); string log_posterior_trace_Filename = m_data_file + "_full_log_posterior_trace.txt"; rjobject.write_full_log_posterior_trace_to_file(log_posterior_trace_Filename); }*/
        }
        /*if (m_particle.calculate_and_get_full_log_posterior(m_number_of_processes) - m_particle.get_full_log_posterior() > 0.000001 || m_particle.calculate_and_get_full_log_posterior(m_number_of_processes) - m_particle.get_full_log_posterior() < -0.000001) { cout << iteration << '\t' << m_particle.calculate_and_get_full_log_posterior(m_number_of_processes) - m_particle.get_full_log_posterior() << endl; }*/
    }
    cout << "ending full changepoint stage" << endl;
    m_particle.print_likelihood();
    cout << "the posterior is " << m_particle.get_full_log_posterior() << endl;
}

/*void rj::add_unobserved_regimes_setup() { m_adding_unobserved_regimes = vector< bool >(m_number_of_processes, false); for (unsigned int process = 0; process < m_number_of_processes; process++) { m_adding_unobserved_regimes[process] = gsl_ran_flat(r, 0, 1) > 0.5; } m_log_k_prior_ratio = 0; m_log_full_I_prior_ratio = m_particle.calculate_and_get_add_unobserved_regimes_full_I_prior_ratio(m_adding_unobserved_regimes, m_number_of_processes); m_log_regimes_prior_ratio = m_particle.calculate_and_get_add_unobserved_regimes_regimes_prior_ratio(m_adding_unobserved_regimes, m_number_of_processes); m_log_acceptance_prob = m_log_full_I_prior_ratio + m_log_regimes_prior_ratio + m_particle.calculate_and_get_add_unobserved_regimes_proposal_ratio(m_adding_unobserved_regimes, m_number_of_processes); } void rj::remove_unobserved_regimes_setup() { vector< bool > any_unobserved_regimes = m_particle.any_unobserved_regimes(); m_removing_unobserved_regimes = vector< bool >(m_number_of_processes, false); for (unsigned int process = 0; process < m_number_of_processes; process++) { if (any_unobserved_regimes[process]) { m_removing_unobserved_regimes[process] = gsl_ran_flat(r, 0, 1) > 0.5; } } m_log_k_prior_ratio = 0; m_log_full_I_prior_ratio = m_particle.calculate_and_get_remove_unobserved_regimes_full_I_prior_ratio(m_removing_unobserved_regimes, m_number_of_processes); m_log_regimes_prior_ratio = m_particle.calculate_and_get_remove_unobserved_regimes_regimes_prior_ratio(m_removing_unobserved_regimes,
m_number_of_processes); m_log_acceptance_prob = m_log_full_I_prior_ratio + m_log_regimes_prior_ratio + m_particle.calculate_and_get_remove_unobserved_regimes_proposal_ratio(m_removing_unobserved_regimes, m_number_of_processes); }*/ /*else if (u1 < m_au_k) { m_particle.increase_log_full_I_prior_unobserved(m_log_full_I_prior_ratio, m_adding_unobserved_regimes, true); m_particle.increase_log_regimes_prior(m_log_regimes_prior_ratio); m_particle.add_unobserved_regimes(m_adding_unobserved_regimes, m_number_of_processes); } else if (u1 < m_ru_k) { m_particle.increase_log_full_I_prior_unobserved(m_log_full_I_prior_ratio, m_removing_unobserved_regimes, false); m_particle.increase_log_regimes_prior(m_log_regimes_prior_ratio); m_particle.remove_unobserved_regimes(m_removing_unobserved_regimes, m_number_of_processes); }*/ /*else if (u1 < m_au_k) { for (unsigned int process = 0; process < m_number_of_processes; process++) { if (m_adding_unobserved_regimes[process]) { full_log_acceptance_probability += log(1 - m_particle.get_rho()); size_t number_of_regimes = m_particle.get_number_of_regimes(process); double r_j = static_cast< double >(number_of_regimes), delta = m_particle.get_dirichlet_alpha(); for (unsigned int regime = 0; regime < number_of_regimes; regime++) { double k_j_s = static_cast< double >(m_particle.get_number_of_right_transitions(process, regime)); full_log_acceptance_probability += gsl_sf_lngamma(r_j * delta + delta) + gsl_sf_lngamma(r_j * delta + k_j_s) - gsl_sf_lngamma(r_j * delta) - gsl_sf_lngamma(r_j * delta + delta + k_j_s); } double r_tilde_j = static_cast< double >(m_particle.get_number_of_unobserved_regimes()[process]); full_log_acceptance_probability += log(r_j) - log(r_tilde_j + 1); } } } else if (u1 < m_ru_k) { for (unsigned int process = 0; process < m_number_of_processes; process++) { if (m_removing_unobserved_regimes[process]) { full_log_acceptance_probability -= log(1 - m_particle.get_rho()); size_t number_of_regimes = 
m_particle.get_number_of_regimes(process); double r_j = static_cast< double >(number_of_regimes), delta = m_particle.get_dirichlet_alpha(); for (unsigned int regime = 0; regime < number_of_regimes; regime++) { double k_j_s = static_cast< double >(m_particle.get_number_of_right_transitions(process, regime)); full_log_acceptance_probability += gsl_sf_lngamma(r_j * delta - delta) + gsl_sf_lngamma(r_j * delta + k_j_s) - gsl_sf_lngamma(r_j * delta) - gsl_sf_lngamma(r_j * delta - delta + k_j_s); } double r_tilde_j = static_cast< double >(m_particle.get_number_of_unobserved_regimes()[process]); full_log_acceptance_probability += log(r_tilde_j) - log(r_j - 1); } */ #endif
{ "alphanum_fraction": 0.7295249841, "avg_line_length": 68.7592076756, "ext": "h", "hexsha": "d9f3aa8008a7879410be73e7986951a27e3989cd", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "759724c67f12db9155f83dd7dd8f63d526ecbd79", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "AlexanderDBolton/Regimes_RJMCMC", "max_forks_repo_path": "Code/basic_RJMCMC.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "759724c67f12db9155f83dd7dd8f63d526ecbd79", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "AlexanderDBolton/Regimes_RJMCMC", "max_issues_repo_path": "Code/basic_RJMCMC.h", "max_line_length": 1031, "max_stars_count": null, "max_stars_repo_head_hexsha": "759724c67f12db9155f83dd7dd8f63d526ecbd79", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "AlexanderDBolton/Regimes_RJMCMC", "max_stars_repo_path": "Code/basic_RJMCMC.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 52776, "size": 222161 }
#include "example.h"

#include <stdio.h>
#include <gsl/gsl_sf_bessel.h>

/**
 * Evaluates the regular cylindrical Bessel function J0 at @a x and
 * prints the result to stdout in the form "J0(x) = value".
 *
 * @param x  Argument at which to evaluate J0.
 * @return   Always 0 (kept for a C-convention status result).
 */
int print_gsl_sf_bessel_J0 (double x)
{
  const double j0_of_x = gsl_sf_bessel_J0 (x);

  printf ("J0(%g) = %.18e\n", x, j0_of_x);

  return 0;
}
{ "alphanum_fraction": 0.6565656566, "avg_line_length": 16.5, "ext": "c", "hexsha": "6f2cda51f7e8302ab68c674f165db37779007c4f", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "940e8c78eb9cf2eda1bc69427a6279b8b832bfe1", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Dwoosh/cBinder", "max_forks_repo_path": "tests/simplecases/externaldeps/sources/gsl_dependent/example.c", "max_issues_count": 33, "max_issues_repo_head_hexsha": "940e8c78eb9cf2eda1bc69427a6279b8b832bfe1", "max_issues_repo_issues_event_max_datetime": "2020-01-18T11:12:46.000Z", "max_issues_repo_issues_event_min_datetime": "2019-10-24T09:09:05.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Tetrite/cBinder", "max_issues_repo_path": "tests/simplecases/externaldeps/sources/gsl_dependent/example.c", "max_line_length": 36, "max_stars_count": 3, "max_stars_repo_head_hexsha": "940e8c78eb9cf2eda1bc69427a6279b8b832bfe1", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Dwoosh/cBinder", "max_stars_repo_path": "tests/simplecases/externaldeps/sources/gsl_dependent/example.c", "max_stars_repo_stars_event_max_datetime": "2021-05-22T21:18:47.000Z", "max_stars_repo_stars_event_min_datetime": "2019-11-22T20:05:49.000Z", "num_tokens": 73, "size": 198 }
// This file is part of playd.
// playd is licensed under the MIT licence: see LICENSE.txt.

/**
 * @file
 * The RingBuffer class.
 */

#ifndef PLAYD_RING_BUFFER_HPP
#define PLAYD_RING_BUFFER_HPP

#include <atomic>
#include <mutex>
#include <vector>

// NOTE(review): presumably neutralises a 'max' function-style macro
// (e.g. from <windows.h>) before including GSL -- confirm which header
// introduces it on the supported platforms.
#undef max

#include <gsl/gsl>

namespace Playd::Audio
{
/**
 * A concurrent ring buffer.
 *
 * This is not particularly efficient, but does the job for playd.
 * Reads and writes are serialised by separate mutexes (r_lock/w_lock);
 * the shared read-capacity counter 'count' is an atomic.
 */
class RingBuffer
{
public:
	/**
	 * Constructs a RingBuffer.
	 * @param capacity The capacity of the ring buffer, in bytes.
	 */
	explicit RingBuffer(size_t capacity);

	/// Destructs a RingBuffer.
	~RingBuffer() = default;

	/// Deleted copy constructor.
	RingBuffer(const RingBuffer &) = delete;

	/// Deleted copy-assignment.
	RingBuffer &operator=(const RingBuffer &) = delete;

	/**
	 * The current write capacity.
	 * @return The number of samples this ring buffer has space to store.
	 *         (NOTE(review): the buffer stores std::byte, so the unit
	 *         here looks like bytes, not samples -- confirm.)
	 * @see Write
	 */
	size_t WriteCapacity() const;

	/**
	 * The current read capacity.
	 * @return The number of samples available in this ring buffer.
	 * @see Read
	 */
	size_t ReadCapacity() const;

	/**
	 * Writes samples from a span into the ring buffer.
	 *
	 * * Precondition: @a src is a valid span.
	 * * Postcondition: The ringbuffer has been written to with the contents
	 *     of the first WriteCapacity() bytes of @a src.
	 *
	 * @param src The span of bytes to write into the ring buffer.
	 * @return The number of bytes written.
	 * @see WriteCapacity
	 */
	size_t Write(gsl::span<const std::byte> src);

	/**
	 * Reads samples from the ring buffer into an array.
	 * To read one sample, pass a count of 1 and take a pointer to the
	 * sample variable.
	 *
	 * * Precondition: @a dest is a valid span.
	 * * Postcondition: The first ReadCapacity() bytes of @a dest have been
	 *     filled with the appropriate number of bytes from the front of the
	 *     ring buffer.
	 *
	 * @param dest The span of bytes to fill with bytes read from the ring
	 *   buffer.
	 * @return The number of bytes read.
	 * @see ReadCapacity
	 */
	size_t Read(gsl::span<std::byte> dest);

	/// Empties the ring buffer.
	void Flush();

private:
	/// Empties the ring buffer without acquiring locks.
	/// (Callers are expected to hold the appropriate locks already.)
	void FlushInner();

	std::vector<std::byte> buffer; ///< The array used by the ringbuffer.
	std::vector<std::byte>::const_iterator r_it; ///< The read iterator.
	std::vector<std::byte>::iterator w_it; ///< The write iterator.
	std::atomic<size_t> count; ///< The current read capacity.
	// Write capacity is the total buffer capacity minus count.

	std::mutex r_lock; ///< The read lock.
	std::mutex w_lock; ///< The write lock.
};

} // namespace Playd::Audio

#endif // PLAYD_RING_BUFFER_HPP
{ "alphanum_fraction": 0.6870944484, "avg_line_length": 24.990990991, "ext": "h", "hexsha": "422ee667e807b6aca297a7bd3ee26de577d88e44", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2017-02-23T12:41:21.000Z", "max_forks_repo_forks_event_min_datetime": "2016-09-04T23:04:46.000Z", "max_forks_repo_head_hexsha": "dc072e4934acb21b9dddb225818732bd27671ae0", "max_forks_repo_licenses": [ "BSL-1.0", "MIT" ], "max_forks_repo_name": "UniversityRadioYork/ury-playd", "max_forks_repo_path": "src/audio/ringbuffer.h", "max_issues_count": 56, "max_issues_repo_head_hexsha": "dc072e4934acb21b9dddb225818732bd27671ae0", "max_issues_repo_issues_event_max_datetime": "2020-01-28T13:48:14.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-01T19:22:34.000Z", "max_issues_repo_licenses": [ "BSL-1.0", "MIT" ], "max_issues_repo_name": "UniversityRadioYork/ury-playd", "max_issues_repo_path": "src/audio/ringbuffer.h", "max_line_length": 73, "max_stars_count": 5, "max_stars_repo_head_hexsha": "dc072e4934acb21b9dddb225818732bd27671ae0", "max_stars_repo_licenses": [ "BSL-1.0", "MIT" ], "max_stars_repo_name": "UniversityRadioYork/ury-playd", "max_stars_repo_path": "src/audio/ringbuffer.h", "max_stars_repo_stars_event_max_datetime": "2017-08-19T20:01:51.000Z", "max_stars_repo_stars_event_min_datetime": "2015-02-11T20:33:45.000Z", "num_tokens": 702, "size": 2774 }
#ifndef BIO_GSL_H_
#define BIO_GSL_H_

#include "bio/defs.h"
#include "bio/raii.h"

#include <gsl/gsl_integration.h>

BIO_NS_START

// One-time initialisation hook for the GSL usage in this library
// (implementation elsewhere; presumably installs an error handler --
// TODO confirm against the .cpp).
void gsl_init();

// Explicit specialisation so an RAII-wrapped integration workspace is
// released on destruction (presumably via
// gsl_integration_workspace_free -- defined out of line).
template <>
RAII<gsl_integration_workspace *>::~RAII();

// Underflow-safe exponential: gsl_sf_exp() errors out for arguments
// below GSL_LOG_DBL_MIN, so clamp those to 0.0 instead.
#define BIO_GSL_EXP(x) ((x) < GSL_LOG_DBL_MIN ? 0.0 : gsl_sf_exp(x))

BIO_NS_END

#endif //BIO_GSL_H_
{ "alphanum_fraction": 0.7211538462, "avg_line_length": 12.48, "ext": "h", "hexsha": "5c4d82136d375d8142b8785c1e77faa26b4ff2b4", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "JohnReid/biopsy", "max_forks_repo_path": "C++/include/bio/gsl.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "JohnReid/biopsy", "max_issues_repo_path": "C++/include/bio/gsl.h", "max_line_length": 68, "max_stars_count": null, "max_stars_repo_head_hexsha": "1eeb714ba5b53f2ecf776d865d32e2078cbc0338", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "JohnReid/biopsy", "max_stars_repo_path": "C++/include/bio/gsl.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 102, "size": 312 }
/* linalg/choleskyc.c
 *
 * Copyright (C) 2007 Patrick Alken
 * Copyright (C) 2010 Huan Wu (gsl_linalg_complex_cholesky_invert)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <config.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_complex.h>
#include <gsl/gsl_complex_math.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_errno.h>

/*
 * This module contains routines related to the Cholesky decomposition
 * of a complex Hermitian positive definite matrix.
 */

/* In-place conjugation helper, defined at the bottom of this file. */
static void cholesky_complex_conj_vector(gsl_vector_complex *v);

/*
gsl_linalg_complex_cholesky_decomp()
  Perform the Cholesky decomposition on a Hermitian positive definite
matrix. See Golub & Van Loan, "Matrix Computations" (3rd ed),
algorithm 4.2.2.

Inputs: A - (input/output) complex positive definite matrix

Return: success or error

The lower triangle of A is overwritten with the Cholesky decomposition
*/

int
gsl_linalg_complex_cholesky_decomp(gsl_matrix_complex *A)
{
  const size_t N = A->size1;

  if (N != A->size2)
    {
      GSL_ERROR("cholesky decomposition requires square matrix", GSL_ENOTSQR);
    }
  else
    {
      size_t i, j;
      gsl_complex z;
      double ajj;

      for (j = 0; j < N; ++j)
        {
          /* ajj = A(j,j) - sum_{k<j} |L(j,k)|^2; the diagonal of a
           * Hermitian matrix is real, so only the real part is used. */
          z = gsl_matrix_complex_get(A, j, j);
          ajj = GSL_REAL(z);

          if (j > 0)
            {
              gsl_vector_complex_const_view aj =
                gsl_matrix_complex_const_subrow(A, j, 0, j);

              gsl_blas_zdotc(&aj.vector, &aj.vector, &z);
              ajj -= GSL_REAL(z);
            }

          if (ajj <= 0.0)
            {
              /* a non-positive pivot means A is not positive definite */
              GSL_ERROR("matrix is not positive definite", GSL_EDOM);
            }

          ajj = sqrt(ajj);
          GSL_SET_COMPLEX(&z, ajj, 0.0);
          gsl_matrix_complex_set(A, j, j, z);

          if (j < N - 1)
            {
              /* update column j below the diagonal:
               * av <- (av - A[j+1:,0:j] * conj(A[j,0:j])) / L(j,j) */
              gsl_vector_complex_view av =
                gsl_matrix_complex_subcolumn(A, j, j + 1, N - j - 1);

              if (j > 0)
                {
                  gsl_vector_complex_view aj =
                    gsl_matrix_complex_subrow(A, j, 0, j);
                  gsl_matrix_complex_view am =
                    gsl_matrix_complex_submatrix(A, j + 1, 0, N - j - 1, j);

                  /* conjugate, do the gemv, then undo the conjugation so
                   * row j is left unmodified */
                  cholesky_complex_conj_vector(&aj.vector);

                  gsl_blas_zgemv(CblasNoTrans,
                                 GSL_COMPLEX_NEGONE,
                                 &am.matrix,
                                 &aj.vector,
                                 GSL_COMPLEX_ONE,
                                 &av.vector);

                  cholesky_complex_conj_vector(&aj.vector);
                }

              gsl_blas_zdscal(1.0 / ajj, &av.vector);
            }
        }

      /* Now store L^H in upper triangle */
      for (i = 1; i < N; ++i)
        {
          for (j = 0; j < i; ++j)
            {
              z = gsl_matrix_complex_get(A, i, j);
              gsl_matrix_complex_set(A, j, i, gsl_complex_conjugate(z));
            }
        }

      return GSL_SUCCESS;
    }
} /* gsl_linalg_complex_cholesky_decomp() */

/*
gsl_linalg_complex_cholesky_solve()
  Solve A x = b where A is in cholesky form
*/

int
gsl_linalg_complex_cholesky_solve (const gsl_matrix_complex * cholesky,
                                   const gsl_vector_complex * b,
                                   gsl_vector_complex * x)
{
  if (cholesky->size1 != cholesky->size2)
    {
      GSL_ERROR ("cholesky matrix must be square", GSL_ENOTSQR);
    }
  else if (cholesky->size1 != b->size)
    {
      GSL_ERROR ("matrix size must match b size", GSL_EBADLEN);
    }
  else if (cholesky->size2 != x->size)
    {
      GSL_ERROR ("matrix size must match solution size", GSL_EBADLEN);
    }
  else
    {
      gsl_vector_complex_memcpy (x, b);

      /* solve for y using forward-substitution, L y = b */
      gsl_blas_ztrsv (CblasLower, CblasNoTrans, CblasNonUnit, cholesky, x);

      /* perform back-substitution, L^H x = y */
      gsl_blas_ztrsv (CblasLower, CblasConjTrans, CblasNonUnit, cholesky, x);

      return GSL_SUCCESS;
    }
} /* gsl_linalg_complex_cholesky_solve() */

/*
gsl_linalg_complex_cholesky_svx()
  Solve A x = b in place where A is in cholesky form
*/

int
gsl_linalg_complex_cholesky_svx (const gsl_matrix_complex * cholesky,
                                 gsl_vector_complex * x)
{
  if (cholesky->size1 != cholesky->size2)
    {
      GSL_ERROR ("cholesky matrix must be square", GSL_ENOTSQR);
    }
  else if (cholesky->size2 != x->size)
    {
      GSL_ERROR ("matrix size must match solution size", GSL_EBADLEN);
    }
  else
    {
      /* solve for y using forward-substitution, L y = b */
      gsl_blas_ztrsv (CblasLower, CblasNoTrans, CblasNonUnit, cholesky, x);

      /* perform back-substitution, L^H x = y */
      gsl_blas_ztrsv (CblasLower, CblasConjTrans, CblasNonUnit, cholesky, x);

      return GSL_SUCCESS;
    }
} /* gsl_linalg_complex_cholesky_svx() */

/******************************************************************************

gsl_linalg_complex_cholesky_invert()
  Compute the inverse of an Hermitian positive definite matrix in
Cholesky form.

Inputs: LLT    - matrix in cholesky form on input
                 A^{-1} = L^{-H} L^{-1} on output

Return: success or error

******************************************************************************/

int
gsl_linalg_complex_cholesky_invert(gsl_matrix_complex * LLT)
{
  if (LLT->size1 != LLT->size2)
    {
      GSL_ERROR ("cholesky matrix must be square", GSL_ENOTSQR);
    }
  else
    {
      size_t N = LLT->size1;
      size_t i, j;
      gsl_vector_complex_view v1;

      /* invert the lower triangle of LLT, processing columns from the
       * last to the first */
      for (i = 0; i < N; ++i)
        {
          double ajj;
          gsl_complex z;

          j = N - i - 1;

          /* replace the diagonal entry with its reciprocal */
          {
            gsl_complex z0 = gsl_matrix_complex_get(LLT, j, j);
            ajj = 1.0 / GSL_REAL(z0);
          }

          GSL_SET_COMPLEX(&z, ajj, 0.0);
          gsl_matrix_complex_set(LLT, j, j, z);

          /* re-read the (now inverted) diagonal and negate it; this
           * -1/L(j,j) is the scale applied to the column update below */
          {
            gsl_complex z1 = gsl_matrix_complex_get(LLT, j, j);
            ajj = -GSL_REAL(z1);
          }

          if (j < N - 1)
            {
              gsl_matrix_complex_view m;

              m = gsl_matrix_complex_submatrix(LLT, j + 1, j + 1,
                                               N - j - 1, N - j - 1);
              v1 = gsl_matrix_complex_subcolumn(LLT, j, j + 1, N - j - 1);

              gsl_blas_ztrmv(CblasLower, CblasNoTrans, CblasNonUnit,
                             &m.matrix, &v1.vector);

              gsl_blas_zdscal(ajj, &v1.vector);
            }
        } /* for (i = 0; i < N; ++i) */

      /*
       * The lower triangle of LLT now contains L^{-1}. Now compute
       * A^{-1} = L^{-H} L^{-1}
       *
       * The (ij) element of A^{-1} is column i of conj(L^{-1}) dotted into
       * column j of L^{-1}
       */
      for (i = 0; i < N; ++i)
        {
          gsl_complex sum;

          for (j = i + 1; j < N; ++j)
            {
              gsl_vector_complex_view v2;

              v1 = gsl_matrix_complex_subcolumn(LLT, i, j, N - j);
              v2 = gsl_matrix_complex_subcolumn(LLT, j, j, N - j);

              /* compute Ainv[i,j] = sum_k{conj(Linv[k,i]) * Linv[k,j]} */
              gsl_blas_zdotc(&v1.vector, &v2.vector, &sum);

              /* store in upper triangle */
              gsl_matrix_complex_set(LLT, i, j, sum);
            }

          /* now compute the diagonal element */
          v1 = gsl_matrix_complex_subcolumn(LLT, i, i, N - i);
          gsl_blas_zdotc(&v1.vector, &v1.vector, &sum);
          gsl_matrix_complex_set(LLT, i, i, sum);
        }

      /* copy the Hermitian upper triangle to the lower triangle */
      for (j = 1; j < N; j++)
        {
          for (i = 0; i < j; i++)
            {
              gsl_complex z = gsl_matrix_complex_get(LLT, i, j);
              gsl_matrix_complex_set(LLT, j, i, gsl_complex_conjugate(z));
            }
        }

      return GSL_SUCCESS;
    }
} /* gsl_linalg_complex_cholesky_invert() */

/********************************************
 *           INTERNAL ROUTINES              *
 ********************************************/

/* Conjugate every element of v in place (used to form conj(row) for
 * the Hermitian column update in the decomposition). */
static void
cholesky_complex_conj_vector(gsl_vector_complex *v)
{
  size_t i;

  for (i = 0; i < v->size; ++i)
    {
      gsl_complex z = gsl_vector_complex_get(v, i);
      gsl_vector_complex_set(v, i, gsl_complex_conjugate(z));
    }
} /* cholesky_complex_conj_vector() */
{ "alphanum_fraction": 0.5488241032, "avg_line_length": 28.5665634675, "ext": "c", "hexsha": "9cff18f5fbc434192ea3a9c40ab6f8ef33d9f7bb", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2015-10-02T01:32:59.000Z", "max_forks_repo_forks_event_min_datetime": "2015-10-02T01:32:59.000Z", "max_forks_repo_head_hexsha": "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "Brian-ning/HMNE", "max_forks_repo_path": "Source/BaselineMethods/MNE/C++/gsl-2.4/linalg/choleskyc.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "Brian-ning/HMNE", "max_issues_repo_path": "Source/BaselineMethods/MNE/C++/gsl-2.4/linalg/choleskyc.c", "max_line_length": 81, "max_stars_count": 14, "max_stars_repo_head_hexsha": "857b6ee8866a2950aa5721d575d2d7d0797c4302", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "peterahrens/FillEstimationIPDPS2017", "max_stars_repo_path": "gsl-2.4/linalg/choleskyc.c", "max_stars_repo_stars_event_max_datetime": "2021-06-10T11:31:28.000Z", "max_stars_repo_stars_event_min_datetime": "2015-12-18T18:09:25.000Z", "num_tokens": 2465, "size": 9227 }
#pragma once #include <nlopt.h> #include <mex.h> struct mexObjectiveFunction { static double obj_fun(unsigned n, const double *x, double *gradient, void *d_); static void precond_fun(unsigned n, const double *x, const double *v, double *vpre, void *f_data); mxArray *prhs[2]; // feval mexMatlabCall input arguments for objective function evaluation nlopt_opt &opt; mxArray *hessmult_args[3]; // Hv = HessMultFcn(x,v) mxArray *outfun_args[4]; // feval mexMatlabCall input arguments for OutputFun function evaluation mxArray *lasterror; // trapped MException bool stop; // true if user issued a stop mexObjectiveFunction(nlopt_opt & optin, mxArray * mxFun, mxArray * mxHessMultFcn, mxArray * mxOutputFun); ~mexObjectiveFunction(); double evalFun(unsigned n, const double *x, double *gradient); void evalHessMultFcn(unsigned n, const double *x, const double *v, double *vpre); bool evalOutputFun(bool init); mxArray *evalGrad(mxArray *x); private: static mxArray *create_optimvalues(); void set_outfun_args(const char *state, mxArray *mxFval, mxArray *mxGradient = NULL); bool call_matlab_feval_with_trap(int nlhs, mxArray *plhs[], int nrhs, mxArray *prhs[]); };
{ "alphanum_fraction": 0.7219078416, "avg_line_length": 41.2333333333, "ext": "h", "hexsha": "7953fcf9f038cd025ca1135acf22c868cc1eae4f", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2022-02-23T02:23:20.000Z", "max_forks_repo_forks_event_min_datetime": "2019-04-23T09:52:50.000Z", "max_forks_repo_head_hexsha": "19d3de4d2d3ad80a247dfc95fb1e43faae92a19f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "hokiedsp/matlab-nlopt", "max_forks_repo_path": "+nlopt/@options/mexObjectiveFunction.h", "max_issues_count": 1, "max_issues_repo_head_hexsha": "19d3de4d2d3ad80a247dfc95fb1e43faae92a19f", "max_issues_repo_issues_event_max_datetime": "2020-09-25T01:13:12.000Z", "max_issues_repo_issues_event_min_datetime": "2020-09-22T03:36:01.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "hokiedsp/matlab-nlopt", "max_issues_repo_path": "+nlopt/@options/mexObjectiveFunction.h", "max_line_length": 107, "max_stars_count": 3, "max_stars_repo_head_hexsha": "19d3de4d2d3ad80a247dfc95fb1e43faae92a19f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "hokiedsp/matlab-nlopt", "max_stars_repo_path": "+nlopt/@options/mexObjectiveFunction.h", "max_stars_repo_stars_event_max_datetime": "2021-08-02T05:57:14.000Z", "max_stars_repo_stars_event_min_datetime": "2018-07-30T06:35:32.000Z", "num_tokens": 334, "size": 1237 }
#include <stdio.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_odeiv.h> #include "Effective_Potential.h" #define PI 3.14159265 #define G 4.30091252525*pow(10,-3) //grav constant, in (parsec*km^2)/(Ms*sec^2) #define c 300000 //speed of light, km/sec void Geodesic_Orbit(int* time_size, double M, double periapsis, double e) { int dimension = 2; /* number of differential equations */ double eps_abs = 1.e-8; /* absolute error requested */ double eps_rel = 1.e-10; /* relative error requested */ /* define the type of routine for making steps: */ const gsl_odeiv_step_type *type_ptr = gsl_odeiv_step_rkf45; /* allocate/initialize the stepper, the control function, and the evolution function. */ gsl_odeiv_step *step_ptr = gsl_odeiv_step_alloc (type_ptr, dimension); gsl_odeiv_control *control_ptr = gsl_odeiv_control_y_new (eps_abs, eps_rel); gsl_odeiv_evolve *evolve_ptr = gsl_odeiv_evolve_alloc (dimension); gsl_odeiv_system my_system; /* structure with the rhs function, etc. */ double E = E_r(M, periapsis, e); double L = L_r(e,periapsis,M); double r[*time_size]; /* current solution vector */ double T, T_next; /* current and next independent variable */ double Tmin, Tmax, delta_t; /* range of t and step size for output */ double h = 1e-6; /* starting step size for ode solver */ }
{ "alphanum_fraction": 0.676975945, "avg_line_length": 35.487804878, "ext": "c", "hexsha": "fb8bdc74b213bf59fa95d6e21737f4d590c14d67", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5914c5120ff2340914641b3e475e33392eeb7cbb", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ladanuzhna/Black-Holes-simulations", "max_forks_repo_path": "Geodesic_Orbit.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "5914c5120ff2340914641b3e475e33392eeb7cbb", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ladanuzhna/Black-Holes-simulations", "max_issues_repo_path": "Geodesic_Orbit.c", "max_line_length": 81, "max_stars_count": null, "max_stars_repo_head_hexsha": "5914c5120ff2340914641b3e475e33392eeb7cbb", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ladanuzhna/Black-Holes-simulations", "max_stars_repo_path": "Geodesic_Orbit.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 413, "size": 1455 }
/* specfunc/coulomb.c
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000 Gerard Jungman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/* Author:  G. Jungman */

/* Evaluation of Coulomb wave functions F_L(eta, x), G_L(eta, x),
 * and their derivatives. A combination of Steed's method, asymptotic
 * results, and power series.
 *
 * Steed's method:
 *  [Barnett, CPC 21, 297 (1981)]
 * Power series and other methods:
 *  [Biedenharn et al., PR 97, 542 (1954)]
 *  [Bardin et al., CPC 3, 73 (1972)]
 *  [Abad+Sesma, CPC 71, 110 (1992)]
 */

#include <config.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_sf_exp.h>
#include <gsl/gsl_sf_psi.h>
#include <gsl/gsl_sf_airy.h>
#include <gsl/gsl_sf_pow_int.h>
#include <gsl/gsl_sf_gamma.h>
#include <gsl/gsl_sf_coulomb.h>

#include "error.h"

/* the L=0 normalization constant
 * [Abramowitz+Stegun 14.1.8]
 *
 * C0^2 = 2 pi eta / (e^{2 pi eta} - 1), computed below via expm1.
 */
static
double
C0sq(double eta)
{
  double twopieta = 2.0*M_PI*eta;

  if(fabs(eta) < GSL_DBL_EPSILON) {
    /* limit eta -> 0 of the expression below */
    return 1.0;
  }
  else if(twopieta > GSL_LOG_DBL_MAX) {
    /* e^{2 pi eta} would overflow; the true value underflows to 0 */
    return 0.0;
  }
  else {
    gsl_sf_result scale;
    gsl_sf_expm1_e(twopieta, &scale);
    return twopieta/scale.val;
  }
}

/* the full definition of C_L(eta) for any valid L and eta
 * [Abramowitz and Stegun 14.1.7]
 * This depends on the complex gamma function. For large
 * arguments the phase of the complex gamma function is not
 * very accurately determined. However the modulus is, and that
 * is all that we need to calculate C_L.
 *
 * This is not valid for L <= -3/2 or L = -1.
 */
static
int
CLeta(double L, double eta, gsl_sf_result * result)
{
  gsl_sf_result ln1; /* log of numerator Gamma function */
  gsl_sf_result ln2; /* log of denominator Gamma function */
  double sgn = 1.0;  /* NOTE(review): computed but never used below */
  double arg_val, arg_err;

  if(fabs(eta/(L+1.0)) < GSL_DBL_EPSILON) {
    /* eta negligible: |Gamma(L+1+i eta)| reduces to Gamma(L+1) */
    gsl_sf_lngamma_e(L+1.0, &ln1);
  }
  else {
    gsl_sf_result p1;                 /* phase of numerator Gamma -- not used */
    gsl_sf_lngamma_complex_e(L+1.0, eta, &ln1, &p1); /* should be ok */
  }

  gsl_sf_lngamma_e(2.0*(L+1.0), &ln2);
  if(L < -1.0) sgn = -sgn;

  /* log C_L = L ln 2 - eta pi/2 + ln|Gamma(L+1+i eta)| - ln Gamma(2L+2) */
  arg_val  = L*M_LN2 - 0.5*eta*M_PI + ln1.val - ln2.val;
  arg_err  = ln1.err + ln2.err;
  arg_err += GSL_DBL_EPSILON * (fabs(L*M_LN2) + fabs(0.5*eta*M_PI));

  return gsl_sf_exp_err_e(arg_val, arg_err, result);
}

int
gsl_sf_coulomb_CL_e(double lam, double eta, gsl_sf_result * result)
{
  /* CHECK_POINTER(result) */

  if(lam <= -1.0) {
    DOMAIN_ERROR(result);
  }
  else if(fabs(lam) < GSL_DBL_EPSILON) {
    /* saves a calculation of complex_lngamma(), otherwise not necessary */
    result->val = sqrt(C0sq(eta));
    result->err = 2.0 * GSL_DBL_EPSILON * result->val;
    return GSL_SUCCESS;
  }
  else {
    return CLeta(lam, eta, result);
  }
}

/* cl[0] .. cl[kmax] = C_{lam_min}(eta) .. C_{lam_min+kmax}(eta)
 *
 * Uses the upward recurrence
 *   C_L = C_{L-1} * sqrt(L^2 + eta^2) / (L (2L+1))
 * starting from a direct evaluation at lam_min.
 */
int
gsl_sf_coulomb_CL_array(double lam_min, int kmax, double eta, double * cl)
{
  int k;
  gsl_sf_result cl_0;
  gsl_sf_coulomb_CL_e(lam_min, eta, &cl_0);
  cl[0] = cl_0.val;

  for(k=1; k<=kmax; k++) {
    double L = lam_min + k;
    cl[k] = cl[k-1] * hypot(L, eta)/(L*(2.0*L+1.0));
  }

  return GSL_SUCCESS;
}

/* Evaluate the series for Phi_L(eta,x) and Phi_L*(eta,x)
 * [Abramowitz+Stegun 14.1.5]
 * [Abramowitz+Stegun 14.1.13]
 *
 * The sequence of coefficients A_k^L is
 * manifestly well-controlled for L >= -1/2
 * and eta < 10.
 *
 * This makes sense since this is the region
 * away from threshold, and you expect
 * the evaluation to become easier as you
 * get farther from threshold.
 *
 * Empirically, this is quite well-behaved for
 *   L >= -1/2
 *   eta < 10
 *   x   < 10
 */
#if 0
static
int
coulomb_Phi_series(const double lam, const double eta, const double x,
                   double * result, double * result_star)
{
  int kmin = 5;
  int kmax = 200;
  int k;
  double Akm2 = 1.0;
  double Akm1 = eta/(lam+1.0);
  double Ak;
  double xpow = x;
  double sum  = Akm2 + Akm1*x;
  double sump = (lam+1.0)*Akm2 + (lam+2.0)*Akm1*x;
  double prev_abs_del   = fabs(Akm1*x);
  double prev_abs_del_p = (lam+2.0) * prev_abs_del;

  for(k=2; k<kmax; k++) {
    double del;
    double del_p;
    double abs_del;
    double abs_del_p;

    /* three-term recurrence for the A_k coefficients */
    Ak = (2.0*eta*Akm1 - Akm2)/(k*(2.0*lam + 1.0 + k));

    xpow *= x;
    del   = Ak*xpow;
    del_p = (k+lam+1.0)*del;
    sum  += del;
    sump += del_p;

    abs_del   = fabs(del);
    abs_del_p = fabs(del_p);

    if(   abs_del/(fabs(sum)+abs_del) < GSL_DBL_EPSILON
       && prev_abs_del/(fabs(sum)+prev_abs_del) < GSL_DBL_EPSILON
       && abs_del_p/(fabs(sump)+abs_del_p) < GSL_DBL_EPSILON
       && prev_abs_del_p/(fabs(sump)+prev_abs_del_p) < GSL_DBL_EPSILON
       && k > kmin
       ) break;

    /* We need to keep track of the previous delta because when
     * eta is near zero the odd terms of the sum are very small
     * and this could lead to premature termination.
     */
    prev_abs_del   = abs_del;
    prev_abs_del_p = abs_del_p;

    Akm2 = Akm1;
    Akm1 = Ak;
  }

  *result      = sum;
  *result_star = sump;

  if(k==kmax) {
    GSL_ERROR ("error", GSL_EMAXITER);
  }
  else {
    return GSL_SUCCESS;
  }
}
#endif /* 0 */

/* Determine the connection phase, phi_lambda.
 * See coulomb_FG_series() below. We have
 * to be careful about sin(phi)->0. Note that
 * there is an underflow condition for large
 * positive eta in any case.
 */
static
int
coulomb_connection(const double lam, const double eta,
                   double * cos_phi, double * sin_phi)
{
  /* NOTE(review): by C precedence this threshold is
   * (-GSL_LOG_DBL_MIN/2.0)*M_PI - 1.0, not -GSL_LOG_DBL_MIN/(2 pi) --
   * confirm against upstream intent. */
  if(eta > -GSL_LOG_DBL_MIN/2.0*M_PI-1.0) {
    *cos_phi = 1.0;
    *sin_phi = 0.0;
    GSL_ERROR ("error", GSL_EUNDRFLW);
  }
  else if(eta > -GSL_LOG_DBL_EPSILON/(4.0*M_PI)) {
    /* tanh(pi eta) is 1 to machine precision here; expand about that */
    const double eps = 2.0 * exp(-2.0*M_PI*eta);
    const double tpl = tan(M_PI * lam);
    const double dth = eps * tpl / (tpl*tpl + 1.0);
    *cos_phi = -1.0 + 0.5 * dth*dth;
    *sin_phi = -dth;
    return GSL_SUCCESS;
  }
  else {
    double X   = tanh(M_PI * eta) / tan(M_PI * lam);
    double phi = -atan(X) - (lam + 0.5) * M_PI;
    *cos_phi = cos(phi);
    *sin_phi = sin(phi);
    return GSL_SUCCESS;
  }
}

/* Evaluate the Frobenius series for F_lam(eta,x) and G_lam(eta,x).
 * Homegrown algebra. Evaluates the series for F_{lam} and
 * F_{-lam-1}, then uses
 *    G_{lam} = (F_{lam} cos(phi) - F_{-lam-1}) / sin(phi)
 * where
 *    phi = Arg[Gamma[1+lam+I eta]] - Arg[Gamma[-lam + I eta]] - (lam+1/2)Pi
 *        = Arg[Sin[Pi(-lam+I eta)] - (lam+1/2)Pi
 *        = atan2(-cos(lam Pi)sinh(eta Pi), -sin(lam Pi)cosh(eta Pi)) - (lam+1/2)Pi
 *
 *        = -atan(X) - (lam+1/2) Pi,  X = tanh(eta Pi)/tan(lam Pi)
 *
 * Not appropriate for lam <= -1/2, lam = 0, or lam >= 1/2.
 */
/* F and G at fixed lam from two Frobenius series (for F_{lam} and
 * F_{-lam-1}) combined through the connection phase. On underflow in
 * the connection phase, F is zeroed and G signals overflow.
 * Returns GSL_EMAXITER if neither series converged within max_iter.
 */
static
int
coulomb_FG_series(const double lam, const double eta, const double x,
                  gsl_sf_result * F, gsl_sf_result * G)
{
  const int max_iter = 800;
  gsl_sf_result ClamA;
  gsl_sf_result ClamB;
  int stat_A = CLeta(lam, eta, &ClamA);
  int stat_B = CLeta(-lam-1.0, eta, &ClamB);
  const double tlp1 = 2.0*lam + 1.0;
  const double pow_x = pow(x, lam);
  double cos_phi_lam;
  double sin_phi_lam;
  double uA_mm2 = 1.0;                  /* uA sum is for F_{lam} */
  double uA_mm1 = x*eta/(lam+1.0);
  double uA_m;
  double uB_mm2 = 1.0;                  /* uB sum is for F_{-lam-1} */
  double uB_mm1 = -x*eta/lam;
  double uB_m;
  double A_sum = uA_mm2 + uA_mm1;
  double B_sum = uB_mm2 + uB_mm1;
  double A_abs_del_prev = fabs(A_sum);
  double B_abs_del_prev = fabs(B_sum);
  gsl_sf_result FA, FB;
  int m = 2;

  int stat_conn = coulomb_connection(lam, eta, &cos_phi_lam, &sin_phi_lam);

  if(stat_conn == GSL_EUNDRFLW) {
    F->val = 0.0;  /* FIXME: should this be set to Inf too like G? */
    F->err = 0.0;
    OVERFLOW_ERROR(G);
  }

  /* Run both three-term recurrences in lockstep. */
  while(m < max_iter) {
    double abs_dA;
    double abs_dB;
    uA_m = x*(2.0*eta*uA_mm1 - x*uA_mm2)/(m*(m+tlp1));
    uB_m = x*(2.0*eta*uB_mm1 - x*uB_mm2)/(m*(m-tlp1));
    A_sum += uA_m;
    B_sum += uB_m;
    abs_dA = fabs(uA_m);
    abs_dB = fabs(uB_m);
    if(m > 15) {
      /* Don't bother checking until we have gone out a little ways;
       * a minor optimization. Also make sure to check both the
       * current and the previous increment because the odd and even
       * terms of the sum can have very different behaviour, depending
       * on the value of eta.
       */
      double max_abs_dA = GSL_MAX(abs_dA, A_abs_del_prev);
      double max_abs_dB = GSL_MAX(abs_dB, B_abs_del_prev);
      double abs_A = fabs(A_sum);
      double abs_B = fabs(B_sum);
      if(   max_abs_dA/(max_abs_dA + abs_A) < 4.0*GSL_DBL_EPSILON
         && max_abs_dB/(max_abs_dB + abs_B) < 4.0*GSL_DBL_EPSILON
         ) break;
    }
    A_abs_del_prev = abs_dA;
    B_abs_del_prev = abs_dB;
    uA_mm2 = uA_mm1;
    uA_mm1 = uA_m;
    uB_mm2 = uB_mm1;
    uB_mm1 = uB_m;
    m++;
  }

  /* Scale the two series by their normalization constants:
   * FA ~ x^{lam+1}, FB ~ x^{-lam}. */
  FA.val = A_sum * ClamA.val * pow_x * x;
  FA.err = fabs(A_sum) * ClamA.err * pow_x * x + 2.0*GSL_DBL_EPSILON*fabs(FA.val);
  FB.val = B_sum * ClamB.val / pow_x;
  FB.err = fabs(B_sum) * ClamB.err / pow_x + 2.0*GSL_DBL_EPSILON*fabs(FB.val);

  F->val = FA.val;
  F->err = FA.err;

  G->val = (FA.val * cos_phi_lam - FB.val)/sin_phi_lam;
  G->err = (FA.err * fabs(cos_phi_lam) + FB.err)/fabs(sin_phi_lam);

  if(m >= max_iter)
    GSL_ERROR ("error", GSL_EMAXITER);
  else
    return GSL_ERROR_SELECT_2(stat_A, stat_B);
}

/* Evaluate the Frobenius series for F_0(eta,x) and G_0(eta,x).
 * See [Bardin et al., CPC 3, 73 (1972), (14)-(17)];
 * note the misprint in (17): nu_0=1 is correct, not nu_0=0.
 */
/* F and G at lam = 0 from the coupled u_m / nu_m Frobenius series of
 * Bardin et al.; the u series gives F directly and the nu series,
 * together with the 2 eta u log(2x) term, gives G.
 * Returns GSL_EMAXITER if the series fail to converge within max_iter.
 */
static
int
coulomb_FG0_series(const double eta, const double x,
                   gsl_sf_result * F, gsl_sf_result * G)
{
  const int max_iter = 800;
  const double x2  = x*x;
  const double tex = 2.0*eta*x;
  gsl_sf_result C0;
  int stat_CL = CLeta(0.0, eta, &C0);
  gsl_sf_result r1pie;
  int psi_stat = gsl_sf_psi_1piy_e(eta, &r1pie);   /* Re psi(1 + i eta) */
  double u_mm2 = 0.0;  /* u_0 */
  double u_mm1 = x;    /* u_1 */
  double u_m;
  double v_mm2 = 1.0;                              /* nu_0 */
  double v_mm1 = tex*(2.0*M_EULER-1.0+r1pie.val);  /* nu_1 */
  double v_m;
  double u_sum = u_mm2 + u_mm1;
  double v_sum = v_mm2 + v_mm1;
  double u_abs_del_prev = fabs(u_sum);
  double v_abs_del_prev = fabs(v_sum);
  int m = 2;
  double u_sum_err = 2.0 * GSL_DBL_EPSILON * fabs(u_sum);
  double v_sum_err = 2.0 * GSL_DBL_EPSILON * fabs(v_sum);
  double ln2x = log(2.0*x);

  while(m < max_iter) {
    double abs_du;
    double abs_dv;
    double m_mm1 = m*(m-1.0);
    u_m = (tex*u_mm1 - x2*u_mm2)/m_mm1;
    v_m = (tex*v_mm1 - x2*v_mm2 - 2.0*eta*(2*m-1)*u_m)/m_mm1;
    u_sum += u_m;
    v_sum += v_m;
    abs_du = fabs(u_m);
    abs_dv = fabs(v_m);
    u_sum_err += 2.0 * GSL_DBL_EPSILON * abs_du;
    v_sum_err += 2.0 * GSL_DBL_EPSILON * abs_dv;
    if(m > 15) {
      /* Don't bother checking until we have gone out a little ways;
       * a minor optimization. Also make sure to check both the
       * current and the previous increment because the odd and even
       * terms of the sum can have very different behaviour, depending
       * on the value of eta.
       */
      double max_abs_du = GSL_MAX(abs_du, u_abs_del_prev);
      double max_abs_dv = GSL_MAX(abs_dv, v_abs_del_prev);
      double abs_u = fabs(u_sum);
      double abs_v = fabs(v_sum);
      if(   max_abs_du/(max_abs_du + abs_u) < 40.0*GSL_DBL_EPSILON
         && max_abs_dv/(max_abs_dv + abs_v) < 40.0*GSL_DBL_EPSILON
         ) break;
    }
    u_abs_del_prev = abs_du;
    v_abs_del_prev = abs_dv;
    u_mm2 = u_mm1;
    u_mm1 = u_m;
    v_mm2 = v_mm1;
    v_mm1 = v_m;
    m++;
  }

  F->val  = C0.val * u_sum;
  F->err  = C0.err * fabs(u_sum);
  F->err += fabs(C0.val) * u_sum_err;
  F->err += 2.0 * GSL_DBL_EPSILON * fabs(F->val);

  G->val  = (v_sum + 2.0*eta*u_sum * ln2x) / C0.val;
  G->err  = (fabs(v_sum) + fabs(2.0*eta*u_sum * ln2x)) / fabs(C0.val) * fabs(C0.err/C0.val);
  G->err += (v_sum_err + fabs(2.0*eta*u_sum_err*ln2x)) / fabs(C0.val);
  G->err += 2.0 * GSL_DBL_EPSILON * fabs(G->val);

  if(m == max_iter)
    GSL_ERROR ("error", GSL_EMAXITER);
  else
    return GSL_ERROR_SELECT_2(psi_stat, stat_CL);
}

/* Evaluate the Frobenius series for F_{-1/2}(eta,x) and G_{-1/2}(eta,x).
 * Homegrown algebra.
*/ static int coulomb_FGmhalf_series(const double eta, const double x, gsl_sf_result * F, gsl_sf_result * G) { const int max_iter = 800; const double rx = sqrt(x); const double x2 = x*x; const double tex = 2.0*eta*x; gsl_sf_result Cmhalf; int stat_CL = CLeta(-0.5, eta, &Cmhalf); double u_mm2 = 1.0; /* u_0 */ double u_mm1 = tex * u_mm2; /* u_1 */ double u_m; double v_mm2, v_mm1, v_m; double f_sum, g_sum; double tmp1; gsl_sf_result rpsi_1pe; gsl_sf_result rpsi_1p2e; int m = 2; gsl_sf_psi_1piy_e(eta, &rpsi_1pe); gsl_sf_psi_1piy_e(2.0*eta, &rpsi_1p2e); v_mm2 = 2.0*M_EULER - M_LN2 - rpsi_1pe.val + 2.0*rpsi_1p2e.val; v_mm1 = tex*(v_mm2 - 2.0*u_mm2); f_sum = u_mm2 + u_mm1; g_sum = v_mm2 + v_mm1; while(m < max_iter) { double m2 = m*m; u_m = (tex*u_mm1 - x2*u_mm2)/m2; v_m = (tex*v_mm1 - x2*v_mm2 - 2.0*m*u_m)/m2; f_sum += u_m; g_sum += v_m; if( f_sum != 0.0 && g_sum != 0.0 && (fabs(u_m/f_sum) + fabs(v_m/g_sum) < 10.0*GSL_DBL_EPSILON)) break; u_mm2 = u_mm1; u_mm1 = u_m; v_mm2 = v_mm1; v_mm1 = v_m; m++; } F->val = Cmhalf.val * rx * f_sum; F->err = Cmhalf.err * fabs(rx * f_sum) + 2.0*GSL_DBL_EPSILON*fabs(F->val); tmp1 = f_sum*log(x); G->val = -rx*(tmp1 + g_sum)/Cmhalf.val; G->err = fabs(rx)*(fabs(tmp1) + fabs(g_sum))/fabs(Cmhalf.val) * fabs(Cmhalf.err/Cmhalf.val); if(m == max_iter) GSL_ERROR ("error", GSL_EMAXITER); else return stat_CL; } /* Evolve the backwards recurrence for F,F'. 
* * F_{lam-1} = (S_lam F_lam + F_lam') / R_lam * F_{lam-1}' = (S_lam F_{lam-1} - R_lam F_lam) * where * R_lam = sqrt(1 + (eta/lam)^2) * S_lam = lam/x + eta/lam * */ static int coulomb_F_recur(double lam_min, int kmax, double eta, double x, double F_lam_max, double Fp_lam_max, double * F_lam_min, double * Fp_lam_min ) { double x_inv = 1.0/x; double fcl = F_lam_max; double fpl = Fp_lam_max; double lam_max = lam_min + kmax; double lam = lam_max; int k; for(k=kmax-1; k>=0; k--) { double el = eta/lam; double rl = hypot(1.0, el); double sl = el + lam*x_inv; double fc_lm1; fc_lm1 = (fcl*sl + fpl)/rl; fpl = fc_lm1*sl - fcl*rl; fcl = fc_lm1; lam -= 1.0; } *F_lam_min = fcl; *Fp_lam_min = fpl; return GSL_SUCCESS; } /* Evolve the forward recurrence for G,G'. * * G_{lam+1} = (S_lam G_lam - G_lam')/R_lam * G_{lam+1}' = R_{lam+1} G_lam - S_lam G_{lam+1} * * where S_lam and R_lam are as above in the F recursion. */ static int coulomb_G_recur(const double lam_min, const int kmax, const double eta, const double x, const double G_lam_min, const double Gp_lam_min, double * G_lam_max, double * Gp_lam_max ) { double x_inv = 1.0/x; double gcl = G_lam_min; double gpl = Gp_lam_min; double lam = lam_min + 1.0; int k; for(k=1; k<=kmax; k++) { double el = eta/lam; double rl = hypot(1.0, el); double sl = el + lam*x_inv; double gcl1 = (sl*gcl - gpl)/rl; gpl = rl*gcl - sl*gcl1; gcl = gcl1; lam += 1.0; } *G_lam_max = gcl; *Gp_lam_max = gpl; return GSL_SUCCESS; } /* Evaluate the first continued fraction, giving * the ratio F'/F at the upper lambda value. * We also determine the sign of F at that point, * since it is the sign of the last denominator * in the continued fraction. 
 */
/* Modified-Lentz evaluation of CF1 = F'/F at lambda. The sign of F is
 * tracked through the signs of the denominators D; *count reports the
 * number of iterations (used later for error amplification estimates).
 * Returns GSL_ERUNAWAY if pk exceeds lambda + 1 + CF1_abort.
 */
static
int
coulomb_CF1(double lambda,
            double eta, double x,
            double * fcl_sign,
            double * result,
            int * count
            )
{
  const double CF1_small = 1.e-30;
  const double CF1_abort = 1.0e+05;
  const double CF1_acc   = 2.0*GSL_DBL_EPSILON;
  const double x_inv     = 1.0/x;
  const double px        = lambda + 1.0 + CF1_abort;

  double pk = lambda + 1.0;
  double F  = eta/pk + pk*x_inv;
  double D, C;
  double df;

  *fcl_sign = 1.0;
  *count = 0;

  if(fabs(F) < CF1_small) F = CF1_small;  /* Lentz: avoid zero starting value */
  D = 0.0;
  C = F;

  do {
    double pk1 = pk + 1.0;
    double ek  = eta / pk;
    double rk2 = 1.0 + ek*ek;
    double tk  = (pk + pk1)*(x_inv + ek/pk1);
    D   =  tk - rk2 * D;
    C   =  tk - rk2 / C;
    if(fabs(C) < CF1_small) C = CF1_small;
    if(fabs(D) < CF1_small) D = CF1_small;
    D = 1.0/D;
    df = D * C;
    F  = F * df;
    if(D < 0.0) {
      /* sign of result depends on sign of denominator */
      *fcl_sign = - *fcl_sign;
    }
    pk = pk1;
    if( pk > px ) {
      *result = F;  /* best value so far is still reported on runaway */
      GSL_ERROR ("error", GSL_ERUNAWAY);
    }
    ++(*count);
  }
  while(fabs(df-1.0) > CF1_acc);

  *result = F;
  return GSL_SUCCESS;
}

#if 0
/* Retired forward-recurrence implementation of CF1, kept for reference. */
static
int
old_coulomb_CF1(const double lambda,
                double eta, double x,
                double * fcl_sign,
                double * result
                )
{
  const double CF1_abort = 1.e5;
  const double CF1_acc   = 10.0*GSL_DBL_EPSILON;
  const double x_inv     = 1.0/x;
  const double px        = lambda + 1.0 + CF1_abort;

  double pk = lambda + 1.0;

  double D;
  double df;

  double F;
  double p;
  double pk1;
  double ek;

  double fcl = 1.0;

  double tk;

  while(1) {
    ek = eta/pk;
    F = (ek + pk*x_inv)*fcl + (fcl - 1.0)*x_inv;
    pk1 = pk + 1.0;
    if(fabs(eta*x + pk*pk1) > CF1_acc) break;
    fcl = (1.0 + ek*ek)/(1.0 + eta*eta/(pk1*pk1));
    pk = 2.0 + pk;
  }

  D = 1.0/((pk + pk1)*(x_inv + ek/pk1));
  df = -fcl*(1.0 + ek*ek)*D;

  if(fcl != 1.0) fcl = -1.0;
  if(D < 0.0) fcl = -fcl;

  F = F + df;

  p = 1.0;
  do {
    pk = pk1;
    pk1 = pk + 1.0;
    ek  = eta / pk;
    tk  = (pk + pk1)*(x_inv + ek/pk1);
    D   =  tk - D*(1.0+ek*ek);
    if(fabs(D) < sqrt(CF1_acc)) {
      p += 1.0;
      if(p > 2.0) {
        printf("HELP............\n");
      }
    }
    D = 1.0/D;
    if(D < 0.0) {
      /* sign of result depends on sign of denominator */
      fcl = -fcl;
    }
    df = df*(D*tk - 1.0);
    F = F + df;
    if( pk > px ) {
      GSL_ERROR ("error", GSL_ERUNAWAY);
    }
  }
  while(fabs(df) > fabs(F)*CF1_acc);

  *fcl_sign = fcl;
  *result = F;
  return GSL_SUCCESS;
}
#endif /* 0 */


/* Evaluate the second continued fraction to
 * obtain the ratio
 *    (G' + i F')/(G + i F) := P + i Q
 * at the specified lambda value.
 */
/* Complex continued fraction (Steed's CF2) evaluated with explicit
 * real/imaginary arithmetic. On runaway (pk > CF2_abort) the loop is
 * abandoned with status GSL_ERUNAWAY; a small Q relative to P flags
 * GSL_ELOSS. *count reports the iteration count.
 */
static
int
coulomb_CF2(const double lambda, const double eta, const double x,
            double * result_P, double * result_Q, int * count
            )
{
  int status = GSL_SUCCESS;

  const double CF2_acc   = 4.0*GSL_DBL_EPSILON;
  const double CF2_abort = 2.0e+05;

  const double wi    = 2.0*eta;
  const double x_inv = 1.0/x;
  const double e2mm1 = eta*eta + lambda*(lambda + 1.0);

  double ar = -e2mm1;
  double ai =  eta;

  double br =  2.0*(x - eta);
  double bi =  2.0;

  double dr =  br/(br*br + bi*bi);
  double di = -bi/(br*br + bi*bi);

  double dp = -x_inv*(ar*di + ai*dr);
  double dq =  x_inv*(ar*dr - ai*di);

  double A, B, C, D;

  double pk =  0.0;
  double P  =  0.0;
  double Q  =  1.0 - eta*x_inv;

  *count = 0;

  do {
    P += dp;
    Q += dq;
    pk += 2.0;
    ar += pk;
    ai += wi;
    bi += 2.0;
    D  = ar*dr - ai*di + br;
    di = ai*dr + ar*di + bi;
    C  = 1.0/(D*D + di*di);
    dr =  C*D;
    di = -C*di;
    A  = br*dr - bi*di - 1.;
    B  = bi*dr + br*di;
    C  = dp*A - dq*B;
    dq = dp*B + dq*A;
    dp = C;
    if(pk > CF2_abort) {
      status = GSL_ERUNAWAY;
      break;
    }
    ++(*count);
  }
  while(fabs(dp)+fabs(dq) > (fabs(P)+fabs(Q))*CF2_acc);

  if(Q < CF2_abort*GSL_DBL_EPSILON*fabs(P)) {
    status = GSL_ELOSS;
  }

  *result_P = P;
  *result_Q = Q;
  return status;
}


/* WKB evaluation of F, G. Assumes 0 < x < turning point.
 * Overflows are trapped, GSL_EOVRFLW is signalled,
 * and an exponent is returned such that:
 *
 *   result_F = fjwkb * exp(-exponent)
 *   result_G = gjwkb * exp( exponent)
 *
 * See [Biedenharn et al. Phys. Rev. 97, 542-554 (1955), Section IV]
 *
 * Unfortunately, this is not very accurate in general. The
 * test cases typically have 3-4 digits of precision.
One could * argue that this is ok for general use because, for instance, * F is exponentially small in this region and so the absolute * accuracy is still roughly acceptable. But it would be better * to have a systematic method for improving the precision. See * the Abad+Sesma method discussion below. */ static int coulomb_jwkb(const double lam, const double eta, const double x, gsl_sf_result * fjwkb, gsl_sf_result * gjwkb, double * exponent) { const double llp1 = lam*(lam+1.0) + 6.0/35.0; const double llp1_eff = GSL_MAX(llp1, 0.0); const double rho_ghalf = sqrt(x*(2.0*eta - x) + llp1_eff); const double sinh_arg = sqrt(llp1_eff/(eta*eta+llp1_eff)) * rho_ghalf / x; const double sinh_inv = log(sinh_arg + hypot(1.0,sinh_arg)); const double phi = fabs(rho_ghalf - eta*atan2(rho_ghalf,x-eta) - sqrt(llp1_eff) * sinh_inv); const double zeta_half = pow(3.0*phi/2.0, 1.0/3.0); const double prefactor = sqrt(M_PI*phi*x/(6.0 * rho_ghalf)); double F = prefactor * 3.0/zeta_half; double G = prefactor * 3.0/zeta_half; /* Note the sqrt(3) from Bi normalization */ double F_exp; double G_exp; const double airy_scale_exp = phi; gsl_sf_result ai; gsl_sf_result bi; gsl_sf_airy_Ai_scaled_e(zeta_half*zeta_half, GSL_MODE_DEFAULT, &ai); gsl_sf_airy_Bi_scaled_e(zeta_half*zeta_half, GSL_MODE_DEFAULT, &bi); F *= ai.val; G *= bi.val; F_exp = log(F) - airy_scale_exp; G_exp = log(G) + airy_scale_exp; if(G_exp >= GSL_LOG_DBL_MAX) { fjwkb->val = F; gjwkb->val = G; fjwkb->err = 1.0e-3 * fabs(F); /* FIXME: real error here ... could be smaller */ gjwkb->err = 1.0e-3 * fabs(G); *exponent = airy_scale_exp; GSL_ERROR ("error", GSL_EOVRFLW); } else { fjwkb->val = exp(F_exp); gjwkb->val = exp(G_exp); fjwkb->err = 1.0e-3 * fabs(fjwkb->val); gjwkb->err = 1.0e-3 * fabs(gjwkb->val); *exponent = 0.0; return GSL_SUCCESS; } } /* Asymptotic evaluation of F and G below the minimal turning point. * * This is meant to be a drop-in replacement for coulomb_jwkb(). * It uses the expressions in [Abad+Sesma]. 
 This requires some
 * work because I am not sure where it is valid. They mumble
 * something about |x| < |lam|^(-1/2) or 8|eta x| > lam when |x| < 1.
 * This seems true, but I thought the result was based on a uniform
 * expansion and could be controlled by simply using more terms.
 */
#if 0
static
int
coulomb_AS_xlt2eta(const double lam, const double eta, const double x,
                   gsl_sf_result * f_AS, gsl_sf_result * g_AS, double * exponent)
{
  /* no time to do this now... */
}
#endif /* 0 */


/*-*-*-*-*-*-*-*-*-*-*-* Functions with Error Codes *-*-*-*-*-*-*-*-*-*-*-*/

/* Main entry point: F, F', G, G' at lam_F (and lam_G = lam_F - k_lam_G),
 * dispatching on (eta, x) among four strategies:
 *   - x <= 0 or lambdas out of range: domain error;
 *   - x == 0: limiting values (G would be Inf; flagged as domain error);
 *   - small x with moderate eta: Frobenius series at a reduced lambda
 *     plus up/down recurrences;
 *   - x below the turning point (x < 2 eta): WKB approximation;
 *   - otherwise: Steed's method (CF1 + CF2 + recurrences).
 * exp_F/exp_G are scaling exponents, nonzero only on WKB overflow.
 */
int
gsl_sf_coulomb_wave_FG_e(const double eta, const double x,
                         const double lam_F,
                         const int  k_lam_G,      /* lam_G = lam_F - k_lam_G */
                         gsl_sf_result * F, gsl_sf_result * Fp,
                         gsl_sf_result * G, gsl_sf_result * Gp,
                         double * exp_F, double * exp_G)
{
  const double lam_G = lam_F - k_lam_G;

  if(x < 0.0 || lam_F <= -0.5 || lam_G <= -0.5) {
    GSL_SF_RESULT_SET(F,  0.0, 0.0);
    GSL_SF_RESULT_SET(Fp, 0.0, 0.0);
    GSL_SF_RESULT_SET(G,  0.0, 0.0);
    GSL_SF_RESULT_SET(Gp, 0.0, 0.0);
    *exp_F = 0.0;
    *exp_G = 0.0;
    GSL_ERROR ("domain error", GSL_EDOM);
  }
  else if(x == 0.0) {
    gsl_sf_result C0;
    CLeta(0.0, eta, &C0);
    GSL_SF_RESULT_SET(F,  0.0, 0.0);
    GSL_SF_RESULT_SET(Fp, 0.0, 0.0);
    GSL_SF_RESULT_SET(G,  0.0, 0.0); /* FIXME: should be Inf */
    GSL_SF_RESULT_SET(Gp, 0.0, 0.0); /* FIXME: should be Inf */
    *exp_F = 0.0;
    *exp_G = 0.0;
    if(lam_F == 0.0){
      GSL_SF_RESULT_SET(Fp, C0.val, C0.err);
    }
    if(lam_G == 0.0) {
      GSL_SF_RESULT_SET(Gp, 1.0/C0.val, fabs(C0.err/C0.val)/fabs(C0.val));
    }
    GSL_ERROR ("domain error", GSL_EDOM);
    /* After all, since we are asking for G, this is a domain error... */
  }
  else if(x < 1.2 && 2.0*M_PI*eta < 0.9*(-GSL_LOG_DBL_MIN) && fabs(eta*x) < 10.0) {
    /* Reduce to a small lambda value and use the series
     * representations for F and G. We cannot allow eta to
     * be large and positive because the connection formula
     * for G_lam is badly behaved due to an underflow in sin(phi_lam)
     * [see coulomb_FG_series() and coulomb_connection() above].
     * Note that large negative eta is ok however.
     */
    const double SMALL = GSL_SQRT_DBL_EPSILON;
    const int N    = (int)(lam_F + 0.5);
    const int span = GSL_MAX(k_lam_G, N);
    const double lam_min = lam_F - N;    /* -1/2 <= lam_min < 1/2 */
    double F_lam_F, Fp_lam_F;
    double G_lam_G, Gp_lam_G;
    double F_lam_F_err, Fp_lam_F_err;
    double Fp_over_F_lam_F;
    double F_sign_lam_F;
    double F_lam_min_unnorm, Fp_lam_min_unnorm;
    double Fp_over_F_lam_min;
    gsl_sf_result F_lam_min;
    gsl_sf_result G_lam_min, Gp_lam_min;
    double F_scale;
    double Gerr_frac;
    double F_scale_frac_err;
    double F_unnorm_frac_err;

    /* Determine F'/F at lam_F. */
    int CF1_count;
    int stat_CF1 = coulomb_CF1(lam_F, eta, x, &F_sign_lam_F, &Fp_over_F_lam_F, &CF1_count);

    int stat_ser;
    int stat_Fr;
    int stat_Gr;

    /* Recurse down with unnormalized F,F' values. */
    F_lam_F  = SMALL;
    Fp_lam_F = Fp_over_F_lam_F * F_lam_F;
    if(span != 0) {
      stat_Fr = coulomb_F_recur(lam_min, span, eta, x,
                                F_lam_F, Fp_lam_F,
                                &F_lam_min_unnorm, &Fp_lam_min_unnorm
                                );
    }
    else {
      F_lam_min_unnorm  =  F_lam_F;
      Fp_lam_min_unnorm = Fp_lam_F;
      stat_Fr = GSL_SUCCESS;
    }

    /* Determine F and G at lam_min. */
    if(lam_min == -0.5) {
      stat_ser = coulomb_FGmhalf_series(eta, x, &F_lam_min, &G_lam_min);
    }
    else if(lam_min == 0.0) {
      stat_ser = coulomb_FG0_series(eta, x, &F_lam_min, &G_lam_min);
    }
    else if(lam_min == 0.5) {
      /* This cannot happen. */
      /* NOTE(review): if it ever did, G_lam_G/Gp_lam_G are read here
       * uninitialized; the branch only exists to raise GSL_ESANITY. */
      F->val  = F_lam_F;
      F->err  = 2.0 * GSL_DBL_EPSILON * fabs(F->val);
      Fp->val = Fp_lam_F;
      Fp->err = 2.0 * GSL_DBL_EPSILON * fabs(Fp->val);
      G->val  = G_lam_G;
      G->err  = 2.0 * GSL_DBL_EPSILON * fabs(G->val);
      Gp->val = Gp_lam_G;
      Gp->err = 2.0 * GSL_DBL_EPSILON * fabs(Gp->val);
      *exp_F = 0.0;
      *exp_G = 0.0;
      GSL_ERROR ("error", GSL_ESANITY);
    }
    else {
      stat_ser = coulomb_FG_series(lam_min, eta, x, &F_lam_min, &G_lam_min);
    }

    /* Determine remaining quantities.
     */
    Fp_over_F_lam_min = Fp_lam_min_unnorm / F_lam_min_unnorm;
    /* Gp from the Wronskian: F' G - F G' = 1 */
    Gp_lam_min.val  = Fp_over_F_lam_min*G_lam_min.val - 1.0/F_lam_min.val;
    Gp_lam_min.err  = fabs(Fp_over_F_lam_min)*G_lam_min.err;
    Gp_lam_min.err += fabs(1.0/F_lam_min.val) * fabs(F_lam_min.err/F_lam_min.val);
    F_scale = F_lam_min.val / F_lam_min_unnorm;

    /* Apply scale to the original F,F' values. */
    F_scale_frac_err  = fabs(F_lam_min.err/F_lam_min.val);
    F_unnorm_frac_err = 2.0*GSL_DBL_EPSILON*(CF1_count+span+1);
    F_lam_F     *= F_scale;
    F_lam_F_err  = fabs(F_lam_F) * (F_unnorm_frac_err + F_scale_frac_err);
    Fp_lam_F    *= F_scale;
    Fp_lam_F_err = fabs(Fp_lam_F) * (F_unnorm_frac_err + F_scale_frac_err);

    /* Recurse up to get the required G,G' values. */
    stat_Gr = coulomb_G_recur(lam_min, GSL_MAX(N-k_lam_G,0), eta, x,
                              G_lam_min.val, Gp_lam_min.val,
                              &G_lam_G, &Gp_lam_G
                              );

    F->val  = F_lam_F;
    F->err  = F_lam_F_err;
    F->err += 2.0 * GSL_DBL_EPSILON * fabs(F_lam_F);

    Fp->val  = Fp_lam_F;
    Fp->err  = Fp_lam_F_err;
    Fp->err += 2.0 * GSL_DBL_EPSILON * fabs(Fp_lam_F);

    Gerr_frac = fabs(G_lam_min.err/G_lam_min.val) + fabs(Gp_lam_min.err/Gp_lam_min.val);

    G->val  = G_lam_G;
    G->err  = Gerr_frac * fabs(G_lam_G);
    G->err += 2.0 * (CF1_count+1) * GSL_DBL_EPSILON * fabs(G->val);

    Gp->val  = Gp_lam_G;
    Gp->err  = Gerr_frac * fabs(Gp->val);
    Gp->err += 2.0 * (CF1_count+1) * GSL_DBL_EPSILON * fabs(Gp->val);

    *exp_F = 0.0;
    *exp_G = 0.0;

    return GSL_ERROR_SELECT_4(stat_ser, stat_CF1, stat_Fr, stat_Gr);
  }
  else if(x < 2.0*eta) {
    /* Use WKB approximation to obtain F and G at the two
     * lambda values, and use the Wronskian and the
     * continued fractions for F'/F to obtain F' and G'.
     */
    gsl_sf_result F_lam_F, G_lam_F;
    gsl_sf_result F_lam_G, G_lam_G;
    double exp_lam_F, exp_lam_G;
    int stat_lam_F;
    int stat_lam_G;
    int stat_CF1_lam_F;
    int stat_CF1_lam_G;
    int CF1_count;
    double Fp_over_F_lam_F;
    double Fp_over_F_lam_G;
    double F_sign_lam_F;
    double F_sign_lam_G;

    stat_lam_F = coulomb_jwkb(lam_F, eta, x, &F_lam_F, &G_lam_F, &exp_lam_F);
    if(k_lam_G == 0) {
      /* lam_G == lam_F: reuse results instead of re-evaluating */
      stat_lam_G = stat_lam_F;
      F_lam_G = F_lam_F;
      G_lam_G = G_lam_F;
      exp_lam_G = exp_lam_F;
    }
    else {
      stat_lam_G = coulomb_jwkb(lam_G, eta, x, &F_lam_G, &G_lam_G, &exp_lam_G);
    }

    stat_CF1_lam_F = coulomb_CF1(lam_F, eta, x, &F_sign_lam_F, &Fp_over_F_lam_F, &CF1_count);
    if(k_lam_G == 0) {
      stat_CF1_lam_G = stat_CF1_lam_F;
      F_sign_lam_G = F_sign_lam_F;
      Fp_over_F_lam_G = Fp_over_F_lam_F;
    }
    else {
      stat_CF1_lam_G = coulomb_CF1(lam_G, eta, x, &F_sign_lam_G, &Fp_over_F_lam_G, &CF1_count);
    }

    F->val = F_lam_F.val;
    F->err = F_lam_F.err;
    G->val = G_lam_G.val;
    G->err = G_lam_G.err;

    Fp->val  = Fp_over_F_lam_F * F_lam_F.val;
    Fp->err  = fabs(Fp_over_F_lam_F) * F_lam_F.err;
    Fp->err += 2.0*GSL_DBL_EPSILON*fabs(Fp->val);

    /* Gp from the Wronskian, as in the series branch */
    Gp->val  = Fp_over_F_lam_G * G_lam_G.val - 1.0/F_lam_G.val;
    Gp->err  = fabs(Fp_over_F_lam_G) * G_lam_G.err;
    Gp->err += fabs(1.0/F_lam_G.val) * fabs(F_lam_G.err/F_lam_G.val);

    *exp_F = exp_lam_F;
    *exp_G = exp_lam_G;

    if(stat_lam_F == GSL_EOVRFLW || stat_lam_G == GSL_EOVRFLW) {
      GSL_ERROR ("overflow", GSL_EOVRFLW);
    }
    else {
      return GSL_ERROR_SELECT_2(stat_lam_F, stat_lam_G);
    }
  }
  else {
    /* x > 2 eta, so we know that we can find a lambda value such
     * that x is above the turning point. We do this, evaluate
     * using Steed's method at that oscillatory point, then
     * use recursion on F and G to obtain the required values.
     *
     * lam_0 = a value of lambda such that x is below the turning point
     * lam_min = minimum of lam_0 and the requested lam_G, since
     *           we must go at least as low as lam_G
     */
    const double SMALL = GSL_SQRT_DBL_EPSILON;
    const double C = sqrt(1.0 + 4.0*x*(x-2.0*eta));
    const int N = ceil(lam_F - C + 0.5);
    const double lam_0   = lam_F - GSL_MAX(N, 0);
    const double lam_min = GSL_MIN(lam_0, lam_G);
    double F_lam_F, Fp_lam_F;
    double G_lam_G, Gp_lam_G;
    double F_lam_min_unnorm, Fp_lam_min_unnorm;
    double F_lam_min, Fp_lam_min;
    double G_lam_min, Gp_lam_min;
    double Fp_over_F_lam_F;
    double Fp_over_F_lam_min;
    double F_sign_lam_F, F_sign_lam_min;
    double P_lam_min, Q_lam_min;
    double alpha;
    double gamma;
    double F_scale;

    int CF1_count;
    int CF2_count;
    int stat_CF1 = coulomb_CF1(lam_F, eta, x, &F_sign_lam_F, &Fp_over_F_lam_F, &CF1_count);
    int stat_CF2;
    int stat_Fr;
    int stat_Gr;

    int F_recur_count;
    int G_recur_count;

    double err_amplify;

    F_lam_F  = F_sign_lam_F * SMALL;  /* unnormalized */
    Fp_lam_F = Fp_over_F_lam_F * F_lam_F;

    /* Backward recurrence to get F,Fp at lam_min */
    F_recur_count = GSL_MAX(k_lam_G, N);
    stat_Fr = coulomb_F_recur(lam_min, F_recur_count, eta, x,
                              F_lam_F, Fp_lam_F,
                              &F_lam_min_unnorm, &Fp_lam_min_unnorm
                              );
    Fp_over_F_lam_min = Fp_lam_min_unnorm / F_lam_min_unnorm;

    /* Steed evaluation to complete evaluation of F,Fp,G,Gp at lam_min */
    stat_CF2 = coulomb_CF2(lam_min, eta, x, &P_lam_min, &Q_lam_min, &CF2_count);
    alpha = Fp_over_F_lam_min - P_lam_min;
    gamma = alpha/Q_lam_min;

    F_sign_lam_min = GSL_SIGN(F_lam_min_unnorm) ;

    F_lam_min  = F_sign_lam_min / sqrt(alpha*alpha/Q_lam_min + Q_lam_min);
    Fp_lam_min = Fp_over_F_lam_min * F_lam_min;
    G_lam_min  = gamma * F_lam_min;
    Gp_lam_min = (P_lam_min * gamma - Q_lam_min) * F_lam_min;

    /* Apply scale to values of F,Fp at lam_F (the top). */
    F_scale = F_lam_min / F_lam_min_unnorm;
    F_lam_F  *= F_scale;
    Fp_lam_F *= F_scale;

    /* Forward recurrence to get G,Gp at lam_G (the top).
     */
    G_recur_count = GSL_MAX(N-k_lam_G,0);
    stat_Gr = coulomb_G_recur(lam_min, G_recur_count, eta, x,
                              G_lam_min, Gp_lam_min,
                              &G_lam_G, &Gp_lam_G
                              );

    /* crude error model: each CF/recurrence step amplifies roundoff */
    err_amplify = CF1_count + CF2_count + F_recur_count + G_recur_count + 1;

    F->val = F_lam_F;
    F->err = 8.0*err_amplify*GSL_DBL_EPSILON * fabs(F->val);

    Fp->val = Fp_lam_F;
    Fp->err = 8.0*err_amplify*GSL_DBL_EPSILON * fabs(Fp->val);

    G->val = G_lam_G;
    G->err = 8.0*err_amplify*GSL_DBL_EPSILON * fabs(G->val);

    Gp->val = Gp_lam_G;
    Gp->err = 8.0*err_amplify*GSL_DBL_EPSILON * fabs(Gp->val);

    *exp_F = 0.0;
    *exp_G = 0.0;

    return GSL_ERROR_SELECT_4(stat_CF1, stat_CF2, stat_Fr, stat_Gr);
  }
}


/* F_{lam_min} .. F_{lam_min+kmax}: evaluate F at the top lambda and
 * recurse downward, filling fc_array. At x == 0 all values are 0
 * except fc_array[0] = C_0(eta) when lam_min == 0.
 */
int
gsl_sf_coulomb_wave_F_array(double lam_min, int kmax, double eta, double x,
                            double * fc_array, double * F_exp)
{
  if(x == 0.0) {
    int k;
    *F_exp = 0.0;
    for(k=0; k<=kmax; k++) {
      fc_array[k] = 0.0;
    }
    if(lam_min == 0.0){
      gsl_sf_result f_0;
      CLeta(0.0, eta, &f_0);
      fc_array[0] = f_0.val;
    }
    return GSL_SUCCESS;
  }
  else {
    const double x_inv = 1.0/x;
    const double lam_max = lam_min + kmax;
    gsl_sf_result F, Fp;
    gsl_sf_result G, Gp;
    double G_exp;

    int stat_FG = gsl_sf_coulomb_wave_FG_e(eta, x, lam_max, 0,
                                           &F, &Fp, &G, &Gp, F_exp, &G_exp);

    double fcl  = F.val;
    double fpl  = Fp.val;
    double lam  = lam_max;
    int k;

    fc_array[kmax] = F.val;

    /* same downward recurrence as coulomb_F_recur() */
    for(k=kmax-1; k>=0; k--) {
      double el = eta/lam;
      double rl = hypot(1.0, el);
      double sl = el + lam*x_inv;
      double fc_lm1 = (fcl*sl + fpl)/rl;
      fc_array[k] = fc_lm1;
      fpl = fc_lm1*sl - fcl*rl;
      fcl = fc_lm1;
      lam -= 1.0;
    }

    return stat_FG;
  }
}

/* F and G arrays: F at lam_max and G at lam_min are obtained from
 * gsl_sf_coulomb_wave_FG_e, then F is recursed down and G up.
 */
int
gsl_sf_coulomb_wave_FG_array(double lam_min, int kmax, double eta, double x,
                             double * fc_array, double * gc_array,
                             double * F_exp, double * G_exp)
{
  const double x_inv = 1.0/x;
  const double lam_max = lam_min + kmax;
  gsl_sf_result F, Fp;
  gsl_sf_result G, Gp;

  int stat_FG = gsl_sf_coulomb_wave_FG_e(eta, x, lam_max, kmax,
                                         &F, &Fp, &G, &Gp, F_exp, G_exp);

  double fcl  = F.val;
  double fpl  = Fp.val;
  double lam  = lam_max;
  int k;

  double gcl, gpl;

  fc_array[kmax] = F.val;

  for(k=kmax-1; k>=0; k--) {
    double el = eta/lam;
    double rl = hypot(1.0, el);
    double sl = el + lam*x_inv;
    double fc_lm1;
    fc_lm1 = (fcl*sl + fpl)/rl;
    fc_array[k] = fc_lm1;
    fpl = fc_lm1*sl - fcl*rl;
    fcl = fc_lm1;
    lam -= 1.0;
  }

  gcl = G.val;
  gpl = Gp.val;
  lam = lam_min + 1.0;

  gc_array[0] = G.val;

  for(k=1; k<=kmax; k++) {
    double el = eta/lam;
    double rl = hypot(1.0, el);
    double sl = el + lam*x_inv;
    double gcl1 = (sl*gcl - gpl)/rl;
    gc_array[k] = gcl1;
    gpl = rl*gcl - sl*gcl1;
    gcl = gcl1;
    lam += 1.0;
  }

  return stat_FG;
}

/* Same as gsl_sf_coulomb_wave_FG_array but also records the
 * derivatives F' and G' at each step.
 */
int
gsl_sf_coulomb_wave_FGp_array(double lam_min, int kmax, double eta, double x,
                              double * fc_array, double * fcp_array,
                              double * gc_array, double * gcp_array,
                              double * F_exp, double * G_exp)
{
  const double x_inv = 1.0/x;
  const double lam_max = lam_min + kmax;
  gsl_sf_result F, Fp;
  gsl_sf_result G, Gp;

  int stat_FG = gsl_sf_coulomb_wave_FG_e(eta, x, lam_max, kmax,
                                         &F, &Fp, &G, &Gp, F_exp, G_exp);

  double fcl  = F.val;
  double fpl  = Fp.val;
  double lam  = lam_max;
  int k;

  double gcl, gpl;

  fc_array[kmax]  = F.val;
  fcp_array[kmax] = Fp.val;

  for(k=kmax-1; k>=0; k--) {
    double el = eta/lam;
    double rl = hypot(1.0, el);
    double sl = el + lam*x_inv;
    double fc_lm1;
    fc_lm1 = (fcl*sl + fpl)/rl;
    fc_array[k]  = fc_lm1;
    fpl = fc_lm1*sl - fcl*rl;
    fcp_array[k] = fpl;
    fcl = fc_lm1;
    lam -= 1.0;
  }

  gcl = G.val;
  gpl = Gp.val;
  lam = lam_min + 1.0;

  gc_array[0]  = G.val;
  gcp_array[0] = Gp.val;

  for(k=1; k<=kmax; k++) {
    double el = eta/lam;
    double rl = hypot(1.0, el);
    double sl = el + lam*x_inv;
    double gcl1 = (sl*gcl - gpl)/rl;
    gc_array[k]  = gcl1;
    gpl = rl*gcl - sl*gcl1;
    gcp_array[k] = gpl;
    gcl = gcl1;
    lam += 1.0;
  }

  return stat_FG;
}

/* Spherical Coulomb functions F_L(eta,x)/x; underflow near x == 0 is
 * reported explicitly rather than dividing by a tiny x.
 */
int
gsl_sf_coulomb_wave_sphF_array(double lam_min, int kmax, double eta, double x,
                               double * fc_array, double * F_exp)
{
  if(x < 0.0 || lam_min < -0.5) {
    GSL_ERROR ("domain error", GSL_EDOM);
  }
  else if(x < 10.0/GSL_DBL_MAX) {
    /* x too small to divide by: use the known limiting values */
    int k;
    for(k=0; k<=kmax; k++) {
      fc_array[k] = 0.0;
    }
    if(lam_min == 0.0) {
      fc_array[0] = sqrt(C0sq(eta));
    }
    *F_exp = 0.0;
    if(x == 0.0)
      return GSL_SUCCESS;
    else
      GSL_ERROR ("underflow", GSL_EUNDRFLW);
  }
  else {
    int k;
    int stat_F = gsl_sf_coulomb_wave_F_array(lam_min, kmax, eta, x,
                                             fc_array, F_exp);

    for(k=0; k<=kmax; k++) {
      fc_array[k] = fc_array[k] / x;
    }
    return stat_F;
  }
}
{ "alphanum_fraction": 0.5875656076, "avg_line_length": 27.8131170663, "ext": "c", "hexsha": "7c68076f9abc1b3073d10d0594eb319370987c89", "lang": "C", "max_forks_count": 40, "max_forks_repo_forks_event_max_datetime": "2022-03-03T23:23:37.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-26T15:31:16.000Z", "max_forks_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "manggoguy/parsec-modified", "max_forks_repo_path": "pkgs/libs/gsl/src/specfunc/coulomb.c", "max_issues_count": 12, "max_issues_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_issues_repo_issues_event_max_datetime": "2022-03-13T03:54:24.000Z", "max_issues_repo_issues_event_min_datetime": "2020-12-15T08:30:19.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "manggoguy/parsec-modified", "max_issues_repo_path": "pkgs/libs/gsl/src/specfunc/coulomb.c", "max_line_length": 95, "max_stars_count": 64, "max_stars_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "manggoguy/parsec-modified", "max_stars_repo_path": "pkgs/libs/gsl/src/specfunc/coulomb.c", "max_stars_repo_stars_event_max_datetime": "2022-03-24T13:26:53.000Z", "max_stars_repo_stars_event_min_datetime": "2015-03-06T00:30:56.000Z", "num_tokens": 13964, "size": 39439 }
/* multimin/vector_bfgs2.c
 *
 * Copyright (C) 2007 Brian Gough
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/* vector_bfgs2.c -- Fletcher's implementation of the BFGS method,
   from R. Fletcher, "Practical Methods of Optimization", Second
   Edition, ISBN 0471915475.  Algorithms 2.6.2 and 2.6.4. */

/* Thanks to Alan Irwin irwin@beluga.phys.uvic.ca
for suggesting this algorithm and providing sample fortran benchmarks */ #include <config.h> #include <gsl/gsl_multimin.h> #include <gsl/gsl_blas.h> #include "linear_minimize.c" #include "linear_wrapper.c" typedef struct { int iter; double step; double g0norm; double pnorm; double delta_f; double fp0; /* f'(0) for f(x-alpha*p) */ gsl_vector *x0; gsl_vector *g0; gsl_vector *p; /* work space */ gsl_vector *dx0; gsl_vector *dg0; gsl_vector *x_alpha; gsl_vector *g_alpha; /* wrapper function */ wrapper_t wrap; /* minimization parameters */ double rho; double sigma; double tau1; double tau2; double tau3; int order; } vector_bfgs2_state_t; static int vector_bfgs2_alloc (void *vstate, size_t n) { vector_bfgs2_state_t *state = (vector_bfgs2_state_t *) vstate; state->p = gsl_vector_calloc (n); if (state->p == 0) { GSL_ERROR ("failed to allocate space for p", GSL_ENOMEM); } state->x0 = gsl_vector_calloc (n); if (state->x0 == 0) { gsl_vector_free (state->p); GSL_ERROR ("failed to allocate space for g0", GSL_ENOMEM); } state->g0 = gsl_vector_calloc (n); if (state->g0 == 0) { gsl_vector_free (state->x0); gsl_vector_free (state->p); GSL_ERROR ("failed to allocate space for g0", GSL_ENOMEM); } state->dx0 = gsl_vector_calloc (n); if (state->dx0 == 0) { gsl_vector_free (state->g0); gsl_vector_free (state->x0); gsl_vector_free (state->p); GSL_ERROR ("failed to allocate space for g0", GSL_ENOMEM); } state->dg0 = gsl_vector_calloc (n); if (state->dg0 == 0) { gsl_vector_free (state->dx0); gsl_vector_free (state->g0); gsl_vector_free (state->x0); gsl_vector_free (state->p); GSL_ERROR ("failed to allocate space for g0", GSL_ENOMEM); } state->x_alpha = gsl_vector_calloc (n); if (state->x_alpha == 0) { gsl_vector_free (state->dg0); gsl_vector_free (state->dx0); gsl_vector_free (state->g0); gsl_vector_free (state->x0); gsl_vector_free (state->p); GSL_ERROR ("failed to allocate space for g0", GSL_ENOMEM); } state->g_alpha = gsl_vector_calloc (n); if (state->g_alpha == 0) { gsl_vector_free 
(state->x_alpha); gsl_vector_free (state->dg0); gsl_vector_free (state->dx0); gsl_vector_free (state->g0); gsl_vector_free (state->x0); gsl_vector_free (state->p); GSL_ERROR ("failed to allocate space for g0", GSL_ENOMEM); } return GSL_SUCCESS; } static int vector_bfgs2_set (void *vstate, gsl_multimin_function_fdf * fdf, const gsl_vector * x, double *f, gsl_vector * gradient, double step_size, double tol) { vector_bfgs2_state_t *state = (vector_bfgs2_state_t *) vstate; state->iter = 0; state->step = step_size; state->delta_f = 0; GSL_MULTIMIN_FN_EVAL_F_DF (fdf, x, f, gradient); /* Use the gradient as the initial direction */ gsl_vector_memcpy (state->x0, x); gsl_vector_memcpy (state->g0, gradient); state->g0norm = gsl_blas_dnrm2 (state->g0); gsl_vector_memcpy (state->p, gradient); gsl_blas_dscal (-1 / state->g0norm, state->p); state->pnorm = gsl_blas_dnrm2 (state->p); /* should be 1 */ state->fp0 = -state->g0norm; /* Prepare the wrapper */ prepare_wrapper (&state->wrap, fdf, state->x0, *f, state->g0, state->p, state->x_alpha, state->g_alpha); /* Prepare 1d minimisation parameters */ state->rho = 0.01; state->sigma = tol; state->tau1 = 9; state->tau2 = 0.05; state->tau3 = 0.5; state->order = 3; /* use cubic interpolation where possible */ return GSL_SUCCESS; } static void vector_bfgs2_free (void *vstate) { vector_bfgs2_state_t *state = (vector_bfgs2_state_t *) vstate; gsl_vector_free (state->x_alpha); gsl_vector_free (state->g_alpha); gsl_vector_free (state->dg0); gsl_vector_free (state->dx0); gsl_vector_free (state->g0); gsl_vector_free (state->x0); gsl_vector_free (state->p); } static int vector_bfgs2_restart (void *vstate) { vector_bfgs2_state_t *state = (vector_bfgs2_state_t *) vstate; state->iter = 0; return GSL_SUCCESS; } static int vector_bfgs2_iterate (void *vstate, gsl_multimin_function_fdf * fdf, gsl_vector * x, double *f, gsl_vector * gradient, gsl_vector * dx) { vector_bfgs2_state_t *state = (vector_bfgs2_state_t *) vstate; double alpha = 0.0, alpha1; 
gsl_vector *x0 = state->x0; gsl_vector *g0 = state->g0; gsl_vector *p = state->p; double g0norm = state->g0norm; double pnorm = state->pnorm; double delta_f = state->delta_f; double pg, dir; int status; double f0 = *f; if (pnorm == 0.0 || g0norm == 0.0 || state->fp0 == 0) { gsl_vector_set_zero (dx); return GSL_ENOPROG; } if (delta_f < 0) { double del = GSL_MAX_DBL (-delta_f, 10 * GSL_DBL_EPSILON * fabs(f0)); alpha1 = GSL_MIN_DBL (1.0, 2.0 * del / (-state->fp0)); } else { alpha1 = fabs(state->step); } /* line minimisation, with cubic interpolation (order = 3) */ status = minimize (&state->wrap.fdf_linear, state->rho, state->sigma, state->tau1, state->tau2, state->tau3, state->order, alpha1, &alpha); if (status != GSL_SUCCESS) { return status; } update_position (&(state->wrap), alpha, x, f, gradient); state->delta_f = *f - f0; /* Choose a new direction for the next step */ { /* This is the BFGS update: */ /* p' = g1 - A dx - B dg */ /* A = - (1+ dg.dg/dx.dg) B + dg.g/dx.dg */ /* B = dx.g/dx.dg */ gsl_vector *dx0 = state->dx0; gsl_vector *dg0 = state->dg0; double dxg, dgg, dxdg, dgnorm, A, B; /* dx0 = x - x0 */ gsl_vector_memcpy (dx0, x); gsl_blas_daxpy (-1.0, x0, dx0); gsl_vector_memcpy (dx, dx0); /* keep a copy */ /* dg0 = g - g0 */ gsl_vector_memcpy (dg0, gradient); gsl_blas_daxpy (-1.0, g0, dg0); gsl_blas_ddot (dx0, gradient, &dxg); gsl_blas_ddot (dg0, gradient, &dgg); gsl_blas_ddot (dx0, dg0, &dxdg); dgnorm = gsl_blas_dnrm2 (dg0); if (dxdg != 0) { B = dxg / dxdg; A = -(1.0 + dgnorm * dgnorm / dxdg) * B + dgg / dxdg; } else { B = 0; A = 0; } gsl_vector_memcpy (p, gradient); gsl_blas_daxpy (-A, dx0, p); gsl_blas_daxpy (-B, dg0, p); } gsl_vector_memcpy (g0, gradient); gsl_vector_memcpy (x0, x); state->g0norm = gsl_blas_dnrm2 (g0); state->pnorm = gsl_blas_dnrm2 (p); /* update direction and fp0 */ gsl_blas_ddot (p, gradient, &pg); dir = (pg >= 0.0) ? 
-1.0 : +1.0; gsl_blas_dscal (dir / state->pnorm, p); state->pnorm = gsl_blas_dnrm2 (p); gsl_blas_ddot (p, g0, &state->fp0); change_direction (&state->wrap); return GSL_SUCCESS; } static const gsl_multimin_fdfminimizer_type vector_bfgs2_type = { "vector_bfgs2", /* name */ sizeof (vector_bfgs2_state_t), &vector_bfgs2_alloc, &vector_bfgs2_set, &vector_bfgs2_iterate, &vector_bfgs2_restart, &vector_bfgs2_free }; const gsl_multimin_fdfminimizer_type * gsl_multimin_fdfminimizer_vector_bfgs2 = &vector_bfgs2_type;
{ "alphanum_fraction": 0.6361353581, "avg_line_length": 25.2658610272, "ext": "c", "hexsha": "d4f5cb3d6ca676305a6b520fe327d4709459f232", "lang": "C", "max_forks_count": 40, "max_forks_repo_forks_event_max_datetime": "2022-03-03T23:23:37.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-26T15:31:16.000Z", "max_forks_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "manggoguy/parsec-modified", "max_forks_repo_path": "pkgs/libs/gsl/src/multimin/vector_bfgs2.c", "max_issues_count": 12, "max_issues_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_issues_repo_issues_event_max_datetime": "2022-03-13T03:54:24.000Z", "max_issues_repo_issues_event_min_datetime": "2020-12-15T08:30:19.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "manggoguy/parsec-modified", "max_issues_repo_path": "pkgs/libs/gsl/src/multimin/vector_bfgs2.c", "max_line_length": 75, "max_stars_count": 64, "max_stars_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "manggoguy/parsec-modified", "max_stars_repo_path": "pkgs/libs/gsl/src/multimin/vector_bfgs2.c", "max_stars_repo_stars_event_max_datetime": "2022-03-24T13:26:53.000Z", "max_stars_repo_stars_event_min_datetime": "2015-03-06T00:30:56.000Z", "num_tokens": 2630, "size": 8363 }
/*
 * Copyright (c) 2016-2021 lymastee, All rights reserved.
 * Contact: lymastee@hotmail.com
 *
 * This file is part of the gslib project.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#ifndef utilities_9904f6c1_3075_4335_b065_c7099f52e80b_h
#define utilities_9904f6c1_3075_4335_b065_c7099f52e80b_h

#include <ariel/type.h>
#include <gslib/std.h>
#include <gslib/string.h>
#include <gslib/file.h>

__ariel_begin__

/*
 * Text format elements:
 * [section] { ... }
 * [section]:[notation] { ... }
 *
 * where sections could be nested
 */

/* Free functions for parsing the text format above.  Each takes the
   source string and a start offset and returns the offset following
   the consumed element (implementations live in the matching .cpp). */
extern bool io_bad_eof(const string& src, int32 curr);
extern int32 io_skip_blank_charactors(const string& src, int32 start);
extern int32 io_read_section_name(const string& src, string& name, int32 start);
extern int32 io_read_notation(const string& src, string& notation, int32 start);
extern int32 io_skip_section(const string& src, int32 start);
extern int32 io_enter_section(const string& src, int32 start, gchar st = _t('{'));
extern int32 io_read_line_of_section(const string& src, string& line, int32 start);

/*
 * Binary format elements:
 * #[uint32][string]@[uint32]        section about length(in bytes)
 * $[uint32][string]                 notation string(ASCII only)
 */

using gs::file;

/* Abstract reader over a binary stream in the format above.  Derived
   classes supply the raw device access (read_byte .. rewind_to); this
   base keeps the logical cursor (_current), the total size (_size)
   and a stack of the byte-lengths of the sections currently entered
   (_section_stack).  (__gs_novtable/abstract are MSVC-style markers
   for a pure interface base.)  */
class __gs_novtable io_binary_stream abstract
{
public:
    typedef vector<int32> section_stack;

    /* Tag of the next element in the stream, as decoded by
       read_control_type(). */
    enum control_type
    {
        ctl_unknown,
        ctl_section,
        ctl_notation,
        ctl_counter,
        ctl_byte_stream_field,
        ctl_word_stream_field,
        ctl_dword_stream_field,
        ctl_qword_stream_field,
    };

public:
    io_binary_stream(int32 size);
    virtual ~io_binary_stream() {}
    control_type read_control_type();
    /* Bounds checks against the remaining stream/section length. */
    bool next_byte_valid() const { return next_n_bytes_valid(1); }
    bool next_word_valid() const { return next_n_bytes_valid(2); }
    bool next_dword_valid() const { return next_n_bytes_valid(4); }
    bool next_qword_valid() const { return next_n_bytes_valid(8); }
    bool next_n_bytes_valid(int32 bytes) const;
    void seek_to(int32 bytes);
    void seek_by(int32 bytes);
    float read_float();
    double read_double();
    /* read_nstring: length-prefixed string; read_string: reads until
       one of the characters in stopch.  Both return bytes consumed. */
    int32 read_nstring(string& str);
    int32 read_string(string& str, const string& stopch);
    void enter_section(int32 size) { _section_stack.push_back(size); }
    bool exit_section();
    bool skip_current_section();
    bool skip_next_section();

protected:
    int32               _size;          /* total stream length in bytes */
    int32               _current;       /* logical read position */
    section_stack       _section_stack; /* byte lengths of open sections */

public:
    /* Raw device primitives implemented by subclasses. */
    virtual byte read_byte() = 0;
    virtual word read_word() = 0;
    virtual dword read_dword() = 0;
    virtual qword read_qword() = 0;
    virtual bool read_field_to_buf(void* ptr, int32 bytes) = 0;
    virtual void rewind_to(int32 bytes) = 0;    /* absolute device seek */
    virtual int32 current_dev_pos() const = 0;

protected:
    void rewind_by(int32 bytes);
    void take_next_n_bytes(int32 n);
    bool section_stack_valid(int32 bytes) const;
};

/* Reader backed by an in-memory buffer; the caller retains ownership
   of the buffer for the lifetime of this object. */
class io_binary_memory:
    public io_binary_stream
{
public:
    io_binary_memory(const void* ptr, int32 size);
    virtual byte read_byte() override;
    virtual word read_word() override;
    virtual dword read_dword() override;
    virtual qword read_qword() override;
    virtual bool read_field_to_buf(void* ptr, int32 bytes) override;
    /* NOTE(review): this override ignores `bytes` and leaves _current
       unchanged, while the file variant performs a real seek — looks
       like the device position is never rewound for memory streams;
       verify against the base-class callers in the .cpp before
       relying on rewind_to here. */
    virtual void rewind_to(int32 bytes) override {}
    virtual int32 current_dev_pos() const override { return _current; }

protected:
    const byte*         _mem;           /* non-owning view of the buffer */
};

/* Reader backed by an open gs::file; the device position is the
   underlying file cursor. */
class io_binary_file:
    public io_binary_stream
{
public:
    io_binary_file(file& pf);
    virtual byte read_byte() override;
    virtual word read_word() override;
    virtual dword read_dword() override;
    virtual qword read_qword() override;
    virtual bool read_field_to_buf(void* ptr, int32 bytes) override;
    virtual void rewind_to(int32 bytes) override { _file.seek(bytes, SEEK_SET); }
    virtual int32 current_dev_pos() const override { return _file.current(); }

protected:
    file&               _file;          /* borrowed handle, not closed here */
};

__ariel_end__

#endif
{ "alphanum_fraction": 0.6946623302, "avg_line_length": 33.5064102564, "ext": "h", "hexsha": "a38a3cfe0ddf5f38e5c4dd8c5432652ee00c9b36", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2016-10-19T15:20:58.000Z", "max_forks_repo_forks_event_min_datetime": "2016-10-19T15:20:58.000Z", "max_forks_repo_head_hexsha": "1b165b7a812526c4b2a3179588df9a7c2ff602a6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "lymastee/gslib", "max_forks_repo_path": "include/ariel/io/utilities.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "1b165b7a812526c4b2a3179588df9a7c2ff602a6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "lymastee/gslib", "max_issues_repo_path": "include/ariel/io/utilities.h", "max_line_length": 84, "max_stars_count": 9, "max_stars_repo_head_hexsha": "1b165b7a812526c4b2a3179588df9a7c2ff602a6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "lymastee/gslib", "max_stars_repo_path": "include/ariel/io/utilities.h", "max_stars_repo_stars_event_max_datetime": "2022-02-11T09:44:51.000Z", "max_stars_repo_stars_event_min_datetime": "2016-10-18T09:40:09.000Z", "num_tokens": 1224, "size": 5227 }
/*
 * author: Achim Gaedke
 * created: January 2001
 * file: pygsl/src/histogrammodule.c
 * $Id: histogrammodule.c,v 1.3 2008/10/26 17:03:23 schnizer Exp $
 *
 *
 * May 2005
 * Pierre Schnizer
 * maintainance: replaced direct python error calls with pygsl calls
 *               added CAST and GET macros to reduce code duplication
 *
 * Now the warnings are all handled by a separate function.
 */
#include <pygsl/error_helpers.h>
#include <pygsl/block_helpers.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_histogram.h>
#include <gsl/gsl_histogram2d.h>

/* Error categories used to select a human-readable message in
   PyGSL_hist_error_helper. */
enum hist_error{
     NOHIST = 0,
     NOHIST2D,
     ARGNOHIST,
     ARGNOHIST2D,
     HISTP_NULL
};

static const char * filename = __FILE__;
static PyObject * module = NULL;

#include "histogram_doc.ic"

/*
 * Report a histogram-related error to the pygsl error machinery.
 *
 * function : name of the failing C function (for the traceback)
 * line     : source line (for the traceback)
 * myerrno  : GSL error number to report and return
 * errtype  : which message from enum hist_error to emit
 *
 * Returns myerrno (possibly replaced by GSL_ESANITY if errtype is
 * unknown).
 */
static int
PyGSL_hist_error_helper(const char * function, int line, int myerrno,
                        enum hist_error errtype)
{
     char *tmp;
     switch (errtype){
     case NOHIST:      tmp = "Object was not a histogramm";                    break;
     case NOHIST2D:    tmp = "Object was not a 2D histogramm";                 break;
     case ARGNOHIST:   tmp = "Argument was not a histogramm";                  break;
     case ARGNOHIST2D: tmp = "Argument was not a 2D histogramm";               break;
     case HISTP_NULL:  tmp = "Pointer to GSL histogramm(2d) object was NULL!"; break;
     default:
          tmp = "Unknown case in function hist_error_helper";
          myerrno = GSL_ESANITY;
          break;
     }
     PyGSL_add_traceback(module, filename, function, line);
     pygsl_error(tmp, filename, line, myerrno);
     return myerrno;
}

/*
 * Check if the received object is of the appropriate type and that the
 * histogram is defined.  Invokes PyGSL_hist_error_helper if it fails.
 * returns GSL_SUCCESS on success
 *
 * ob      ... the object to check
 * type    ... the Python type object
 * errcode ... the errorcode used to describe the failure type for
 *             PyGSL_hist_error_helper
 *
 * BUGFIX: the last two arguments of PyGSL_hist_error_helper were
 * previously passed in swapped order — the enum hist_error value
 * landed in `myerrno` and GSL_ESANITY in `errtype`, so every failed
 * type check hit the helper's default case and reported "Unknown case
 * in function hist_error_helper" instead of the intended message.
 * The order now matches the helper's (myerrno, errtype) signature,
 * consistent with the _PyGSL_HIST_CAST_SAVE call below.
 */
#define _PyGSL_HIST_CHECK_INT(ob, type, errcode)                          \
        ((ob->ob_type == &(type)) ? GSL_SUCCESS :                         \
         PyGSL_hist_error_helper(__FUNCTION__, __LINE__, GSL_ESANITY, (errcode)))

/*
 * returns the gsl histogram from the object
 */
#define _PyGSL_HIST_CAST(ob, type)  (( ((type *)(ob)) ->h ))

/*
 * Check if the gsl_histogram is not NULL.  Will give a descriptive error
 * message if it fails.
 */
#define _PyGSL_HIST_CAST_SAVE(ob, type)                                              \
(\
    (_PyGSL_HIST_CAST((ob), type) == NULL )                                          \
     ? \
     (PyGSL_hist_error_helper(__FUNCTION__, __LINE__, GSL_EFAULT, HISTP_NULL), NULL) \
     : \
     ( _PyGSL_HIST_CAST((ob), type) )                                                \
)

/*
 * Try to get the gsl_histogram from the python object.
 * Checks for errors ...
 */
#define _PyGSL_HIST_GET_INT(ob, type, cast, errcode)                \
(                                                                   \
  ( _PyGSL_HIST_CHECK_INT((ob), type, (errcode)) == GSL_SUCCESS )   \
  ? \
  _PyGSL_HIST_CAST_SAVE((ob), cast)                                 \
  : \
  NULL                                                              \
)

#define _PyGSL_HIST_GET(ob, type, errcode) _PyGSL_HIST_GET_INT(ob, type ## Type, type ## Object, errcode)

/* Type checks for the 1d and 2d histogram objects. */
#define _PyGSL_HIST_CHECK(ob, errcode)   _PyGSL_HIST_CHECK_INT((ob), (histogram_histogramType),   (errcode))
#define _PyGSL_HIST2D_CHECK(ob, errcode) _PyGSL_HIST_CHECK_INT((ob), (histogram_histogram2dType), (errcode))

#define PyGSL_HIST_CHECK(ob)        _PyGSL_HIST_CHECK((ob),   (NOHIST))
#define PyGSL_HIST2D_CHECK(ob)      _PyGSL_HIST2D_CHECK((ob), (NOHIST2D))
#define PyGSL_HIST_ARG_CHECK(ob)    _PyGSL_HIST_CHECK((ob),   (ARGNOHIST))
#define PyGSL_HIST2D_ARG_CHECK(ob)  _PyGSL_HIST2D_CHECK((ob), (ARGNOHIST2D))

/* Unchecked extraction of the wrapped gsl_histogram(2d) pointer. */
#define PyGSL_HIST_CAST(ob)   _PyGSL_HIST_CAST((ob), histogram_histogramObject)
#define PyGSL_HIST2D_CAST(ob) _PyGSL_HIST_CAST((ob), histogram_histogram2dObject)

/* Checked extraction: type check plus NULL-pointer check. */
#define PyGSL_HIST_GET(ob)    _PyGSL_HIST_GET((ob), histogram_histogram,   (NOHIST))
#define PyGSL_HIST2D_GET(ob)  _PyGSL_HIST_GET((ob), histogram_histogram2d, (NOHIST2D))

#define _PyGSL_HIST_GET_ARG(ob)   _PyGSL_HIST_GET((ob), histogram_histogram,   (ARGNOHIST))
#define _PyGSL_HIST2D_GET_ARG(ob) _PyGSL_HIST_GET((ob), histogram_histogram2d, (ARGNOHIST2D))

/* Argument variants tolerate a NULL object (propagate NULL). */
#define PyGSL_HIST_GET_ARG(ob)    ( ((ob) == NULL) ? NULL : _PyGSL_HIST_GET_ARG((ob))   )
#define PyGSL_HIST2D_GET_ARG(ob)  ( ((ob) == NULL) ? NULL : _PyGSL_HIST2D_GET_ARG((ob)) )

/*
 * Helper function for dealing with warning and errors ...
 *
 * rcode   : return code from a GSL call
 * errcode : the one error code that should be downgraded to a warning
 * errdes  : warning text used when rcode == errcode
 *
 * Returns GSL_SUCCESS, GSL_EFAILED when the Python warning machinery
 * itself raised, or rcode for any other GSL failure.
 */
static int
PyGSL_warn_err(int rcode, int errcode, const char * errdes, const char * file, int line)
{
     int warn_result;
     if (errcode==rcode) {
          warn_result = PyGSL_warning(errdes, file, line, errcode);
          if (warn_result==-1)
               /* exception is raised by PyErr_Warn */
               return GSL_EFAILED;
     } else if (PyGSL_ERROR_FLAG(rcode) != GSL_SUCCESS)
          return rcode;
     return GSL_SUCCESS;
}

/* Fast path: avoid the function call for GSL_SUCCESS. */
#define PyGSL_WARN_ERR(ob, errcode, errdes)                          \
(\
  ((ob) == (GSL_SUCCESS))                                            \
  ? \
  GSL_SUCCESS                                                        \
  : \
      ((ob) == (errcode))                                            \
      ? PyGSL_warn_err(ob, errcode, errdes, filename, __LINE__)      \
      : PyGSL_error_flag((ob))                                       \
)

static const char edom_message[] = "value out of histogram range";
/* Downgrade GSL_EDOM (bin out of range) to a Python warning. */
#define PyGSL_HIST_EDOM_WARN(ob) PyGSL_WARN_ERR((ob), GSL_EDOM, edom_message)

/* Generic signatures shared by the 1d and 2d method implementations. */
typedef int (*hist_op)(void *, const void *);
typedef int (*hist_file)(FILE *, void *);

static PyObject* histogram_histogram_op    (PyObject *self, PyObject * arg, hist_op fptr);
static PyObject* histogram_histogram2d_op  (PyObject *self, PyObject * arg, hist_op fptr);
static PyObject* histogram_histogram_file  (PyObject *self, PyObject * arg, hist_file fptr);
static PyObject* histogram_histogram2d_file(PyObject *self, PyObject * arg, hist_file fptr);

/*
 *
 * histogram type
 * for 1d histogram data
 *
 */

/* my typedef */
staticforward PyTypeObject histogram_histogramType;
staticforward PyMethodDef histogram_histogram_methods[];

typedef struct {
     PyObject_HEAD
     gsl_histogram* h;          /* owned GSL histogram, freed by dealloc */
} histogram_histogramObject;

#define _CONCAT2(class, suffix) class ## _ ## suffix

#include "histogram.ic"

/* Parameterise the shared .ic templates for the 1d histogram type. */
#define HISTTYPE gsl_histogram
#define PyGSLHISTTYPE histogram_histogramType
#define PyGSL_HIST_TYPE_GET(ob)     PyGSL_HIST_GET((ob))
#define PyGSL_HIST_TYPE_CAST(ob)    PyGSL_HIST_CAST((ob))
#define PyGSL_HIST_TYPE_ARG_GET(ob) PyGSL_HIST_GET_ARG((ob))
#define GSLNAME(suffix) _CONCAT2(gsl_histogram, suffix)
#define FUNCNAME(suffix) _CONCAT2(histogram_histogram, suffix)

/* Make sure the shared templates below are expanded in 1d mode. */
#ifdef HISTOGRAM2D
#undef HISTOGRAM2D
#endif

/* Instantiate the shared method bodies for the 1d type using the
   HISTTYPE/GSLNAME/FUNCNAME macros defined above. */
#include "histogram_pdf_common.ic"
#include "histogram_common.ic"

/* Tear the parameterisation down again so the 2d instantiation below
   can redefine every macro cleanly. */
#undef HISTTYPE
#undef PyGSLHISTTYPE
#undef PyGSL_HIST_TYPE_GET
#undef PyGSL_HIST_TYPE_CAST
#undef PyGSL_HIST_TYPE_ARG_GET
#undef FUNCNAME
#undef GSLNAME

/* Python type object for the 1d histogram (old-style Python 2 slot
   table; the alloc/new/free slots are filled in by register_type). */
static PyTypeObject histogram_histogramType = {
  PyObject_HEAD_INIT(NULL)
  0,
  "pygsl.histogram.histogram",
  sizeof(histogram_histogramObject),
  0,
  (destructor)histogram_histogram_dealloc,      /* tp_dealloc */
  0,                                            /* tp_print */
  histogram_histogram_getattr,                  /* tp_getattr */
  0,                                            /* tp_setattr */
  0,                                            /* tp_compare */
  0,                                            /* tp_repr */
  0,                                            /* tp_as_number */
  0,                                            /* tp_as_sequence */
  &histogram_histogram_as_mapping,              /* tp_as_mapping */
  0,                                            /* tp_hash */
  0,                                            /* tp_call */
  0,                                            /* tp_str */
  0,                                            /* tp_getattro */
  0,                                            /* tp_setattro */
  0,                                            /* tp_as_buffer */
  Py_TPFLAGS_DEFAULT,                           /* tp_flags */
  0,                                            /* tp_doc */
  0,                                            /* tp_traverse */
  0,                                            /* tp_clear */
  0,                                            /* tp_richcompare */
  0,                                            /* tp_weaklistoffset */
  0,                                            /* tp_iter */
  0,                                            /* tp_iternext */
  0,                                            /* tp_methods */
  0,                                            /* tp_members */
  0,                                            /* tp_getset */
  0,                                            /* tp_base */
  0,                                            /* tp_dict */
  0,                                            /* tp_descr_get */
  0,                                            /* tp_descr_set */
  0,                                            /* tp_dictoffset */
  (initproc)histogram_histogram_init,           /* tp_init */
  NULL,                                         /* tp_alloc */
  NULL,                                         /* tp_new */
  NULL                                          /* tp_free */
};

/*
 *
 * here begins the section for the 2d histogram
 *
 */

/* my typedef */
staticforward PyTypeObject histogram_histogram2dType;
staticforward PyMethodDef histogram_histogram2d_methods[];

typedef struct {
     PyObject_HEAD
     gsl_histogram2d* h;        /* owned GSL 2d histogram */
} histogram_histogram2dObject;

static PyObject* histogram_histogram2d_reset(PyObject *);

#include "histogram2d.ic"

/* Re-parameterise the shared templates, this time in 2d mode. */
#define HISTOGRAM2D 1
#define HISTTYPE gsl_histogram2d
#define PyGSLHISTTYPE histogram_histogram2dType
#define PyGSL_HIST_TYPE_GET(ob)     PyGSL_HIST2D_GET((ob))
#define PyGSL_HIST_TYPE_ARG_GET(ob) PyGSL_HIST2D_GET_ARG((ob))
#define PyGSL_HIST_TYPE_CAST(ob)    PyGSL_HIST2D_CAST((ob))
#define GSLNAME(suffix) _CONCAT2(gsl_histogram2d, suffix)
#define FUNCNAME(suffix) _CONCAT2(histogram_histogram2d, suffix)

#include "histogram_pdf_common.ic"
#include "histogram_common.ic"

/* Python type object for the 2d histogram (same layout as the 1d
   slot table above). */
static PyTypeObject histogram_histogram2dType = {
  PyObject_HEAD_INIT(NULL)
  0,
  "pygsl.histogram.histogram2d",
  sizeof(histogram_histogram2dObject),
  0,
  (destructor)histogram_histogram2d_dealloc,    /* tp_dealloc */
  0,                                            /* tp_print */
  histogram_histogram2d_getattr,                /* tp_getattr */
  0,                                            /* tp_setattr */
  0,                                            /* tp_compare */
  0,                                            /* tp_repr */
  0,                                            /* tp_as_number */
  0,                                            /* tp_as_sequence */
  &histogram_histogram2d_as_mapping,            /* tp_as_mapping */
  0,                                            /* tp_hash */
  0,                                            /* tp_call */
  0,                                            /* tp_str */
  0,                                            /* tp_getattro */
  0,                                            /* tp_setattro */
  0,                                            /* tp_as_buffer */
  Py_TPFLAGS_DEFAULT,                           /* tp_flags */
  0,                                            /* tp_doc */
  0,                                            /* tp_traverse */
  0,                                            /* tp_clear */
  0,                                            /* tp_richcompare */
  0,                                            /* tp_weaklistoffset */
  0,                                            /* tp_iter */
  0,                                            /* tp_iternext */
  0,                                            /* tp_methods */
  0,                                            /* tp_members */
  0,                                            /* tp_getset */
  0,                                            /* tp_base */
  0,                                            /* tp_dict */
  0,                                            /* tp_descr_get */
  0,                                            /* tp_descr_set */
  0,                                            /* tp_dictoffset */
  (initproc)histogram_histogram2d_init,         /* tp_init */
  NULL,                                         /* tp_alloc */
  NULL,                                         /* tp_new */
  NULL                                          /* tp_free */
};

#include "histogram_pdf.ic"

/*
 *
 * module specific stuff
 *
 */
static PyMethodDef histogramMethods[] = {
     {NULL, NULL, 0, NULL}        /* Sentinel */
};

/* Fill in the generic allocation slots of a type object and publish
   it in the module namespace under `name`. */
void
register_type(PyTypeObject *p, char *name)
{
     p->ob_type = &PyType_Type;
     p->tp_alloc = PyType_GenericAlloc;
     p->tp_new = PyType_GenericNew;
     p->tp_free = _PyObject_Del;
     /* install histogram type */
     /* important! must increment histogram type reference counter */
     Py_INCREF((PyObject*)p);
     PyModule_AddObject(module, name, (PyObject*)p);
}

/* Python 2 module entry point: create the module, initialise pygsl,
   and register the four histogram-related types. */
void
inithistogram(void)
{
     PyObject* m;
     m=Py_InitModule("histogram", histogramMethods);
     if(!m)
          return;
     module = m;
     init_pygsl();

     /* init histogram type */
     register_type(&histogram_histogramType, "histogram");
     register_type(&histogram_histogram_pdfType, "histogram_pdf");
     register_type(&histogram_histogram2dType, "histogram2d");
     register_type(&histogram_histogram2d_pdfType, "histogram2d_pdf");
}
{ "alphanum_fraction": 0.6476633009, "avg_line_length": 29.6843501326, "ext": "c", "hexsha": "7fc3c05679ec52d902e8228dce0defc666a735d9", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2018-10-02T06:18:07.000Z", "max_forks_repo_forks_event_min_datetime": "2018-10-02T06:18:07.000Z", "max_forks_repo_head_hexsha": "457e7afb5cab424296dff95e1acf10ebf70d32a9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "juhnowski/FishingRod", "max_forks_repo_path": "production/pygsl-0.9.5/src/histogram/histogrammodule.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "457e7afb5cab424296dff95e1acf10ebf70d32a9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "juhnowski/FishingRod", "max_issues_repo_path": "production/pygsl-0.9.5/src/histogram/histogrammodule.c", "max_line_length": 109, "max_stars_count": null, "max_stars_repo_head_hexsha": "457e7afb5cab424296dff95e1acf10ebf70d32a9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "juhnowski/FishingRod", "max_stars_repo_path": "production/pygsl-0.9.5/src/histogram/histogrammodule.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3196, "size": 11191 }
/* linalg/qrpt.c
 * 
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 2007 Gerard Jungman, Brian Gough
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or (at
 * your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <config.h>
#include <stdlib.h>
#include <string.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_permute_vector.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_linalg.h>

#include "apply_givens.c"

/* Factorise a general M x N matrix A into
 *
 *   A P = Q R
 *
 * where Q is orthogonal (M x M) and R is upper triangular (M x N).
 * When A is rank deficient, r = rank(A) < n, then the permutation is
 * used to ensure that the lower n - r rows of R are zero and the first
 * r columns of Q form an orthonormal basis for A.
 *
 * Q is stored as a packed set of Householder transformations in the
 * strict lower triangular part of the input matrix.
 *
 * R is stored in the diagonal and upper triangle of the input matrix.
 *
 * P: column j of P is column k of the identity matrix, where k =
 * permutation->data[j]
 *
 * The full matrix for Q can be obtained as the product
 *
 *      Q = Q_k .. Q_2 Q_1
 *
 * where k = MIN(M,N) and
 *
 *      Q_i = (I - tau_i * v_i * v_i')
 *
 * and where v_i is a Householder vector
 *
 *      v_i = [1, m(i+1,i), m(i+2,i), ... , m(M,i)]
 *
 * This storage scheme is the same as in LAPACK.  See LAPACK's
 * dgeqpf.f for details.
 * 
 */

int
gsl_linalg_QRPT_decomp (gsl_matrix * A, gsl_vector * tau,
                        gsl_permutation * p, int *signum, gsl_vector * norm)
{
  const size_t M = A->size1;
  const size_t N = A->size2;

  if (tau->size != GSL_MIN (M, N))
    {
      GSL_ERROR ("size of tau must be MIN(M,N)", GSL_EBADLEN);
    }
  else if (p->size != N)
    {
      GSL_ERROR ("permutation size must be N", GSL_EBADLEN);
    }
  else if (norm->size != N)
    {
      GSL_ERROR ("norm size must be N", GSL_EBADLEN);
    }
  else
    {
      size_t i;

      *signum = 1;

      gsl_permutation_init (p); /* set to identity */

      /* Compute column norms and store in workspace */

      for (i = 0; i < N; i++)
        {
          gsl_vector_view c = gsl_matrix_column (A, i);
          double x = gsl_blas_dnrm2 (&c.vector);
          gsl_vector_set (norm, i, x);
        }

      for (i = 0; i < GSL_MIN (M, N); i++)
        {
          /* Bring the column of largest norm into the pivot position */

          double max_norm = gsl_vector_get(norm, i);
          size_t j, kmax = i;

          for (j = i + 1; j < N; j++)
            {
              double x = gsl_vector_get (norm, j);

              if (x > max_norm)
                {
                  max_norm = x;
                  kmax = j;
                }
            }

          if (kmax != i)
            {
              gsl_matrix_swap_columns (A, i, kmax);
              gsl_permutation_swap (p, i, kmax);
              gsl_vector_swap_elements(norm,i,kmax);

              /* each column swap flips the sign of the permutation */
              (*signum) = -(*signum);
            }

          /* Compute the Householder transformation to reduce the j-th
             column of the matrix to a multiple of the j-th unit vector */

          {
            gsl_vector_view c_full = gsl_matrix_column (A, i);
            gsl_vector_view c = gsl_vector_subvector (&c_full.vector, 
                                                      i, M - i);
            double tau_i = gsl_linalg_householder_transform (&c.vector);

            gsl_vector_set (tau, i, tau_i);

            /* Apply the transformation to the remaining columns */

            if (i + 1 < N)
              {
                gsl_matrix_view m = gsl_matrix_submatrix (A, i, i + 1, 
                                                          M - i, N - (i+1));
                gsl_linalg_householder_hm (tau_i, &c.vector, &m.matrix);
              }
          }

          /* Update the norms of the remaining columns too */

          if (i + 1 < M) 
            {
              for (j = i + 1; j < N; j++)
                {
                  double x = gsl_vector_get (norm, j);

                  if (x > 0.0)
                    {
                      double y = 0;
                      /* downdate: new_norm^2 = old_norm^2 - A(i,j)^2 */
                      double temp= gsl_matrix_get (A, i, j) / x;
                  
                      if (fabs (temp) >= 1)
                        y = 0.0;
                      else
                        y = x * sqrt (1 - temp * temp);
                      
                      /* recompute norm to prevent loss of accuracy */

                      if (fabs (y / x) < sqrt (20.0) * GSL_SQRT_DBL_EPSILON)
                        {
                          gsl_vector_view c_full = gsl_matrix_column (A, j);
                          gsl_vector_view c = 
                            gsl_vector_subvector(&c_full.vector,
                                                 i+1, M - (i+1));
                          y = gsl_blas_dnrm2 (&c.vector);
                        }
                      
                      gsl_vector_set (norm, j, y);
                    }
                }
            }
        }

      return GSL_SUCCESS;
    }
}

/* Non-destructive variant: leaves A untouched and returns Q and R as
   explicit matrices via gsl_linalg_QR_unpack.  */
int
gsl_linalg_QRPT_decomp2 (const gsl_matrix * A, gsl_matrix * q, gsl_matrix * r, 
                         gsl_vector * tau, gsl_permutation * p, int *signum,
                         gsl_vector * norm)
{
  const size_t M = A->size1;
  const size_t N = A->size2;

  if (q->size1 != M || q->size2 !=M) 
    {
      GSL_ERROR ("q must be M x M", GSL_EBADLEN);
    }
  else if (r->size1 != M || r->size2 !=N)
    {
      GSL_ERROR ("r must be M x N", GSL_EBADLEN);
    }
  else if (tau->size != GSL_MIN (M, N))
    {
      GSL_ERROR ("size of tau must be MIN(M,N)", GSL_EBADLEN);
    }
  else if (p->size != N)
    {
      GSL_ERROR ("permutation size must be N", GSL_EBADLEN);
    }
  else if (norm->size != N)
    {
      GSL_ERROR ("norm size must be N", GSL_EBADLEN);
    }

  gsl_matrix_memcpy (r, A);

  gsl_linalg_QRPT_decomp (r, tau, p, signum, norm);

  /* FIXME:  aliased arguments depends on behavior of unpack routine! */

  gsl_linalg_QR_unpack (r, tau, q, r);

  return GSL_SUCCESS;
}

/* Solves the system A x = b using the Q R P^T factorisation,

   R z = Q^T b

   x = P z;

   to obtain x. Based on SLATEC code. 
*/

int
gsl_linalg_QRPT_solve (const gsl_matrix * QR,
                       const gsl_vector * tau,
                       const gsl_permutation * p,
                       const gsl_vector * b,
                       gsl_vector * x)
{
  if (QR->size1 != QR->size2)
    {
      GSL_ERROR ("QR matrix must be square", GSL_ENOTSQR);
    }
  else if (QR->size1 != p->size)
    {
      GSL_ERROR ("matrix size must match permutation size", GSL_EBADLEN);
    }
  else if (QR->size1 != b->size)
    {
      GSL_ERROR ("matrix size must match b size", GSL_EBADLEN);
    }
  else if (QR->size2 != x->size)
    {
      GSL_ERROR ("matrix size must match solution size", GSL_EBADLEN);
    }
  else
    {
      /* delegate to the in-place solver on a copy of b */
      gsl_vector_memcpy (x, b);

      gsl_linalg_QRPT_svx (QR, tau, p, x);
      
      return GSL_SUCCESS;
    }
}

/* In-place variant of gsl_linalg_QRPT_solve: x contains b on entry
   and the solution on exit.  */
int
gsl_linalg_QRPT_svx (const gsl_matrix * QR,
                     const gsl_vector * tau,
                     const gsl_permutation * p,
                     gsl_vector * x)
{
  if (QR->size1 != QR->size2)
    {
      GSL_ERROR ("QR matrix must be square", GSL_ENOTSQR);
    }
  else if (QR->size1 != p->size)
    {
      GSL_ERROR ("matrix size must match permutation size", GSL_EBADLEN);
    }
  else if (QR->size2 != x->size)
    {
      GSL_ERROR ("matrix size must match solution size", GSL_EBADLEN);
    }
  else
    {
      /* compute sol = Q^T b */

      gsl_linalg_QR_QTvec (QR, tau, x);

      /* Solve R x = sol, storing x inplace in sol */

      gsl_blas_dtrsv (CblasUpper, CblasNoTrans, CblasNonUnit, QR, x);

      /* undo the column permutation: x = P z */
      gsl_permute_vector_inverse (p, x);

      return GSL_SUCCESS;
    }
}

/* Find the least squares solution to the overdetermined system 
 *
 *    A x = b 
 *
 * for M >= N using the QRPT factorization A P = Q R.  Assumes
 * A has full column rank.
 */

int
gsl_linalg_QRPT_lssolve (const gsl_matrix * QR, const gsl_vector * tau,
                         const gsl_permutation * p, const gsl_vector * b,
                         gsl_vector * x, gsl_vector * residual)
{
  /* full-rank case: delegate with rank = N */
  const size_t N = QR->size2;
  int status = gsl_linalg_QRPT_lssolve2(QR, tau, p, b, N, x, residual);
  return status;
}

/* Find the least squares solution to the overdetermined system 
 *
 *    A x = b 
 *
 * for M >= N using the QRPT factorization A P = Q R, where A
 * is assumed rank deficient with a given rank. 
 */

int
gsl_linalg_QRPT_lssolve2 (const gsl_matrix * QR, const gsl_vector * tau,
                          const gsl_permutation * p, const gsl_vector * b,
                          const size_t rank, gsl_vector * x,
                          gsl_vector * residual)
{
  const size_t M = QR->size1;
  const size_t N = QR->size2;

  if (M < N)
    {
      GSL_ERROR ("QR matrix must have M>=N", GSL_EBADLEN);
    }
  else if (M != b->size)
    {
      GSL_ERROR ("matrix size must match b size", GSL_EBADLEN);
    }
  else if (rank == 0 || rank > N)
    {
      GSL_ERROR ("rank must have 0 < rank <= N", GSL_EBADLEN);
    }
  else if (N != x->size)
    {
      GSL_ERROR ("matrix size must match solution size", GSL_EBADLEN);
    }
  else if (M != residual->size)
    {
      GSL_ERROR ("matrix size must match residual size", GSL_EBADLEN);
    }
  else
    {
      /* leading rank x rank block of R and the matching slices of the
         work vectors */
      gsl_matrix_const_view R11 = gsl_matrix_const_submatrix (QR, 0, 0, rank, rank);
      gsl_vector_view QTb1 = gsl_vector_subvector(residual, 0, rank);
      gsl_vector_view x1 = gsl_vector_subvector(x, 0, rank);
      size_t i;

      /* compute work = Q^T b */
      gsl_vector_memcpy(residual, b);
      gsl_linalg_QR_QTvec (QR, tau, residual);

      /* solve R_{11} x(1:r) = [Q^T b](1:r) */
      gsl_vector_memcpy(&(x1.vector), &(QTb1.vector));
      gsl_blas_dtrsv (CblasUpper, CblasNoTrans, CblasNonUnit, &(R11.matrix), &(x1.vector));

      /* x(r+1:N) = 0 */
      for (i = rank; i < N; ++i)
        gsl_vector_set(x, i, 0.0);

      /* compute x = P y */
      gsl_permute_vector_inverse (p, x);

      /* compute residual = b - A x = Q (Q^T b - R x) */
      gsl_vector_set_zero(&(QTb1.vector));
      gsl_linalg_QR_Qvec(QR, tau, residual);

      return GSL_SUCCESS;
    }
}

/* Solve R x = Q^T b given explicit (unpacked) Q and R matrices.
   NOTE: unlike the functions above, size mismatches here return an
   error code rather than invoking GSL_ERROR.  */
int
gsl_linalg_QRPT_QRsolve (const gsl_matrix * Q, const gsl_matrix * R,
                         const gsl_permutation * p,
                         const gsl_vector * b,
                         gsl_vector * x)
{
  if (Q->size1 != Q->size2 || R->size1 != R->size2)
    {
      return GSL_ENOTSQR;
    }
  else if (Q->size1 != p->size || Q->size1 != R->size1
           || Q->size1 != b->size)
    {
      return GSL_EBADLEN;
    }
  else
    {
      /* compute b' = Q^T b */

      gsl_blas_dgemv (CblasTrans, 1.0, Q, b, 0.0, x);

      /* Solve R x = b', storing x inplace */

      gsl_blas_dtrsv (CblasUpper, CblasNoTrans, CblasNonUnit, R, x);

      /* Apply permutation to solution in place */

      gsl_permute_vector_inverse (p, x);

      return GSL_SUCCESS;
    }
}

/* Solve R x = b followed by the inverse permutation (skips the Q^T
   multiplication — b must already be in the Q^T basis).  */
int
gsl_linalg_QRPT_Rsolve (const gsl_matrix * QR,
                        const gsl_permutation * p,
                        const gsl_vector * b,
                        gsl_vector * x)
{
  if (QR->size1 != QR->size2)
    {
      GSL_ERROR ("QR matrix must be square", GSL_ENOTSQR);
    }
  else if (QR->size1 != b->size)
    {
      GSL_ERROR ("matrix size must match b size", GSL_EBADLEN);
    }
  else if (QR->size2 != x->size)
    {
      GSL_ERROR ("matrix size must match x size", GSL_EBADLEN);
    }
  else if (p->size != x->size)
    {
      GSL_ERROR ("permutation size must match x size", GSL_EBADLEN);
    }
  else
    {
      /* Copy x <- b */

      gsl_vector_memcpy (x, b);

      /* Solve R x = b, storing x inplace */

      gsl_blas_dtrsv (CblasUpper, CblasNoTrans, CblasNonUnit, QR, x);

      gsl_permute_vector_inverse (p, x);

      return GSL_SUCCESS;
    }
}

/* In-place variant of gsl_linalg_QRPT_Rsolve.  */
int
gsl_linalg_QRPT_Rsvx (const gsl_matrix * QR,
                      const gsl_permutation * p,
                      gsl_vector * x)
{
  if (QR->size1 != QR->size2)
    {
      GSL_ERROR ("QR matrix must be square", GSL_ENOTSQR);
    }
  else if (QR->size2 != x->size)
    {
      GSL_ERROR ("matrix size must match x size", GSL_EBADLEN);
    }
  else if (p->size != x->size)
    {
      GSL_ERROR ("permutation size must match x size", GSL_EBADLEN);
    }
  else
    {
      /* Solve R x = b, storing x inplace */

      gsl_blas_dtrsv (CblasUpper, CblasNoTrans, CblasNonUnit, QR, x);

      gsl_permute_vector_inverse (p, x);

      return GSL_SUCCESS;
    }
}

/* Update a Q R P^T factorisation for A P= Q R ,  A' = A + u v^T,

   Q' R' P^-1 = QR P^-1 + u v^T
              = Q (R + Q^T u v^T P ) P^-1
              = Q (R + w v^T P) P^-1

   where w = Q^T u.

   Algorithm from Golub and Van Loan, "Matrix Computations", Section
   12.5 (Updating Matrix Factorizations, Rank-One Changes)

   Note: w is overwritten during the update.
*/

int
gsl_linalg_QRPT_update (gsl_matrix * Q, gsl_matrix * R,
                        const gsl_permutation * p,
                        gsl_vector * w, const gsl_vector * v)
{
  const size_t M = R->size1;
  const size_t N = R->size2;

  if (Q->size1 != M || Q->size2 != M)
    {
      GSL_ERROR ("Q matrix must be M x M if R is M x N", GSL_ENOTSQR);
    }
  else if (w->size != M)
    {
      GSL_ERROR ("w must be length M if R is M x N", GSL_EBADLEN);
    }
  else if (v->size != N)
    {
      GSL_ERROR ("v must be length N if R is M x N", GSL_EBADLEN);
    }
  else
    {
      size_t j, k;
      double w0;

      /* Apply Given's rotations to reduce w to (|w|, 0, 0, ... , 0)

         J_1^T .... J_(n-1)^T w = +/- |w| e_1

         simultaneously applied to R,  H = J_1^T ... J^T_(n-1) R
         so that H is upper Hessenberg.  (12.5.2) */

      for (k = M - 1; k > 0; k--)
        {
          double c, s;
          double wk = gsl_vector_get (w, k);
          double wkm1 = gsl_vector_get (w, k - 1);

          gsl_linalg_givens (wkm1, wk, &c, &s);
          gsl_linalg_givens_gv (w, k - 1, k, c, s);
          apply_givens_qr (M, N, Q, R, k - 1, k, c, s);
        }

      w0 = gsl_vector_get (w, 0);

      /* Add in w v^T  (Equation 12.5.3) */

      for (j = 0; j < N; j++)
        {
          double r0j = gsl_matrix_get (R, 0, j);
          /* v is indexed through the permutation: column j of R
             corresponds to column p(j) of A */
          size_t p_j = gsl_permutation_get (p, j);
          double vj = gsl_vector_get (v, p_j);
          gsl_matrix_set (R, 0, j, r0j + w0 * vj);
        }

      /* Apply Givens transformations R' = G_(n-1)^T ... G_1^T H
         Equation 12.5.4 */

      for (k = 1; k < GSL_MIN(M,N+1); k++)
        {
          double c, s;
          double diag = gsl_matrix_get (R, k - 1, k - 1);
          double offdiag = gsl_matrix_get (R, k, k - 1);

          gsl_linalg_givens (diag, offdiag, &c, &s);
          apply_givens_qr (M, N, Q, R, k - 1, k, c, s);

          gsl_matrix_set (R, k, k - 1, 0.0);    /* exact zero of G^T */
        }

      return GSL_SUCCESS;
    }
}

/*
gsl_linalg_QRPT_rank()
  Estimate rank of triangular matrix from QRPT decomposition

Inputs: QR  - QRPT decomposed matrix
        tol - tolerance for rank determination; if < 0, a default
              value is used: 20 * (M + N) * eps(max(|diag(R)|))

Return: rank estimate
*/

size_t
gsl_linalg_QRPT_rank (const gsl_matrix * QR, const double tol)
{
  const size_t M = QR->size1;
  const size_t N = QR->size2;
  gsl_vector_const_view diag = gsl_matrix_const_diagonal(QR);
  double eps;
  size_t i;
  size_t r = 0;

  if (tol < 0.0)
    {
      double min, max, absmax;
      int ee;

      gsl_vector_minmax(&diag.vector, &min, &max);
      absmax = GSL_MAX(fabs(min), fabs(max));
      /* eps is scaled by the nearest power of two below absmax */
      ee = (int) (log(absmax) / M_LN2); /* can't use log2 since its not ANSI */
      eps = 20.0 * (M + N) * pow(2.0, (double) ee) * GSL_DBL_EPSILON;
    }
  else
    eps = tol;

  /* count number of diagonal elements with |di| > eps */
  for (i = 0; i < GSL_MIN(M, N); ++i)
    {
      double di = gsl_vector_get(&diag.vector, i);

      if (fabs(di) > eps)
        ++r;
    }

  return r;
}

/* Estimate the reciprocal condition number of R from the QRPT
   decomposition.  work must have length 3*N.  */
int
gsl_linalg_QRPT_rcond(const gsl_matrix * QR, double * rcond, gsl_vector * work)
{
  const size_t M = QR->size1;
  const size_t N = QR->size2;

  if (M < N)
    {
      GSL_ERROR ("M must be >= N", GSL_EBADLEN);
    }
  else if (work->size != 3 * N)
    {
      GSL_ERROR ("work vector must have length 3*N", GSL_EBADLEN);
    }
  else
    {
      gsl_matrix_const_view R = gsl_matrix_const_submatrix (QR, 0, 0, N, N);
      int status;

      status = gsl_linalg_tri_upper_rcond(&R.matrix, rcond, work);

      return status;
    }
}
{ "alphanum_fraction": 0.5443609905, "avg_line_length": 26.4627329193, "ext": "c", "hexsha": "df8b062a4975dc05e88a275a64707cdb1fc8af48", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "Brian-ning/HMNE", "max_forks_repo_path": "Source/BaselineMethods/MNE/C++/gsl-2.4/linalg/qrpt.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "Brian-ning/HMNE", "max_issues_repo_path": "Source/BaselineMethods/MNE/C++/gsl-2.4/linalg/qrpt.c", "max_line_length": 149, "max_stars_count": 1, "max_stars_repo_head_hexsha": "857b6ee8866a2950aa5721d575d2d7d0797c4302", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "peterahrens/FillEstimationIPDPS2017", "max_stars_repo_path": "gsl-2.4/linalg/qrpt.c", "max_stars_repo_stars_event_max_datetime": "2021-01-13T05:01:59.000Z", "max_stars_repo_stars_event_min_datetime": "2021-01-13T05:01:59.000Z", "num_tokens": 4997, "size": 17042 }
/*! \file allvars.h * \brief declares global variables. * * This file declares all global variables. Further variables should be added here, and declared as * 'extern'. The actual existence of these variables is provided by the file 'allvars.c'. To produce * 'allvars.c' from 'allvars.h', do the following: * * - Erase all #define statements * - add #include "allvars.h" * - delete all keywords 'extern' * - delete all struct definitions enclosed in {...}, e.g. * "extern struct global_data_all_processes {....} All;" * becomes "struct global_data_all_processes All;" */ #ifndef ALLVARS_H #define ALLVARS_H #include <mpi.h> #include <stdio.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_math.h> #include <gsl/gsl_integration.h> #include <gsl/gsl_spline.h> #include <gsl/gsl_errno.h> #ifdef MPISENDRECV_CHECKSUM #define MPI_Sendrecv MPI_Check_Sendrecv #endif #ifdef MPISENDRECV_SIZELIMIT #define MPI_Sendrecv MPI_Sizelimited_Sendrecv #endif #include "tags.h" #ifdef CHEMISTRY #include "chemistry.h" #endif #include "assert.h" #define GADGETVERSION "3.0" /*!< code version string */ #define GENERATIONS 2 /*!< Number of star particles that may be created per gas particle */ #define TIMEBINS 29 #define TIMEBASE (1<<TIMEBINS) /*!< The simulated timespan is mapped onto the integer interval [0,TIMESPAN], * where TIMESPAN needs to be a power of 2. Note that (1<<28) corresponds * to 2^29 */ #ifndef MULTIPLEDOMAINS #define MULTIPLEDOMAINS 1 #endif #ifndef TOPNODEFACTOR #define TOPNODEFACTOR 2.5 #endif #define NODELISTLENGTH 8 typedef unsigned long long peanokey; #define BITS_PER_DIMENSION 21 /* for Peano-Hilbert order. Note: Maximum is 10 to fit in 32-bit integer ! 
*/ #define PEANOCELLS (((peanokey)1)<<(3*BITS_PER_DIMENSION)) #ifndef GAMMA #define GAMMA (5.0/3) /*!< adiabatic index of simulated gas */ #endif #define GAMMA_MINUS1 (GAMMA-1) #define HYDROGEN_MASSFRAC 0.76 /*!< mass fraction of hydrogen, relevant only for radiative cooling */ #define METAL_YIELD 0.02 /*!< effective metal yield for star formation */ #define MAX_REAL_NUMBER 1e37 #define MIN_REAL_NUMBER 1e-37 #define RNDTABLE 8192 /* ... often used physical constants (cgs units) */ #define GRAVITY 6.672e-8 #define SOLAR_MASS 1.989e33 #define SOLAR_LUM 3.826e33 #define RAD_CONST 7.565e-15 #define AVOGADRO 6.0222e23 #define BOLTZMANN 1.3806e-16 #define GAS_CONST 8.31425e7 #define C 2.9979e10 #define PLANCK 6.6262e-27 #define CM_PER_MPC 3.085678e24 #define PROTONMASS 1.6726e-24 #define ELECTRONMASS 9.10953e-28 #define THOMPSON 6.65245e-25 #define ELECTRONCHARGE 4.8032e-10 #define HUBBLE 3.2407789e-18 /* in h/sec */ #define LYMAN_ALPHA 1215.6e-8 /* 1215.6 Angstroem */ #define LYMAN_ALPHA_HeII 303.8e-8 /* 303.8 Angstroem */ #define OSCILLATOR_STRENGTH 0.41615 #define OSCILLATOR_STRENGTH_HeII 0.41615 #ifdef NAVIERSTOKES #define LOG_LAMBDA 37.8 /* logarithmic Coulomb factor */ #endif #ifdef CHEMISTRY #define T_CMB0 2.728 /* present-day CMB temperature */ #endif #define SEC_PER_MEGAYEAR 3.155e13 #define SEC_PER_YEAR 3.155e7 /*Determines the maximum size of arrays related to the number of CR populations */ #ifndef NUMCRPOP /*!< Number of CR populations pressent in parameter file */ #define NUMCRPOP 1 #endif #ifndef FOF_PRIMARY_LINK_TYPES #define FOF_PRIMARY_LINK_TYPES 2 #endif #ifndef FOF_SECONDARY_LINK_TYPES #define FOF_SECONDARY_LINK_TYPES 0 #endif /* some flags for the field "flag_ic_info" in the file header */ #define FLAG_ZELDOVICH_ICS 1 #define FLAG_SECOND_ORDER_ICS 2 #define FLAG_EVOLVED_ZELDOVICH 3 #define FLAG_EVOLVED_2LPT 4 #define FLAG_NORMALICS_2LPT 5 #ifndef ASMTH /*! 
ASMTH gives the scale of the short-range/long-range force split in units of FFT-mesh cells */ #define ASMTH 1.25 #endif #ifndef RCUT /*! RCUT gives the maximum distance (in units of the scale used for the force split) out to which short-range * forces are evaluated in the short-range tree walk. */ #define RCUT 4.5 #endif #define COND_TIMESTEP_PARAMETER 0.25 #define VISC_TIMESTEP_PARAMETER 0.25 #define MAXLEN_OUTPUTLIST 350 /*!< maxmimum number of entries in output list */ #define DRIFT_TABLE_LENGTH 1000 /*!< length of the lookup table used to hold the drift and kick factors */ #define MAXITER 150 #ifndef LINKLENGTH #define LINKLENGTH 0.2 #endif #ifndef FOF_GROUP_MIN_LEN #define FOF_GROUP_MIN_LEN 32 #endif #define MINRESTFAC 0.05 #ifndef LONGIDS typedef unsigned int MyIDType; #else typedef unsigned long long MyIDType; #endif #ifndef DOUBLEPRECISION /* default is single-precision */ typedef float MyFloat; typedef float MyDouble; #else #if (DOUBLEPRECISION == 2) /* mixed precision */ typedef float MyFloat; typedef double MyDouble; #else /* everything double-precision */ typedef double MyFloat; typedef double MyDouble; #endif #endif #ifdef OUTPUT_IN_DOUBLEPRECISION typedef double MyOutputFloat; #else typedef float MyOutputFloat; #endif #ifdef INPUT_IN_DOUBLEPRECISION typedef double MyInputFloat; #else typedef float MyInputFloat; #endif struct unbind_data { int index; }; #ifdef FIX_PATHSCALE_MPI_STATUS_IGNORE_BUG extern MPI_Status mpistat; #undef MPI_STATUS_IGNORE #define MPI_STATUS_IGNORE &mpistat #endif #ifdef FLTROUNDOFFREDUCTION #define FLT(x) ((MyFloat)(x)) #ifdef SOFTDOUBLEDOUBLE /* this requires a C++ compilation */ #include "dd.h" typedef dd MyLongDouble; #else typedef long double MyLongDouble; #endif #else /* not enabled */ #define FLT(x) (x) typedef MyFloat MyLongDouble; #endif /* end FLTROUNDOFFREDUCTION */ #define CPU_ALL 0 #define CPU_TREEWALK1 1 #define CPU_TREEWALK2 2 #define CPU_TREEWAIT1 3 #define CPU_TREEWAIT2 4 #define CPU_TREESEND 5 #define 
CPU_TREERECV 6 #define CPU_TREEMISC 7 #define CPU_TREEBUILD 8 #define CPU_TREEUPDATE 9 #define CPU_TREEHMAXUPDATE 10 #define CPU_DOMAIN 11 #define CPU_DENSCOMPUTE 12 #define CPU_DENSWAIT 13 #define CPU_DENSCOMM 14 #define CPU_DENSMISC 15 #define CPU_HYDCOMPUTE 16 #define CPU_HYDWAIT 17 #define CPU_HYDCOMM 18 #define CPU_HYDMISC 19 #define CPU_DRIFT 20 #define CPU_TIMELINE 21 #define CPU_POTENTIAL 22 #define CPU_MESH 23 #define CPU_PEANO 24 #define CPU_COOLINGSFR 25 #define CPU_SNAPSHOT 26 #define CPU_FOF 27 #define CPU_BLACKHOLES 28 #define CPU_MISC 29 #define CPU_SMTHCOMPUTE 30 #define CPU_SMTHWAIT 31 #define CPU_SMTHCOMM 32 #define CPU_SMTHMISC 33 #define CPU_HOTNGBS 34 #define CPU_WEIGHTS_HOT 35 #define CPU_ENRICH_HOT 36 #define CPU_WEIGHTS_COLD 37 #define CPU_ENRICH_COLD 38 #define CPU_CSMISC 39 #define CPU_PARTS 40 /* this gives the number of parts above (must be last) */ #define CPU_STRING_LEN 120 #ifndef TWODIMS #define NUMDIMS 3 /*!< For 3D-normalized kernel */ #define KERNEL_COEFF_1 2.546479089470 /*!< Coefficients for SPH spline kernel and its derivative */ #define KERNEL_COEFF_2 15.278874536822 #define KERNEL_COEFF_3 45.836623610466 #define KERNEL_COEFF_4 30.557749073644 #define KERNEL_COEFF_5 5.092958178941 #define KERNEL_COEFF_6 (-15.278874536822) #define NORM_COEFF 4.188790204786 /*!< Coefficient for kernel normalization. Note: 4.0/3 * PI = 4.188790204786 */ #else #define NUMDIMS 2 /*!< For 2D-normalized kernel */ #define KERNEL_COEFF_1 (5.0/7*2.546479089470) /*!< Coefficients for SPH spline kernel and its derivative */ #define KERNEL_COEFF_2 (5.0/7*15.278874536822) #define KERNEL_COEFF_3 (5.0/7*45.836623610466) #define KERNEL_COEFF_4 (5.0/7*30.557749073644) #define KERNEL_COEFF_5 (5.0/7*5.092958178941) #define KERNEL_COEFF_6 (5.0/7*(-15.278874536822)) #define NORM_COEFF M_PI /*!< Coefficient for kernel normalization. 
*/ #endif #if defined (BLACK_HOLES) || defined(CS_MODEL) || defined(RADTRANSFER) || defined(SNIA_HEATING) #define PPP P #else #define PPP SphP #endif #define DMAX(a,b) (dmax1=(a),dmax2=(b),(dmax1>dmax2)?dmax1:dmax2) #define DMIN(a,b) (dmin1=(a),dmin2=(b),(dmin1<dmin2)?dmin1:dmin2) #define IMAX(a,b) (imax1=(a),imax2=(b),(imax1>imax2)?imax1:imax2) #define IMIN(a,b) (imin1=(a),imin2=(b),(imin1<imin2)?imin1:imin2) #ifdef PERIODIC extern MyDouble boxSize, boxHalf; #ifdef LONG_X extern MyDouble boxSize_X, boxHalf_X; #else #define boxSize_X boxSize #define boxHalf_X boxHalf #endif #ifdef LONG_Y extern MyDouble boxSize_Y, boxHalf_Y; #else #define boxSize_Y boxSize #define boxHalf_Y boxHalf #endif #ifdef LONG_Z extern MyDouble boxSize_Z, boxHalf_Z; #else #define boxSize_Z boxSize #define boxHalf_Z boxHalf #endif #endif #ifdef PERIODIC #define NGB_PERIODIC_LONG_X(x) (xtmp=fabs(x),(xtmp>boxHalf_X)?(boxSize_X-xtmp):xtmp) #define NGB_PERIODIC_LONG_Y(x) (xtmp=fabs(x),(xtmp>boxHalf_Y)?(boxSize_Y-xtmp):xtmp) #define NGB_PERIODIC_LONG_Z(x) (xtmp=fabs(x),(xtmp>boxHalf_Z)?(boxSize_Z-xtmp):xtmp) #else #define NGB_PERIODIC_LONG_X(x) fabs(x) #define NGB_PERIODIC_LONG_Y(x) fabs(x) #define NGB_PERIODIC_LONG_Z(x) fabs(x) #endif #define FACT1 0.366025403785 /* FACT1 = 0.5 * (sqrt(3)-1) */ /*********************************************************/ /* Global variables */ /*********************************************************/ extern int FirstActiveParticle; extern int *NextActiveParticle; extern unsigned char *ProcessedFlag; extern int TimeBinCount[TIMEBINS]; extern int TimeBinCountSph[TIMEBINS]; extern int TimeBinActive[TIMEBINS]; extern int FirstInTimeBin[TIMEBINS]; extern int LastInTimeBin[TIMEBINS]; extern int *NextInTimeBin; extern int *PrevInTimeBin; #ifdef SFR extern double TimeBinSfr[TIMEBINS]; #endif #ifdef BLACK_HOLES extern double TimeBin_BH_mass[TIMEBINS]; extern double TimeBin_BH_dynamicalmass[TIMEBINS]; extern double TimeBin_BH_Mdot[TIMEBINS]; extern double 
TimeBin_BH_Medd[TIMEBINS]; #endif extern int ThisTask; /*!< the number of the local processor */ extern int NTask; /*!< number of processors */ extern int PTask; /*!< note: NTask = 2^PTask */ #ifdef INVARIANCETEST extern int World_ThisTask; extern int World_NTask; extern int Color; extern MPI_Comm MPI_CommLocal; #ifndef DO_NOT_REDEFINE_MPI_COMM_WORLD #undef MPI_COMM_WORLD #define MPI_COMM_WORLD MPI_CommLocal #endif #endif extern double CPUThisRun; /*!< Sums CPU time of current process */ extern int NumForceUpdate; /*!< number of active particles on local processor in current timestep */ extern long long GlobNumForceUpdate; extern int NumSphUpdate; /*!< number of active SPH particles on local processor in current timestep */ extern int MaxTopNodes; /*!< Maximum number of nodes in the top-level tree used for domain decomposition */ extern int RestartFlag; /*!< taken from command line used to start code. 0 is normal start-up from initial conditions, 1 is resuming a run from a set of restart files, while 2 marks a restart from a snapshot file. 
*/ extern int RestartSnapNum; extern int *Exportflag; /*!< Buffer used for flagging whether a particle needs to be exported to another process */ extern int *Exportnodecount; extern int *Exportindex; extern int *Send_offset, *Send_count, *Recv_count, *Recv_offset, *Sendcount_matrix; extern size_t AllocatedBytes; extern size_t FreeBytes; extern double CPU_Step[CPU_PARTS]; extern char CPU_Symbol[CPU_PARTS]; extern char CPU_SymbolImbalance[CPU_PARTS]; extern char CPU_String[CPU_STRING_LEN + 1]; extern double WallclockTime; /*!< This holds the last wallclock time measurement for timings measurements */ extern int Flag_FullStep; /*!< Flag used to signal that the current step involves all particles */ extern int TreeReconstructFlag; extern int GlobFlag; extern char DumpFlag; extern int NumPart; /*!< number of particles on the LOCAL processor */ extern int N_gas; /*!< number of gas particles on the LOCAL processor */ extern long long Ntype[6]; /*!< total number of particles of each type */ extern int NtypeLocal[6]; /*!< local number of particles of each type */ extern gsl_rng *random_generator; /*!< the random number generator used */ #ifdef SFR extern int Stars_converted; /*!< current number of star particles in gas particle block */ #endif extern double TimeOfLastTreeConstruction; /*!< holds what it says */ extern int *Ngblist; /*!< Buffer to hold indices of neighbours retrieved by the neighbour search routines */ extern double *R2ngblist; extern double DomainCorner[3], DomainCenter[3], DomainLen, DomainFac; extern int *DomainStartList, *DomainEndList; extern double *DomainWork; extern int *DomainCount; extern int *DomainCountSph; extern int *DomainTask; extern int *DomainNodeIndex; extern int *DomainList, DomainNumChanged; extern peanokey *Key, *KeySorted; extern struct topnode_data { peanokey Size; peanokey StartKey; long long Count; MyFloat GravCost; int Daughter; int Pstart; int Blocks; int Leaf; } *TopNodes; extern int NTopnodes, NTopleaves; extern double 
RndTable[RNDTABLE]; #ifdef SUBFIND extern int GrNr; extern int NumPartGroup; #endif /* variables for input/output , usually only used on process 0 */ extern char ParameterFile[100]; /*!< file name of parameterfile used for starting the simulation */ extern FILE *FdInfo, /*!< file handle for info.txt log-file. */ *FdEnergy, /*!< file handle for energy.txt log-file. */ *FdTimings, /*!< file handle for timings.txt log-file. */ *FdBalance, /*!< file handle for balance.txt log-file. */ *FdCPU; /*!< file handle for cpu.txt log-file. */ #ifdef SFR extern FILE *FdSfr; /*!< file handle for sfr.txt log-file. */ #endif #ifdef RADTRANSFER extern FILE *FdEddington; /*!< file handle for eddington.txt log-file. */ extern FILE *FdRadtransfer; /*!< file handle for radtransfer.txt log-file. */ #endif #ifdef DISTORTIONTENSORPS #ifdef PMGRID extern FILE *FdTidaltensor; /*!< file handle for tidaltensor.txt log-file. */ #endif extern FILE *FdCaustics; /*!< file handle for Caustics.txt log-file. */ #endif #ifdef BLACK_HOLES extern FILE *FdBlackHoles; /*!< file handle for blackholes.txt log-file. */ extern FILE *FdBlackHolesDetails; #endif #ifdef FORCETEST extern FILE *FdForceTest; /*!< file handle for forcetest.txt log-file. */ #endif #ifdef DARKENERGY extern FILE *FdDE; /*!< file handle for darkenergy.txt log-file. */ #endif #ifdef XXLINFO extern FILE *FdXXL; /*!< file handle for xxl.txt log-file. */ #ifdef MAGNETIC extern double MeanB; #ifdef TRACEDIVB extern double MaxDivB; #endif #endif #ifdef TIME_DEP_ART_VISC extern double MeanAlpha; #endif #endif /*! table for the cosmological drift factors */ extern double DriftTable[DRIFT_TABLE_LENGTH]; /*! table for the cosmological kick factor for gravitational forces */ extern double GravKickTable[DRIFT_TABLE_LENGTH]; /*! table for the cosmological kick factor for hydrodynmical forces */ extern double HydroKickTable[DRIFT_TABLE_LENGTH]; extern void *CommBuffer; /*!< points to communication buffer, which is used at a few places */ /*! 
This structure contains data which is the SAME for all tasks (mostly code parameters read from the * parameter file). Holding this data in a structure is convenient for writing/reading the restart file, and * it allows the introduction of new global variables in a simple way. The only thing to do is to introduce * them into this structure. */ extern struct global_data_all_processes { long long TotNumPart; /*!< total particle numbers (global value) */ long long TotN_gas; /*!< total gas particle number (global value) */ #ifdef BLACK_HOLES int TotBHs; #endif int MaxPart; /*!< This gives the maxmimum number of particles that can be stored on one processor. */ int MaxPartSph; /*!< This gives the maxmimum number of SPH particles that can be stored on one processor. */ int ICFormat; /*!< selects different versions of IC file-format */ int SnapFormat; /*!< selects different versions of snapshot file-formats */ int DoDynamicUpdate; int NumFilesPerSnapshot; /*!< number of files in multi-file snapshot dumps */ int NumFilesWrittenInParallel; /*!< maximum number of files that may be written simultaneously when writing/reading restart-files, or when writing snapshot files */ int BufferSize; /*!< size of communication buffer in MB */ int BunchSize; /*!< number of particles fitting into the buffer in the parallel tree algorithm */ double PartAllocFactor; /*!< in order to maintain work-load balance, the particle load will usually NOT be balanced. Each processor allocates memory for PartAllocFactor times the average number of particles to allow for that */ double TreeAllocFactor; /*!< Each processor allocates a number of nodes which is TreeAllocFactor times the maximum(!) number of particles. Note: A typical local tree for N particles needs usually about ~0.65*N nodes. */ double TopNodeAllocFactor; /*!< Each processor allocates a number of nodes which is TreeAllocFactor times the maximum(!) number of particles. 
Note: A typical local tree for N particles needs usually about ~0.65*N nodes. */ #ifdef SCALARFIELD double ScalarBeta; double ScalarScreeningLength; #endif /* some SPH parameters */ int DesNumNgb; /*!< Desired number of SPH neighbours */ #ifdef SUBFIND int DesLinkNgb; double ErrTolThetaSubfind; #endif double MaxNumNgbDeviation; /*!< Maximum allowed deviation neighbour number */ #ifdef START_WITH_EXTRA_NGBDEV double MaxNumNgbDeviationStart; /*!< Maximum allowed deviation neighbour number to start with*/ #endif double ArtBulkViscConst; /*!< Sets the parameter \f$\alpha\f$ of the artificial viscosity */ double InitGasTemp; /*!< may be used to set the temperature in the IC's */ double InitGasU; /*!< the same, but converted to thermal energy per unit mass */ double MinGasTemp; /*!< may be used to set a floor for the gas temperature */ double MinEgySpec; /*!< the minimum allowed temperature expressed as energy per unit mass */ /* some force counters */ long long TotNumOfForces; /*!< counts total number of force computations */ long long NumForcesSinceLastDomainDecomp; /*!< count particle updates since last domain decomposition */ /* some variable for dynamic work-load adjustment based on CPU measurements */ double Cadj_Cost; double Cadj_Cpu; /* system of units */ double UnitTime_in_s, /*!< factor to convert internal time unit to seconds/h */ UnitMass_in_g, /*!< factor to convert internal mass unit to grams/h */ UnitVelocity_in_cm_per_s, /*!< factor to convert intqernal velocity unit to cm/sec */ UnitLength_in_cm, /*!< factor to convert internal length unit to cm/h */ UnitPressure_in_cgs, /*!< factor to convert internal pressure unit to cgs units (little 'h' still around!) 
*/ UnitDensity_in_cgs, /*!< factor to convert internal length unit to g/cm^3*h^2 */ UnitCoolingRate_in_cgs, /*!< factor to convert internal cooling rate to cgs units */ UnitEnergy_in_cgs, /*!< factor to convert internal energy to cgs units */ UnitTime_in_Megayears, /*!< factor to convert internal time to megayears/h */ GravityConstantInternal, /*!< If set to zero in the parameterfile, the internal value of the gravitational constant is set to the Newtonian value based on the system of units specified. Otherwise the value provided is taken as internal gravity constant G. */ G; /*!< Gravity-constant in internal units */ #ifdef ANNIHILATION_RADIATION double UnitDensity_in_Gev_per_cm3; /*!< factor to convert internal density unit to GeV/c^2 / cm^3 */ #endif /* Cosmology */ double Hubble; /*!< Hubble-constant in internal units */ double Omega0, /*!< matter density in units of the critical density (at z=0) */ OmegaLambda, /*!< vaccum energy density relative to crictical density (at z=0) */ OmegaBaryon, /*!< baryon density in units of the critical density (at z=0) */ HubbleParam; /*!< little `h', i.e. Hubble constant in units of 100 km/s/Mpc. 
Only needed to get absolute * physical values for cooling physics */ double BoxSize; /*!< Boxsize in case periodic boundary conditions are used */ /* Code options */ int ComovingIntegrationOn; /*!< flags that comoving integration is enabled */ int PeriodicBoundariesOn; /*!< flags that periodic boundaries are enabled */ int ResubmitOn; /*!< flags that automatic resubmission of job to queue system is enabled */ int TypeOfOpeningCriterion; /*!< determines tree cell-opening criterion: 0 for Barnes-Hut, 1 for relative criterion */ int TypeOfTimestepCriterion; /*!< gives type of timestep criterion (only 0 supported right now - unlike gadget-1.1) */ int OutputListOn; /*!< flags that output times are listed in a specified file */ int CoolingOn; /*!< flags that cooling is enabled */ int StarformationOn; /*!< flags that star formation is enabled */ /* parameters determining output frequency */ int SnapshotFileCount; /*!< number of snapshot that is written next */ double TimeBetSnapshot, /*!< simulation time interval between snapshot files */ TimeOfFirstSnapshot, /*!< simulation time of first snapshot files */ CpuTimeBetRestartFile, /*!< cpu-time between regularly generated restart files */ TimeLastRestartFile, /*!< cpu-time when last restart-file was written */ TimeBetStatistics, /*!< simulation time interval between computations of energy statistics */ TimeLastStatistics; /*!< simulation time when the energy statistics was computed the last time */ int NumCurrentTiStep; /*!< counts the number of system steps taken up to this point */ /* Current time of the simulation, global step, and end of simulation */ double Time, /*!< current time of the simulation */ TimeBegin, /*!< time of initial conditions of the simulation */ TimeStep, /*!< difference between current times of previous and current timestep */ TimeMax; /*!< marks the point of time until the simulation is to be evolved */ /* variables for organizing discrete timeline */ double Timebase_interval; /*!< factor to 
convert from floating point time interval to integer timeline */ int Ti_Current; /*!< current time on integer timeline */ int Ti_nextoutput; /*!< next output time on integer timeline */ #ifdef PMGRID int PM_Ti_endstep, PM_Ti_begstep; double Asmth[2], Rcut[2]; double Corner[2][3], UpperCorner[2][3], Xmintot[2][3], Xmaxtot[2][3]; double TotalMeshSize[2]; #endif #ifdef CHEMISTRY double Epsilon; #endif int Ti_nextlineofsight; #ifdef OUTPUTLINEOFSIGHT double TimeFirstLineOfSight; #endif /* variables that keep track of cumulative CPU consumption */ double TimeLimitCPU; double CPU_Sum[CPU_PARTS]; /*!< sums wallclock time/CPU consumption in whole run */ /* tree code opening criterion */ double ErrTolTheta; /*!< BH tree opening angle */ double ErrTolForceAcc; /*!< parameter for relative opening criterion in tree walk */ /* adjusts accuracy of time-integration */ double ErrTolIntAccuracy; /*!< accuracy tolerance parameter \f$ \eta \f$ for timestep criterion. The timesteps is \f$ \Delta t = \sqrt{\frac{2 \eta eps}{a}} \f$ */ double MinSizeTimestep, /*!< minimum allowed timestep. Normally, the simulation terminates if the timestep determined by the timestep criteria falls below this limit. */ MaxSizeTimestep; /*!< maximum allowed timestep */ double MaxRMSDisplacementFac; /*!< this determines a global timestep criterion for cosmological simulations in comoving coordinates. To this end, the code computes the rms velocity of all particles, and limits the timestep such that the rms displacement is a fraction of the mean particle separation (determined from the particle mass and the cosmological parameters). This parameter specifies this fraction. 
*/ double CourantFac; /*!< SPH-Courant factor */ /* frequency of tree reconstruction/domain decomposition */ double TreeDomainUpdateFrequency; /*!< controls frequency of domain decompositions */ /* gravitational and hydrodynamical softening lengths (given in terms of an `equivalent' Plummer softening * length) * * five groups of particles are supported 0=gas,1=halo,2=disk,3=bulge,4=stars */ double MinGasHsmlFractional, /*!< minimum allowed SPH smoothing length in units of SPH gravitational softening length */ MinGasHsml; /*!< minimum allowed SPH smoothing length */ double SofteningGas, /*!< for type 0 */ SofteningHalo, /*!< for type 1 */ SofteningDisk, /*!< for type 2 */ SofteningBulge, /*!< for type 3 */ SofteningStars, /*!< for type 4 */ SofteningBndry; /*!< for type 5 */ double SofteningGasMaxPhys, /*!< for type 0 */ SofteningHaloMaxPhys, /*!< for type 1 */ SofteningDiskMaxPhys, /*!< for type 2 */ SofteningBulgeMaxPhys, /*!< for type 3 */ SofteningStarsMaxPhys, /*!< for type 4 */ SofteningBndryMaxPhys; /*!< for type 5 */ double SofteningTable[6]; /*!< current (comoving) gravitational softening lengths for each particle type */ double ForceSoftening[6]; /*!< the same, but multiplied by a factor 2.8 - at that scale the force is Newtonian */ /*! If particle masses are all equal for one type, the corresponding entry in MassTable is set to this * value, * allowing the size of the snapshot files to be reduced */ double MassTable[6]; /* some filenames */ char InitCondFile[100], OutputDir[100], SnapshotFileBase[100], EnergyFile[100], CpuFile[100], InfoFile[100], TimingsFile[100], RestartFile[100], ResubmitCommand[100], OutputListFilename[100]; /*! 
table with desired output times */ double OutputListTimes[MAXLEN_OUTPUTLIST]; char OutputListFlag[MAXLEN_OUTPUTLIST]; int OutputListLength; /*!< number of times stored in table of desired output times */ #if defined(ADAPTIVE_GRAVSOFT_FORGAS) && !defined(ADAPTIVE_GRAVSOFT_FORGAS_HSML) double ReferenceGasMass; #endif #ifdef VORONOI_MESHRELAX double MeanMass; double MeanPressure; #endif #ifdef RADTRANSFER double IonizingLumPerSolarMass; double IonizingLumPerSFR; int Radiation_Ti_begstep; int Radiation_Ti_endstep; #endif #if defined(SIM_ADAPTIVE_SOFT) || defined(REINIT_AT_TURNAROUND) double CurrentTurnaroundRadius; double SIM_epsilon; #endif #ifdef ANNIHILATION_RADIATION /* present day velocity dispersion of DM particle in cm/s (e.g. Neutralino = 0.03 cm/s) */ double DM_velocity_dispersion; #endif #ifdef SFR /* star formation and feedback sector */ double CritOverDensity; double CritPhysDensity; double OverDensThresh; double PhysDensThresh; double EgySpecSN; double FactorSN; double EgySpecCold; double FactorEVP; double FeedbackEnergy; double TempSupernova; double TempClouds; double MaxSfrTimescale; double WindEfficiency; double WindEnergyFraction; double WindFreeTravelLength; double WindFreeTravelDensFac; double FactorForSofterEQS; #endif #ifdef CS_MODEL double FactorSFR; double DecouplingParam; double MinTlifeSNI; double MaxTlifeSNI; double TlifeSNII; int Raiteri_TlifeSNII; double RateSNI; double SN_Energy_cgs; double Tcrit_Phase; double DensFrac_Phase; double SN_Energy_frac_cold; double MaxHotHsmlParam; double InitialHotHsmlFactor; double DensityTailThreshold; double MaxNumHotNgbDeviation; /*!< Maximum allowed deviation HOT neighbour number */ #endif #ifdef DARKENERGY double DarkEnergyParam; /*!< fixed w for equation of state */ #ifdef TIMEDEPDE char DarkEnergyFile[100]; /*!< tabelized w for equation of state */ #ifdef TIMEDEPGRAV double Gini; #endif #endif #endif #ifdef RESCALEVINI double VelIniScale; /*!< Scale the initial velocities by this amount */ #endif #if 
defined(SNIA_HEATING) double SnIaHeatingRate; #endif #ifdef TIME_DEP_ART_VISC double ViscSource0; /*!< Given sourceterm in viscosity evolution */ double DecayLength; /*!< Number of h for the viscosity decay */ double ViscSource; /*!< Reduced sourceterm in viscosity evolution */ double DecayTime; /*!< Calculated decaytimescale */ double AlphaMin; /*!< Minimum of allowed viscosity parameter */ #endif #ifdef CONDUCTION double ConductionCoeff; /*!< Thermal Conductivity */ #ifdef CONDUCTION_SATURATION double ElectronFreePathFactor; /*!< Factor to get electron mean free path */ #endif int Conduction_Ti_begstep, Conduction_Ti_endstep; double MaxSizeConductionStep; #endif #ifdef MAGNETIC #ifdef BINISET double BiniX, BiniY, BiniZ; /*!< Initial values for B */ #endif #ifdef BSMOOTH int BSmoothInt; double BSmoothFrac; int MainTimestepCounts; #ifdef SETMAINTIMESTEPCOUNT int MainTimestepCountIni; #endif #endif #ifdef MAGNETIC_DISSIPATION double ArtMagDispConst; /*!< Sets the parameter \f$\alpha\f$ of the artificial magnetic disipation */ #ifdef TIME_DEP_MAGN_DISP double ArtMagDispMin; double ArtMagDispSource; double ArtMagDispTime; #endif #endif #ifdef DIVBCLEANING_DEDNER double DivBcleanParabolicSigma; double DivBcleanHyperbolicSigma; #endif #ifdef HEALPIX //change this to read in the Parameterfile int Nside; #define NSIDE2NPIX(nside) (12*nside*nside) float *healpixmap; #endif #ifdef MAGNETIC_DIFFUSION double MagneticEta; #endif #endif #ifdef BLACK_HOLES double TimeNextBlackHoleCheck; double TimeBetBlackHoleSearch; double BlackHoleAccretionFactor; /*!< Fraction of BH bondi accretion rate */ double BlackHoleFeedbackFactor; /*!< Fraction of the black luminosity feed into thermal feedback */ double SeedBlackHoleMass; /*!< Seed black hole mass */ double MinFoFMassForNewSeed; /*!< Halo mass required before new seed is put in */ double BlackHoleNgbFactor; /*!< Factor by which the normal SPH neighbour should be increased/decreased */ double BlackHoleMaxAccretionRadius; double 
BlackHoleEddingtonFactor; /*! Factor above Eddington */ #ifdef FOF double massDMpart; #endif #ifdef MODIFIEDBONDI double BlackHoleRefDensity; double BlackHoleRefSoundspeed; #endif #endif #ifdef COSMIC_RAYS double CR_Alpha[NUMCRPOP]; /*!< Cosmic ray spectral index [2..3] */ double CR_SNEff; /*!< SN injection efficiency [0..1] */ double CR_SNAlpha; /*!< SN injection spectral index [2..3] */ int bDebugFlag; /*!< enables debug outputs after triggered */ #if defined(CR_DIFFUSION) double CR_DiffusionCoeff; /*!< (temporary) fixed value for CR diffusivity */ double CR_DiffusionDensScaling; /*!< grade of density dependence of diffusivity */ double CR_DiffusionDensZero; /*!< Reference point density for diffusivity */ double CR_DiffusionEntropyScaling; /*!< grade of specific energy dependence of diffusivity */ double CR_DiffusionEntropyZero; /*!< Reference Entropic function for diffusivity */ double CR_DiffusionMaxSizeTimestep; int CR_Diffusion_Ti_begstep, CR_Diffusion_Ti_endstep; #endif /* CR_DIFFUSION */ #if defined(CR_SHOCK) #if (CR_SHOCK == 1) double CR_ShockAlpha; /*!< spectral index to be used in shock injection */ #else double CR_ShockCutoff; /*!< Cutoff factor x_inj for CR accel */ #endif double CR_ShockEfficiency; /*!< energy fraction of shock energy fed into CR */ #endif /* CR_SHOCK */ #ifdef FIX_QINJ double Shock_Fix_Qinj; /*!< inject only CRps with threshold cutoff Shock_Fix_Qinj */ #endif #ifdef CR_BUBBLES double CR_AGNEff; /*!< AGN injection efficiency [0..1] */ #endif #endif /* COSMIC_RAYS */ #ifdef MACHNUM double Shock_Length; /*!< length scale on which the shock is smoothed out */ double Shock_DeltaDecayTimeMax; /*!< maximum time interval (Dloga) for which the Mach number is kept at its maximum */ #endif #ifdef REIONIZATION int not_yet_reionized; /*!< flag that makes sure that there is only one reionization */ #endif #ifdef BUBBLES double BubbleDistance; double BubbleRadius; double BubbleTimeInterval; double BubbleEnergy; double TimeOfNextBubble; double 
FirstBubbleRedshift; #ifdef FOF int BiggestGroupLen; float BiggestGroupCM[3]; double BiggestGroupMass; #endif #endif #ifdef BH_BUBBLES double BubbleDistance; double BubbleRadius; double BubbleEnergy; double BlackHoleRadioTriggeringFactor; double DefaultICMDensity; double RadioFeedbackFactor; #ifdef UNIFIED_FEEDBACK double RadioThreshold; #endif #endif #if defined(MULTI_BUBBLES) && defined(FOF) #ifndef BLACK_HOLES double MinFoFMassForNewSeed; /*!< Halo mass required before new seed is put in */ double massDMpart; #endif double BubbleDistance; double BubbleRadius; double BubbleTimeInterval; double BubbleEnergy; double TimeOfNextBubble; double ClusterMass200; double FirstBubbleRedshift; #endif #ifdef NAVIERSTOKES double NavierStokes_ShearViscosity; double FractionSpitzerViscosity; double ShearViscosityTemperature; #endif #ifdef NAVIERSTOKES_BULK double NavierStokes_BulkViscosity; #endif #ifdef VISCOSITY_SATURATION double IonMeanFreePath; #endif #ifdef EOS_DEGENERATE char EosTable[100]; #endif } All; /*! This structure holds all the information that is * stored for each particle of the simulation. 
*/ extern struct particle_data { MyDouble Pos[3]; /*!< particle position at its current time */ MyDouble Vel[3]; /*!< particle velocity at its current time */ MyDouble Mass; /*!< particle mass */ MyIDType ID; union { MyFloat GravAccel[3]; /*!< particle acceleration due to gravity */ MyLongDouble dGravAccel[3]; } g; #ifdef PMGRID MyFloat GravPM[3]; /*!< particle acceleration due to long-range PM gravity force */ #endif #ifdef FORCETEST MyFloat GravAccelDirect[3]; /*!< particle acceleration calculated by direct summation */ #endif #if defined(EVALPOTENTIAL) || defined(COMPUTE_POTENTIAL_ENERGY) || defined(OUTPUTPOTENTIAL) union { MyFloat Potential; /*!< gravitational potential */ MyLongDouble dPotential; } p; #endif #ifdef DISTORTIONTENSORPS MyLongDouble distortion_tensorps[6][6]; /*!< Phase Space Distortion tensor */ MyLongDouble tidal_tensorps[3][3]; /*!< tidal tensor (=second derivatives of grav. potential) */ MyLongDouble V_matrix[3][3]; /*!< initial orientation of CDM sheet the particle is embedded in */ MyDouble init_density; /*!< initial stream density */ MyFloat caustic_counter; /*!< caustic counter */ MyDouble last_stream_determinant; /*!< last stream density determinant, needed to identify caustics */ #ifdef REINIT_AT_TURNAROUND int turnaround_flag; /*!< mark when a particle turned around */ #endif #ifdef ANNIHILATION_RADIATION MyDouble annihilation; /*!< integrated annihilation rate */ MyDouble rho_normed_cutoff_current; /*!< current and last normed_cutoff density in rho_max/rho_init * sqrt(sigma) */ MyDouble rho_normed_cutoff_last; MyDouble stream_density; /*!< physical stream density that is going to be integrated (in terms of rho_crit) */ MyFloat analytic_caustics; /*!< number of caustics that were integrated analyticall, i.e. 
where the physical caustic density was higher than the numerical GDE density */ #endif #ifdef OUTPUT_LAST_CAUSTIC MyDouble lc_Time; /*!< time of caustic passage */ MyDouble lc_Pos[3]; /*!< position of caustic */ MyDouble lc_Vel[3]; /*!< particle velocity when passing through caustic */ MyDouble lc_rho_normed_cutoff; /*!< normed_cutoff density at caustic */ MyDouble lc_Dir_x[3]; /*!< principal axis frame of smear out */ MyDouble lc_Dir_y[3]; MyDouble lc_Dir_z[3]; MyDouble lc_smear_x; /*!< smear out length */ MyDouble lc_smear_y; MyDouble lc_smear_z; #endif #ifdef PMGRID MyLongDouble tidal_tensorpsPM[3][3]; /*!< for TreePM simulations, long range tidal field */ #endif #endif MyFloat OldAcc; /*!< magnitude of old gravitational force. Used in relative opening criterion */ #if defined(EVALPOTENTIAL) && defined(PMGRID) MyFloat PM_Potential; #endif #ifdef STELLARAGE MyFloat StellarAge; /*!< formation time of star particle */ #endif #ifdef METALS MyFloat Metallicity; /*!< metallicity of gas or star particle */ #endif /* closes METALS */ #if defined (BLACK_HOLES) || defined(CS_MODEL) || defined(RADTRANSFER) || defined(SNIA_HEATING) MyFloat Hsml; union { MyFloat NumNgb; MyLongDouble dNumNgb; } n; #if defined(RADTRANSFER) || defined(SNIA_HEATING) MyFloat DensAroundStar; #endif #endif #ifdef BLACK_HOLES int SwallowID; #ifdef BH_COUNTPROGS int BH_CountProgs; #endif MyFloat BH_Mass; MyFloat BH_Mdot; #ifdef BH_BUBBLES MyFloat BH_Mass_bubbles; MyFloat BH_Mass_ini; #ifdef UNIFIED_FEEDBACK MyFloat BH_Mass_radio; #endif #endif union { MyFloat BH_Density; MyLongDouble dBH_Density; } b1; union { MyFloat BH_Entropy; MyLongDouble dBH_Entropy; } b2; union { MyFloat BH_SurroundingGasVel[3]; MyLongDouble dBH_SurroundingGasVel[3]; } b3; union { MyFloat BH_accreted_Mass; MyLongDouble dBH_accreted_Mass; } b4; union { MyFloat BH_accreted_BHMass; MyLongDouble dBH_accreted_BHMass; } b5; union { MyFloat BH_accreted_momentum[3]; MyLongDouble dBH_accreted_momentum[3]; } b6; #ifdef BH_BUBBLES union { 
MyFloat BH_accreted_BHMass_bubbles; MyLongDouble dBH_accreted_BHMass_bubbles; } b7; #ifdef UNIFIED_FEEDBACK union { MyFloat BH_accreted_BHMass_radio; MyLongDouble dBH_accreted_BHMass_radio; } b8; #endif #endif #ifdef REPOSITION_ON_POTMIN MyFloat BH_MinPotPos[3]; MyFloat BH_MinPot; #endif #ifdef BH_KINETICFEEDBACK MyFloat ActiveTime; MyFloat ActiveEnergy; #endif #endif #ifdef SUBFIND unsigned int GrNr; int SubNr; int DM_NumNgb; int targettask, origintask, submark, origindex; MyFloat DM_Hsml; union { MyFloat DM_Density; MyFloat DM_Potential; } u; union { MyFloat DM_VelDisp; MyFloat DM_BindingEnergy; } v; #ifdef DENSITY_SPLIT_BY_TYPE union { MyFloat int_energy; MyFloat density_sum; } w; #endif #ifdef SUBFIND_RESHUFFLE_CATALOGUE_WITH_VORONOI MyFloat DM_Hsml_V; MyFloat DM_Density_V; #endif #ifdef SAVE_HSML_IN_IC_ORDER MyIDType ID_ic_order; #endif #ifdef SUBFIND_ALTERNATIVE_COLLECTIVE peanokey Key; #endif #endif #if defined(ORDER_SNAPSHOTS_BY_ID) && !defined(SUBFIND) int GrNr; int SubNr; #endif #ifdef SHELL_CODE MyDouble radius; MyDouble enclosed_mass; MyDouble dMdr; #endif #ifdef CS_MODEL MyFloat Zm[12]; MyFloat ZmReservoir[12]; #ifdef CS_FEEDBACK MyFloat EnergySN; MyFloat EnergySNCold; #endif #endif float GravCost; /*!< weight factor used for balancing the work-load */ int Ti_begstep; /*!< marks start of current timestep of particle on integer timeline */ int Ti_current; /*!< current time of the particle */ short int Type; /*!< flags particle type. 0=gas, 1=halo, 2=disk, 3=bulge, 4=stars, 5=bndry */ short int TimeBin; #ifdef WAKEUP int dt_step; #endif } *P, /*!< holds particle data on local processor */ *DomainPartBuf; /*!< buffer for particle data used in domain decomposition */ /* the following struture holds data that is stored for each SPH particle in addition to the collisionless * variables. 
*/ extern struct sph_particle_data { MyDouble Entropy; /*!< current value of entropy (actually entropic function) of particle */ MyFloat Pressure; /*!< current pressure */ MyFloat VelPred[3]; /*!< predicted SPH particle velocity at the current time */ #ifdef ALTERNATIVE_VISCOUS_TIMESTEP MyFloat MinViscousDt; #else MyFloat MaxSignalVel; /*!< maximum signal velocity */ #endif #ifdef VOLUME_CORRECTION MyFloat DensityOld; MyFloat DensityStd; #endif #ifdef VORONOI MyFloat MaxDelaunayRadius; MyFloat Volume; MyFloat Center[3]; #endif union { MyFloat Density; /*!< current baryonic mass density of particle */ MyLongDouble dDensity; } d; union { MyFloat DtEntropy; /*!< rate of change of entropy */ MyLongDouble dDtEntropy; } e; union { MyFloat HydroAccel[3]; /*!< acceleration due to hydrodynamical force */ MyLongDouble dHydroAccel[3]; } a; union { MyFloat DhsmlDensityFactor; /*!< correction factor needed in entropy formulation of SPH */ MyLongDouble dDhsmlDensityFactor; } h; union { MyFloat DivVel; /*!< local velocity divergence */ MyLongDouble dDivVel; } v; #ifndef NAVIERSTOKES union { MyFloat CurlVel; /*!< local velocity curl */ MyFloat Rot[3]; /*!< local velocity curl */ MyLongDouble dRot[3]; } r; #else union { MyFloat DV[3][3]; struct { MyFloat DivVel; MyFloat CurlVel; MyFloat StressDiag[3]; MyFloat StressOffDiag[3]; #ifdef NAVIERSTOKES_BULK MyFloat StressBulk; #endif } s; } u; #endif #if !(defined(BLACK_HOLES) || defined(CS_MODEL) || defined(RADTRANSFER) || defined(SNIA_HEATING)) MyFloat Hsml; /*!< current smoothing length */ union { MyFloat NumNgb; MyLongDouble dNumNgb; } n; #endif #if defined(BH_THERMALFEEDBACK) || defined(BH_KINETICFEEDBACK) union { MyFloat Injected_BH_Energy; MyLongDouble dInjected_BH_Energy; } i; #endif #ifdef COOLING MyFloat Ne; /*!< electron fraction, expressed as local electron number density normalized to the hydrogen number density. Gives indirectly ionization state and mean molecular weight. 
*/ #endif #ifdef SFR MyFloat Sfr; #endif #ifdef WINDS MyFloat DelayTime; /*!< remaining maximum decoupling time of wind particle */ #endif #ifdef MAGNETIC MyFloat BPred[3]; #ifdef EULERPOTENTIALS MyFloat EulerA,EulerB; MyFloat dEulerA[3],dEulerB[3]; #else MyFloat B[3]; MyFloat DtB[3]; #endif #if defined(TRACEDIVB) || defined(TIME_DEP_MAGN_DISP) MyFloat divB; #endif #if defined(BSMOOTH) || defined(BFROMROTA) MyFloat BSmooth[3]; #endif #ifdef TIME_DEP_MAGN_DISP MyFloat Balpha, DtBalpha; #endif #ifdef DIVBCLEANING_DEDNER MyFloat Phi, PhiPred, DtPhi; #ifdef SMOOTH_PHI MyFloat SmoothPhi; MyFloat SmoothDivB; #endif #endif #if defined(MAGNETIC_DIFFUSION) || defined(ROT_IN_MAG_DIS) MyFloat RotB[3]; #ifdef SMOOTH_ROTB MyFloat SmoothedRotB[3]; #endif #endif #if defined(BSMOOTH) || defined(SMOOTH_ROTB) || defined(SMOOTH_PHI) MyFloat DensityNorm; #endif #endif #ifdef TIME_DEP_ART_VISC MyFloat alpha, Dtalpha; #endif #ifdef NS_TIMESTEP MyFloat ViscEntropyChange; #endif #ifdef CONDUCTION_SATURATION MyFloat GradEntr[3]; #endif #ifdef MHM MyFloat FeedbackEnergy; #endif #ifdef COSMIC_RAYS MyFloat CR_C0[NUMCRPOP]; /*!< Cosmic ray amplitude adiabatic invariable */ MyFloat CR_q0[NUMCRPOP]; /*!< Cosmic ray cutoff adiabatic invariable */ MyFloat CR_E0[NUMCRPOP]; /*!< Specific Energy at Rho0 */ MyFloat CR_n0[NUMCRPOP]; /*!< baryon fraction in cosmic rays */ MyFloat CR_DeltaE[NUMCRPOP]; /*!< Specific Energy growth during timestep */ MyFloat CR_DeltaN[NUMCRPOP]; /*!< baryon fraction growth during timestep */ #ifdef MACHNUM MyFloat CR_Gamma0[NUMCRPOP]; #endif #ifdef CR_OUTPUT_INJECTION MyFloat CR_Specific_SupernovaHeatingRate; #endif #endif /* COSMIC_RAYS */ #ifdef MACHNUM MyFloat Shock_MachNumber; /*!< Mach number */ MyFloat Shock_DecayTime; /*!< Shock decay time */ #ifdef COSMIC_RAYS MyFloat Shock_DensityJump; /*!< Density jump at the shock */ MyFloat Shock_EnergyJump; /*!< Energy jump at the shock */ MyFloat PreShock_PhysicalDensity; /*!< Specific energy in the preshock regime */ MyFloat 
PreShock_PhysicalEnergy; /*!< Density in the preshock regime */ MyFloat PreShock_XCR; /*!< XCR = PCR / Pth in the preshock regime */ #endif #ifdef MACHSTATISTIC MyFloat Shock_DtEnergy; /*!< Change of thermal specific energy at Shocks */ #endif #ifdef OUTPUT_PRESHOCK_CSND MyFloat PreShock_PhysicalSoundSpeed; /*!< Sound speed in the preshock regime */ MyFloat PreShock_PhysicalDensity; /*!< Specific energy in the preshock regime */ #endif #endif /* Mach number estimate */ #ifdef CHEMISTRY MyFloat elec; MyFloat HI; MyFloat HII; MyFloat HeI; MyFloat HeII; MyFloat HeIII; MyFloat H2I; MyFloat H2II; MyFloat HM; MyFloat Gamma; MyFloat t_elec, t_cool; #endif #ifdef RADTRANSFER MyFloat ET[6]; /* eddington tensor - symmetric -> only 6 elements needed */ MyFloat Je; /* emissivity */ MyFloat nHI; /* HI number density */ MyFloat nHII; /* HII number density */ MyFloat n_elec; /* electron number density */ MyFloat n_gamma; /* photon number density */ #ifdef RADTRANSFER_FLUXLIMITER MyFloat Grad_ngamma[3]; #endif #ifndef CG MyFloat n_gamma_old; #endif #endif #if defined CS_MODEL MyFloat DensityOld; #ifdef CS_FEEDBACK union { MyFloat DensityAvg; /*!< current baryonic mass density of particle */ MyLongDouble dDensityAvg; } da; union { MyFloat EntropyAvg; /*!< current baryonic mass density of particle */ MyLongDouble dEntropyAvg; } ea; MyFloat HotHsml; int HotNgbNum; MyFloat DensPromotion; MyFloat TempPromotion; #endif #endif #ifdef EOS_DEGENERATE MyFloat u; /* internal energy density */ MyFloat temp; /* temperature */ MyFloat xnuc[EOS_NSPECIES]; /* nuclear mass fractions */ #endif #ifdef WAKEUP short int wakeup; /*!< flag to wake up particle */ #endif } *SphP, /*!< holds SPH particle data on local processor */ *DomainSphBuf; /*!< buffer for SPH particle data in domain decomposition */ extern peanokey *DomainKeyBuf; /* global state of system */ extern struct state_of_system { double Mass, EnergyKin, EnergyPot, EnergyInt, EnergyTot, Momentum[4], AngMomentum[4], CenterOfMass[4], 
MassComp[6], EnergyKinComp[6], EnergyPotComp[6], EnergyIntComp[6], EnergyTotComp[6], MomentumComp[6][4], AngMomentumComp[6][4], CenterOfMassComp[6][4]; } SysState, SysStateAtStart, SysStateAtEnd; /* Various structures for communication during the gravity computation. */ extern struct data_index { int Task; int Index; int IndexGet; } *DataIndexTable; /*!< the particles to be exported are grouped by task-number. This table allows the results to be disentangled again and to be assigned to the correct particle */ extern struct data_nodelist { int NodeList[NODELISTLENGTH]; } *DataNodeList; extern struct gravdata_in { MyFloat Pos[3]; #ifdef UNEQUALSOFTENINGS int Type; #ifdef ADAPTIVE_GRAVSOFT_FORGAS MyFloat Soft; #endif #endif MyFloat OldAcc; int NodeList[NODELISTLENGTH]; } *GravDataIn, /*!< holds particle data to be exported to other processors */ *GravDataGet; /*!< holds particle data imported from other processors */ extern struct gravdata_out { MyLongDouble Acc[3]; #ifdef EVALPOTENTIAL MyLongDouble Potential; #endif #ifdef DISTORTIONTENSORPS MyLongDouble tidal_tensorps[3][3]; #endif int Ninteractions; } *GravDataResult, /*!< holds the partial results computed for imported particles. Note: We use GravDataResult = GravDataGet, such that the result replaces the imported data */ *GravDataOut; /*!< holds partial results received from other processors. This will overwrite the GravDataIn array */ extern struct potdata_out { MyLongDouble Potential; } *PotDataResult, /*!< holds the partial results computed for imported particles. Note: We use GravDataResult = GravDataGet, such that the result replaces the imported data */ *PotDataOut; /*!< holds partial results received from other processors. This will overwrite the GravDataIn array */ /*! Header for the standard file format. */ extern struct io_header { int npart[6]; /*!< number of particles of each type in this file */ double mass[6]; /*!< mass of particles of each type. 
If 0, then the masses are explicitly stored in the mass-block of the snapshot file, otherwise they are omitted */ #ifdef COSMIC_RAYS double SpectralIndex_CR_Pop[NUMCRPOP]; /*!< spectral indices of cosmic ray populations */ #endif double time; /*!< time of snapshot file */ double redshift; /*!< redshift of snapshot file */ int flag_sfr; /*!< flags whether the simulation was including star formation */ int flag_feedback; /*!< flags whether feedback was included (obsolete) */ unsigned int npartTotal[6]; /*!< total number of particles of each type in this snapshot. This can be different from npart if one is dealing with a multi-file snapshot. */ int flag_cooling; /*!< flags whether cooling was included */ int num_files; /*!< number of files in multi-file snapshot */ double BoxSize; /*!< box-size of simulation in case periodic boundaries were used */ double Omega0; /*!< matter density in units of critical density */ double OmegaLambda; /*!< cosmological constant parameter */ double HubbleParam; /*!< Hubble parameter in units of 100 km/sec/Mpc */ int flag_stellarage; /*!< flags whether the file contains formation times of star particles */ int flag_metals; /*!< flags whether the file contains metallicity values for gas and star particles */ unsigned int npartTotalHighWord[6]; /*!< High word of the total number of particles of each type */ int flag_entropy_instead_u; /*!< flags that IC-file contains entropy instead of u */ int flag_doubleprecision; /*!< flags that snapshot contains double-precision instead of single precision */ int flag_ic_info; /*!< flag to inform whether IC files are generated with ordinary Zeldovich approximation, or whether they ocontains 2nd order lagrangian perturbation theory initial conditions. For snapshots files, the value informs whether the simulation was evolved from Zeldoch or 2lpt ICs. 
Encoding is as follows: FLAG_ZELDOVICH_ICS (1) - IC file based on Zeldovich FLAG_SECOND_ORDER_ICS (2) - Special IC-file containing 2lpt masses FLAG_EVOLVED_ZELDOVICH (3) - snapshot evolved from Zeldovich ICs FLAG_EVOLVED_2LPT (4) - snapshot evolved from 2lpt ICs FLAG_NORMALICS_2LPT (5) - standard gadget file format with 2lpt ICs All other values, including 0 are interpreted as "don't know" for backwards compatability. */ float lpt_scalingfactor; /*!< scaling factor for 2lpt initial conditions */ #ifdef COSMIC_RAYS char fill[48-8*NUMCRPOP]; /*!< fills to 256 Bytes */ #else char fill[48]; /*!< fills to 256 Bytes */ #endif } header; /*!< holds header for snapshot files */ enum iofields { IO_POS, IO_VEL, IO_ID, IO_MASS, IO_SECONDORDERMASS, IO_U, IO_RHO, IO_NE, IO_NH, IO_HSML, IO_SFR, IO_AGE, IO_Z, IO_BHMASS, IO_BHMDOT, IO_BHPROGS, IO_BHMBUB, IO_BHMINI, IO_BHMRAD, IO_POT, IO_ACCEL, IO_CR_C0, IO_CR_Q0, IO_CR_P0, IO_CR_E0, IO_CR_n0, IO_CR_ThermalizationTime, IO_CR_DissipationTime, IO_ELECT, IO_HI, IO_HII, IO_HeI, IO_HeII, IO_HeIII, IO_H2I, IO_H2II, IO_HM, IO_DTENTR, IO_STRESSDIAG, IO_STRESSOFFDIAG, IO_STRESSBULK, IO_SHEARCOEFF, IO_TSTP, IO_BFLD, IO_BSMTH, IO_DBDT, IO_DIVB, IO_ABVC, IO_AMDC, IO_PHI, IO_ROTB, IO_SROTB, IO_COOLRATE, IO_CONDRATE, IO_DENN, IO_EGYPROM, IO_EGYCOLD, IO_MACH, IO_DTENERGY, IO_PRESHOCK_CSND, IO_PRESHOCK_DENSITY, IO_PRESHOCK_ENERGY, IO_PRESHOCK_XCR, IO_DENSITY_JUMP, IO_ENERGY_JUMP, IO_CRINJECT, IO_TIDALTENSORPS, IO_DISTORTIONTENSORPS, IO_EULERA, IO_EULERB, IO_FLOW_DETERMINANT, IO_PHASE_SPACE_DETERMINANT, IO_ANNIHILATION_RADIATION, IO_STREAM_DENSITY, IO_EOSTEMP, IO_EOSXNUC, IO_PRESSURE, IO_nHII, IO_RADGAMMA, IO_LAST_CAUSTIC, IO_SHEET_ORIENTATION, IO_INIT_DENSITY, IO_CAUSTIC_COUNTER, IO_SHELL_INFO, IO_DMHSML, /* for 'SUBFIND_RESHUFFLE_CATALOGUE' option */ IO_DMDENSITY, IO_DMVELDISP, IO_DMHSML_V, /* for 'SUBFIND_RESHUFFLE_CATALOGUE_WITH_VORONOI' option */ IO_DMDENSITY_V, IO_LASTENTRY /* This should be kept - it signals the end of the list */ }; /* * 
Variables for Tree * ------------------ */ extern struct NODE { MyFloat len; /*!< sidelength of treenode */ MyFloat center[3]; /*!< geometrical center of node */ #ifdef RADTRANSFER MyFloat stellar_mass; /*!< mass in stars in the node*/ MyFloat stellar_s[3]; /*!< enter of mass for the stars in the node*/ #endif #ifdef ADAPTIVE_GRAVSOFT_FORGAS MyFloat maxsoft; /*!< hold the maximum gravitational softening of particles in the node if the ADAPTIVE_GRAVSOFT_FORGAS option is selected */ #endif union { int suns[8]; /*!< temporary pointers to daughter nodes */ struct { MyFloat s[3]; /*!< center of mass of node */ MyFloat mass; /*!< mass of node */ unsigned int bitflags; /*!< flags certain node properties */ int sibling; /*!< this gives the next node in the walk in case the current node can be used */ int nextnode; /*!< this gives the next node in case the current node needs to be opened */ int father; /*!< this gives the parent node of each node (or -1 if we have the root node) */ } d; } u; #ifdef SCALARFIELD MyFloat s_dm[3]; MyFloat mass_dm; #endif int Ti_current; } *Nodes_base, /*!< points to the actual memory allocted for the nodes */ *Nodes; /*!< this is a pointer used to access the nodes which is shifted such that Nodes[All.MaxPart] gives the first allocated node */ extern struct extNODE { MyLongDouble dp[3]; #ifdef SCALARFIELD MyLongDouble dp_dm[3]; MyFloat vs_dm[3]; #endif #ifdef FLTROUNDOFFREDUCTION MyFloat s_base[3]; MyFloat len_base; #ifdef SCALARFIELD MyFloat s_dm_base[3]; #endif #endif MyFloat vs[3]; MyFloat vmax; MyFloat divVmax; MyFloat hmax; /*!< maximum SPH smoothing length in node. 
Only used for gas particles */ int Ti_lastkicked; int Flag; } *Extnodes, *Extnodes_base; extern int MaxNodes; /*!< maximum allowed number of internal nodes */ extern int Numnodestree; /*!< number of (internal) nodes in each tree */ extern int *Nextnode; /*!< gives next node in tree walk (nodes array) */ extern int *Father; /*!< gives parent node in tree (Prenodes array) */ #ifdef STATICNFW extern double Rs, R200; extern double Dc; extern double RhoCrit, V200; extern double fac; #endif #ifdef CHEMISTRY /* ----- chemistry part ------- */ #define H_number_fraction 0.76 #define He_number_fraction 0.06 /* ----- Tables ------- */ extern double T[N_T], J0_nu[N_nu], J_nu[N_nu], nu[N_nu]; extern double k1a[N_T], k2a[N_T], k3a[N_T], k4a[N_T], k5a[N_T], k6a[N_T], k7a[N_T], k8a[N_T], k9a[N_T], k10a[N_T], k11a[N_T]; extern double k12a[N_T], k13a[N_T], k14a[N_T], k15a[N_T], k16a[N_T], k17a[N_T], k18a[N_T], k19a[N_T], k20a[N_T], k21a[N_T]; extern double ciHIa[N_T], ciHeIa[N_T], ciHeIIa[N_T], ciHeISa[N_T], reHIIa[N_T], brema[N_T]; extern double ceHIa[N_T], ceHeIa[N_T], ceHeIIa[N_T], reHeII1a[N_T], reHeII2a[N_T], reHeIIIa[N_T]; /* cross-sections */ #ifdef RADIATION extern double sigma24[N_nu], sigma25[N_nu], sigma26[N_nu], sigma27[N_nu], sigma28[N_nu], sigma29[N_nu], sigma30[N_nu], sigma31[N_nu]; #endif #endif #endif
{ "alphanum_fraction": 0.7091813259, "avg_line_length": 29.5070496084, "ext": "h", "hexsha": "cf7f9ef4e7fdbf8635170ec114566a246a151b86", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5e82c2de9e6884795b4ee89f2b15ed5dde70388f", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "egpbos/egp", "max_forks_repo_path": "testing/icgen/random_verschillende_resoluties_N-GenIC/gadget3_64/allvars.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "5e82c2de9e6884795b4ee89f2b15ed5dde70388f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "egpbos/egp", "max_issues_repo_path": "testing/icgen/random_verschillende_resoluties_N-GenIC/gadget3_64/allvars.h", "max_line_length": 176, "max_stars_count": null, "max_stars_repo_head_hexsha": "5e82c2de9e6884795b4ee89f2b15ed5dde70388f", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "egpbos/egp", "max_stars_repo_path": "testing/icgen/random_verschillende_resoluties_N-GenIC/gadget3_64/allvars.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 15639, "size": 56506 }
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/ #include <Python.h> #include <stdio.h> #include <stdlib.h> #include <numpy/arrayobject.h> #include <lapacke.h> #include "phonoc_array.h" #include "phonon4_h/fc4.h" #include "phonon4_h/real_to_reciprocal.h" #include "phonon4_h/frequency_shift.h" static PyObject * py_get_fc4_normal_for_frequency_shift(PyObject *self, PyObject *args); static PyObject * py_get_fc4_frequency_shifts(PyObject *self, PyObject *args); static PyObject * py_real_to_reciprocal4(PyObject *self, PyObject *args); static PyObject * py_reciprocal_to_normal4(PyObject *self, PyObject *args); static PyObject * py_set_phonons_grid_points(PyObject *self, PyObject *args); static PyObject * py_distribute_fc4(PyObject *self, PyObject *args); static PyObject * py_rotate_delta_fc3s_elem(PyObject *self, PyObject *args); static PyObject * py_set_translational_invariance_fc4(PyObject *self, PyObject *args); static PyObject * py_set_permutation_symmetry_fc4(PyObject *self, PyObject *args); static PyObject * py_get_drift_fc4(PyObject *self, PyObject *args); static PyMethodDef functions[] = { {"fc4_normal_for_frequency_shift", py_get_fc4_normal_for_frequency_shift, METH_VARARGS, "Calculate fc4 normal for frequency shift"}, {"fc4_frequency_shifts", py_get_fc4_frequency_shifts, METH_VARARGS, "Calculate fc4 frequency shift"}, {"real_to_reciprocal4", py_real_to_reciprocal4, METH_VARARGS, "Transform fc4 of real space to reciprocal space"}, {"reciprocal_to_normal4", py_reciprocal_to_normal4, METH_VARARGS, "Transform fc4 of reciprocal space to normal coordinate in special case for frequency shift"}, {"phonons_grid_points", py_set_phonons_grid_points, METH_VARARGS, "Set phonons on grid points"}, {"distribute_fc4", py_distribute_fc4, METH_VARARGS, "Distribute least fc4 to full fc4"}, {"rotate_delta_fc3s_elem", py_rotate_delta_fc3s_elem, METH_VARARGS, "Rotate delta fc3s for a set of atomic indices"}, {"translational_invariance_fc4", py_set_translational_invariance_fc4, METH_VARARGS, "Set translational invariance for 
fc4"}, {"permutation_symmetry_fc4", py_set_permutation_symmetry_fc4, METH_VARARGS, "Set permutation symmetry for fc4"}, {"drift_fc4", py_get_drift_fc4, METH_VARARGS, "Get drifts of fc4"}, {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init_phono4py(void) { Py_InitModule3("_phono4py", functions, "C-extension for phono4py\n\n...\n"); return; } static PyObject * py_get_fc4_normal_for_frequency_shift(PyObject *self, PyObject *args) { PyArrayObject* fc4_normal_py; PyArrayObject* frequencies_py; PyArrayObject* eigenvectors_py; PyArrayObject* grid_points1_py; PyArrayObject* grid_address_py; PyArrayObject* mesh_py; PyArrayObject* fc4_py; PyArrayObject* shortest_vectors_py; PyArrayObject* multiplicity_py; PyArrayObject* masses_py; PyArrayObject* p2s_map_py; PyArrayObject* s2p_map_py; PyArrayObject* band_indicies_py; double cutoff_frequency; int grid_point0; if (!PyArg_ParseTuple(args, "OOOiOOOOOOOOOOd", &fc4_normal_py, &frequencies_py, &eigenvectors_py, &grid_point0, &grid_points1_py, &grid_address_py, &mesh_py, &fc4_py, &shortest_vectors_py, &multiplicity_py, &masses_py, &p2s_map_py, &s2p_map_py, &band_indicies_py, &cutoff_frequency)) { return NULL; } double* fc4_normal = (double*)fc4_normal_py->data; double* freqs = (double*)frequencies_py->data; /* npy_cdouble and lapack_complex_double may not be compatible. 
*/
/* So eigenvectors should not be used in Python side */
/* NOTE: this chunk opens inside py_get_fc4_normal_for_frequency_shift();
 * fc4_normal, freqs, grid_point0 and cutoff_frequency are declared in the
 * part of that function preceding this chunk.
 * NOTE(review): all wrappers below use direct PyArrayObject member access
 * (->data, ->dimensions), i.e. the pre-NumPy-1.7 public struct layout;
 * PyArray_DATA()/PyArray_DIM() are the supported accessors — TODO confirm
 * the targeted NumPy version. */
lapack_complex_double* eigvecs = (lapack_complex_double*)eigenvectors_py->data;
Iarray* grid_points1 = convert_to_iarray(grid_points1_py);
const int* grid_address = (int*)grid_address_py->data;
const int* mesh = (int*)mesh_py->data;
double* fc4 = (double*)fc4_py->data;
Darray* svecs = convert_to_darray(shortest_vectors_py);
Iarray* multi = convert_to_iarray(multiplicity_py);
const double* masses = (double*)masses_py->data;
const int* p2s = (int*)p2s_map_py->data;
const int* s2p = (int*)s2p_map_py->data;
Iarray* band_indicies = convert_to_iarray(band_indicies_py);

get_fc4_normal_for_frequency_shift(fc4_normal,
                                   freqs,
                                   eigvecs,
                                   grid_point0,
                                   grid_points1,
                                   grid_address,
                                   mesh,
                                   fc4,
                                   svecs,
                                   multi,
                                   masses,
                                   p2s,
                                   s2p,
                                   band_indicies,
                                   cutoff_frequency);

/* Release the wrapper structs produced by convert_to_iarray/darray above. */
free(grid_points1);
free(svecs);
free(multi);
free(band_indicies);

Py_RETURN_NONE;
}

/* Python wrapper: unpack the ndarray arguments and forward them to
 * get_fc4_frequency_shifts(). Temporary Iarray/Darray views are freed
 * before returning None. */
static PyObject * py_get_fc4_frequency_shifts(PyObject *self, PyObject *args)
{
  PyArrayObject* frequency_shifts_py;
  PyArrayObject* fc4_normal_py;
  PyArrayObject* frequencies_py;
  PyArrayObject* grid_points1_py;
  PyArrayObject* temperatures_py;
  PyArrayObject* band_indicies_py;
  double unit_conversion_factor;

  if (!PyArg_ParseTuple(args, "OOOOOOd",
                        &frequency_shifts_py,
                        &fc4_normal_py,
                        &frequencies_py,
                        &grid_points1_py,
                        &temperatures_py,
                        &band_indicies_py,
                        &unit_conversion_factor)) {
    return NULL;
  }

  double* freq_shifts = (double*)frequency_shifts_py->data;
  double* fc4_normal = (double*)fc4_normal_py->data;
  double* freqs = (double*)frequencies_py->data;
  Iarray* grid_points1 = convert_to_iarray(grid_points1_py);
  Darray* temperatures = convert_to_darray(temperatures_py);
  int* band_indicies = (int*)band_indicies_py->data;
  /* num_band0: number of selected bands; num_band: bands per grid point. */
  const int num_band0 = (int)band_indicies_py->dimensions[0];
  const int num_band = (int)frequencies_py->dimensions[1];

  get_fc4_frequency_shifts(freq_shifts,
                           fc4_normal,
                           freqs,
                           grid_points1,
                           temperatures,
                           band_indicies,
                           num_band0,
                           num_band,
                           unit_conversion_factor);

  free(grid_points1);
  free(temperatures);

  Py_RETURN_NONE;
}

/* Python wrapper: Fourier-transform the real-space fc4 at wave vector q.
 * Note the argument order: the output array (fc4_reciprocal) comes first
 * in the Python call, before the real-space fc4. */
static PyObject * py_real_to_reciprocal4(PyObject *self, PyObject *args)
{
  PyArrayObject* fc4_py;
  PyArrayObject* fc4_reciprocal_py;
  PyArrayObject* q_py;
  PyArrayObject* shortest_vectors;
  PyArrayObject* multiplicity;
  PyArrayObject* p2s_map;
  PyArrayObject* s2p_map;

  if (!PyArg_ParseTuple(args, "OOOOOOO",
                        &fc4_reciprocal_py,
                        &fc4_py,
                        &q_py,
                        &shortest_vectors,
                        &multiplicity,
                        &p2s_map,
                        &s2p_map)) {
    return NULL;
  }

  double* fc4 = (double*)fc4_py->data;
  lapack_complex_double* fc4_reciprocal =
    (lapack_complex_double*)fc4_reciprocal_py->data;
  Darray* svecs = convert_to_darray(shortest_vectors);
  Iarray* multi = convert_to_iarray(multiplicity);
  const int* p2s = (int*)p2s_map->data;
  const int* s2p = (int*)s2p_map->data;
  const double* q = (double*)q_py->data;

  real_to_reciprocal4(fc4_reciprocal, q, fc4, svecs, multi, p2s, s2p);

  free(svecs);
  free(multi);

  Py_RETURN_NONE;
}

/* Python wrapper: project reciprocal-space fc4 onto the phonon normal-mode
 * basis of the two grid points given in grid_points_py. */
static PyObject * py_reciprocal_to_normal4(PyObject *self, PyObject *args)
{
  PyArrayObject* fc4_normal_py;
  PyArrayObject* fc4_reciprocal_py;
  PyArrayObject* frequencies_py;
  PyArrayObject* eigenvectors_py;
  PyArrayObject* grid_points_py;
  PyArrayObject* masses_py;
  PyArrayObject* band_indicies_py;
  double cutoff_frequency;

  if (!PyArg_ParseTuple(args, "OOOOOOOd",
                        &fc4_normal_py,
                        &fc4_reciprocal_py,
                        &frequencies_py,
                        &eigenvectors_py,
                        &grid_points_py,
                        &masses_py,
                        &band_indicies_py,
                        &cutoff_frequency)) {
    return NULL;
  }

  lapack_complex_double* fc4_normal =
    (lapack_complex_double*)fc4_normal_py->data;
  const lapack_complex_double* fc4_reciprocal =
    (lapack_complex_double*)fc4_reciprocal_py->data;
  const lapack_complex_double* eigenvectors =
    (lapack_complex_double*)eigenvectors_py->data;
  const double* frequencies = (double*)frequencies_py->data;
  const int* grid_points = (int*)grid_points_py->data;
  const double* masses = (double*)masses_py->data;
  const int* band_indices = (int*)band_indicies_py->data;
  const int num_band0 = (int)band_indicies_py->dimensions[0];
  const int num_band = (int)frequencies_py->dimensions[1];

  /* Offsets select the frequency row (num_band entries) and eigenvector
   * block (num_band * num_band entries) of each of the two grid points. */
  reciprocal_to_normal4(fc4_normal,
                        fc4_reciprocal,
                        frequencies + grid_points[0] * num_band,
                        frequencies + grid_points[1] * num_band,
                        eigenvectors + grid_points[0] * num_band * num_band,
                        eigenvectors + grid_points[1] * num_band * num_band,
                        masses,
                        band_indices,
                        num_band0,
                        num_band,
                        cutoff_frequency);

  Py_RETURN_NONE;
}

/* Python wrapper: run the harmonic (fc2) phonon calculation for the given
 * grid points, optionally with the non-analytical-term correction (NAC)
 * inputs, which may each be Py_None. */
static PyObject * py_set_phonons_grid_points(PyObject *self, PyObject *args)
{
  PyArrayObject* frequencies;
  PyArrayObject* eigenvectors;
  PyArrayObject* phonon_done_py;
  PyArrayObject* grid_points_py;
  PyArrayObject* grid_address_py;
  PyArrayObject* mesh_py;
  PyArrayObject* shortest_vectors_fc2;
  PyArrayObject* multiplicity_fc2;
  PyArrayObject* fc2_py;
  PyArrayObject* atomic_masses_fc2;
  PyArrayObject* p2s_map_fc2;
  PyArrayObject* s2p_map_fc2;
  PyArrayObject* reciprocal_lattice;
  PyArrayObject* born_effective_charge;
  PyArrayObject* q_direction;
  PyArrayObject* dielectric_constant;
  double nac_factor, unit_conversion_factor;
  char uplo;

  if (!PyArg_ParseTuple(args, "OOOOOOOOOOOOdOOOOdc",
                        &frequencies,
                        &eigenvectors,
                        &phonon_done_py,
                        &grid_points_py,
                        &grid_address_py,
                        &mesh_py,
                        &fc2_py,
                        &shortest_vectors_fc2,
                        &multiplicity_fc2,
                        &atomic_masses_fc2,
                        &p2s_map_fc2,
                        &s2p_map_fc2,
                        &unit_conversion_factor,
                        &born_effective_charge,
                        &dielectric_constant,
                        &reciprocal_lattice,
                        &q_direction,
                        &nac_factor,
                        &uplo)) {
    return NULL;
  }

  double* born;
  double* dielectric;
  double *q_dir;
  Darray* freqs = convert_to_darray(frequencies);
  /* npy_cdouble and lapack_complex_double may not be compatible.
*/
/* So eigenvectors should not be used in Python side */
  Carray* eigvecs = convert_to_carray(eigenvectors);
  char* phonon_done = (char*)phonon_done_py->data;
  Iarray* grid_points = convert_to_iarray(grid_points_py);
  const int* grid_address = (int*)grid_address_py->data;
  const int* mesh = (int*)mesh_py->data;
  Darray* fc2 = convert_to_darray(fc2_py);
  Darray* svecs_fc2 = convert_to_darray(shortest_vectors_fc2);
  Iarray* multi_fc2 = convert_to_iarray(multiplicity_fc2);
  const double* masses_fc2 = (double*)atomic_masses_fc2->data;
  const int* p2s_fc2 = (int*)p2s_map_fc2->data;
  const int* s2p_fc2 = (int*)s2p_map_fc2->data;
  const double* rec_lat = (double*)reciprocal_lattice->data;

  /* The three NAC inputs are optional; Py_None maps to NULL. */
  if ((PyObject*)born_effective_charge == Py_None) {
    born = NULL;
  } else {
    born = (double*)born_effective_charge->data;
  }
  if ((PyObject*)dielectric_constant == Py_None) {
    dielectric = NULL;
  } else {
    dielectric = (double*)dielectric_constant->data;
  }
  if ((PyObject*)q_direction == Py_None) {
    q_dir = NULL;
  } else {
    q_dir = (double*)q_direction->data;
  }

  set_phonons_for_frequency_shift(freqs,
                                  eigvecs,
                                  phonon_done,
                                  grid_points,
                                  grid_address,
                                  mesh,
                                  fc2,
                                  svecs_fc2,
                                  multi_fc2,
                                  masses_fc2,
                                  p2s_fc2,
                                  s2p_fc2,
                                  unit_conversion_factor,
                                  born,
                                  dielectric,
                                  rec_lat,
                                  q_dir,
                                  nac_factor,
                                  uplo);

  free(freqs);
  free(eigvecs);
  free(grid_points);
  free(fc2);
  free(svecs_fc2);
  free(multi_fc2);

  Py_RETURN_NONE;
}

/* Python wrapper: copy fc4 elements onto a symmetry-equivalent atom using
 * the inverse Cartesian rotation; returns the int result of
 * distribute_fc4() as a Python integer. */
static PyObject * py_distribute_fc4(PyObject *self, PyObject *args)
{
  PyArrayObject* fc4_copy_py;
  PyArrayObject* fc4_py;
  int fourth_atom;
  PyArrayObject* rotation_cart_inv;
  PyArrayObject* atom_mapping_py;

  if (!PyArg_ParseTuple(args, "OOiOO",
                        &fc4_copy_py,
                        &fc4_py,
                        &fourth_atom,
                        &atom_mapping_py,
                        &rotation_cart_inv)) {
    return NULL;
  }

  double* fc4_copy = (double*)fc4_copy_py->data;
  const double* fc4 = (double*)fc4_py->data;
  const double* rot_cart_inv = (double*)rotation_cart_inv->data;
  const int* atom_mapping = (int*)atom_mapping_py->data;
  const int num_atom = (int)atom_mapping_py->dimensions[0];

  return PyInt_FromLong((long) distribute_fc4(fc4_copy,
                                              fc4,
                                              fourth_atom,
                                              atom_mapping,
                                              num_atom,
                                              rot_cart_inv));
}

/* Python wrapper: rotate one (atom1, atom2, atom3) element of each
 * delta-fc3 by every site symmetry; returns the int result of
 * rotate_delta_fc3s_elem(). */
static PyObject * py_rotate_delta_fc3s_elem(PyObject *self, PyObject *args)
{
  PyArrayObject* rotated_delta_fc3s_py;
  PyArrayObject* delta_fc3s_py;
  PyArrayObject* atom_mappings_of_rotations_py;
  PyArrayObject* site_symmetries_cartesian_py;
  int atom1, atom2, atom3;

  if (!PyArg_ParseTuple(args, "OOOOiii",
                        &rotated_delta_fc3s_py,
                        &delta_fc3s_py,
                        &atom_mappings_of_rotations_py,
                        &site_symmetries_cartesian_py,
                        &atom1,
                        &atom2,
                        &atom3)) {
    return NULL;
  }

  double* rotated_delta_fc3s = (double*)rotated_delta_fc3s_py->data;
  const double* delta_fc3s = (double*)delta_fc3s_py->data;
  const int* rot_map_syms = (int*)atom_mappings_of_rotations_py->data;
  const double* site_syms_cart = (double*)site_symmetries_cartesian_py->data;
  const int num_rot = (int)site_symmetries_cartesian_py->dimensions[0];
  const int num_delta_fc3s = (int)delta_fc3s_py->dimensions[0];
  const int num_atom = (int)delta_fc3s_py->dimensions[1];

  return PyInt_FromLong((long) rotate_delta_fc3s_elem(rotated_delta_fc3s,
                                                      delta_fc3s,
                                                      rot_map_syms,
                                                      site_syms_cart,
                                                      num_rot,
                                                      num_delta_fc3s,
                                                      atom1,
                                                      atom2,
                                                      atom3,
                                                      num_atom));
}

/* Python wrapper: impose the translational-invariance sum rule on fc4
 * along one index (in place). */
static PyObject * py_set_translational_invariance_fc4(PyObject *self,
                                                      PyObject *args)
{
  PyArrayObject* fc4_py;
  int index;

  if (!PyArg_ParseTuple(args, "Oi",
                        &fc4_py,
                        &index)) {
    return NULL;
  }

  double* fc4 = (double*)fc4_py->data;
  const int num_atom = (int)fc4_py->dimensions[0];

  set_translational_invariance_fc4_per_index(fc4, num_atom, index);

  Py_RETURN_NONE;
}

/* Python wrapper: symmetrize fc4 under index permutation (in place). */
static PyObject * py_set_permutation_symmetry_fc4(PyObject *self,
                                                  PyObject *args)
{
  PyArrayObject* fc4_py;

  if (!PyArg_ParseTuple(args, "O",
                        &fc4_py)) {
    return NULL;
  }

  double* fc4 = (double*)fc4_py->data;
  const int num_atom = (int)fc4_py->dimensions[0];

  set_permutation_symmetry_fc4(fc4, num_atom);

  Py_RETURN_NONE;
}

/* Python wrapper: compute the four fc4 drift components and return them
 * as a new Python list of four floats. */
static PyObject * py_get_drift_fc4(PyObject *self, PyObject *args)
{
  PyArrayObject* fc4_py;

  if (!PyArg_ParseTuple(args, "O",
                        &fc4_py)) {
    return NULL;
  }

  double* fc4 = (double*)fc4_py->data;
  const int num_atom = (int)fc4_py->dimensions[0];
  int i;
  double drift[4];
  PyObject* drift_py;

  get_drift_fc4(drift, fc4, num_atom);

  drift_py = PyList_New(4);
  for (i = 0; i < 4; i++) {
    /* PyList_SetItem steals the float reference, so no DECREF is needed. */
    PyList_SetItem(drift_py, i, PyFloat_FromDouble(drift[i]));
  }

  return drift_py;
}
{ "alphanum_fraction": 0.7237818452, "avg_line_length": 30.0594795539, "ext": "c", "hexsha": "1a3f7eaeaa44bc9b4d844e47b0034ccc7fc0c9e9", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2019-01-30T08:36:46.000Z", "max_forks_repo_forks_event_min_datetime": "2018-08-02T13:53:25.000Z", "max_forks_repo_head_hexsha": "faa1aea23a31faa3d642b99c51ebb8756e53c934", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "atztogo/forcefit", "max_forks_repo_path": "c/_phono4py.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "faa1aea23a31faa3d642b99c51ebb8756e53c934", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "atztogo/forcefit", "max_issues_repo_path": "c/_phono4py.c", "max_line_length": 162, "max_stars_count": 1, "max_stars_repo_head_hexsha": "faa1aea23a31faa3d642b99c51ebb8756e53c934", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "atztogo/forcefit", "max_stars_repo_path": "c/_phono4py.c", "max_stars_repo_stars_event_max_datetime": "2021-07-20T23:19:49.000Z", "max_stars_repo_stars_event_min_datetime": "2021-07-20T23:19:49.000Z", "num_tokens": 4654, "size": 16172 }
/**
 *
 * @file sgecon.c
 *
 *  PLASMA computational routines
 *  PLASMA is a software package provided by Univ. of Tennessee,
 *  Univ. of California Berkeley and Univ. of Colorado Denver
 *
 * @version 2.6.0
 * @author Jakub Kurzak
 * @author Ichitaro Yamazaki
 * @author Mathieu Faverge
 * @date 2012-10-04
 * @generated s Tue Jan 7 11:45:10 2014
 *
 **/
#include <lapacke.h>
#include "common.h"
#include <math.h>

/* This is the single-precision real instantiation of the template:
 * REAL selects the isgn workspace variant of LAPACKE_slacn2_work below. */
#undef COMPLEX
#define REAL

/***************************************************************************//**
 *
 * @ingroup float
 *
 *  PLASMA_sgecon - estimates the reciprocal of the condition number
 *  of a general matrix A, in either the 1-norm or the infinity-norm,
 *  using the LU factorization computed by PLASMA_sgetrf().
 *
 *  An estimate is obtained for norm(inv(A)), and the reciprocal of the
 *  condition number is computed as
 *
 *  \f[ rcond = \frac{1}{\|A\| \times \|A^{-1}\|} \f]
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          Specifies whether the 1-norm condition number
 *          or the infinity-norm condition number is required:
 *          = PlasmaOneNorm: One norm
 *          = PlasmaInfNorm: Infinity norm
 *
 * @param[in] N
 *          The order of the matrix A. N >= 0.
 *
 * @param[in] A
 *          The N-by-N matrix A.
 *
 * @param[in] LDA
 *          The leading dimension of the array A. LDA >= max(1,N).
 *
 * @param[in] Anorm
 *          If norm = PlasmaOneNorm, the 1-norm of the original matrix A.
 *          If norm = PlasmaInfNorm, the infinity-norm of the original matrix A.
 *
 * \param[out] rcond
 *          The reciprocal of the condition number of the matrix A,
 *          computed as stated above.
 *
 *******************************************************************************
 *
 * @return
 *          \retval PLASMA_SUCCESS successful exit
 *          \retval <0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa PLASMA_sgecon_Tile
 * @sa PLASMA_sgecon_Tile_Async
 * @sa PLASMA_cgecon
 * @sa PLASMA_dgecon
 * @sa PLASMA_sgecon
 *
 ******************************************************************************/
int PLASMA_sgecon(PLASMA_enum norm, int N, float *A, int LDA, float Anorm,
                  float *rcond)
{
    int NB;
    int status;
    plasma_context_t *plasma;
    PLASMA_sequence *sequence = NULL;
    PLASMA_request request = PLASMA_REQUEST_INITIALIZER;
    PLASMA_desc descA;

    plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA_sgecon", "PLASMA not initialized");
        return PLASMA_ERR_NOT_INITIALIZED;
    }
    /* Check input arguments */
    if (norm != PlasmaOneNorm && norm != PlasmaInfNorm) {
        plasma_error("PLASMA_sgecon", "illegal value of norm");
        return -1;
    }
    if (N < 0) {
        plasma_error("PLASMA_sgecon", "illegal value of N");
        return -2;
    }
    if (LDA < max(1,N)) {
        plasma_error("PLASMA_sgecon", "illegal value of LDA");
        return -4;
    }
    if (Anorm < 0.) {
        plasma_error("PLASMA_sgecon", "illegal value of Anorm");
        return -5;
    }

    /* Quick return: rcond is 1 for an empty matrix, 0 for a zero norm. */
    *rcond = (float)0.;
    if (N == 0) {
        *rcond = (float)1.;
        return PLASMA_SUCCESS;
    }
    else if (Anorm == 0.) {
        return PLASMA_SUCCESS;
    }

    /* Tune NB depending on M, N & NRHS; Set NBNB */
    status = plasma_tune(PLASMA_FUNC_SGESV, N, N, 0);
    if (status != PLASMA_SUCCESS) {
        plasma_error("PLASMA_sgecon", "plasma_tune() failed");
        return status;
    }

    /* Set NT */
    NB = PLASMA_NB;

    plasma_sequence_create(plasma, &sequence);

    /* Convert the LAPACK-layout input to tile layout (out-of-place copies,
     * in-place translates the same buffer). */
    if (PLASMA_TRANSLATION == PLASMA_OUTOFPLACE) {
        plasma_sooplap2tile( descA, A, NB, NB, LDA, N, 0, 0, N, N,
                             sequence, &request,
                             plasma_desc_mat_free(&(descA)));
    } else {
        plasma_siplap2tile( descA, A, NB, NB, LDA, N, 0, 0, N, N,
                            sequence, &request);
    }

    /* Call the tile interface */
    PLASMA_sgecon_Tile_Async(norm, &descA, Anorm, rcond, sequence, &request);

    if (PLASMA_TRANSLATION == PLASMA_OUTOFPLACE) {
        plasma_dynamic_sync();
        plasma_desc_mat_free(&descA);
    } else {
        plasma_siptile2lap(descA, A, NB, NB, LDA, N, sequence, &request);
        plasma_dynamic_sync();
    }

    status = sequence->status;
    plasma_sequence_destroy(plasma, sequence);
    return status;
}

/***************************************************************************//**
 *
 * @ingroup float_Tile
 *
 *  PLASMA_sgecon_Tile - estimates the reciprocal of the condition number
 *  of a general matrix A, in either the 1-norm or the infinity-norm.
 *  Tile equivalent of PLASMA_sgecon().
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          Specifies whether the 1-norm condition number
 *          or the infinity-norm condition number is required:
 *          = PlasmaOneNorm: One norm
 *          = PlasmaInfNorm: Infinity norm
 *
 * @param[in] A
 *          The N-by-N matrix A.
 *
 * @param[in] Anorm
 *          If norm = PlasmaOneNorm, the 1-norm of the original matrix A.
 *          If norm = PlasmaInfNorm, the infinity-norm of the original matrix A.
 *
 * \param[out] rcond
 *          The reciprocal of the condition number of the matrix A,
 *          computed as stated above.
 *
 *******************************************************************************
 *
 * @return
 *          \retval PLASMA_SUCCESS successful exit
 *          \retval <0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa PLASMA_sgecon
 * @sa PLASMA_sgecon_Tile_Async
 * @sa PLASMA_cgecon_Tile
 * @sa PLASMA_dgecon_Tile
 * @sa PLASMA_sgecon_Tile
 *
 ******************************************************************************/
int PLASMA_sgecon_Tile(PLASMA_enum norm, PLASMA_desc *A, float Anorm,
                       float *rcond)
{
    plasma_context_t *plasma;
    PLASMA_sequence *sequence = NULL;
    PLASMA_request request = PLASMA_REQUEST_INITIALIZER;
    int status;

    plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA_sgecon_Tile", "PLASMA not initialized");
        return PLASMA_ERR_NOT_INITIALIZED;
    }
    /* Synchronous wrapper: create a sequence, run the async variant,
     * and wait for completion. */
    plasma_sequence_create(plasma, &sequence);
    PLASMA_sgecon_Tile_Async(norm, A, Anorm, rcond, sequence, &request);
    plasma_dynamic_sync();
    status = sequence->status;
    plasma_sequence_destroy(plasma, sequence);
    return status;
}

/***************************************************************************//**
 *
 * @ingroup float_Tile_Async
 *
 *  PLASMA_sgecon_Tile_Async - estimates the reciprocal of the condition number
 *  of a general matrix A, in either the 1-norm or the infinity-norm.
 *  Non-blocking equivalent of PLASMA_sgecon_Tile().
 *  May return before the computation is finished.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 *******************************************************************************
 *
 * @sa PLASMA_sgecon
 * @sa PLASMA_sgecon_Tile
 * @sa PLASMA_cgecon_Tile_Async
 * @sa PLASMA_dgecon_Tile_Async
 * @sa PLASMA_sgecon_Tile_Async
 *
 ******************************************************************************/
int PLASMA_sgecon_Tile_Async(PLASMA_enum norm, PLASMA_desc *A, float Anorm,
                             float *rcond,
                             PLASMA_sequence *sequence, PLASMA_request *request)
{
    plasma_context_t *plasma;
    PLASMA_desc descA;
    PLASMA_desc descW;      /* workspace vector (descA.m x 1) used by slacn2 */
    float *workN;
    float Ainvnorm;
    int kase, kase1;
    /* NOTE(review): itrs counts slacn2 iterations but is never read. */
    int isave[3], itrs = 0;
    int fallback = PLASMA_FALSE;

    plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA_sgecon_Tile_Async", "PLASMA not initialized");
        return PLASMA_ERR_NOT_INITIALIZED;
    }
    if (sequence == NULL) {
        plasma_fatal_error("PLASMA_sgecon_Tile_Async", "NULL sequence");
        return PLASMA_ERR_UNALLOCATED;
    }
    if (request == NULL) {
        plasma_fatal_error("PLASMA_sgecon_Tile_Async", "NULL request");
        return PLASMA_ERR_UNALLOCATED;
    }
    /* Check sequence status */
    if (sequence->status == PLASMA_SUCCESS)
        request->status = PLASMA_SUCCESS;
    else
        return plasma_request_fail(sequence, request,
                                   PLASMA_ERR_SEQUENCE_FLUSHED);

    /* Check descriptors for correctness */
    if ( A->m != A->n ) {
        plasma_error("PLASMA_sgecon_Tile_Async",
                     "invalid A descriptor (not square)");
        return plasma_request_fail(sequence, request, PLASMA_ERR_ILLEGAL_VALUE);
    }
    if (plasma_desc_check(A) != PLASMA_SUCCESS) {
        plasma_error("PLASMA_sgecon_Tile_Async", "invalid first descriptor");
        return plasma_request_fail(sequence, request, PLASMA_ERR_ILLEGAL_VALUE);
    } else {
        descA = *A;
    }
    /* Check input arguments */
    if (descA.nb != descA.mb) {
        plasma_error("PLASMA_sgecon_Tile_Async", "only square tiles supported");
        return plasma_request_fail(sequence, request, PLASMA_ERR_ILLEGAL_VALUE);
    }

    /* Quick return */
    *rcond = (float)0.;
    if (descA.m == 0) {
        *rcond = (float)1.;
        return PLASMA_SUCCESS;
    }
    else if (Anorm == 0.) {
        return PLASMA_SUCCESS;
    }

    /* Estimate the norm of inv(A) via the reverse-communication
     * LAPACKE_slacn2_work loop: kase tells us which triangular solves
     * to apply to the workspace vector between calls. */
    Ainvnorm = (float)0.;
    if (norm == PlasmaOneNorm)
        kase1 = 1;
    else
        kase1 = 2;
    kase = 0;

#if defined(REAL)
    int *isgn = (int*)plasma_shared_alloc(plasma, descA.m, PlasmaInteger);
#endif
    workN = (float*)plasma_shared_alloc(plasma, descA.m, PlasmaRealFloat);
    plasma_sdesc_alloc( descW, descA.mb, descA.nb, descA.m, 1, 0, 0,
                        descA.m, 1, plasma_desc_mat_free(&(descW)));

    do {
        itrs ++;
#if defined(REAL)
        LAPACKE_slacn2_work( descA.m, workN, descW.mat, isgn,
                             &Ainvnorm, &kase, isave);
#else
        LAPACKE_slacn2_work( descA.m, workN, descW.mat,
                             &Ainvnorm, &kase, isave);
#endif
/* FALLBACK is defined unconditionally, enabling the LAPACK fallback branch
 * below when the estimator produced a non-finite or overflowing value. */
#define FALLBACK
#ifdef FALLBACK
        /*
         * Fall back to LAPACK
         */
        if( isnan(Ainvnorm) || isinf(Ainvnorm) ||
            Ainvnorm > LAPACKE_slamch('O') ) {
            int info;
            /* Copy tiles back to LAPACK layout and re-run the whole
             * condition estimate with LAPACKE_sgecon. */
            float *Atmp = (float*)malloc(descA.m * descA.n * sizeof(float));
            plasma_sooptile2lap( descA, Atmp, descA.mb, descA.nb,
                                 descA.m, descA.n, sequence, request);
            plasma_dynamic_sync();
            info = LAPACKE_sgecon(LAPACK_COL_MAJOR, lapack_const(norm),
                                  descA.n, Atmp, descA.m, Anorm, rcond);
            free(Atmp);
            fallback = PLASMA_TRUE;
            sequence->status = info;
            kase = 0;       /* terminate the reverse-communication loop */
        }
#endif
        if (kase != 0) {
            if (kase == kase1) {
                /* Multiply by inv(L). */
                plasma_parallel_call_9(plasma_pstrsm,
                    PLASMA_enum, PlasmaLeft,
                    PLASMA_enum, PlasmaLower,
                    PLASMA_enum, PlasmaNoTrans,
                    PLASMA_enum, PlasmaUnit,
                    float, 1.0,
                    PLASMA_desc, descA,
                    PLASMA_desc, descW,
                    PLASMA_sequence*, sequence,
                    PLASMA_request*, request);
                /* Multiply by inv(U). */
                plasma_parallel_call_9(plasma_pstrsm,
                    PLASMA_enum, PlasmaLeft,
                    PLASMA_enum, PlasmaUpper,
                    PLASMA_enum, PlasmaNoTrans,
                    PLASMA_enum, PlasmaNonUnit,
                    float, 1.0,
                    PLASMA_desc, descA,
                    PLASMA_desc, descW,
                    PLASMA_sequence*, sequence,
                    PLASMA_request*, request);
            }
            else {
                /* Multiply by inv(U**T). */
                plasma_parallel_call_9(plasma_pstrsm,
                    PLASMA_enum, PlasmaLeft,
                    PLASMA_enum, PlasmaUpper,
                    PLASMA_enum, PlasmaTrans,
                    PLASMA_enum, PlasmaNonUnit,
                    float, 1.0,
                    PLASMA_desc, descA,
                    PLASMA_desc, descW,
                    PLASMA_sequence*, sequence,
                    PLASMA_request*, request);
                /* Multiply by inv(L**T). */
                plasma_parallel_call_9(plasma_pstrsm,
                    PLASMA_enum, PlasmaLeft,
                    PLASMA_enum, PlasmaLower,
                    PLASMA_enum, PlasmaTrans,
                    PLASMA_enum, PlasmaUnit,
                    float, 1.0,
                    PLASMA_desc, descA,
                    PLASMA_desc, descW,
                    PLASMA_sequence*, sequence,
                    PLASMA_request*, request);
            }
        }
        plasma_dynamic_sync();
    } while (kase != 0);

    /* Compute the estimate of the reciprocal condition number
     * (the fallback path already stored rcond directly). */
    if ((Ainvnorm != 0.0) && (fallback == PLASMA_FALSE)) {
        *rcond = ((float)1.0 / Ainvnorm) / Anorm;
    }

#if defined(REAL)
    plasma_shared_free(plasma, isgn);
#endif
    plasma_shared_free(plasma, workN);
    plasma_desc_mat_free(&descW);
    return PLASMA_SUCCESS;
}
{ "alphanum_fraction": 0.5516840883, "avg_line_length": 33.035971223, "ext": "c", "hexsha": "049e448ce307a41b20f16d5f90a14adae1930fb0", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "zhuangsc/Plasma-ompss1", "max_forks_repo_path": "compute/sgecon.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "zhuangsc/Plasma-ompss1", "max_issues_repo_path": "compute/sgecon.c", "max_line_length": 110, "max_stars_count": null, "max_stars_repo_head_hexsha": "bcc99c164a256bc7df7c936b9c43afd38c12aea2", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "zhuangsc/Plasma-ompss1", "max_stars_repo_path": "compute/sgecon.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3510, "size": 13776 }
#include <stdio.h> #include <gsl/gsl_matrix.h> #define MATRIX_SIZE 3 static const double matrix_1[MATRIX_SIZE][MATRIX_SIZE] = { {1., 4., 2.}, {-1., -2., 1.}, {3., 20., 19.}, }; static const double vector_1[] = {8., 3., 71.}; static const double matrix_2[MATRIX_SIZE][MATRIX_SIZE] = { {1., 0.1, 0.225}, {-0.15, 1., 0.2}, {-0.4, -0.025, 1.} }; static double const vector_2[] = {0.1, 0.9, 0.8}; void ulDecomposition(const gsl_matrix *A, const gsl_vector *b) { gsl_matrix *L = gsl_matrix_calloc(MATRIX_SIZE, MATRIX_SIZE); gsl_matrix *U = gsl_matrix_calloc(MATRIX_SIZE, MATRIX_SIZE); for (size_t i = 0; i < MATRIX_SIZE; i++) { gsl_matrix_set(L, i, i, 1.); } } int main(void) { printf("Gauss.\n\n"); // Alloc matrix`s gsl_matrix_const_view matrix_1v = gsl_matrix_const_view_array((const double *) matrix_1, MATRIX_SIZE, MATRIX_SIZE); gsl_vector_const_view vector_1v = gsl_vector_const_view_array(vector_1, MATRIX_SIZE); const gsl_matrix *matrix1 = &matrix_1v.matrix; const gsl_vector *vector1 = &vector_1v.vector; ulDecomposition(matrix1, vector1); // gsl_matrix *orig_matrix = gsl_matrix_alloc(MATRIX_SIZE, MATRIX_SIZE); // gsl_vector *vector1 = gsl_vector_alloc(MATRIX_SIZE); // gsl_vector *orig_vector = gsl_vector_alloc(MATRIX_SIZE); // gsl_vector *result1 = gsl_vector_alloc(MATRIX_SIZE); // gsl_vector *result2 = gsl_vector_alloc(MATRIX_SIZE); // Init matrix // Init vector } void gaussSeidelSolve() { }
{ "alphanum_fraction": 0.686827957, "avg_line_length": 24.8, "ext": "c", "hexsha": "7c17e62eefc0819331b26d402d3551a5f9f04ad8", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e8e6c65521160b3dd7054043548d8d6536d7dd9a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "unixs/numerical-la-matrix-forms", "max_forks_repo_path": "src/matrix.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "e8e6c65521160b3dd7054043548d8d6536d7dd9a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "unixs/numerical-la-matrix-forms", "max_issues_repo_path": "src/matrix.c", "max_line_length": 117, "max_stars_count": null, "max_stars_repo_head_hexsha": "e8e6c65521160b3dd7054043548d8d6536d7dd9a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "unixs/numerical-la-matrix-forms", "max_stars_repo_path": "src/matrix.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 470, "size": 1488 }
/* Internal header file for cminpack, by Frederic Devernay. */
#ifndef __CMINPACKP_H__
#define __CMINPACKP_H__

/* This header must be included after cminpack.h, which defines the
 * __cminpack_*__ name-mangling macros used below. */
#ifndef __CMINPACK_H__
#error "cminpackP.h in an internal cminpack header, and must be included after all other headers (including cminpack.h)"
#endif

/* BLAS/LAPACK acceleration is only wired up for the float/double
 * instantiations of the library. */
#if (defined (USE_CBLAS) || defined (USE_LAPACK)) && !defined (__cminpack_double__) && !defined (__cminpack_float__)
#error "cminpack can use cblas and lapack only in double or single precision mode"
#endif

/* Euclidean norm: delegate to CBLAS nrm2 when available, otherwise use
 * cminpack's own enorm implementation. */
#ifdef USE_CBLAS
#ifdef __APPLE__
#include <Accelerate/Accelerate.h>
#else
#include <cblas.h>
#endif
#define __cminpack_enorm__(n,x) __cminpack_cblas__(nrm2)(n,x,1)
#else
#define __cminpack_enorm__(n,x) __cminpack_func__(enorm)(n,x)
#endif

#ifdef USE_LAPACK
#ifdef __APPLE__
#include <Accelerate/Accelerate.h>
#else
/* Non-Apple platforms: declare the CLAPACK integer types and the three
 * LAPACK routines used (Givens rotation, pivoted and plain QR). */
#if defined(__LP64__) /* In LP64 match sizes with the 32 bit ABI */
typedef int 		__CLPK_integer;
typedef int 		__CLPK_logical;
typedef __CLPK_logical 	(*__CLPK_L_fp)();
typedef int 		__CLPK_ftnlen;
#else
typedef long int 	__CLPK_integer;
typedef long int 	__CLPK_logical;
typedef __CLPK_logical 	(*__CLPK_L_fp)();
typedef long int 	__CLPK_ftnlen;
#endif

/* slartg/dlartg: generate a plane (Givens) rotation. */
int __cminpack_lapack__(lartg_)( __cminpack_real__ *f, __cminpack_real__ *g,
                                 __cminpack_real__ *cs, __cminpack_real__ *sn,
                                 __cminpack_real__ *r__);
/* sgeqp3/dgeqp3: QR factorization with column pivoting. */
int __cminpack_lapack__(geqp3_)( __CLPK_integer *m, __CLPK_integer *n,
                                 __cminpack_real__ *a, __CLPK_integer * lda,
                                 __CLPK_integer *jpvt, __cminpack_real__ *tau,
                                 __cminpack_real__ *work,
                                 __CLPK_integer *lwork, __CLPK_integer *info);
/* sgeqrf/dgeqrf: QR factorization without pivoting. */
int __cminpack_lapack__(geqrf_)( __CLPK_integer *m, __CLPK_integer *n,
                                 __cminpack_real__ *a, __CLPK_integer * lda,
                                 __cminpack_real__ *tau,
                                 __cminpack_real__ *work,
                                 __CLPK_integer *lwork, __CLPK_integer *info);
#endif
#endif

#include "minpackP.h"

#endif /* !__CMINPACKP_H__ */
{ "alphanum_fraction": 0.7813876652, "avg_line_length": 33.0181818182, "ext": "h", "hexsha": "1b460ec8a245da1d3cb5b0f15bc82a234e8a4025", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "d5d77f3541f329bbb28142d18606b22f115b7df6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ProjectZeroDays/Pyto", "max_forks_repo_path": "Extensions/SciPy/cminpack-master/cminpackP.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "d5d77f3541f329bbb28142d18606b22f115b7df6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ProjectZeroDays/Pyto", "max_issues_repo_path": "Extensions/SciPy/cminpack-master/cminpackP.h", "max_line_length": 120, "max_stars_count": 2, "max_stars_repo_head_hexsha": "e69a9ab57d5ef86675041f9e1f4427e9b79bb8e7", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ianormy/Pyto", "max_stars_repo_path": "SciPy/cminpack-master/cminpackP.h", "max_stars_repo_stars_event_max_datetime": "2020-08-25T16:36:03.000Z", "max_stars_repo_stars_event_min_datetime": "2020-08-25T13:55:00.000Z", "num_tokens": 597, "size": 1816 }
/* pygsl wrapper module exposing GSL one-dimensional function minimizers
 * (gsl_min_fminimizer) to Python 2. */
#include <pygsl/solver.h>
#include <gsl/gsl_min.h>

static const char  min_f_type_name[] = "F-Minimizer";
/* Per-method doc strings (currently empty placeholders). */
static const char  min_x_minimum_doc[]= "";
static const char  min_x_lower_doc[]= "";
static const char  min_x_upper_doc[]= "";
static const char  min_f_minimum_doc[]= "";
static const char  min_f_lower_doc[]= "";
static const char  min_f_upper_doc[]= "";
static const char  min_test_delta_doc[]= "";
static const char  min_set_f_doc[]= "";
static const char  min_init_brent_doc[]= "";
static const char  min_init_goldensection_doc[]= "";

const char * filename = __FILE__;
PyObject *module = NULL;

/* Thin accessors: each forwards to the generic PyGSL_solver_ret_double()
 * helper with the matching gsl_min_fminimizer_* getter. */
static PyObject* PyGSL_min_f_minimum(PyGSL_solver *self, PyObject *args)
{
     return PyGSL_solver_ret_double(self, args, (double_m_t) gsl_min_fminimizer_f_minimum);
}
static PyObject* PyGSL_min_f_lower(PyGSL_solver *self, PyObject *args)
{
     return PyGSL_solver_ret_double(self, args, (double_m_t) gsl_min_fminimizer_f_lower);
}
static PyObject* PyGSL_min_f_upper(PyGSL_solver *self, PyObject *args)
{
     return PyGSL_solver_ret_double(self, args, (double_m_t) gsl_min_fminimizer_f_upper);
}
static PyObject* PyGSL_min_x_minimum(PyGSL_solver *self, PyObject *args)
{
     return PyGSL_solver_ret_double(self, args, (double_m_t) gsl_min_fminimizer_x_minimum);
}
static PyObject* PyGSL_min_x_lower(PyGSL_solver *self, PyObject *args)
{
     return PyGSL_solver_ret_double(self, args, (double_m_t) gsl_min_fminimizer_x_lower);
}
static PyObject* PyGSL_min_x_upper(PyGSL_solver *self, PyObject *args)
{
     return PyGSL_solver_ret_double(self, args, (double_m_t) gsl_min_fminimizer_x_upper);
}

/* test_interval(epsabs, epsrel) on a solver instance: convergence test on
 * the solver's current bracketing interval. */
static PyObject* PyGSL_min_solver_test_interval(PyGSL_solver * self, PyObject *args)
{
     double epsabs, epsrel;
     gsl_min_fminimizer *s = (gsl_min_fminimizer *) self->solver;
     if(!PyArg_ParseTuple(args, "dd", &epsabs, &epsrel))
	  return NULL;
     return PyInt_FromLong(gsl_min_test_interval(s->x_lower, s->x_upper, epsabs, epsrel));
}

/* set(f, x_minimum, x_lower, x_upper): bind the objective function and the
 * initial bracket via the shared PyGSL_solver_set_f() helper. */
static PyObject* PyGSL_min_set_f(PyGSL_solver *self, PyObject *args, PyObject *kw)
{
     return PyGSL_solver_set_f(self, args, kw, (void *)gsl_min_fminimizer_set, 0);
}

/* Method table attached to every F-Minimizer instance. */
static PyMethodDef PyGSL_min_fmethods[] = {
     {"x_minimum", (PyCFunction)PyGSL_min_x_minimum, METH_NOARGS, (char *)min_x_minimum_doc},
     {"x_lower", (PyCFunction)PyGSL_min_x_lower, METH_NOARGS, (char *)min_x_lower_doc},
     {"x_upper", (PyCFunction)PyGSL_min_x_upper, METH_NOARGS, (char *)min_x_upper_doc},
     {"f_minimum", (PyCFunction)PyGSL_min_f_minimum, METH_NOARGS, (char *)min_f_minimum_doc},
     {"f_lower", (PyCFunction)PyGSL_min_f_lower, METH_NOARGS, (char *)min_f_lower_doc},
     {"f_upper", (PyCFunction)PyGSL_min_f_upper, METH_NOARGS, (char *)min_f_upper_doc},
     {"set", (PyCFunction)PyGSL_min_set_f, METH_VARARGS|METH_KEYWORDS, (char *)min_set_f_doc},
     {"test_interval",(PyCFunction)PyGSL_min_solver_test_interval, METH_VARARGS, NULL},
     {NULL, NULL, 0, NULL}           /* sentinel */
};

/* Static descriptor shared by all F-Minimizer instances: destructor,
 * (no restart hook), name getter and iterate function. */
const struct _SolverStatic
min_solver_f = {{(void_m_t) gsl_min_fminimizer_free,
		 /* gsl_multimin_fminimizer_restart */
		 (void_m_t) NULL,
		 (name_m_t) gsl_min_fminimizer_name,
		 (int_m_t) gsl_min_fminimizer_iterate},
		1,
		PyGSL_min_fmethods,
		min_f_type_name};

/* Allocate a solver object of the requested GSL minimizer type. */
static PyObject*
PyGSL_min_f_init(PyObject *self, PyObject *args, const gsl_min_fminimizer_type * type)
{
     PyObject *tmp=NULL;
     solver_alloc_struct s = {type, (void_an_t) gsl_min_fminimizer_alloc, &min_solver_f};
     FUNC_MESS_BEGIN();
     tmp = PyGSL_solver_dn_init(self, args, &s, 0);
     FUNC_MESS_END();
     return tmp;
}

/* Expand to a module-level constructor PyGSL_min_init_<name> for the GSL
 * minimizer type gsl_min_fminimizer_<name>. */
#define AMIN_F(name)                                                      \
static PyObject* PyGSL_min_init_ ## name (PyObject *self, PyObject *args)\
{                                                                         \
     PyObject *tmp = NULL;                                                \
     FUNC_MESS_BEGIN();                                                   \
     tmp = PyGSL_min_f_init(self, args, gsl_min_fminimizer_ ## name);     \
     if (tmp == NULL){                                                    \
          PyGSL_add_traceback(module, __FILE__, __FUNCTION__, __LINE__);  \
     }                                                                    \
     FUNC_MESS_END();                                                     \
     return tmp;                                                          \
}
AMIN_F(brent)
AMIN_F(goldensection)

/* Module-level test_interval(x_lower, x_upper, epsabs, epsrel): convergence
 * test on an explicit interval, independent of any solver instance. */
static PyObject* PyGSL_min_test_interval(PyObject * self, PyObject *args)
{
     double x_lower, x_upper, epsabs, epsrel;
     if(!PyArg_ParseTuple(args, "dddd", &x_lower, &x_upper, &epsabs, &epsrel))
	  return NULL;
     return PyInt_FromLong(gsl_min_test_interval(x_lower, x_upper, epsabs, epsrel));
}

static const char PyGSL_minimize_module_doc [] = "XXX Missing ";

static PyMethodDef mMethods[] = {
     /* solvers */
     {"brent", PyGSL_min_init_brent, METH_NOARGS, (char *)min_init_brent_doc},
     {"goldensection",PyGSL_min_init_goldensection, METH_NOARGS, (char *)min_init_goldensection_doc},
     /* min methods */
     {"test_interval", PyGSL_min_test_interval, METH_VARARGS, (char *)min_test_delta_doc},
     {NULL, NULL, 0, NULL}
};

/* Python 2 module entry point: register methods and set __doc__.
 * NOTE(review): the success path falls through into the fail: label, so
 * FUNC_MESS("FAIL") is emitted even on successful initialization —
 * presumably harmless if FUNC_MESS is a debug no-op, but worth confirming. */
void initminimize(void)
{
     PyObject* m, *dict, *item;
     FUNC_MESS_BEGIN();
     m=Py_InitModule("minimize", mMethods);
     import_pygsl_solver();
     assert(PyGSL_API);
     module = m;
     assert(m);

     dict = PyModule_GetDict(m);
     if(!dict)
	  goto fail;
     if (!(item = PyString_FromString((char*)PyGSL_minimize_module_doc))){
	  PyErr_SetString(PyExc_ImportError,
			  "I could not generate module doc string!");
	  goto fail;
     }
     if (PyDict_SetItemString(dict, "__doc__", item) != 0){
	  PyErr_SetString(PyExc_ImportError,
			  "I could not init doc string!");
	  goto fail;
     }
     FUNC_MESS_END();

 fail:
     FUNC_MESS("FAIL");
     return;
}
{ "alphanum_fraction": 0.6491754123, "avg_line_length": 34.1079545455, "ext": "c", "hexsha": "20c27d35c82a8c722eb47264317c33b41875587a", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2018-10-02T06:18:07.000Z", "max_forks_repo_forks_event_min_datetime": "2018-10-02T06:18:07.000Z", "max_forks_repo_head_hexsha": "457e7afb5cab424296dff95e1acf10ebf70d32a9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "juhnowski/FishingRod", "max_forks_repo_path": "production/pygsl-0.9.5/testing/src/solvers/minimize.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "457e7afb5cab424296dff95e1acf10ebf70d32a9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "juhnowski/FishingRod", "max_issues_repo_path": "production/pygsl-0.9.5/testing/src/solvers/minimize.c", "max_line_length": 104, "max_stars_count": null, "max_stars_repo_head_hexsha": "457e7afb5cab424296dff95e1acf10ebf70d32a9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "juhnowski/FishingRod", "max_stars_repo_path": "production/pygsl-0.9.5/testing/src/solvers/minimize.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1617, "size": 6003 }
/* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* * Copyright 2017 Couchbase, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cJSON.h> #include <memcached/mcd_util-visibility.h> #include <gsl/gsl> #include <string> namespace cb { namespace breakpad { /** * What information should breakpad minidumps contain? */ enum class Content { /** * Default content (threads+stack+env+arguments) */ Default }; /** * Settings for Breakpad crash catcher. */ struct MCD_UTIL_PUBLIC_API Settings { /** * Default constructor initialize the object to be in a disabled state */ Settings() = default; /** * Initialize the Breakpad object from the specified JSON structure * which looks like: * * { * "enabled" : true, * "minidump_dir" : "/var/crash", * "content" : "default" * } * * @param json The json to parse * @throws std::invalid_argument if the json dosn't look as expected */ explicit Settings(gsl::not_null<const cJSON*> json); bool enabled{false}; std::string minidump_dir; Content content{Content::Default}; }; } // namespace breakpad } // namespace cb MCD_UTIL_PUBLIC_API std::string to_string(cb::breakpad::Content content);
{ "alphanum_fraction": 0.6522896699, "avg_line_length": 26.0833333333, "ext": "h", "hexsha": "26c4bf6fa67ec45482202900f09418a24ca83c45", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "24d4546220f3181678d7eadedc68b4ea088d3538", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "t3rm1n4l/kv_engine", "max_forks_repo_path": "utilities/breakpad_settings.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "24d4546220f3181678d7eadedc68b4ea088d3538", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "t3rm1n4l/kv_engine", "max_issues_repo_path": "utilities/breakpad_settings.h", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "24d4546220f3181678d7eadedc68b4ea088d3538", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "t3rm1n4l/kv_engine", "max_stars_repo_path": "utilities/breakpad_settings.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 450, "size": 1878 }
// // editor.hpp // PretendToWork // // Created by tqtifnypmb on 14/12/2017. // Copyright © 2017 tqtifnypmb. All rights reserved. // #pragma once #include "../crdt/Engine.h" #include "../types.h" #include <memory> #include <gsl/gsl> #include <map> namespace brick { class Rope; class Editor { public: using DeltaList = std::vector<std::tuple<Revision, size_t, size_t>>; // [(delta, begRow, endRow)] using SyncCb = std::function<void(const Editor::DeltaList& deltaList)>; Editor(size_t viewId, const detail::CodePointList& cplist, SyncCb syncCb); Editor(size_t viewId, SyncCb syncCb); template <class Converter> void insert(gsl::span<const char> bytes, size_t pos); void insert(const detail::CodePointList& cplist, size_t pos); void erase(Range range); void undo(); void sync(Editor& editor); std::map<size_t, detail::CodePointList> region(size_t begLine, size_t endLine); void clearRevisions() { engine_.revisions().clear(); } private: Editor::DeltaList convertEngineDelta(const Engine::DeltaList& deltas); void updateLines(size_t pos, const detail::CodePointList& cplist); void updateLines(Range r); std::unique_ptr<Rope> rope_; Engine engine_; std::vector<size_t> linesIndex_; SyncCb sync_cb_; }; template <class Converter> void Editor::insert(gsl::span<const char> bytes, size_t pos) { insert(Converter::encode(bytes), pos); } } // namespace brick
{ "alphanum_fraction": 0.6657859974, "avg_line_length": 24.0317460317, "ext": "h", "hexsha": "f239c1c2f19a1d19dd6bf39dddb1353ff41d9158", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "49398f77113c57d4e256e838a5ad6b9a6381de6a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "tqtifnypmb/brick", "max_forks_repo_path": "src/editor/Editor.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "49398f77113c57d4e256e838a5ad6b9a6381de6a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "tqtifnypmb/brick", "max_issues_repo_path": "src/editor/Editor.h", "max_line_length": 105, "max_stars_count": null, "max_stars_repo_head_hexsha": "49398f77113c57d4e256e838a5ad6b9a6381de6a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "tqtifnypmb/brick", "max_stars_repo_path": "src/editor/Editor.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 405, "size": 1514 }
// Copyright (c) Microsoft Corporation. // Licensed under the MIT license. #pragma once #define NOMINMAX #define WIN32_LEAN_AND_MEAN #include <array> #include <iomanip> #include <optional> #include <sstream> #include <string_view> #include <thread> #include <unordered_map> #include <unordered_set> #include <vector> #include <d2d1.h> #include <d3d11_1.h> #include <d3dcompiler.h> #include <dwrite_3.h> #include <dcomp.h> #include <dxgi1_3.h> #include <dxgidebug.h> #include <VersionHelpers.h> #include <gsl/gsl_util> #include <gsl/pointers> #include <gsl/span> #include <wil/com.h> #include <wil/filesystem.h> #include <wil/result_macros.h> #include <wil/stl.h> #include <wil/win32_helpers.h> // Dynamic Bitset (optional dependency on LibPopCnt for perf at bit counting) // Variable-size compressed-storage header-only bit flag storage library. #pragma warning(push) #pragma warning(disable : 4702) // unreachable code #include <dynamic_bitset.hpp> #pragma warning(pop) // Chromium Numerics (safe math) #pragma warning(push) #pragma warning(disable : 4100) // '...': unreferenced formal parameter #pragma warning(disable : 26812) // The enum type '...' is unscoped. Prefer 'enum class' over 'enum' (Enum.3). #include <base/numerics/safe_math.h> #pragma warning(pop) #include <til.h> #include <til/bit.h>
{ "alphanum_fraction": 0.7135531136, "avg_line_length": 25.7547169811, "ext": "h", "hexsha": "e6ede9efed74c365c9bc5fc1a87d5e398a40319f", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-10-31T06:35:07.000Z", "max_forks_repo_forks_event_min_datetime": "2020-10-31T06:35:07.000Z", "max_forks_repo_head_hexsha": "62c95b5017e92a780cdc43008e30b4e43d2edc9b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "by-memory/terminal", "max_forks_repo_path": "src/renderer/atlas/pch.h", "max_issues_count": 10, "max_issues_repo_head_hexsha": "62c95b5017e92a780cdc43008e30b4e43d2edc9b", "max_issues_repo_issues_event_max_datetime": "2020-10-20T16:23:04.000Z", "max_issues_repo_issues_event_min_datetime": "2020-05-22T17:33:36.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "by-memory/terminal", "max_issues_repo_path": "src/renderer/atlas/pch.h", "max_line_length": 111, "max_stars_count": 9, "max_stars_repo_head_hexsha": "62c95b5017e92a780cdc43008e30b4e43d2edc9b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "by-memory/terminal", "max_stars_repo_path": "src/renderer/atlas/pch.h", "max_stars_repo_stars_event_max_datetime": "2022-03-03T09:58:23.000Z", "max_stars_repo_stars_event_min_datetime": "2020-04-21T02:08:05.000Z", "num_tokens": 338, "size": 1365 }
#ifndef __KILLDUPTRKS_H__
#define __KILLDUPTRKS_H__

#include <cstddef>
#include <vector>
#include <algorithm>
#include <functional>
#include <utility>
#include <iostream>

#include "L1Trigger/TrackFindingTMTT/interface/Settings.h"
#include "L1Trigger/TrackFindingTMTT/interface/Stub.h"
#include "L1Trigger/TrackFindingTMTT/interface/TP.h"
#include "FWCore/Utilities/interface/Exception.h"

#include <gsl/gsl_fit.h>

// NOTE(review): `using namespace std;` at header scope is an anti-pattern, but the
// companion KillDupTrks.icc implementation (included below) presumably depends on the
// unqualified names (e.g. `vector`), so it is left in place — confirm before removing.
using namespace std;

/**
 * Kill duplicate reconstructed tracks.
 * e.g. Those sharing many hits in common.
 *
 * Currently this is intended to run only on tracks found within a single (eta,phi) sector.
 * It includes a naive algorithms from Ian (dupTrkAlg = 1) & more sophisticated ones from Ivan (dupTrkAlg > 1).
 * The class is implemented inside L1Trigger/TrackFindingTMTT/interface/KillDupTrks.icc
 *
 * The template class "T" can be any class inheriting from L1trackBase.
 *
 * -------------------------------------------------------------------------------------------
 *  GENERAL INFO ABOUT THE FILTER ALGORITHMS DEFINED IN THE CLASS.
 *  Some of these algorithms are designed to work on r-phi L1track2D tracks, and some on r-z
 *  L1track2D tracks. Others work on L1tracks3D.
 * -------------------------------------------------------------------------------------------
 */

namespace TMTT {

class L1trackBase;
class L1track2D;
class L1track3D;
class L1fittedTrack;

template <class T> class KillDupTrks {

public:

  KillDupTrks() {
    // Check that class used as template "T" inherits from class L1trackBase.
    static_assert(std::is_base_of<L1trackBase, T>::value,
                  "KillDupTrks ERROR: You instantiated this with a template class not inheriting from L1trackBase!");
  }

  ~KillDupTrks() {}

  /**
   * Make available cfg parameters & specify which algorithm is to be used for duplicate track removal.
   */
  void init(const Settings* settings, unsigned int dupTrkAlg);

  /**
   * Eliminate duplicate tracks from the input collection, and so return a reduced list of tracks.
   */
  vector<T> filter(const vector<T>& vecTracks) const;

private:

  /**
   * Implementing "inverse" OSU algorithm, check for stubs in common,
   * keep largest candidates if common stubs in N or more layers (default 5 at present), both if equal
   */
  vector<T> filterAlg8(const vector<T>& vecTracks) const;

  /** Implementing "inverse" OSU algorithm, check for layers in common, reverse order as per Luis's suggestion
   * Comparison window of up to 6
   * Modified version of Algo23, looking for layers in common as in Algo8
   * Check if N or more common layers (default 5 at present)
   * Then keep candidate with most stubs, use |q/pT| as tie-break, finally drop "latest" if still equal
   */
  vector<T> filterAlg25(const vector<T>& vecTracks) const;

  /**
   * Prints out a consistently formatted report of killed duplicate track
   */
  void printKill(unsigned alg, unsigned dup, unsigned cand, T dupTrack, T candTrack) const;

  /**
   * Counts candidate layers with stubs in common
   */
  unsigned int layerMatches(std::vector< std::pair<unsigned int, unsigned int> >* iStubs,
                            std::vector< std::pair<unsigned int, unsigned int> >* jStubs) const;

private:

  const Settings *settings_; // Configuration parameters.
  unsigned int dupTrkAlg_; // Specifies choice of algorithm for duplicate track removal.
  unsigned int dupTrkMinCommonHitsLayers_; // Min no of matched stubs & layers to keep smaller cand
};

}

//=== Include file which implements all the functions in the above class.

#include "L1Trigger/TrackFindingTMTT/interface/KillDupTrks.icc"

#endif
{ "alphanum_fraction": 0.7052743175, "avg_line_length": 35.261682243, "ext": "h", "hexsha": "fef3f2802a4161c120eb81bc87374ecf05c55356", "lang": "C", "max_forks_count": 5, "max_forks_repo_forks_event_max_datetime": "2020-01-09T13:33:17.000Z", "max_forks_repo_forks_event_min_datetime": "2018-08-21T16:37:52.000Z", "max_forks_repo_head_hexsha": "d66d1b15005a5f253dcbd3704591620e39fe4290", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "djcranshaw/cmssw", "max_forks_repo_path": "L1Trigger/TrackFindingTMTT/interface/KillDupTrks.h", "max_issues_count": 3, "max_issues_repo_head_hexsha": "d66d1b15005a5f253dcbd3704591620e39fe4290", "max_issues_repo_issues_event_max_datetime": "2019-12-05T21:16:03.000Z", "max_issues_repo_issues_event_min_datetime": "2018-08-23T13:40:24.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "djcranshaw/cmssw", "max_issues_repo_path": "L1Trigger/TrackFindingTMTT/interface/KillDupTrks.h", "max_line_length": 157, "max_stars_count": 3, "max_stars_repo_head_hexsha": "d66d1b15005a5f253dcbd3704591620e39fe4290", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "djcranshaw/cmssw", "max_stars_repo_path": "L1Trigger/TrackFindingTMTT/interface/KillDupTrks.h", "max_stars_repo_stars_event_max_datetime": "2019-02-19T11:45:32.000Z", "max_stars_repo_stars_event_min_datetime": "2018-08-24T19:10:26.000Z", "num_tokens": 939, "size": 3773 }
/**
 * \file TypedBaseFilter.h
 */

#ifndef ATK_CORE_TYPEDBASEFILTER_H
#define ATK_CORE_TYPEDBASEFILTER_H

#include <ATK/Core/BaseFilter.h>
#include <ATK/Core/TypeTraits.h>

#include <memory>
#include <vector>

#include <boost/align/aligned_allocator.hpp>

#include <gsl/gsl>

namespace ATK
{
  // Byte alignment used for all audio buffers (enables SIMD-friendly storage).
  const gsl::index ALIGNMENT = 32;

  /// Interface for output filters
  template<typename DataType>
  class ATK_CORE_EXPORT OutputArrayInterface
  {
  public:
    virtual ~OutputArrayInterface() = default;

    /**
     * @brief Returns an array with the processed output
     * @param port is the port that the next plugin listens to
     */
    virtual DataType* get_output_array(gsl::index port) const = 0;
    /**
     * Returns the size of the output arrays (usually the last size processed)
     */
    virtual gsl::index get_output_array_size() const = 0;
  };

  /// Base class for typed filters, contains arrays
  template<typename DataType_, typename DataType__ = DataType_>
  class ATK_CORE_EXPORT TypedBaseFilter : public BaseFilter, public OutputArrayInterface<DataType__>
  {
  protected:
    /// Simplify parent calls
    using Parent = BaseFilter;

  public:
    /// To be used by inherited APIs
    using DataType = DataType_;
    /// To be used by inherited APIs
    using DataTypeInput = DataType_;
    /// To be used by inherited APIs
    using DataTypeOutput = DataType__;
    /// To be used for filters that require aligned data
    using AlignedVector = std::vector<DataTypeInput, boost::alignment::aligned_allocator<DataTypeInput, ALIGNMENT> >;
    /// To be used for filters that require aligned data for output data
    using AlignedOutVector = std::vector<DataTypeOutput, boost::alignment::aligned_allocator<DataTypeOutput, ALIGNMENT> >;
    /// To be used for filters that required aligned data for parameters (like EQ)
    using AlignedScalarVector = std::vector<typename TypeTraits<DataType>::Scalar, boost::alignment::aligned_allocator<typename TypeTraits<DataType>::Scalar, ALIGNMENT> >;

    /// Base constructor for filters with actual data
    TypedBaseFilter(gsl::index nb_input_ports, gsl::index nb_output_ports);
    /// Move constructor
    /// NOTE(review): not declared noexcept — containers may copy instead of move; the
    /// out-of-line definition would need the same change, so only flagged here.
    TypedBaseFilter(TypedBaseFilter&& other);
    /// Destructor
    ~TypedBaseFilter() override = default;

    TypedBaseFilter(const TypedBaseFilter&) = delete;
    TypedBaseFilter& operator=(const TypedBaseFilter&) = delete;

    /**
     * @brief Returns an array with the processed output
     * @param port is the port that the next plugin listens to
     */
    DataType__* get_output_array(gsl::index port) const final;
    gsl::index get_output_array_size() const final;

    void set_nb_input_ports(gsl::index nb_ports) override;
    void set_nb_output_ports(gsl::index nb_ports) override;
    void full_setup() override;

    /// Connects this filter input to another's output
    void set_input_port(gsl::index input_port, gsl::not_null<BaseFilter*> filter, gsl::index output_port) final;
    void set_input_port(gsl::index input_port, BaseFilter& filter, gsl::index output_port) final;

  private:
    int get_type() const override;

  protected:
    /// This implementation does nothing
    void process_impl(gsl::index size) const override;
    /// Prepares the filter by retrieving the inputs arrays
    void prepare_process(gsl::index size) final;
    /// Prepares the filter by resizing the outputs arrays
    void prepare_outputs(gsl::index size) final;
    /// Used to convert other filter outputs to DataType*
    void convert_inputs(gsl::index size);

    /// Input arrays with the input delay, owned here
    std::vector<AlignedVector> converted_inputs_delay;
    /// Input arrays, starting from t=0 (without input delay)
    std::vector<DataTypeInput*> converted_inputs;
    /// Current size of the input arrays, without delay
    std::vector<gsl::index> converted_inputs_size;
    /// Current input delay
    std::vector<gsl::index> converted_in_delays;
    /// Pointer to the output interface of the connected filters
    std::vector<OutputArrayInterface<DataType_>*> direct_filters;
    /// Output arrays with the output delay, owned here
    std::vector<AlignedOutVector> outputs_delay;
    /// Output arrays, starting from t=0 (without output delay)
    std::vector<DataTypeOutput*> outputs;
    /// Current size of the output arrays, without delay
    std::vector<gsl::index> outputs_size;
    /// Current output delay
    std::vector<gsl::index> out_delays;

    /// A vector containing the default values for the input arrays
    AlignedVector default_input;
    /// A vector containing the default values for the output arrays
    AlignedOutVector default_output;
  };
}

#endif
{ "alphanum_fraction": 0.7249626308, "avg_line_length": 36.5859375, "ext": "h", "hexsha": "48f7a9d888238363a7cc405c53e97889b61ac2f6", "lang": "C", "max_forks_count": 48, "max_forks_repo_forks_event_max_datetime": "2021-04-07T02:33:07.000Z", "max_forks_repo_forks_event_min_datetime": "2015-08-15T12:08:13.000Z", "max_forks_repo_head_hexsha": "accf009d7238f32702eb1d5ee23c5148fc68e3bd", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "D-J-Roberts/AudioTK", "max_forks_repo_path": "ATK/Core/TypedBaseFilter.h", "max_issues_count": 22, "max_issues_repo_head_hexsha": "accf009d7238f32702eb1d5ee23c5148fc68e3bd", "max_issues_repo_issues_event_max_datetime": "2020-07-11T14:18:19.000Z", "max_issues_repo_issues_event_min_datetime": "2015-07-28T15:20:24.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "D-J-Roberts/AudioTK", "max_issues_repo_path": "ATK/Core/TypedBaseFilter.h", "max_line_length": 171, "max_stars_count": 249, "max_stars_repo_head_hexsha": "accf009d7238f32702eb1d5ee23c5148fc68e3bd", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "D-J-Roberts/AudioTK", "max_stars_repo_path": "ATK/Core/TypedBaseFilter.h", "max_stars_repo_stars_event_max_datetime": "2022-03-15T18:47:46.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-05T13:36:26.000Z", "num_tokens": 1069, "size": 4683 }
/*
 * Configure-time probe: verifies that the LAPACKE C interface is present and
 * linkable by referencing one trivial symbol. Never meant to be executed for
 * its result — only compiled and linked.
 */
#include <lapacke.h>

int main(void)
{
    /* Any cheap LAPACKE call works; dlapy2 computes sqrt(x^2 + y^2). */
    LAPACKE_dlapy2(0, 0);
    return 0;
}
{ "alphanum_fraction": 0.6551724138, "avg_line_length": 19.3333333333, "ext": "c", "hexsha": "76f8ebca6147b55d2effa06acf7137813317241b", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2019-06-01T07:10:01.000Z", "max_forks_repo_forks_event_min_datetime": "2015-06-16T04:22:23.000Z", "max_forks_repo_head_hexsha": "ebd49e1198c4ec9e7612ad4a9806d16a4ff0bdc9", "max_forks_repo_licenses": [ "BSL-1.0" ], "max_forks_repo_name": "t-sakashita/rokko", "max_forks_repo_path": "config/check_lapacke.c", "max_issues_count": 514, "max_issues_repo_head_hexsha": "ebd49e1198c4ec9e7612ad4a9806d16a4ff0bdc9", "max_issues_repo_issues_event_max_datetime": "2021-06-25T09:29:52.000Z", "max_issues_repo_issues_event_min_datetime": "2015-02-05T14:56:54.000Z", "max_issues_repo_licenses": [ "BSL-1.0" ], "max_issues_repo_name": "t-sakashita/rokko", "max_issues_repo_path": "config/check_lapacke.c", "max_line_length": 36, "max_stars_count": 16, "max_stars_repo_head_hexsha": "ebd49e1198c4ec9e7612ad4a9806d16a4ff0bdc9", "max_stars_repo_licenses": [ "BSL-1.0" ], "max_stars_repo_name": "t-sakashita/rokko", "max_stars_repo_path": "config/check_lapacke.c", "max_stars_repo_stars_event_max_datetime": "2022-03-18T19:04:49.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-31T18:57:48.000Z", "num_tokens": 24, "size": 58 }
#include <math.h>
#include <gsl/gsl_errno.h>
#include "advanceCN.h"
#include "advanceBE.h"
#include "checkpoint.h"
#include "driver.h"
#include "setup.h"
#include "userFunc.h"

/**************************************************************************/
/* Driver routine                                                         */
/**************************************************************************/

/* Main time-integration driver: advances the disk state (col, pres, eInt)
 * from tStart toward tEnd with an adaptive time step, using either the
 * Crank-Nicolson (advanceCN) or backward-Euler (advanceBE) scheme, storing
 * snapshots at the requested save times and returning the time actually
 * reached. Diagnostic counters (*nStep, *nIter, *nFail) and the output
 * arrays are updated in place. */
double driver(
	      /* Time parameters */
	      const double tStart, const double tEnd,
	      /* Equation of state parameters */
	      const bool eos_func, const double gamma_val,
	      const double delta_val,
	      /* Dimensionless viscosity parameters */
	      const bool alpha_func, const double alpha_val,
	      /* Inner boundary condition parameters */
	      const pres_bc_type ibc_pres, const enth_bc_type ibc_enth,
	      const bool ibc_func, const double ibc_pres_val,
	      const double ibc_enth_val,
	      /* Outer boundary condition parameters */
	      const pres_bc_type obc_pres, const enth_bc_type obc_enth,
	      const bool obc_func, const double obc_pres_val,
	      const double obc_enth_val,
	      /* Source function parameters */
	      const bool massSrc_func, const double massSrc_val,
	      const bool intEnSrc_func, const double intEnSrc_val,
	      /* Control and method parameters */
	      const double dtStart, const double dtMin, const double dtTol,
	      const double errTol, const double maxDtIncrease,
	      const unsigned long maxIter, const unsigned long interpOrder,
	      const unsigned long maxStep, const bool useBE,
	      const bool preTimestep_func, const bool postTimestep_func,
	      const unsigned long verbosity,
	      /* Output control parameters */
	      const unsigned long nSave, const double *tSave,
	      const unsigned long nUserOut, const bool *userOutCum,
	      const bool *writeCheckpoint, const char *checkname,
	      const bool userWriteCheckpoint, const bool writeFirstStep,
	      const unsigned long checknum,
	      /* Computational grid and workspace */
	      const grid *grd, const wksp *w,
	      /* Input data */
	      double *col, double *pres, double *eInt,
	      /* User-defined extra parameters */
	      void *params,
	      /* Diagnostic outputs */
	      unsigned long *nStep, unsigned long *nIter,
	      unsigned long *nFail,
	      /* Storage for outputs */
	      unsigned long *nOut, double *tOut, double *colOut,
	      double *presOut, double *eIntOut, double *mBndOut,
	      double *eBndOut, double *mSrcOut, double *eSrcOut,
	      double *userOut
#ifdef TESTING_MODE
	      /* Parameters used in code tests */
	      , double *residSum, unsigned long *iterStep,
	      double *driverTime, double *advanceTime,
	      double *nextIterTime, double *userTime
#endif
	      ) {
  ; /* NOTE(review): stray empty statement — harmless, kept as-is */
  status stat = RUNNING;            /* loop/exit status */
  double t = tStart;                /* current simulation time */
  double dt = dtStart;              /* current time step */
  bool writeOut = writeFirstStep;   /* store a snapshot after this step? */
  unsigned long savePtr = 0, chkPtr = checknum; /* next save slot / checkpoint number */
  double dtNew;                     /* time step suggested for the next step */
  unsigned long i, j;
  unsigned long nIterTmp, nreduce;
#ifdef TESTING_MODE
  unsigned long *residType;
  clock_t start_t, end_t;

  /* Start the clocks */
  start_t = clock();
  *advanceTime = *nextIterTime = *userTime = 0.0;

  /* In testing mode, allocate memory for diagnostic array */
  if (!(residType = calloc(maxIter, sizeof(unsigned long)))) {
    fprintf(stderr,
	    "Error: unable to allocate memory for diagnostic arrays!\n");
    exit(1);
  }

  /* In testing mode, initialize sum of residuals and iteration counter
     arrays */
  for (i=0; i<maxIter; i++) residSum[i] = 0.0;
  for (i=0; i<maxStep; i++) iterStep[i] = 0;
#endif

  /* Begin main loop */
  while (stat == RUNNING) {

    /* Print status */
    if (verbosity > 1) {
      printf("Step %ld, t = %e, dt = %e\n", *nStep, t, dt);
    } else if (verbosity > 0) {
      if (*nStep % 100 == 0) {
	printf("Step %ld, t = %e, dt = %e\n", *nStep, t, dt);
      }
    }

    /* Call user work function (pre-step hook) */
    if (preTimestep_func) {
      userPreTimestep(t, dt, grd, col, pres, eInt,
		      mBndOut+2*(*nOut), eBndOut+2*(*nOut),
		      mSrcOut+grd->nr*(*nOut), eSrcOut+grd->nr*(*nOut),
		      params, nUserOut,
		      userOut+nUserOut*grd->nr*(*nOut));
    }

    /* Try to advance; on iterative-solver failure halve dt and retry until
       convergence or until dt underflows (ZENO_ERROR) */
    dtNew = -1.0;
    nreduce = 0;
    while (dtNew < 0) {
      if (useBE == 0) {
	/* Crank-Nicolson update; returns suggested next dt, or <0 on
	   non-convergence */
	dtNew = advanceCN(t, dt, grd, col, pres, eInt,
			  mBndOut+2*(*nOut), eBndOut+2*(*nOut),
			  mSrcOut+grd->nr*(*nOut), eSrcOut+grd->nr*(*nOut),
			  eos_func, gamma_val, delta_val,
			  alpha_func, alpha_val,
			  ibc_pres, ibc_enth, ibc_func,
			  ibc_pres_val, ibc_enth_val,
			  obc_pres, obc_enth,
			  obc_func, obc_pres_val, obc_enth_val,
			  massSrc_func, massSrc_val,
			  intEnSrc_func, intEnSrc_val,
			  errTol, dtTol, maxIter, interpOrder,
			  false, verbosity > 2, w, params, &nIterTmp
#ifdef TESTING_MODE
			  , residSum, residType, advanceTime,
			  nextIterTime, userTime
#endif
			  );
      } else {
	/* Backward-Euler update; same contract as advanceCN */
	dtNew = advanceBE(t, dt, grd, col, pres, eInt,
			  mBndOut+2*(*nOut), eBndOut+2*(*nOut),
			  mSrcOut+grd->nr*(*nOut), eSrcOut+grd->nr*(*nOut),
			  eos_func, gamma_val, delta_val,
			  alpha_func, alpha_val,
			  ibc_pres, ibc_enth, ibc_func,
			  ibc_pres_val, ibc_enth_val,
			  obc_pres, obc_enth,
			  obc_func, obc_pres_val, obc_enth_val,
			  massSrc_func, massSrc_val,
			  intEnSrc_func, intEnSrc_val,
			  errTol, dtTol, maxIter, interpOrder,
			  false, verbosity > 2, w, params, &nIterTmp
#ifdef TESTING_MODE
			  , residSum, residType, advanceTime,
			  nextIterTime, userTime
#endif
			  );
      }

      /* Update iteration count. We do it here to ensure that we count
	 iterations even if they fail to converge. */
      *nIter += nIterTmp;
#ifdef TESTING_MODE
      iterStep[*nStep-1] += nIterTmp;
#endif

      if (dtNew < 0) {
	/* Non-convergence: suppress output this step, halve dt, retry */
	writeOut = false;
	dt = dt/2.0;
	nreduce++;
	(*nFail)++;
	if (verbosity > 1)
	  printf("   Iterative solver non-convergence! Reducing dt to %e.\n",
		 dt);
	if (dt < dtMin*(tEnd - tStart)) {
	  stat = ZENO_ERROR;
	  dtNew = dt;
	  break;
	}
      }
    }
    /* Scale the suggested next dt down by the same factor we had to reduce
       the current step */
    for (i=0; i<nreduce; i++) dtNew = dtNew / 2;

    /* Update time and step counter */
    t += dt;
    (*nStep)++;

    /* Call user work function (post-step hook) */
    if (postTimestep_func) {
      userPostTimestep(t, dt, grd, col, pres, eInt,
		       mBndOut+2*(*nOut), eBndOut+2*(*nOut),
		       mSrcOut+grd->nr*(*nOut), eSrcOut+grd->nr*(*nOut),
		       params, nUserOut,
		       userOut+nUserOut*grd->nr*(*nOut));
    }

    /* If requested, scale back the next time step */
    if (dtNew/dt > maxDtIncrease) dtNew = maxDtIncrease * dt;

    /* Store state */
    if (writeOut) {
      if (verbosity > 0)
	printf("Storing output %lu at step %lu, time %e\n",
	       (*nOut)+1, (*nStep)-1, t);
      tOut[(*nOut)] = t;
      for (i=0; i<grd->nr; i++) {
	colOut[i + (*nOut)*grd->nr] = col[i];
	presOut[i + (*nOut)*grd->nr] = pres[i];
	if (eos_func) eIntOut[i + (*nOut)*grd->nr] = eInt[i];
      }
      /* Boundary and source values are cumulative, so just copy the final
	 value during this output interval into the slot for the next
	 interval */
      if (savePtr<nSave-1) {
	mBndOut[2*((*nOut)+1)] = mBndOut[2*(*nOut)];
	mBndOut[2*((*nOut)+1)+1] = mBndOut[2*(*nOut)+1];
	eBndOut[2*((*nOut)+1)] = eBndOut[2*(*nOut)];
	eBndOut[2*((*nOut)+1)+1] = eBndOut[2*(*nOut)+1];
	if (massSrc_func == 1) {
	  for (i=0; i<grd->nr; i++) {
	    mSrcOut[((*nOut)+1)*grd->nr+i] = mSrcOut[(*nOut)*grd->nr+i];
	    eSrcOut[((*nOut)+1)*grd->nr+i] = eSrcOut[(*nOut)*grd->nr+i];
	  }
	} else if (intEnSrc_func == 1) {
	  for (i=0; i<grd->nr; i++) {
	    eSrcOut[((*nOut)+1)*grd->nr+i] = eSrcOut[(*nOut)*grd->nr+i];
	  }
	}
	/* Handle any user-defined cumulative outputs in the same way; note
	   that user outputs are a 3D array, with the dimensions being
	   (output time, output variable number, cell number), where cell
	   number is the fastest varying index and output time is the
	   slowest varying index in memory */
	for (j=0; j<nUserOut; j++) {
	  if (userOutCum) {
	    if (userOutCum[j]) {
	      for (i=0; i<grd->nr; i++)
		userOut[((*nOut)+1)*nUserOut*grd->nr + j*grd->nr + i] =
		  userOut[(*nOut)*nUserOut*grd->nr + j*grd->nr + i];
	    }
	  }
	}
      }

      /* Write checkpoint if requested */
      /* NOTE(review): the leading `0&&` makes this branch dead code, so
	 checkpoints are never written — looks like leftover debug
	 disabling; confirm whether it should be removed */
      if (0&&writeCheckpoint) {
	if (writeCheckpoint[savePtr]) {
	  saveCheckpoint(checkname, chkPtr, eos_func, massSrc_func,
			 intEnSrc_func, nUserOut, t, dtNew, *nStep,
			 *nIter, *nFail, grd, *nOut+1, tOut, colOut,
			 presOut, eIntOut, mBndOut, eBndOut, mSrcOut,
			 eSrcOut, userOut,
#if AA_M > 0
			 w->constraint,
#endif
			 params, userWriteCheckpoint, verbosity);
	  /* Update checkpoint counter */
	  chkPtr++;
	}
      }

      /* Update counters and flags */
      (*nOut)++;
      savePtr++;
      writeOut = false;
    }

    /* Check termination conditions */
    if ((*nStep > maxStep) && (maxStep > 0)) stat = TOO_MANY_STEPS;
    if (dtNew < dtMin*(tEnd-tStart)) stat = ZENO_ERROR;
    if (t >= tEnd) stat = NORMAL_EXIT;

    /* If necessary, reduce next time step to hit the next output time or
       the end time */
    if (savePtr < nSave) {
      if (t + dtNew > tSave[savePtr]) {
	dtNew = tSave[savePtr] - t;
	writeOut = true;
      }
    }
    if (t + dtNew >= tEnd) dtNew = (1.0+1.0e-10)*(tEnd - t);
    dt = dtNew;
  } /* Main loop end */

#ifdef TESTING_MODE
  /* In testing mode, free memory for diagnostic array */
  free(residType);
#endif

  /* On early exit, store final state in last output */
  if (stat != NORMAL_EXIT) {
    if (savePtr < nSave) {
      tOut[(*nOut)] = t;
      for (i=0; i<grd->nr; i++) {
	colOut[i + (*nOut)*grd->nr] = col[i];
	presOut[i + (*nOut)*grd->nr] = pres[i];
	if (eos_func) eIntOut[i + (*nOut)*grd->nr] = eInt[i];
      }
      (*nOut)++;
    }
  }

  /* Print final status and return */
  /* NOTE(review): *nStep is decremented only when verbosity > 0, so the
     reported step count differs with verbosity — confirm intentional */
  if (verbosity > 0) {
    *nStep = *nStep-1;
    if (stat == NORMAL_EXIT)
      printf("Finished computation in %ld steps, t = %e\n", *nStep, t);
    else if (stat == TOO_MANY_STEPS)
      printf("Reached maximum number of steps = %ld, t = %e\n", *nStep, t);
    else if (stat == ZENO_ERROR)
      printf("Time step too small after %ld steps, t = %e, dt = %e\n",
	     *nStep, t, dt);
  }

#ifdef TESTING_MODE
  end_t = clock();
  *driverTime = (double) (end_t - start_t) / CLOCKS_PER_SEC;
#endif

  return(t);
}
{ "alphanum_fraction": 0.585479426, "avg_line_length": 29.3938547486, "ext": "c", "hexsha": "b3e4b0dff41239d34f71a1fe3086e4fb24c443dd", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-11-20T02:11:17.000Z", "max_forks_repo_forks_event_min_datetime": "2021-11-19T04:41:37.000Z", "max_forks_repo_head_hexsha": "646b3136c39da7152c82a032f8151555ec1e3d44", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "franciscaconcha/amuse-vader", "max_forks_repo_path": "src/amuse/community/vader/src/driver.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "646b3136c39da7152c82a032f8151555ec1e3d44", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "franciscaconcha/amuse-vader", "max_issues_repo_path": "src/amuse/community/vader/src/driver.c", "max_line_length": 81, "max_stars_count": null, "max_stars_repo_head_hexsha": "646b3136c39da7152c82a032f8151555ec1e3d44", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "franciscaconcha/amuse-vader", "max_stars_repo_path": "src/amuse/community/vader/src/driver.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3215, "size": 10523 }
#pragma once

#include <initializer_list>
#include <list>
#include <vector>

#include <gsl/gsl_complex.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_permutation.h>
#include <gsl/gsl_vector.h>

#include "../Utils/Types.h"
#include "Expression.h"

// For each two-dimensional GSL type M, generate:
//   unique_M        — std::unique_ptr that frees with M_free
//   make_M(r, c)    — allocate an r x c object wrapped in unique_M
//   to_M(e)         — convert an expression to M (declared here, defined elsewhere)
#define UNIQUE_GSL(M) \
    typedef std::unique_ptr<M, decltype(&M##_free)> unique_##M; \
    inline unique_##M make_##M(size_t rows, size_t cols){ \
        return unique_##M(M##_alloc(rows, cols), M##_free); \
    } \
    unique_##M to_##M(expression e);

UNIQUE_GSL(gsl_matrix)
UNIQUE_GSL(gsl_matrix_complex)

#undef UNIQUE_GSL

// Same generator for one-dimensional GSL types (single size argument).
#define UNIQUE_GSL(M) \
    typedef std::unique_ptr<M, decltype(&M##_free)> unique_##M; \
    inline unique_##M make_##M(size_t size){ \
        return unique_##M(M##_alloc(size), M##_free); \
    } \
    unique_##M to_##M(expression e);

UNIQUE_GSL(gsl_permutation)
UNIQUE_GSL(gsl_vector)
UNIQUE_GSL(gsl_vector_complex)

#undef UNIQUE_GSL

// Expression node representing a matrix of sub-expressions, stored row-major
// in `mat` with dimensions rows x cols. Constructors are private; instances
// are created through the static construct() overloads.
class MatrixExpression: public Expression {
    std::vector<expression> mat;  // row-major element storage
    size_t rows, cols;            // matrix dimensions

    MatrixExpression();
    MatrixExpression(size_t numRows, size_t numCols);
    MatrixExpression(std::vector<expression>&& matrix, size_t numRows, size_t numCols);
    MatrixExpression(std::list<expression>& matrix, size_t numRows, size_t numCols);
    MatrixExpression(std::initializer_list<double> matrix, size_t numRows, size_t numCols);
    MatrixExpression(std::initializer_list<gsl_complex> matrix, size_t numRows, size_t numCols);
    MatrixExpression(unique_gsl_matrix& matrix);
    MatrixExpression(unique_gsl_matrix_complex& matrix);
    MatrixExpression(unique_gsl_permutation& permutation);
    MatrixExpression(unique_gsl_vector& vec);
    MatrixExpression(unique_gsl_vector_complex& vec);

public:
    // Factory overloads mirroring the private constructors above.
    static expression construct();
    static expression construct(size_t numRows, size_t numCols);
    static expression construct(std::vector<expression>&& matrix, size_t numRows, size_t numCols);
    static expression construct(std::list<expression>& matrix, size_t numRows, size_t numCols);
    static expression construct(std::initializer_list<double> matrix, size_t numRows, size_t numCols);
    static expression construct(std::initializer_list<gsl_complex> matrix, size_t numRows, size_t numCols);
    static expression construct(unique_gsl_matrix& matrix);
    static expression construct(unique_gsl_matrix_complex& matrix);
    static expression construct(unique_gsl_permutation& matrix);
    static expression construct(unique_gsl_vector& vec);
    static expression construct(unique_gsl_vector_complex& vec);

    expression simplify() override;
    expression derivative(const std::string& var) override;
    expression integrate(const std::string& var) override;
    expression at(const int index) override;
    size_t shape(const int axis) const override;
    size_t size() const override;
    expression apply(TransformerFunction f) override;

    EXPRESSION_OVERRIDES
};
{ "alphanum_fraction": 0.6745689655, "avg_line_length": 39.1325301205, "ext": "h", "hexsha": "86a4db925e4f54d582fd312a28eb7e8840aca69e", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "33cede17001e0a7038f99ea40dd6f9e433cf6454", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "antoniojkim/CalcPlusPlus", "max_forks_repo_path": "MathEngine/Expressions/MatrixExpression.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "33cede17001e0a7038f99ea40dd6f9e433cf6454", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "antoniojkim/CalcPlusPlus", "max_issues_repo_path": "MathEngine/Expressions/MatrixExpression.h", "max_line_length": 111, "max_stars_count": null, "max_stars_repo_head_hexsha": "33cede17001e0a7038f99ea40dd6f9e433cf6454", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "antoniojkim/CalcPlusPlus", "max_stars_repo_path": "MathEngine/Expressions/MatrixExpression.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 683, "size": 3248 }
#ifndef QDM_BIAS_H
#define QDM_BIAS_H

#include <gsl/gsl_matrix.h>
#include <gsl/gsl_vector.h>

#include "evaluation.h"

/* Compute bias values from a pair of evaluations (presumably h = historical
 * and f = future model runs -- TODO confirm against callers).
 * Returns a newly allocated gsl_matrix; caller is presumably responsible for
 * freeing it with gsl_matrix_free (verify against the implementation). */
gsl_matrix *
qdm_bias_values(
    qdm_evaluation *h,
    qdm_evaluation *f
);

/* Bias-correct the series y, sampled at (years, month, days), using three
 * fitted evaluations (fp/ho/hc look like future-projected, historical-observed
 * and historical-modelled -- NOTE(review): naming inferred, confirm).
 * Returns a newly allocated gsl_matrix with the corrected values. */
gsl_matrix *
qdm_bias_correct(
    const gsl_vector *years,
    const double month,
    const gsl_vector *days,
    const gsl_vector *y,
    qdm_evaluation *fp,
    qdm_evaluation *ho,
    qdm_evaluation *hc
);

#endif /* QDM_BIAS_H */
{ "alphanum_fraction": 0.700913242, "avg_line_length": 15.1034482759, "ext": "h", "hexsha": "f04eb39937584d1f6c5993c52a6fdc4b8ae0d21f", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "2ee95bec6c8be64f69e231c78f2be5fce3509c67", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "calebcase/qdm", "max_forks_repo_path": "include/qdm/bias.h", "max_issues_count": 3, "max_issues_repo_head_hexsha": "2ee95bec6c8be64f69e231c78f2be5fce3509c67", "max_issues_repo_issues_event_max_datetime": "2020-03-22T20:22:53.000Z", "max_issues_repo_issues_event_min_datetime": "2020-03-06T18:09:06.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "calebcase/qdm", "max_issues_repo_path": "include/qdm/bias.h", "max_line_length": 28, "max_stars_count": null, "max_stars_repo_head_hexsha": "2ee95bec6c8be64f69e231c78f2be5fce3509c67", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "calebcase/qdm", "max_stars_repo_path": "include/qdm/bias.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 126, "size": 438 }
#include <stdio.h> #include <stdarg.h> #include <string.h> #include <math.h> #include <gbpLib.h> #include <gbpRNG.h> #include <gbpMCMC.h> #include <gsl/gsl_linalg.h> #include <gsl/gsl_fit.h> #include <gsl/gsl_interp.h> void generate_MCMC_parameters(MCMC_info *MCMC) { int i_P; int j_P; double factor_i; int flag_continue; static int reflect_priors; static int n_P; static double factor; static double * P_new; static double * P_chain; static double * P_init; static double * P_limit_min; static double * P_limit_max; static gsl_vector *b; static gsl_matrix *m; static RNG_info * RNG; // Initialize a few things on the first call switch(MCMC->first_parameter_call) { case GBP_TRUE: n_P = MCMC->n_P; P_new = MCMC->P_new; P_chain = MCMC->P_chain; P_init = MCMC->P_init; P_limit_min = MCMC->P_limit_min; P_limit_max = MCMC->P_limit_max; b = MCMC->b; m = MCMC->m; RNG = MCMC->RNG; factor = 2.4 / sqrt((double)MCMC->n_M_total); MCMC->first_parameter_call = GBP_FALSE; reflect_priors = SID_CHECK_BITFIELD_SWITCH(MCMC->mode, MCMC_MODE_REFLECT_PRIORS); break; } // Loop until a satisfactory set of parameters has been selected flag_continue = GBP_TRUE; while(flag_continue) { // Generate a random displacement vector factor_i = MCMC->temperature * factor; for(i_P = 0; i_P < n_P; i_P++) gsl_vector_set(b, i_P, factor_i * random_gaussian(RNG)); // Use the rotated covariance matrix (if available) if(m != NULL) { memcpy(P_new, P_chain, n_P * sizeof(double)); for(i_P = 0; i_P < n_P; i_P++) { for(j_P = 0; j_P < n_P; j_P++) P_new[i_P] += gsl_matrix_get(m, i_P, j_P) * gsl_vector_get(b, j_P); } } else { for(j_P = 0; j_P < n_P; j_P++) P_new[j_P] = P_init[j_P] * (1. 
+ gsl_vector_get(b, j_P)); } // Enforce parameter limits if(reflect_priors) { for(i_P = 0; i_P < n_P; i_P++) { if(P_new[i_P] < P_limit_min[i_P]) P_new[i_P] = 2 * (P_limit_min[i_P]) - P_new[i_P]; if(P_new[i_P] > P_limit_max[i_P]) P_new[i_P] = 2 * (P_limit_max[i_P]) - P_new[i_P]; } } else { for(i_P = 0, flag_continue = GBP_FALSE; i_P < n_P; i_P++) if(P_new[i_P] < P_limit_min[i_P] || P_new[i_P] > P_limit_max[i_P]) flag_continue = GBP_TRUE; } } if(!SID_CHECK_BITFIELD_SWITCH(MCMC->mode, MCMC_MODE_PARALLEL)) SID_Bcast(P_new, n_P, SID_DOUBLE, SID_MASTER_RANK, MCMC->comm); }
{ "alphanum_fraction": 0.5057070387, "avg_line_length": 37.1058823529, "ext": "c", "hexsha": "cb58f17c05975eaf628e5ae2842536cc58f73c16", "lang": "C", "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2016-08-01T08:14:24.000Z", "max_forks_repo_forks_event_min_datetime": "2015-01-23T00:50:40.000Z", "max_forks_repo_head_hexsha": "5157d2e377edbd4806258d1c16b329373186d43a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "gbpoole/gbpCode", "max_forks_repo_path": "src/gbpMath/gbpMCMC/generate_MCMC_parameters.c", "max_issues_count": 2, "max_issues_repo_head_hexsha": "5157d2e377edbd4806258d1c16b329373186d43a", "max_issues_repo_issues_event_max_datetime": "2019-06-18T00:40:46.000Z", "max_issues_repo_issues_event_min_datetime": "2017-07-30T11:10:49.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "gbpoole/gbpCode", "max_issues_repo_path": "src/gbpMath/gbpMCMC/generate_MCMC_parameters.c", "max_line_length": 105, "max_stars_count": 1, "max_stars_repo_head_hexsha": "5157d2e377edbd4806258d1c16b329373186d43a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "gbpoole/gbpCode", "max_stars_repo_path": "src/gbpMath/gbpMCMC/generate_MCMC_parameters.c", "max_stars_repo_stars_event_max_datetime": "2015-10-20T11:39:53.000Z", "max_stars_repo_stars_event_min_datetime": "2015-10-20T11:39:53.000Z", "num_tokens": 839, "size": 3154 }
/* This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or (at
   your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

   Original implementation was copyright (C) 1997 Makoto Matsumoto and
   Takuji Nishimura. Coded by Takuji Nishimura, considering the
   suggestions by Topher Cooper and Marc Rieffel in July-Aug. 1997, "A
   C-program for MT19937: Integer version (1998/4/6)"

   This implementation copyright (C) 1998 Brian Gough. I reorganized
   the code to use the module framework of GSL.  The license on this
   implementation was changed from LGPL to GPL, following paragraph 3
   of the LGPL, version 2.

   The seeding procedure has been updated to match the 10/99 release
   of MT19937.

   The original code included the comment: "When you use this, send an
   email to: matumoto@math.keio.ac.jp with an appropriate reference to
   your work".

   Makoto Matsumoto has a web page with more information about the
   generator, http://www.math.keio.ac.jp/~matumoto/emt.html.

   The paper below has details of the algorithm.

   From: Makoto Matsumoto and Takuji Nishimura, "Mersenne Twister: A
   623-dimensionally equidistributed uniform pseudorandom number
   generator". ACM Transactions on Modeling and Computer Simulation,
   Vol. 8, No. 1 (Jan. 1998), Pages 3-30

   You can obtain the paper directly from Makoto Matsumoto's web page.

   The period of this generator is 2^{19937} - 1.
*/

#include <config.h>
#include <stdlib.h>
#include <gsl/gsl_rng.h>

static inline unsigned long int mt_get (void *vstate);
static double mt_get_double (void *vstate);
static void mt_set (void *state, unsigned long int s);

#define N 624   /* Period parameters */
#define M 397

/* most significant w-r bits */
static const unsigned long UPPER_MASK = 0x80000000UL;

/* least significant r bits */
static const unsigned long LOWER_MASK = 0x7fffffffUL;

/* Generator state: the 624-word shift register plus the index of the
   next word to hand out (mti == N means "state exhausted, twist"). */
typedef struct
  {
    unsigned long mt[N];
    int mti;
  }
mt_state_t;

/* Produce the next 32-bit output.  When the buffered block of N words is
   exhausted, a fresh block is generated in place ("twist"), then the next
   word is tempered and returned. */
static inline unsigned long
mt_get (void *vstate)
{
  mt_state_t *state = (mt_state_t *) vstate;

  unsigned long k ;
  unsigned long int *const mt = state->mt;

  /* Conditional xor with the twist constant: applied only when the low
     bit of y is set (multiplication by the matrix A of the algorithm). */
#define MAGIC(y) (((y)&0x1) ? 0x9908b0dfUL : 0)

  if (state->mti >= N)
    {   /* generate N words at one time */
      int kk;

      for (kk = 0; kk < N - M; kk++)
        {
          unsigned long y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
          mt[kk] = mt[kk + M] ^ (y >> 1) ^ MAGIC(y);
        }
      for (; kk < N - 1; kk++)
        {
          unsigned long y = (mt[kk] & UPPER_MASK) | (mt[kk + 1] & LOWER_MASK);
          /* kk + M would run past the array here, so wrap via M - N */
          mt[kk] = mt[kk + (M - N)] ^ (y >> 1) ^ MAGIC(y);
        }

      {
        /* last word wraps around to mt[0] */
        unsigned long y = (mt[N - 1] & UPPER_MASK) | (mt[0] & LOWER_MASK);
        mt[N - 1] = mt[M - 1] ^ (y >> 1) ^ MAGIC(y);
      }

      state->mti = 0;
    }

  /* Tempering */
  k = mt[state->mti];
  k ^= (k >> 11);
  k ^= (k << 7) & 0x9d2c5680UL;
  k ^= (k << 15) & 0xefc60000UL;
  k ^= (k >> 18);

  state->mti++;

  return k;
}

/* Uniform double in [0, 1): a 32-bit draw scaled by 2^32. */
static double
mt_get_double (void * vstate)
{
  return mt_get (vstate) / 4294967296.0 ;
}

/* Seed the generator.  A seed of 0 is replaced by the default 4357. */
static void
mt_set (void *vstate, unsigned long int s)
{
  mt_state_t *state = (mt_state_t *) vstate;
  int i;

  if (s == 0)
    s = 4357;   /* the default seed is 4357 */

  /* This is the October 1999 version of the seeding procedure. It
     was updated by the original developers to avoid the periodicity
     in the simple congruence originally used.

     Note that an ANSI-C unsigned long integer arithmetic is
     automatically modulo 2^32 (or a higher power of two), so we can
     safely ignore overflow. */

#define LCG(x) ((69069 * x) + 1) &0xffffffffUL

  for (i = 0; i < N; i++)
    {
      /* each state word takes the top 16 bits of two LCG draws */
      state->mt[i] = s & 0xffff0000UL;
      s = LCG(s);
      state->mt[i] |= (s &0xffff0000UL) >> 16;
      s = LCG(s);
    }

  state->mti = i;
}

/* This is the original version of the seeding procedure, no longer
   used but available for compatibility with the original MT19937. */
static void
mt_1998_set (void *vstate, unsigned long int s)
{
  mt_state_t *state = (mt_state_t *) vstate;
  int i;

  if (s == 0)
    s = 4357;   /* the default seed is 4357 */

  state->mt[0] = s & 0xffffffffUL;

#define LCG1998(n) ((69069 * n) & 0xffffffffUL)

  for (i = 1; i < N; i++)
    state->mt[i] = LCG1998 (state->mt[i - 1]);

  state->mti = i;
}

static const gsl_rng_type mt_type =
{"mt19937",                     /* name */
 0xffffffffUL,                  /* RAND_MAX */
 0,                             /* RAND_MIN */
 sizeof (mt_state_t),
 &mt_set,
 &mt_get,
 &mt_get_double};

static const gsl_rng_type mt_1998_type =
{"mt19937_1998",                /* name */
 0xffffffffUL,                  /* RAND_MAX */
 0,                             /* RAND_MIN */
 sizeof (mt_state_t),
 &mt_1998_set,
 &mt_get,
 &mt_get_double};

const gsl_rng_type *gsl_rng_mt19937 = &mt_type;
const gsl_rng_type *gsl_rng_mt19937_1998 = &mt_1998_type;

/* MT19937 is the default generator, so define that here too */
const gsl_rng_type *gsl_rng_default = &mt_type;
unsigned long int gsl_rng_default_seed = 0;
{ "alphanum_fraction": 0.6630574417, "avg_line_length": 27.245, "ext": "c", "hexsha": "eac955534d7b6c6c7d0a8b42c7c3683d7c15fe55", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2015-10-02T01:32:59.000Z", "max_forks_repo_forks_event_min_datetime": "2015-10-02T01:32:59.000Z", "max_forks_repo_head_hexsha": "91e70bc88726ee680ec6e8cbc609977db3fdcff9", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "ICML14MoMCompare/spectral-learn", "max_forks_repo_path": "code/em/treba/gsl-1.0/rng/mt.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "91e70bc88726ee680ec6e8cbc609977db3fdcff9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "ICML14MoMCompare/spectral-learn", "max_issues_repo_path": "code/em/treba/gsl-1.0/rng/mt.c", "max_line_length": 71, "max_stars_count": 14, "max_stars_repo_head_hexsha": "91e70bc88726ee680ec6e8cbc609977db3fdcff9", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "ICML14MoMCompare/spectral-learn", "max_stars_repo_path": "code/em/treba/gsl-1.0/rng/mt.c", "max_stars_repo_stars_event_max_datetime": "2021-06-10T11:31:28.000Z", "max_stars_repo_stars_event_min_datetime": "2015-12-18T18:09:25.000Z", "num_tokens": 1678, "size": 5449 }
#include <stdio.h> #include <stdlib.h> #include <string.h> #define COMPEARTH_PRIVATE_DET3X3 1 #include "compearth.h" /* #ifdef COMPEARTH_USE_MKL #include <mkl_cblas.h> #else #include <cblas.h> #endif */ //static double det3x3ColumnMajor(const double *__restrict__ A); /*! * @brief Ensure that U is a rotation matrix with det(U) = 1. * * @param[in] n Number of rotation matrices. * @param[in] Uin Rotation matrices to check. This is an array of dimension * [3 x 3 x n] where each [3 x 3] matrix is in column major * format. * @param[in] Uout Rotation matrices with determinants all positive. This * is an array of dimension [3 x 3 x n] where each [3 x 3] * matrix is in column major format. * * @result 0 indicates success. * * @author Carl Tape and translated to C by Ben Baker * * @copyright MIT * */ int compearth_Udetcheck(const int n, const double *__restrict__ Uin, double *__restrict__ Uout) { double det; int i, imt; // Copy Uin to Uout memcpy(Uout, Uin, 9*(size_t) n*sizeof(double)); //cblas_dcopy(9*n, Uin, 1, Uout, 1); // Fix for (imt=0; imt<n; imt++) { det = det3x3ColumnMajor(&Uout[9*imt]); // Negate the second column if (det < 0.0) { for (i=0; i<3; i++) { Uout[9*imt+3+i] =-Uout[9*imt+3+i]; } } } // Verify for (imt=0; imt<n; imt++) { det = det3x3ColumnMajor(&Uout[9*imt]); if (det < 0.0) { fprintf(stderr, "%s: Error det(u) < 0\n", __func__); return -1; } } return 0; } /* static double det3x3ColumnMajor(const double *__restrict__ A) { double det; det = A[0]*( A[4]*A[8] - A[5]*A[7]) - A[3]*( A[1]*A[8] - A[2]*A[7]) + A[6]*( A[1]*A[5] - A[2]*A[4]); return det; } */
{ "alphanum_fraction": 0.530726257, "avg_line_length": 24.9240506329, "ext": "c", "hexsha": "5d0b0a0dbe0d240f6491343e5e27413bec3c3fbc", "lang": "C", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2022-02-28T13:42:40.000Z", "max_forks_repo_forks_event_min_datetime": "2021-07-08T00:13:50.000Z", "max_forks_repo_head_hexsha": "188058083602cebf1471ea88939b07999c90b655", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "carltape/mtbeach", "max_forks_repo_path": "c_src/Udetcheck.c", "max_issues_count": 1, "max_issues_repo_head_hexsha": "188058083602cebf1471ea88939b07999c90b655", "max_issues_repo_issues_event_max_datetime": "2017-11-02T17:30:53.000Z", "max_issues_repo_issues_event_min_datetime": "2017-11-02T17:30:53.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "carltape/mtbeach", "max_issues_repo_path": "c_src/Udetcheck.c", "max_line_length": 79, "max_stars_count": 9, "max_stars_repo_head_hexsha": "188058083602cebf1471ea88939b07999c90b655", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "OUCyf/mtbeach", "max_stars_repo_path": "c_src/Udetcheck.c", "max_stars_repo_stars_event_max_datetime": "2022-02-28T23:55:36.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-13T01:18:12.000Z", "num_tokens": 620, "size": 1969 }
#pragma once #include <winrt\Windows.Foundation.h> #include <cstdint> #include <d3d11.h> #include <DirectXMath.h> #include <dxgidebug.h> #include <gsl\gsl> #include "GameException.h" namespace Library { enum class ShaderStages { IA, VS, HS, DS, GS, SO, RS, PS, OM, CS }; const std::array<ShaderStages, 6> ProgrammableGraphicsShaderStages { ShaderStages::VS, ShaderStages::HS, ShaderStages::DS, ShaderStages::GS, ShaderStages::PS, ShaderStages::CS, }; inline bool ShaderStageIsProgrammable(ShaderStages shaderStage) { static const std::map<ShaderStages, bool> isProgrammableMap { {ShaderStages::IA, false }, { ShaderStages::VS, true }, { ShaderStages::HS, true }, { ShaderStages::DS, true }, { ShaderStages::GS, true }, { ShaderStages::SO, false }, { ShaderStages::RS, false }, { ShaderStages::PS, true }, { ShaderStages::OM, false }, { ShaderStages::CS, true }, }; return isProgrammableMap.at(shaderStage); } void CreateIndexBuffer(gsl::not_null<ID3D11Device*> device, const gsl::span<const std::uint16_t>& indices, gsl::not_null<ID3D11Buffer**> indexBuffer); void CreateIndexBuffer(gsl::not_null<ID3D11Device*> device, const gsl::span<const std::uint32_t>& indices, gsl::not_null<ID3D11Buffer**> indexBuffer); void CreateConstantBuffer(gsl::not_null<ID3D11Device*> device, std::size_t byteWidth, gsl::not_null<ID3D11Buffer**> constantBuffer); inline float ConvertDipsToPixels(float dips, float dpi) { static const float dipsPerInch = 96.0f; return floorf(dips * dpi / dipsPerInch + 0.5f); // Round to nearest integer. } #if defined(DEBUG) || defined(_DEBUG) // Check for SDK Layer support. inline bool SdkLayersAvailable() { HRESULT hr = D3D11CreateDevice( nullptr, D3D_DRIVER_TYPE_NULL, // There is no need to create a real hardware device. 0, D3D11_CREATE_DEVICE_DEBUG, // Check for the SDK layers. nullptr, // Any feature level will do. 0, D3D11_SDK_VERSION, // Always set this to D3D11_SDK_VERSION for Windows Store apps. nullptr, // No need to keep the D3D device reference. 
nullptr, // No need to know the feature level. nullptr // No need to keep the D3D device context reference. ); return SUCCEEDED(hr); } #endif #if defined(DEBUG) || defined(_DEBUG) inline void DumpD3DDebug() { winrt::com_ptr<IDXGIDebug1> debugInterface = nullptr; ThrowIfFailed(DXGIGetDebugInterface1(0, IID_PPV_ARGS(debugInterface.put()))); ThrowIfFailed(debugInterface->ReportLiveObjects(DXGI_DEBUG_ALL, DXGI_DEBUG_RLO_ALL)); } #endif }
{ "alphanum_fraction": 0.6907177753, "avg_line_length": 27.71875, "ext": "h", "hexsha": "bd13c88fa4f3bc81ac3eeb42ae6bb2a293de1b78", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "05a05c5c26784dafa9a89747276f385252951f2f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ssshammi/real-time-3d-rendering-with-directx-and-hlsl", "max_forks_repo_path": "source/Library.Shared/DirectXHelper.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "05a05c5c26784dafa9a89747276f385252951f2f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ssshammi/real-time-3d-rendering-with-directx-and-hlsl", "max_issues_repo_path": "source/Library.Shared/DirectXHelper.h", "max_line_length": 151, "max_stars_count": null, "max_stars_repo_head_hexsha": "05a05c5c26784dafa9a89747276f385252951f2f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ssshammi/real-time-3d-rendering-with-directx-and-hlsl", "max_stars_repo_path": "source/Library.Shared/DirectXHelper.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 802, "size": 2661 }
#include <stdio.h> #include <gsl/gsl_statistics.h> #include <stdlib.h> #include <float.h> #ifndef N #define N 32 #endif #define K N/2 #include <klee/klee.h> int main(void) { double data[K], w[K]; float input[K]; // random inputs int i; for (i = 0; i < K; i++){ if (i<2) w[i] = 1; else w[i] = 2*w[i-1]; } // symbolic inputs klee_make_symbolic(&input, sizeof(input), "input"); for (i = 0; i < K; i++){ data[i] = input[i] / FLT_MAX * 100; } double wmean; wmean = gsl_stats_wmean(w, 1, data, 1, K); printf ("The sample mean is %g\n", wmean); return 0; }
{ "alphanum_fraction": 0.5669421488, "avg_line_length": 15.5128205128, "ext": "c", "hexsha": "5bf890e70d6c7394045c5d658bceb5dcf384c480", "lang": "C", "max_forks_count": 5, "max_forks_repo_forks_event_max_datetime": "2022-01-04T19:22:30.000Z", "max_forks_repo_forks_event_min_datetime": "2020-06-27T11:11:07.000Z", "max_forks_repo_head_hexsha": "4fa9a35cc5695d65509296790accd4b34071432d", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "snipekill/FPGen", "max_forks_repo_path": "benchmarks/gsl/wmean/wmean-SYMBOLIC.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "4fa9a35cc5695d65509296790accd4b34071432d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "snipekill/FPGen", "max_issues_repo_path": "benchmarks/gsl/wmean/wmean-SYMBOLIC.c", "max_line_length": 53, "max_stars_count": 3, "max_stars_repo_head_hexsha": "4fa9a35cc5695d65509296790accd4b34071432d", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "snipekill/FPGen", "max_stars_repo_path": "benchmarks/gsl/wmean/wmean-SYMBOLIC.c", "max_stars_repo_stars_event_max_datetime": "2022-02-20T21:02:18.000Z", "max_stars_repo_stars_event_min_datetime": "2020-07-06T02:44:11.000Z", "num_tokens": 219, "size": 605 }
//
//  Author  : github.com/luncliff (luncliff@gmail.com)
//  License : CC BY 4.0
//
//  Note
//      Async I/O operation support for socket
//
#pragma once
// clang-format off
#if defined(FORCE_STATIC_LINK)
#   define _INTERFACE_
#   define _HIDDEN_
#elif defined(_MSC_VER) // MSVC or clang-cl
#   define _HIDDEN_
#   ifdef _WINDLL
#       define _INTERFACE_ __declspec(dllexport)
#   else
#       define _INTERFACE_ __declspec(dllimport)
#   endif
#elif defined(__GNUC__) || defined(__clang__)
#   define _INTERFACE_ __attribute__((visibility("default")))
#   define _HIDDEN_ __attribute__((visibility("hidden")))
#else
#   error "unexpected linking configuration"
#endif
// clang-format on

#ifndef COROUTINE_NET_IO_H
#define COROUTINE_NET_IO_H
#include <chrono>
#include <gsl/gsl>

#include <coroutine/yield.hpp>

#if __has_include(<WinSock2.h>) // use winsock
#include <WS2tcpip.h>
#include <WinSock2.h>
#include <ws2def.h>

// On Windows the native OVERLAPPED struct is used directly.
using io_control_block = OVERLAPPED;
static constexpr bool is_winsock = true;
static constexpr bool is_netinet = false;

#elif __has_include(<netinet/in.h>) // use netinet
#include <fcntl.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

// Follow the definition of Windows `OVERLAPPED`
// (same size/field layout so the awaitable types below can be shared
//  across platforms; the trailing comments name the reinterpretations).
struct io_control_block {
    uint64_t internal;      // uint32_t errc, int32_t flag
    uint64_t internal_high; // int64_t len, socklen_t addrlen
    union {
        struct {
            int32_t offset;
            int32_t offset_high;
        };
        void* ptr; // sockaddr* addr;
    };
    int64_t handle; // int64_t sd;
};
static constexpr bool is_winsock = false;
static constexpr bool is_netinet = true;

#endif // winsock || netinet

namespace coro {
using namespace std;
using namespace std::experimental;

// 1 I/O task == 1 coroutine function
using io_task_t = coroutine_handle<void>;

// This is simply a view to storage. Be aware that it doesn't have ownership
using io_buffer_t = gsl::span<std::byte>;
static_assert(sizeof(io_buffer_t) <= sizeof(void*) * 2);

// A struct to describe "1 I/O request" to system API
class io_work_t : public io_control_block {
  public:
    io_task_t task{};
    io_buffer_t buffer{};

  protected:
    _INTERFACE_ bool ready() const noexcept;

  public:
    // Multiple retrieving won't be a matter
    _INTERFACE_ uint32_t error() const noexcept;
};
// Layout guard: derived awaitables below static_assert against this size.
static_assert(sizeof(io_work_t) <= 56);

// Type to perform `sendto` I/O request
class io_send_to final : public io_work_t {
  private:
    // This function must be used through `co_await`
    _INTERFACE_ void suspend(io_task_t t) noexcept(false);
    // This function must be used through `co_await`
    // Unlike inherited `error` function, multiple invoke of this will
    // lead to malfunction.
    _INTERFACE_ int64_t resume() noexcept;

  public:
    bool await_ready() const noexcept {
        return this->ready();
    }
    void await_suspend(io_task_t t) noexcept(false) {
        return this->suspend(t);
    }
    auto await_resume() noexcept {
        return this->resume();
    }
};
static_assert(sizeof(io_send_to) == sizeof(io_work_t));

// Type to perform `recvfrom` I/O request
class io_recv_from final : public io_work_t {
  private:
    // This function must be used through `co_await`
    _INTERFACE_ void suspend(io_task_t t) noexcept(false);
    // This function must be used through `co_await`
    // Unlike inherited `error` function, multiple invoke of this will
    // lead to malfunction.
    _INTERFACE_ int64_t resume() noexcept;

  public:
    bool await_ready() const noexcept {
        return this->ready();
    }
    void await_suspend(io_task_t t) noexcept(false) {
        return this->suspend(t);
    }
    auto await_resume() noexcept {
        return this->resume();
    }
};
static_assert(sizeof(io_recv_from) == sizeof(io_work_t));

// Type to perform `send` I/O request
class io_send final : public io_work_t {
  private:
    // This function must be used through `co_await`
    _INTERFACE_ void suspend(io_task_t t) noexcept(false);
    // This function must be used through `co_await`
    // Unlike inherited `error` function, multiple invoke of this will
    // lead to malfunction.
    _INTERFACE_ int64_t resume() noexcept;

  public:
    bool await_ready() const noexcept {
        return this->ready();
    }
    void await_suspend(io_task_t t) noexcept(false) {
        return this->suspend(t);
    }
    auto await_resume() noexcept {
        return this->resume();
    }
};
static_assert(sizeof(io_send) == sizeof(io_work_t));

// Type to perform `recv` I/O request
class io_recv final : public io_work_t {
  private:
    // This function must be used through `co_await`
    _INTERFACE_ void suspend(io_task_t t) noexcept(false);
    // This function must be used through `co_await`
    // Unlike inherited `error` function, multiple invoke of this will
    // lead to malfunction.
    _INTERFACE_ int64_t resume() noexcept;

  public:
    bool await_ready() const noexcept {
        return this->ready();
    }
    void await_suspend(io_task_t t) noexcept(false) {
        return this->suspend(t);
    }
    auto await_resume() noexcept {
        return this->resume();
    }
};
static_assert(sizeof(io_recv) == sizeof(io_work_t));

// Constructs awaitable `io_send_to` object with the given parameters
[[nodiscard]] _INTERFACE_ //
auto //
send_to(uint64_t sd, const sockaddr_in& remote, //
        io_buffer_t buf, io_work_t& work) noexcept(false) //
    -> io_send_to&;

// Constructs awaitable `io_send_to` object with the given parameters
[[nodiscard]] _INTERFACE_ //
auto //
send_to(uint64_t sd, const sockaddr_in6& remote, io_buffer_t buf, //
        io_work_t& work) noexcept(false) //
    -> io_send_to&;

// Constructs awaitable `io_recv_from` object with the given parameters
[[nodiscard]] _INTERFACE_ //
auto //
recv_from(uint64_t sd, sockaddr_in& remote, io_buffer_t buf, //
          io_work_t& work) noexcept(false) //
    -> io_recv_from&;

// Constructs awaitable `io_recv_from` object with the given parameters
[[nodiscard]] _INTERFACE_ //
auto //
recv_from(uint64_t sd, sockaddr_in6& remote, io_buffer_t buf, //
          io_work_t& work) noexcept(false) //
    -> io_recv_from&;

// Constructs awaitable `io_send` object with the given parameters
[[nodiscard]] _INTERFACE_ //
auto //
send_stream(uint64_t sd, io_buffer_t buf, uint32_t flag, //
            io_work_t& work) noexcept(false) //
    -> io_send&;

// Constructs awaitable `io_recv` object with the given parameters
[[nodiscard]] _INTERFACE_ //
auto //
recv_stream(uint64_t sd, io_buffer_t buf, uint32_t flag, //
            io_work_t& work) noexcept(false) //
    -> io_recv&;

// This function is for non-Windows platform.
// Over Windows api, it always yields **nothing**.
//
// Its caller must continue the loop without break
// so there is no leak of the I/O events
//
// Also, the library doesn't guarantee all coroutines(i/o tasks) will be
// fetched at once. Therefore it is strongly recommended for user to have
// another method to detect that watching I/O coroutines are returned.
_INTERFACE_
void wait_net_tasks(enumerable<io_task_t>& tasks,
                    chrono::nanoseconds timeout) noexcept(false);

// Convenience overload: collects the ready tasks into a fresh enumerable.
inline auto wait_net_tasks(chrono::nanoseconds timeout) noexcept(false) {
    enumerable<io_task_t> tasks{};
    wait_net_tasks(tasks, timeout);
    return tasks;
}

//
//  Name resolution utilities
//

// Fixed-size name/service string buffers (NI_MAXHOST / NI_MAXSERV bytes).
using zstring_host = gsl::zstring<NI_MAXHOST>;
using zstring_serv = gsl::zstring<NI_MAXSERV>;
using czstring_host = gsl::czstring<NI_MAXHOST>;
using czstring_serv = gsl::czstring<NI_MAXSERV>;

// Combination of `getaddrinfo` functions
// If there is an error, the enumerable is untouched
_INTERFACE_
int32_t resolve(enumerable<sockaddr>& g, const addrinfo& hint, //
                czstring_host name, czstring_serv serv) noexcept;

// construct system_error using `gai_strerror` function
_INTERFACE_
auto resolve_error(int32_t ec) noexcept -> std::system_error;

// Throwing variant of `resolve` above: returns the results by value and
// raises the `gai_strerror`-based system_error on failure.
inline auto resolve(const addrinfo& hint, //
                    czstring_host name, czstring_serv serv) noexcept(false)
    -> enumerable<sockaddr> {
    enumerable<sockaddr> g{};
    if (const auto ec = resolve(g, hint, name, serv)) {
        throw resolve_error(ec);
    }
    return g;
}

// Thin wrapper of `getnameinfo`. Parameter 'serv' can be nullptr.
_INTERFACE_
int32_t get_name(const sockaddr_in& addr, zstring_host name, zstring_serv serv,
                 int32_t flags = NI_NUMERICHOST | NI_NUMERICSERV) noexcept;

// Thin wrapper of `getnameinfo`. Parameter 'serv' can be nullptr.
_INTERFACE_
int32_t get_name(const sockaddr_in6& addr, zstring_host name,
                 zstring_serv serv,
                 int32_t flags = NI_NUMERICHOST | NI_NUMERICSERV) noexcept;

} // namespace coro

#endif // COROUTINE_NET_IO_H
{ "alphanum_fraction": 0.6246427113, "avg_line_length": 33.6632302405, "ext": "h", "hexsha": "20098f7d6b06a607f903ab17776fe2decbbd8d19", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "cfe7f0ca5ab4670e539a9f4d6c69d85ba4cb18f7", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "Farwaykorse/coroutine", "max_forks_repo_path": "interface/coroutine/net.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "cfe7f0ca5ab4670e539a9f4d6c69d85ba4cb18f7", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "Farwaykorse/coroutine", "max_issues_repo_path": "interface/coroutine/net.h", "max_line_length": 81, "max_stars_count": null, "max_stars_repo_head_hexsha": "cfe7f0ca5ab4670e539a9f4d6c69d85ba4cb18f7", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "Farwaykorse/coroutine", "max_stars_repo_path": "interface/coroutine/net.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2213, "size": 9796 }
#pragma once

#include <cooperative_groups.h>

#include <gsl-lite/gsl-lite.hpp>

#include <cuda/runtime_api.hpp>

#include <cub/cub.cuh>

#include <makeshift/variant.hpp>

#include <thrustshift/constant.h>
#include <thrustshift/copy.h>
#include <thrustshift/fill.h>
#include <thrustshift/histogram.h>
#include <thrustshift/math.h>
#include <thrustshift/not-a-vector.h>
#include <thrustshift/select-if.h>
#include <thrustshift/type-traits.h>

namespace thrustshift {

namespace device_function {

namespace implicit_unroll {

/*! \brief Sum an n x N array column-wise.
 *  \param N length of row
 *  \param n number of rows
 *  \param p input of length `N * n`
 *  \param result of length `N`. As a result you may pass `p`.
 */
template <typename T0, typename T1, typename I0, typename I1, typename I2>
CUDA_FD void sum_subsequent_into(const T0* p,
                                 T1* result,
                                 int tid,
                                 I0 num_threads,
                                 I1 N,
                                 I2 n) {
	auto num_columns_per_thread = N / num_threads;
	// Accumulate one column: walk down the `n` rows of the row-major array.
	auto sum_column = [&](int col_id) {
		T0 x{};
#pragma unroll
		for (int j = 0; j < n; ++j) {
			x += p[col_id + N * j];
		}
		result[col_id] = x;
	};
#pragma unroll
	for (int i = 0; i < num_columns_per_thread; ++i) {
		const int col_id = i * num_threads + tid;
		sum_column(col_id);
	}
	// Tail columns when N is not a multiple of num_threads.
	auto num_rest = N % num_threads;
	if (tid < num_rest) {
		const int col_id = num_columns_per_thread * num_threads + tid;
		sum_column(col_id);
	}
}

} // namespace implicit_unroll

namespace explicit_unroll {

/*! \brief Sum an n x N array column-wise.
 *
 *  Compile-time-sized variant of `implicit_unroll::sum_subsequent_into`;
 *  all extents are template parameters so the `#pragma unroll`s fully unroll.
 *
 *  \param N length of row
 *  \param n number of rows
 *  \param p input of length `N * n`
 *  \param result of length `N`. As a result you may pass `p`.
 */
template <typename T, int num_threads, int N, int n>
CUDA_FD void sum_subsequent_into(const T* p, T* result, int tid) {
	constexpr int num_columns_per_thread = N / num_threads;
	auto sum_column = [&](int col_id) {
		T x{};
#pragma unroll
		for (int j = 0; j < n; ++j) {
			x += p[col_id + N * j];
		}
		result[col_id] = x;
	};
#pragma unroll
	for (int i = 0; i < num_columns_per_thread; ++i) {
		const int col_id = i * num_threads + tid;
		sum_column(col_id);
	}
	// Tail columns when N is not a multiple of num_threads.
	constexpr int num_rest = N % num_threads;
	if (tid < num_rest) {
		const int col_id = num_columns_per_thread * num_threads + tid;
		sum_column(col_id);
	}
}

} // namespace explicit_unroll

/*! \brief Bin value `x` based on a subset of 8 bits into a 256 bin histogram with a full warp.
 *
 * Only one atomic addition is executed for each bin, although multiple threads in a warp can
 * have a value which belongs into the same bin. Via warp level primitives the bin incrementation is
 * aggregated to one single thread.
 *
 * ```
 * x x x x x x x x x x x x x x x x x x x x x x x    <--- `x` represents one bit
 * |--bit_offset---|--bit_size---|--rest---------|
 * |-----prefix----|
 * ```
 *
 * \param x value which is put into the bin. Can be different for each thread.
 * \param bit_offset number of bits which are omitted from the left of the bit pattern of `x`.
 * \param prefix bit pattern with `bit_offset` reasonable bits from the right. If the beginning of the bit pattern
 *   of `x` is not equal to `prefix`, the value is not binned.
 * \param histogram length of 256. Does not have to be unique for each warp.
 * \param valid_write set to false if a thread should not bin the value `x`.
 * \param bin_index_transform lambda with `[] (int i) -> int {...}` signature. If set to the identity
 *   the values are binned from low to high. The lambda can be used to reverse the order in the
 *   histogram.
 */
template <typename T, typename IH, class F>
CUDA_FD void bin_value256(T x,
                          int bit_offset,
                          uint64_t prefix,
                          IH* histogram,
                          bool valid_write,
                          F bin_index_transform) {

	using I = typename thrustshift::make_uintegral_of_equal_size<T>::type;
	using K = uint8_t;
	constexpr int bit_size = sizeof(K) * 8;
	// extract the bits of length `bit_size` from the left with an offset of `bit_offset` to the right.
	//
	// x x x x x x x x x x x x x x x x x x x x x x x    <--- `x` represents one bit
	// |--bit_offset---|--bit_size---|--rest---------|
	//
	// Reinterpret the value's bit pattern as an unsigned integer of equal size.
	const I i = *reinterpret_cast<I*>((void*) (&x));
	const K b = (i >> (sizeof(I) * 8 - bit_size - bit_offset)) &
	            I(std::numeric_limits<K>::max());
	int bi = bin_index_transform(b);
	int k = 1; // increment of the bin
	// Only count values which start with the given prefix pattern
	if ((i >> sizeof(I) * 8 - bit_offset) != static_cast<I>(prefix) ||
	    !valid_write) {
		bi = -1; // value has different prefix
		k = 0;
	}
	// Threads with higher ID take precedence over those with low ID and the
	// same bin.
	const int lane_id = threadIdx.x % warp_size;
	const int mask = __match_any_sync(0xffffffff, bi) &
	                 (~(1 << lane_id)); // set our own bit to zero
	// NOTE(review): despite the name `lsbi`, `sizeof(int)*8 - __clz(mask) - 1`
	// is the index of the MOST significant set bit of `mask`, i.e. the
	// highest lane (other than ours) holding the same bin index.
	const int lsbi =
	    sizeof(int) * 8 - __clz(mask) -
	    1; // get ID of highest thread which has this value
	const unsigned umask =
	    *reinterpret_cast<const unsigned*>((const void*) &mask);
	// Only the highest matching lane writes; it adds the whole group count
	// (`umask` excludes our own bit, hence the +1).
	k = lsbi > lane_id ? 0 : (__popc(umask) + 1);
	if (k > 0 && bi >= 0) {
		atomicAdd(histogram + bi, k);
	}
}

namespace implicit_unroll {

/*! \brief Bin transformed values based on a subset of 8 bits into a 256 bin histogram.
 *
 * After execution and synchronization, multiple histograms (`histograms`) are filled, which must subsequently be summed into
 * one single histogram.
 *
 * ```
 * x x x x x x x x x x x x x x x x x x x x x x x    <--- `x` represents one bit
 * |--bit_offset---|--bit_size---|--rest---------|
 * |-----prefix----|
 * ```
 *
 * \param values of length N.
 * \param histograms of length `256 * block_dim / warp_size`. Contains the result after execution and must be
 *   initialized before function execution.
 * \param num_histograms number of histograms.
 * \param my_tile_start is equal to zero for intra block binning and equal to `blockIdx.x` for inter block binning.
 * \param tile_increment is equal to one for intra block binning and equal to `gridDim.x` for inter block binning.
 *   For best performance, this should be a compile time constant.
 * \param bit_offset number of bits which are omitted from the left of the bit pattern of `x`.
 * \param num_threads the number of threads which are entering the function (num_threads % warp_size == 0).
 * \param prefix bit pattern with `bit_offset` reasonable bits from the right. If the beginning of the bit pattern
 *   of `x` is not equal to `prefix`, the value is not binned.
 * \param unary_functor lambda to transform the values before binning.
 * \param bin_index_transform lambda with `[] (int i) -> int {...}` signature. If set to the identity
 *   the values are binned from low to high. The lambda can be used to reverse the order in the
 *   histogram.
 */
template <typename T,
          typename I0,
          typename I1,
          typename I2,
          typename I3,
          typename I4,
          typename I5,
          class F0,
          class F1>
CUDA_FHD void bin_values256(
    const T* values,
    I0 N,
    I1* histograms,
    I5 num_histograms,
    I2 my_tile_start, // =0 in case of intra block, =blockIdx.x in case of inter block binning
    I3 tile_increment, // =1 in case of intra block, =grid_dim in case of inter block binning
    int tid,
    I4 num_threads,
    int bit_offset,
    uint64_t prefix,
    F0 unary_functor,
    F1 bin_index_transform) {

	gsl_ExpectsAudit(num_threads % warp_size == 0);
	constexpr int histogram_length = 256;
	const int warp_id = tid / warp_size;
	auto num_warps = num_threads / warp_size;
	gsl_ExpectsAudit(num_warps % num_histograms == 0);
	gsl_ExpectsAudit(num_histograms <= num_warps);
	// Several warps may share one histogram; pick ours by warp ID.
	auto num_warps_per_histogram = num_warps / num_histograms;
	const int histogram_id = warp_id / num_warps_per_histogram;
	I1* my_histogram = histograms + histogram_id * histogram_length;
	const int num_tiles = thrustshift::ceil_divide(N, num_threads);
	auto tile_size = num_threads;
	int tile_id = my_tile_start;
	// Full tiles: every thread has a valid element.
	for (; tile_id < num_tiles - 1; tile_id += tile_increment) {
		const int tile_offset = tile_id * num_threads;
		const auto x = unary_functor(values[tile_offset + tid]);
		bin_value256(
		    x, bit_offset, prefix, my_histogram, true, bin_index_transform);
	}
	// last tile
	if (tile_id == num_tiles - 1) {
		const int tile_offset = tile_id * num_threads;
		const int curr_tile_size =
		    tile_offset + tile_size > N ? (N - tile_offset) : tile_size;
		bool valid_rw = tid < curr_tile_size;
		const auto x = [&] {
			if (valid_rw) {
				return unary_functor(values[tile_offset + tid]);
			}
			return T{};
		}();
		// every thread of the warp must enter this function as it contains
		// `*__sync` warp level primitives.
		bin_value256(
		    x, bit_offset, prefix, my_histogram, valid_rw, bin_index_transform);
	}
}

} // namespace implicit_unroll

/*! \brief Block-wide k-largest-|value| selection via 8-bit radix passes.
 *
 *  Bundles the cub collective types, their shared temporary storage and the
 *  per-pass state (`triplet_t`) used by the radix selection below.
 */
template <int block_dim, int num_histograms, typename IH>
struct k_largest_values_abs_block {

	static constexpr int histogram_length = 256;
	static constexpr int num_warps = block_dim / warp_size;
	static_assert(block_dim % warp_size == 0);
	static_assert(histogram_length % block_dim == 0);
	static constexpr int num_scan_elements_per_thread =
	    histogram_length / block_dim;
	using BlockLoad = cub::BlockLoad<IH,
	                                 block_dim,
	                                 num_scan_elements_per_thread,
	                                 cub::BLOCK_LOAD_WARP_TRANSPOSE>;
	using BlockScan = cub::BlockScan<IH, block_dim>;
	using BlockStore = cub::BlockStore<IH,
	                                   block_dim,
	                                   num_scan_elements_per_thread,
	                                   cub::BLOCK_STORE_WARP_TRANSPOSE>;

	// Per-pass selection state, kept in shared memory (TempStorage).
	struct triplet_t {
		int k;                   // remaining number of values to select
		uint64_t prefix;         // bit prefix accumulated so far
		int selected_values_pos; // output cursor for select_if
	};

	struct TempStorage {
		union {
			typename BlockLoad::TempStorage block_load;
			typename BlockScan::TempStorage block_scan;
			typename BlockStore::TempStorage block_store;
		};
		triplet_t triplet;
	};

	/*! \brief Determine the bit prefix shared by the k largest |values|.
	 *  \return tuple of (prefix, number of prefix bits).
	 */
	template <typename T, typename I0>
	static CUDA_FD thrust::tuple<uint64_t, int>
	k_largest_values_abs_radix_block(const T* values,
	                                 I0 N,
	                                 IH* uninitialized_histograms,
	                                 int k,
	                                 TempStorage& temp_storage) {

		const int tid = threadIdx.x;
		auto unary_functor = [](T x) {
			using std::abs;
			return abs(x);
		};
		// Create the histogram from large to small values
		auto bin_index_transform = [](auto i) {
			return histogram_length - i - 1;
		};
		uint64_t prefix = 0;
		for (int bit_offset = 0; bit_offset < int(sizeof(T) * 8);
		     bit_offset += 8) {
			//
			// Initialize the histograms
			//
			device_function::implicit_unroll::fill(
			    uninitialized_histograms,
			    0,
			    tid,
			    block_dim,
			    num_warps * histogram_length);
			auto* histograms =
			    uninitialized_histograms; // rename for better readability
			__syncthreads();
			//
			// Bin values into one histogram per warp
			//
			thrustshift::device_function::implicit_unroll::bin_values256(
			    values,
			    N,
			    histograms,
			    num_histograms,
			    0, // tile start
			    1, // tile increment
			    tid,
			    block_dim,
			    bit_offset,
			    prefix,
			    unary_functor,
			    bin_index_transform);
			__syncthreads();
			//
			// Sum all histograms
			//
			if (num_histograms > 1) {
				thrustshift::device_function::implicit_unroll::
				    sum_subsequent_into(histograms,
				                        histograms,
				                        tid,
				                        block_dim,
				                        histogram_length,
				                        num_warps);
				__syncthreads();
			}
			// The first histogram is now the sum of all histograms in shared memory
			//
			// Scan the histogram
			//
			IH hvalues[num_scan_elements_per_thread];
			IH hcumulative_values[num_scan_elements_per_thread];
			BlockLoad(temp_storage.block_load).Load(histograms, hvalues);
			__syncthreads(); // necessary due to reuse of temporary memory
			BlockScan(temp_storage.block_scan)
			    .InclusiveSum(hvalues, hcumulative_values);
			__syncthreads();
			// Create helper array to have the value of our left neighbour
			IH hcumulative_values2[num_scan_elements_per_thread + 1];
#pragma unroll
			for (int j = 0; j < num_scan_elements_per_thread; ++j) {
				hcumulative_values2[j + 1] = hcumulative_values[j];
			}
			BlockStore(
			    temp_storage
			        .block_store) // block store changes the register values
			    .Store(histograms, hcumulative_values);
			__syncthreads();
			// Left-neighbour value of our first element comes from the
			// previous thread's last scanned element (0 for thread 0).
			if (tid > 0) {
				hcumulative_values2[0] =
				    histograms[tid * num_scan_elements_per_thread - 1];
			}
			else {
				hcumulative_values2[0] = 0;
			}
#pragma unroll
			for (int j = 0; j < num_scan_elements_per_thread; ++j) {
				const int l = j + tid * num_scan_elements_per_thread;
				const int i = histogram_length - l - 1;
				// The bin where the cumulative count crosses k holds the
				// next 8 bits of the prefix.
				if (hcumulative_values2[j + 1] >= k &&
				    hcumulative_values2[j] < k) {
					// Only one thread is expected to enter this branch
					prefix = (prefix << 8) | uint64_t(i);
					// all values with this prefix and larger are included
					temp_storage.triplet.k =
					    hcumulative_values2[j + 1] == k
					        ? 0
					        : k - hcumulative_values2[j];
					temp_storage.triplet.prefix = prefix;
					// This is set for subsequent function execution to avoid one
					// additional __syncthreads();
					temp_storage.triplet.selected_values_pos = 0;
				}
			}
			__syncthreads();
			k = temp_storage.triplet.k;
			prefix = temp_storage.triplet.prefix;
			if (k == 0) {
				return {prefix, bit_offset + 8};
			}
		}
		return {prefix, sizeof(T) * 8};
	}

	/*! \brief Select the k (value, index) pairs with largest |value|.
	 *
	 *  First determines the radix prefix of the k-th largest magnitude, then
	 *  compacts all qualifying (value, index) tuples via `select_if`.
	 */
	template <typename It, typename ItSelected, typename I0>
	static CUDA_FD void select_k_largest_values_with_index_abs(
	    It values,
	    I0 N,
	    ItSelected selected_values,
	    IH* uninitialized_histograms,
	    int k,
	    TempStorage& temp_storage) {

		const int tid = threadIdx.x;
		auto tup = k_largest_values_abs_radix_block(
		    values, N, uninitialized_histograms, k, temp_storage);
		auto prefix = thrust::get<0>(tup);
		auto bit_offset = thrust::get<1>(tup);
		using T = typename std::remove_const<
		    typename std::iterator_traits<It>::value_type>::type;
		auto cit = thrust::make_counting_iterator(0);
		auto it = thrust::make_zip_iterator(thrust::make_tuple(values, cit));
		// Keep every value whose leading `bit_offset` bits are >= prefix.
		auto select_op = [prefix, bit_offset] __device__(
		                     const thrust::tuple<T, int>& tup) {
			using std::abs;
			auto x = abs(thrust::get<0>(tup));
			using I =
			    typename thrustshift::make_uintegral_of_equal_size<T>::type;
			const I i = *reinterpret_cast<I*>((void*) (&x));
			return (i >> sizeof(I) * 8 - bit_offset) >= static_cast<I>(prefix);
		};
		device_function::implicit_unroll::select_if(
		    it,
		    N,
		    selected_values,
		    &temp_storage.triplet.selected_values_pos,
		    tid,
		    block_dim,
		    select_op);
	}
};

} // namespace device_function

namespace kernel {

// Thin kernel wrapper around the explicit-unroll column-wise sum.
template <typename T, int num_threads, int N, int n>
__global__ void sum_subsequent_into(const T* p, T* result) {
	const int tid = threadIdx.x;
	thrustshift::device_function::explicit_unroll::
	    sum_subsequent_into<T, num_threads, N, n>(p, result, tid);
}

// One 256-bin histogram per block is written to global memory; the caller
// must sum the per-block histograms afterwards.
template <typename T,
          int block_dim,
          int grid_dim,
          int num_sh_histograms,
          class F0,
          class F1>
__global__ void bin_values256(const T* data,
                              int N,
                              int* histograms,
                              int bit_offset,
                              uint64_t prefix,
                              F0
                                  unary_functor,
                              F1 bin_index_transform) {
	constexpr int histogram_length = 256;
	constexpr int all_sh_histograms_length =
	    histogram_length * num_sh_histograms;
	using histogram_value_type = unsigned;
	__shared__ histogram_value_type sh_histograms[all_sh_histograms_length];
	const int tid = threadIdx.x;
	device_function::explicit_unroll::
	    fill<histogram_value_type, block_dim, all_sh_histograms_length>(
	        sh_histograms, 0, tid);
	__syncthreads();
	device_function::implicit_unroll::bin_values256(data,
	                                                N,
	                                                sh_histograms,
	                                                num_sh_histograms,
	                                                blockIdx.x,
	                                                grid_dim,
	                                                tid,
	                                                block_dim,
	                                                bit_offset,
	                                                prefix,
	                                                unary_functor,
	                                                bin_index_transform);
	__syncthreads();
	//
	// Sum all histograms
	//
	if (num_sh_histograms > 1) {
		thrustshift::device_function::explicit_unroll::sum_subsequent_into<
		    histogram_value_type,
		    block_dim,
		    histogram_length,
		    num_sh_histograms>(sh_histograms, sh_histograms, tid);
		__syncthreads();
	}
	// Publish this block's histogram to its slot in global memory.
	thrustshift::block_copy<block_dim, histogram_length>(
	    sh_histograms, histograms + blockIdx.x * histogram_length);
}

// Variant which also reduces the per-block histograms on the device: entry
// and exit tickets select the block IDs, and the last block to finish sums
// all per-block histograms into histograms[0..255].
template <typename T,
          int block_dim,
          int grid_dim,
          int num_sh_histograms,
          class F0,
          class F1>
__global__ void bin_values256_threadfence(const T* data,
                                          int N,
                                          volatile int* histograms,
                                          int bit_offset,
                                          uint64_t prefix,
                                          F0 unary_functor,
                                          F1 bin_index_transform,
                                          unsigned* entry_ticket,
                                          unsigned* exit_ticket) {
	constexpr int histogram_length = 256;
	constexpr int all_sh_histograms_length =
	    histogram_length * num_sh_histograms;
	using histogram_value_type = int;
	__shared__ histogram_value_type sh_histograms[all_sh_histograms_length];
	const int tid = threadIdx.x;
	device_function::explicit_unroll::
	    fill<histogram_value_type, block_dim, all_sh_histograms_length>(
	        sh_histograms, 0, tid);
	__shared__ int sh_entry_bid;
	__shared__ int sh_exit_bid;
	// Take a ticket instead of using blockIdx.x directly.
	if (tid == 0) {
		sh_entry_bid = atomicInc(entry_ticket, grid_dim);
	}
	__syncthreads();
	int bid = sh_entry_bid;
	device_function::implicit_unroll::bin_values256(data,
	                                                N,
	                                                sh_histograms,
	                                                num_sh_histograms,
	                                                bid,
	                                                grid_dim,
	                                                tid,
	                                                block_dim,
	                                                bit_offset,
	                                                prefix,
	                                                unary_functor,
	                                                bin_index_transform);
	__syncthreads();
	//
	// Sum all histograms
	//
	if (num_sh_histograms > 1) {
		thrustshift::device_function::explicit_unroll::sum_subsequent_into<
		    histogram_value_type,
		    block_dim,
		    histogram_length,
		    num_sh_histograms>(sh_histograms, sh_histograms, tid);
		__syncthreads();
	}
	thrustshift::block_copy<block_dim, histogram_length>(
	    sh_histograms, histograms + bid * histogram_length);
	// To my understanding this sync threads is necessary because it might happen that
	// thread 0 does not observe the write of all threads of our histogram.
	__syncthreads();
	__threadfence();
	if (tid == 0) {
		sh_exit_bid = atomicInc(exit_ticket, grid_dim);
	}
	__syncthreads();
	// The block drawing the last exit ticket reduces all histograms.
	if (sh_exit_bid == grid_dim - 1) {
		static_assert(block_dim >= histogram_length);
		if (tid < histogram_length) {
			histogram_value_type h = 0;
#pragma unroll
			for (int histo_id = 0; histo_id < grid_dim; ++histo_id) {
				h += histograms[histo_id * histogram_length + tid];
			}
			histograms[tid] = h;
		}
	}
}

// Variant which accumulates every block's histogram into one global
// histogram with atomicAdd (histogram must be zero-initialized).
template <typename T,
          int block_dim,
          int grid_dim,
          int num_sh_histograms,
          class F0,
          class F1>
__global__ void bin_values256_atomic(const T* data,
                                     int N,
                                     int* histogram,
                                     int bit_offset,
                                     uint64_t prefix,
                                     F0 unary_functor,
                                     F1 bin_index_transform) {
	constexpr int histogram_length = 256;
	constexpr int all_sh_histograms_length =
	    histogram_length * num_sh_histograms;
	using histogram_value_type = int;
	__shared__ histogram_value_type sh_histograms[all_sh_histograms_length];
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	device_function::explicit_unroll::
	    fill<histogram_value_type, block_dim, all_sh_histograms_length>(
	        sh_histograms, 0, tid);
	__syncthreads();
	device_function::implicit_unroll::bin_values256(data,
	                                                N,
	                                                sh_histograms,
	                                                num_sh_histograms,
	                                                bid,
	                                                grid_dim,
	                                                tid,
	                                                block_dim,
	                                                bit_offset,
	                                                prefix,
	                                                unary_functor,
	                                                bin_index_transform);
	__syncthreads();
	//
	// Sum all histograms
	//
	if (num_sh_histograms > 1) {
		thrustshift::device_function::explicit_unroll::sum_subsequent_into<
		    histogram_value_type,
		    block_dim,
		    histogram_length,
		    num_sh_histograms>(sh_histograms, sh_histograms, tid);
		__syncthreads();
	}
	// Fold this block's histogram into the single global one.
	if (tid < histogram_length) {
		atomicAdd(histogram + tid, sh_histograms[tid]);
	}
}

// Like bin_values256_atomic, but `k` and `prefix` can be read from device
// pointers (for device-side chaining); with use_k0_and_zero_prefix the
// compile-time start values k0 / prefix==0 are used instead.
template <typename T,
          int block_dim,
          int grid_dim,
          int num_sh_histograms,
          bool use_k0_and_zero_prefix,
          class F0,
          class F1>
__global__ void bin_values256_atomic_with_ptr(const T* data,
                                              int N,
                                              int* histogram,
                                              int bit_offset,
                                              uint64_t* prefix_,
                                              int* k_,
                                              int k0,
                                              F0 unary_functor,
                                              F1 bin_index_transform) {
	constexpr int histogram_length = 256;
	constexpr int all_sh_histograms_length =
	    histogram_length * num_sh_histograms;
	using histogram_value_type = int;
	__shared__ histogram_value_type sh_histograms[all_sh_histograms_length];
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	const int k = [&] {
		if constexpr (use_k0_and_zero_prefix) {
			return k0;
		}
		else {
			return *k_;
		}
	}();
	// Nothing left to select -> whole kernel is a no-op.
	if (k <= 0) {
		return;
	}
	const uint64_t prefix = [&]() -> uint64_t {
		if constexpr (use_k0_and_zero_prefix) {
			return 0;
		}
		else {
			return *prefix_;
		}
	}();
	device_function::explicit_unroll::
	    fill<histogram_value_type, block_dim, all_sh_histograms_length>(
	        sh_histograms, 0, tid);
	__syncthreads();
	device_function::implicit_unroll::bin_values256(data,
	                                                N,
	                                                sh_histograms,
	                                                num_sh_histograms,
	                                                bid,
	                                                grid_dim,
	                                                tid,
	                                                block_dim,
	                                                bit_offset,
	                                                prefix,
	                                                unary_functor,
	                                                bin_index_transform);
	__syncthreads();
	//
	// Sum all histograms
	//
	if (num_sh_histograms > 1) {
		thrustshift::device_function::explicit_unroll::sum_subsequent_into<
		    histogram_value_type,
		    block_dim,
		    histogram_length,
		    num_sh_histograms>(sh_histograms, sh_histograms, tid);
		__syncthreads();
	}
	if (tid < histogram_length) {
		atomicAdd(histogram + tid, sh_histograms[tid]);
	}
}

// Grid-cooperative radix selection: all blocks bin, synchronize via
// cooperative groups, and block 0 updates k/prefix each 8-bit pass.
// Must be launched as a cooperative kernel (grid.sync()).
template <typename T,
          typename IH,
          int block_dim,
          int grid_dim,
          int num_sh_histograms>
__global__ void k_select_radix(const T* data,
                               int N,
                               IH* histograms,
                               int* bit_offset_,
                               uint64_t* prefix_,
                               int* k_) {
	constexpr int histogram_length = 256;
	constexpr int all_sh_histograms_length =
	    histogram_length * num_sh_histograms;
	auto unary_functor = [](T x) {
		using std::abs;
		return abs(x);
	};
	using BlockScan = cub::BlockScan<IH, block_dim>;
	__shared__ union {
		typename BlockScan::TempStorage block_scan;
		IH histograms[all_sh_histograms_length];
	} temp_storage;
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	uint64_t prefix;
	int k;
	auto grid = cooperative_groups::this_grid();
	auto bin_index_transform = [](auto i) { return histogram_length - i - 1; };
	for (int bit_offset = 0; bit_offset < int(sizeof(T) * 8);
	     bit_offset += 8) {
		// Re-read the state block 0 published in the previous pass.
		k = *k_;
		prefix = *prefix_;
		if (k == 0) {
			return;
		}
		device_function::explicit_unroll::
		    fill<IH, block_dim, all_sh_histograms_length>(
		        temp_storage.histograms, 0, tid);
		__syncthreads();
		device_function::implicit_unroll::bin_values256(data,
		                                                N,
		                                                temp_storage.histograms,
		                                                num_sh_histograms,
		                                                blockIdx.x,
		                                                grid_dim,
		                                                tid,
		                                                block_dim,
		                                                bit_offset,
		                                                prefix,
		                                                unary_functor,
		                                                bin_index_transform);
		__syncthreads();
		//
		// Sum all histograms
		//
		if (num_sh_histograms > 1) {
			thrustshift::device_function::explicit_unroll::sum_subsequent_into<
			    IH,
			    block_dim,
			    histogram_length,
			    num_sh_histograms>(
			    temp_storage.histograms, temp_storage.histograms, tid);
			__syncthreads();
		}
		IH h = temp_storage.histograms[tid];
		IH hcum;
		// it is faster if all blocks do already the scan before
		// the histograms are summed up.
		BlockScan(temp_storage.block_scan).InclusiveSum(h, hcum);
		histograms[bid * histogram_length + tid] = hcum;
		//
		// Store block histogram to global memory
		//
		// thrustshift::block_copy<block_dim, histogram_length>(
		//     temp_storage.histograms, histograms + bid * histogram_length);
		grid.sync();
		// Block 0 reduces all per-block scans and publishes the new state.
		if (bid == 0) {
			static_assert(histogram_length == block_dim);
#pragma unroll
			for (int histo_id = 1; histo_id < grid_dim; ++histo_id) {
				hcum += histograms[histo_id * histogram_length + tid];
			}
			// BlockScan(temp_storage.block_scan).InclusiveSum(h, hcum);
			// __syncthreads();
			temp_storage.histograms[tid] = hcum;
			__syncthreads();
			IH hleft = 0;
			if (tid > 0) {
				hleft = temp_storage.histograms[tid - 1];
			}
			const int i = histogram_length - tid - 1;
			if (hcum >= k && hleft < k) {
				// Only one thread is expected to enter this branch
				prefix = (prefix << 8) | uint64_t(i);
				// all values with this prefix and larger are included
				*k_ = hcum == k ? 0 : k - hleft;
				*prefix_ = prefix;
				*bit_offset_ = bit_offset + 8;
			}
			__syncthreads();
		}
		grid.sync();
	}
}

// The summation is done redundantly
template <typename T,
          typename IH,
          int block_dim,
          int grid_dim,
          int num_sh_histograms>
__global__ void k_select_radix2(const T* data,
                                int N,
                                IH* histograms,
                                int* bit_offset_,
                                uint64_t* prefix_,
                                int k) {
	constexpr int histogram_length = 256;
	constexpr int all_sh_histograms_length =
	    histogram_length * num_sh_histograms;
	auto unary_functor = [](T x) {
		using std::abs;
		return abs(x);
	};
	using BlockScan = cub::BlockScan<IH, block_dim>;
	__shared__ union {
		typename BlockScan::TempStorage block_scan;
		IH histograms[all_sh_histograms_length];
	} temp_storage;
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	__shared__ uint64_t sh_prefix;
	__shared__ int sh_k;
	uint64_t prefix = 0;
	auto grid = cooperative_groups::this_grid();
	auto bin_index_transform = [](auto i) { return histogram_length - i - 1; };
	// `l` indexes the pass so each pass writes a fresh histogram slab.
	for (int bit_offset = 0, l = 0; bit_offset < int(sizeof(T) * 8);
	     bit_offset += 8, ++l) {
		if (k == 0) {
			return;
		}
		device_function::explicit_unroll::
		    fill<IH, block_dim, all_sh_histograms_length>(
		        temp_storage.histograms, 0, tid);
		__syncthreads();
		device_function::implicit_unroll::bin_values256(data,
		                                                N,
		                                                temp_storage.histograms,
		                                                num_sh_histograms,
		                                                blockIdx.x,
		                                                grid_dim,
		                                                tid,
		                                                block_dim,
		                                                bit_offset,
		                                                prefix,
		                                                unary_functor,
		                                                bin_index_transform);
		__syncthreads();
		//
		// Sum all histograms
		//
		if (num_sh_histograms > 1) {
			thrustshift::device_function::explicit_unroll::sum_subsequent_into<
			    IH,
			    block_dim,
			    histogram_length,
			    num_sh_histograms>(
			    temp_storage.histograms, temp_storage.histograms, tid);
			__syncthreads();
		}
		IH h = temp_storage.histograms[tid];
		IH hcum;
		// it is faster if all blocks do already the scan before
		// the histograms are summed up.
		BlockScan(temp_storage.block_scan).InclusiveSum(h, hcum);
		histograms[l * grid_dim * histogram_length + bid * histogram_length +
		           tid] = hcum;
		//
		// Store block histogram to global memory
		//
		// thrustshift::block_copy<block_dim, histogram_length>(
		//     temp_storage.histograms, histograms + bid * histogram_length);
		grid.sync();
		// Every block reduces redundantly (no second grid sync needed).
		static_assert(histogram_length == block_dim);
#pragma unroll
		for (int histo_id = 1; histo_id < grid_dim; ++histo_id) {
			hcum += histograms[l * grid_dim * histogram_length +
			                   histo_id * histogram_length + tid];
		}
		// BlockScan(temp_storage.block_scan).InclusiveSum(h, hcum);
		// __syncthreads();
		temp_storage.histograms[tid] = hcum;
		__syncthreads();
		IH hleft = 0;
		if (tid > 0) {
			hleft = temp_storage.histograms[tid - 1];
		}
		const int i = histogram_length - tid - 1;
		if (hcum >= k && hleft < k) {
			// Only one thread is expected to enter this branch
			prefix = (prefix << 8) | uint64_t(i);
			// all values with this prefix and larger are included
			sh_k = hcum == k ?
			           0 : k - hleft;
			sh_prefix = prefix;
			// Only block 0 publishes the result to global memory.
			if (bid == 0) {
				*prefix_ = prefix;
				*bit_offset_ = bit_offset + 8;
			}
		}
		__syncthreads();
		k = sh_k;
		prefix = sh_prefix;
	}
}

// Single-block parent kernel: each pass launches a child grid via dynamic
// parallelism to bin, then scans the summed histogram itself.
template <typename T,
          typename IH,
          int block_dim,
          int num_sh_histograms,
          int num_gl_histograms>
__global__ void k_select_radix_dynamic_parallelism(const T* data,
                                                   int N,
                                                   IH* histograms,
                                                   int* bit_offset_,
                                                   uint64_t* prefix_,
                                                   int k) {
	constexpr int histogram_length = 256;
	auto unary_functor = [](T x) {
		using std::abs;
		return abs(x);
	};
	using BlockScan = cub::BlockScan<IH, block_dim>;
	__shared__ union {
		typename BlockScan::TempStorage block_scan;
		IH histogram[histogram_length];
	} temp_storage;
	__shared__ int sh_k;
	__shared__ uint64_t sh_prefix;
	static_assert(block_dim == 256);
	constexpr int child_grid_dim = num_gl_histograms;
	constexpr int child_block_dim = 256;
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	uint64_t prefix = 0;
	auto bin_index_transform = [](auto i) { return histogram_length - i - 1; };
#pragma unroll
	for (int bit_offset = 0; bit_offset < int(sizeof(T) * 8);
	     bit_offset += 8) {
		if (k == 0) {
			return;
		}
		// Thread 0 launches the child binning grid and waits for it.
		if (tid == 0) {
			kernel::bin_values256<T,
			                      child_block_dim,
			                      child_grid_dim,
			                      num_sh_histograms>
			    <<<child_grid_dim, child_block_dim>>>(data,
			                                          N,
			                                          histograms,
			                                          bit_offset,
			                                          prefix,
			                                          unary_functor,
			                                          bin_index_transform);
			cudaDeviceSynchronize();
		}
		__syncthreads();
		IH h = 0;
		IH hcum;
#pragma unroll
		for (int histo_id = 0; histo_id < num_gl_histograms; ++histo_id) {
			h += histograms[histo_id * histogram_length + tid];
		}
		BlockScan(temp_storage.block_scan).InclusiveSum(h, hcum);
		__syncthreads();
		temp_storage.histogram[tid] = hcum;
		__syncthreads();
		IH hleft = 0;
		if (tid > 0) {
			hleft = temp_storage.histogram[tid - 1];
		}
		if (hcum >= k && hleft < k) {
			const int i = histogram_length - tid - 1;
			// Only one thread is expected to enter this branch
			prefix = (prefix << 8) | uint64_t(i);
			// all values with this prefix and larger are included
			k = hcum == k ? 0 : k - hleft;
			// Write to shared memory
			sh_k = k;
			sh_prefix = prefix;
			// Write to global memory
			//*k_ = k;
			*prefix_ = prefix;
			*bit_offset_ = bit_offset + 8;
		}
		__syncthreads();
		k = sh_k;
		prefix = sh_prefix;
	}
}

// Like the kernel above, but the child grid bins atomically into one global
// histogram (this kernel zeroes it each pass). Expects a single parent block.
template <typename T,
          typename IH,
          int block_dim,
          int num_sh_histograms,
          int child_grid_dim>
__global__ void k_select_radix_dynamic_parallelism_atomic_binning(
    const T* data,
    int N,
    IH* histogram,
    int* bit_offset_,
    uint64_t* prefix_,
    int k) {
	constexpr int histogram_length = 256;
	auto unary_functor = [](T x) {
		using std::abs;
		return abs(x);
	};
	using BlockScan = cub::BlockScan<IH, block_dim>;
	__shared__ union {
		typename BlockScan::TempStorage block_scan;
		IH histogram[histogram_length];
	} temp_storage;
	__shared__ int sh_k;
	__shared__ uint64_t sh_prefix;
	static_assert(block_dim == 256);
	constexpr int child_block_dim = 256;
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	gsl_ExpectsAudit(bid == 0);
	uint64_t prefix = 0;
	auto bin_index_transform = [](auto i) { return histogram_length - i - 1; };
#pragma unroll
	for (int bit_offset = 0; bit_offset < int(sizeof(T) * 8);
	     bit_offset += 8) {
		if (k == 0) {
			return;
		}
		// Zero the global histogram before the child grid accumulates.
		histogram[tid] = 0;
		__syncthreads();
		if (tid == 0) {
			kernel::bin_values256_atomic<T,
			                             child_block_dim,
			                             child_grid_dim,
			                             num_sh_histograms>
			    <<<child_grid_dim, child_block_dim>>>(data,
			                                          N,
			                                          histogram,
			                                          bit_offset,
			                                          prefix,
			                                          unary_functor,
			                                          bin_index_transform);
			cudaDeviceSynchronize();
		}
		__syncthreads();
		IH h = histogram[tid];
		IH hcum;
		BlockScan(temp_storage.block_scan).InclusiveSum(h, hcum);
		__syncthreads();
		temp_storage.histogram[tid] = hcum;
		__syncthreads();
		IH hleft = 0;
		if (tid > 0) {
			hleft = temp_storage.histogram[tid - 1];
		}
		if (hcum >= k && hleft < k) {
			const int i = histogram_length - tid - 1;
			// Only one thread is expected to enter this branch
			prefix = (prefix << 8) | uint64_t(i);
			// all values with this prefix and larger are included
			k = hcum == k ?
			        0 : k - hleft;
			// Write to shared memory
			sh_k = k;
			sh_prefix = prefix;
			// Write to global memory
			//*k_ = k;
			*prefix_ = prefix;
			*bit_offset_ = bit_offset + 8;
		}
		__syncthreads();
		k = sh_k;
		prefix = sh_prefix;
	}
}

// Single 8-bit selection step on an already-built histogram: updates *k_ and
// writes the selected bin as the (first) prefix byte.
template <typename IH, int block_dim>
__global__ void k_select_radix_from_histogram(IH* histogram,
                                              int bit_offset,
                                              uint64_t* prefix_, // only output
                                              int* k_) {
	constexpr int histogram_length = 256;
	using BlockScan = cub::BlockScan<IH, block_dim>;
	__shared__ union {
		typename BlockScan::TempStorage block_scan;
		IH histogram[histogram_length];
	} temp_storage;
	static_assert(block_dim == 256);
	const int tid = threadIdx.x;
	const int bid = blockIdx.x; // NOTE(review): unused; kernel expects one block
	uint64_t prefix = 0;
	int k = *k_;
	IH h = histogram[tid];
	IH hcum;
	BlockScan(temp_storage.block_scan).InclusiveSum(h, hcum);
	__syncthreads();
	temp_storage.histogram[tid] = hcum;
	__syncthreads();
	IH hleft = 0;
	if (tid > 0) {
		hleft = temp_storage.histogram[tid - 1];
	}
	if (hcum >= k && hleft < k) {
		const int i = histogram_length - tid - 1;
		// Only one thread is expected to enter this branch
		prefix = uint64_t(i);
		// all values with this prefix and larger are included
		k = hcum == k ? 0 : k - hleft;
		*k_ = k;
		*prefix_ = prefix;
	}
}

// Same selection step, but prefix/bit_offset/k live behind device pointers so
// several steps can be chained; with initialize_offset_prefix_k the state is
// seeded from k0 / 0 / 0 instead of being read back.
template <typename IH, int block_dim, bool initialize_offset_prefix_k>
__global__ void k_select_radix_from_histogram_with_ptr(IH* histogram,
                                                       int* bit_offset_,
                                                       uint64_t* prefix_,
                                                       int* k_,
                                                       int k0) {
	constexpr int histogram_length = 256;
	using BlockScan = cub::BlockScan<IH, block_dim>;
	__shared__ union {
		typename BlockScan::TempStorage block_scan;
		IH histogram[histogram_length];
	} temp_storage;
	static_assert(block_dim == 256);
	const int tid = threadIdx.x;
	const int bid = blockIdx.x; // NOTE(review): unused; kernel expects one block
	uint64_t prefix = [&]() -> uint64_t {
		if constexpr (initialize_offset_prefix_k) {
			return 0;
		}
		else {
			return *prefix_;
		}
	}();
	int k = [&] {
		if constexpr (initialize_offset_prefix_k) {
			return k0;
		}
		else {
			return *k_;
		}
	}();
	int bit_offset = [&] {
		if constexpr (initialize_offset_prefix_k) {
			return 0;
		}
		else {
			return *bit_offset_;
		}
	}();
	// Selection already finished in a previous step.
	if (k <= 0) {
		return;
	}
	IH h = histogram[tid];
	IH hcum;
	BlockScan(temp_storage.block_scan).InclusiveSum(h, hcum);
	__syncthreads();
	temp_storage.histogram[tid] = hcum;
	__syncthreads();
	IH hleft = 0;
	if (tid > 0) {
		hleft = temp_storage.histogram[tid - 1];
	}
	if (hcum >= k && hleft < k) {
		const int i = histogram_length - tid - 1;
		// Only one thread is expected to enter this branch
		prefix = (prefix << 8) | uint64_t(i);
		// all values with this prefix and larger are included
		k = hcum == k ?
0 : k - hleft; *k_ = k; *prefix_ = prefix; *bit_offset_ = bit_offset + 8; } } } // namespace kernel namespace async { template <typename T, class MemoryResource, class F0, class F1> void bin_values256(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<int> histogram, int bit_offset, uint64_t prefix, F0 unary_functor, F1 bin_index_transform, MemoryResource& delayed_memory_resource) { constexpr int histogram_length = 256; gsl_Expects(histogram.size() == histogram_length); constexpr int block_dim = 256; // RTX 2080 Ti has 68 SMs, `4*` to get best occupancy in practice constexpr int num_histograms = 4 * 68; constexpr int num_sh_histograms = 1; auto c = cuda::make_launch_config(num_histograms, block_dim); auto tmp_mem = make_not_a_vector<int>(num_histograms * histogram_length, delayed_memory_resource); std::cout << "bit_offset = " << bit_offset << ", prefix = " << prefix << std::endl; auto histograms = tmp_mem.to_span(); cuda::enqueue_launch(kernel::bin_values256<T, block_dim, num_histograms, num_sh_histograms, F0, F1>, stream, c, values.data(), values.size(), histograms.data(), bit_offset, prefix, unary_functor, bin_index_transform); cuda::enqueue_launch(kernel::sum_subsequent_into<int, block_dim, histogram_length, num_histograms>, stream, cuda::make_launch_config(1, 256), histograms.data(), histogram.data()); } template <typename T, class MemoryResource, class F0, class F1> void bin_values256(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<int> histogram, int bit_offset, uint64_t prefix, int block_dim, int num_histograms, int num_sh_histograms, F0 unary_functor, F1 bin_index_transform, MemoryResource& delayed_memory_resource) { constexpr int histogram_length = 256; gsl_Expects(histogram.size() == histogram_length); auto block_dim_v = makeshift::expand( block_dim, MAKESHIFT_CONSTVAL(std::array{64, 128, 256, 512})); auto num_histograms_v = makeshift::expand( num_histograms, MAKESHIFT_CONSTVAL(std::array{68, 2 * 68, 3 * 68, 4 * 
68, 8 * 68})); auto num_sh_histograms_v = makeshift::expand( num_sh_histograms, MAKESHIFT_CONSTVAL(std::array{1, 2, 3, 4, 8})); std::visit( [&](auto block_dim, auto num_histograms, auto num_sh_histograms) { auto num_warps = block_dim / warp_size; gsl_Expects(num_warps % num_sh_histograms == 0); gsl_Expects(num_warps >= num_sh_histograms); auto c = cuda::make_launch_config(int(num_histograms), int(block_dim)); auto tmp_mem = make_not_a_vector<int>( num_histograms * histogram_length, delayed_memory_resource); auto histograms = tmp_mem.to_span(); cuda::enqueue_launch(kernel::bin_values256<T, block_dim, num_histograms, num_sh_histograms, F0, F1>, stream, c, values.data(), values.size(), histograms.data(), bit_offset, prefix, unary_functor, bin_index_transform); cuda::enqueue_launch(kernel::sum_subsequent_into<int, block_dim, histogram_length, num_histograms>, stream, cuda::make_launch_config(1, 256), histograms.data(), histogram.data()); }, block_dim_v, num_histograms_v, num_sh_histograms_v); } template <typename T, class MemoryResource, class F0, class F1> void bin_values256_threadfence(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<int> histogram, int bit_offset, uint64_t prefix, int block_dim, int num_histograms, int num_sh_histograms, F0 unary_functor, F1 bin_index_transform, MemoryResource& delayed_memory_resource) { constexpr int histogram_length = 256; gsl_Expects(histogram.size() == histogram_length); auto block_dim_v = makeshift::expand(block_dim, MAKESHIFT_CONSTVAL(std::array{256, 512})); auto num_histograms_v = makeshift::expand( num_histograms, MAKESHIFT_CONSTVAL(std::array{68, 2 * 68, 3 * 68, 4 * 68, 8 * 68})); auto num_sh_histograms_v = makeshift::expand( num_sh_histograms, MAKESHIFT_CONSTVAL(std::array{1, 2, 3, 4, 8})); auto tmp = make_not_a_vector<unsigned>(2, delayed_memory_resource); auto tickets = tmp.to_span(); std::visit( [&](auto block_dim, auto num_histograms, auto num_sh_histograms) { async::fill(stream, tickets, 0); auto 
num_warps = block_dim / warp_size; gsl_Expects(num_warps % num_sh_histograms == 0); gsl_Expects(num_warps >= num_sh_histograms); auto c = cuda::make_launch_config(int(num_histograms), int(block_dim)); auto tmp_mem = make_not_a_vector<int>( num_histograms * histogram_length, delayed_memory_resource); auto histograms = tmp_mem.to_span(); cuda::enqueue_launch( kernel::bin_values256_threadfence<T, block_dim, num_histograms, num_sh_histograms, F0, F1>, stream, c, values.data(), values.size(), histograms.data(), bit_offset, prefix, unary_functor, bin_index_transform, tickets.data(), tickets.data() + 1); }, block_dim_v, num_histograms_v, num_sh_histograms_v); } template <typename T, class MemoryResource, class F0, class F1> void bin_values256_atomic(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<int> histogram, int bit_offset, uint64_t prefix, int block_dim, int grid_dim, int num_sh_histograms, F0 unary_functor, F1 bin_index_transform, MemoryResource& delayed_memory_resource) { constexpr int histogram_length = 256; gsl_Expects(histogram.size() == histogram_length); auto block_dim_v = makeshift::expand(block_dim, MAKESHIFT_CONSTVAL(std::array{256, 512})); auto grid_dim_v = makeshift::expand( grid_dim, MAKESHIFT_CONSTVAL(std::array{68, 2 * 68, 3 * 68, 4 * 68, 8 * 68})); auto num_sh_histograms_v = makeshift::expand( num_sh_histograms, MAKESHIFT_CONSTVAL(std::array{1, 2, 3, 4, 8})); std::visit( [&](auto block_dim, auto grid_dim, auto num_sh_histograms) { async::fill(stream, histogram, 0); auto num_warps = block_dim / warp_size; gsl_Expects(num_warps % num_sh_histograms == 0); gsl_Expects(num_warps >= num_sh_histograms); auto c = cuda::make_launch_config(int(grid_dim), int(block_dim)); cuda::enqueue_launch(kernel::bin_values256_atomic<T, block_dim, grid_dim, num_sh_histograms, F0, F1>, stream, c, values.data(), values.size(), histogram.data(), bit_offset, prefix, unary_functor, bin_index_transform); }, block_dim_v, grid_dim_v, num_sh_histograms_v); } 
template <typename T, class MemoryResource, class F0, class F1> void bin_values256_atomic_with_ptr(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<int> histogram, int bit_offset, uint64_t* prefix, int* k, int k0, bool use_k0_and_zero_prefix, int block_dim, int grid_dim, int num_sh_histograms, F0 unary_functor, F1 bin_index_transform, MemoryResource& delayed_memory_resource) { constexpr int histogram_length = 256; gsl_Expects(histogram.size() == histogram_length); auto block_dim_v = makeshift::expand(block_dim, MAKESHIFT_CONSTVAL(std::array{256, 512})); auto grid_dim_v = makeshift::expand( grid_dim, MAKESHIFT_CONSTVAL(std::array{68, 2 * 68, 3 * 68, 4 * 68, 8 * 68})); auto num_sh_histograms_v = makeshift::expand( num_sh_histograms, MAKESHIFT_CONSTVAL(std::array{1, 2, 3, 4, 8})); auto use_k0_and_zero_prefix_v = makeshift::expand( use_k0_and_zero_prefix, MAKESHIFT_CONSTVAL(std::array{0, 1})); std::visit( [&](auto block_dim, auto grid_dim, auto num_sh_histograms, auto use_k0_and_zero_prefix) { async::fill(stream, histogram, 0); auto num_warps = block_dim / warp_size; gsl_Expects(num_warps % num_sh_histograms == 0); gsl_Expects(num_warps >= num_sh_histograms); auto c = cuda::make_launch_config(int(grid_dim), int(block_dim)); cuda::enqueue_launch( kernel::bin_values256_atomic_with_ptr<T, block_dim, grid_dim, num_sh_histograms, use_k0_and_zero_prefix, F0, F1>, stream, c, values.data(), values.size(), histogram.data(), bit_offset, prefix, k, k0, unary_functor, bin_index_transform); }, block_dim_v, grid_dim_v, num_sh_histograms_v, use_k0_and_zero_prefix_v); } } // namespace async template <typename T, class MemoryResource> std::tuple<uint64_t, int> k_largest_values_abs_radix( cuda::stream_t& stream, gsl_lite::span<const T> values, int k, MemoryResource& delayed_memory_resource) { auto unary_functor = [] __device__(T x) { using std::abs; return abs(x); }; using IH = int; auto bin_index_transform = [] __device__(IH i) { return i; }; constexpr int 
histogram_length = 256; auto tmp = make_not_a_vector<IH>(histogram_length, delayed_memory_resource); auto histogram = tmp.to_span(); // { // const size_t s = histogram.size() * sizeof(IH); // void* ptr = reinterpret_cast<void*>(histogram.data()); // cuda::memory::managed::region_t mr{ptr, s}; // static auto device = stream.device(); // static bool b = false; // if (!b) { // mr.set_preferred_location(device); // b = true; // } // } uint64_t prefix = 0; for (int bit_offset = 0; bit_offset < int(sizeof(T) * 8); bit_offset += 8) { async::bin_values256<T>( stream, values, histogram, bit_offset, prefix, 256, // block dim // params determined empirically by benchmarks 3 * 68, // num histos 1, // num sh histo unary_functor, bin_index_transform, delayed_memory_resource); stream.synchronize(); int acc = histogram[histogram_length - 1]; int acc_prev = 0; for (int i = histogram_length - 2; i >= 0; --i) { acc += histogram[i]; if (acc >= k) { prefix = (prefix << 8) | uint64_t(i); if (acc == k) { // all values with this prefix and larger are included return {prefix, bit_offset + 8}; } k = k - acc_prev; break; } acc_prev = acc; } } return {prefix, sizeof(T) * 8}; } template <typename T, class MemoryResource> std::tuple<uint64_t, int> k_largest_values_abs_radix_atomic( cuda::stream_t& stream, gsl_lite::span<const T> values, int k, MemoryResource& delayed_memory_resource) { auto unary_functor = [] __device__(T x) { using std::abs; return abs(x); }; using IH = int; auto bin_index_transform = [] __device__(IH i) { return i; }; constexpr int histogram_length = 256; auto tmp = make_not_a_vector<IH>(histogram_length, delayed_memory_resource); auto histogram = tmp.to_span(); //{ // const size_t s = histogram.size() * sizeof(IH); // void* ptr = reinterpret_cast<void*>(histogram.data()); // cuda::memory::managed::region_t mr{ptr, s}; // static auto device = stream.device(); // static bool b = false; // if (!b) { // //mr.set_preferred_location(device); // //cudaMemAdvise(ptr, s, 
cudaMemAdviseSetAccessedBy, cudaCpuDeviceId); // cudaMemAdvise(ptr, s, cudaMemAdviseSetAccessedBy, device.id()); // b = true; // } //} uint64_t prefix = 0; for (int bit_offset = 0; bit_offset < int(sizeof(T) * 8); bit_offset += 8) { async::bin_values256_atomic<T>( stream, values, histogram, bit_offset, prefix, 256, // block dim // params determined empirically by benchmarks 68 * 4, // grid dim 1, // num sh histo unary_functor, bin_index_transform, delayed_memory_resource); stream.synchronize(); int acc = histogram[histogram_length - 1]; int acc_prev = 0; for (int i = histogram_length - 2; i >= 0; --i) { acc += histogram[i]; if (acc >= k) { prefix = (prefix << 8) | uint64_t(i); if (acc == k) { // all values with this prefix and larger are included return {prefix, bit_offset + 8}; } k = k - acc_prev; break; } acc_prev = acc; } } return {prefix, sizeof(T) * 8}; // HACK BEGIN // constexpr std::array<uint64_t, 4> prefixes{0, 75, 19224, 4921493}; // for (int bit_offset = 0, i = 0; bit_offset < int(sizeof(T) * 8); // bit_offset += 8, ++i) { // uint64_t prefix = prefixes[i]; // async::bin_values256_atomic<T>( // stream, // values, // histogram, // bit_offset, // prefix, // 256, // block dim // params determined empirically by benchmarks // 68 * 4, // grid dim // 1, // num sh histo // unary_functor, // bin_index_transform, // delayed_memory_resource); // stream.synchronize(); // // int acc = histogram[histogram_length - 1]; // // int acc_prev = 0; // // for (int i = histogram_length - 2; i >= 0; --i) { // // acc += histogram[i]; // // if (acc >= k) { // // prefix = (prefix << 8) | uint64_t(i); // // if (acc == k) { // // // all values with this prefix and larger are included // // return {prefix, bit_offset + 8}; // // } // // k = k - acc_prev; // // break; // // } // // acc_prev = acc; // // } // } // return {prefixes[3], sizeof(T) * 8}; // HACK END } template <typename T, class MemoryResource> std::tuple<uint64_t, int> k_largest_values_abs_radix_atomic_devicehisto( 
cuda::stream_t& stream, gsl_lite::span<const T> values, int k, MemoryResource& delayed_memory_resource) { auto unary_functor = [] __device__(T x) { using std::abs; return abs(x); }; using IH = int; constexpr int histogram_length = 256; auto bin_index_transform = [] __device__(IH i) { return histogram_length - i - 1; }; auto tmp = make_not_a_vector<IH>(histogram_length, delayed_memory_resource); auto tmp0 = make_not_a_vector<uint64_t>(1, delayed_memory_resource); auto tmp1 = make_not_a_vector<int>(1, delayed_memory_resource); auto prefix_s = tmp0.to_span(); auto k_s = tmp1.to_span(); async::fill(stream, k_s, k); auto histogram = tmp.to_span(); uint64_t prefix = 0; for (int bit_offset = 0; bit_offset < int(sizeof(T) * 8); bit_offset += 8) { async::bin_values256_atomic<T>( stream, values, histogram, bit_offset, prefix, 256, // block dim // params determined empirically by benchmarks 68 * 4, // grid dim 1, // num sh histo unary_functor, bin_index_transform, delayed_memory_resource); cuda::enqueue_launch(kernel::k_select_radix_from_histogram<IH, 256>, stream, cuda::make_launch_config(1, 256), histogram.data(), bit_offset, prefix_s.data(), k_s.data()); stream.synchronize(); prefix = (prefix << 8) | prefix_s[0]; k = k_s[0]; if (k <= 0) { return {prefix, bit_offset + 8}; } } return {prefix, sizeof(T) * 8}; // HACK BEGIN // constexpr std::array<uint64_t, 4> prefixes{0, 75, 19224, 4921493}; // for (int bit_offset = 0, i = 0; bit_offset < int(sizeof(T) * 8); // bit_offset += 8, ++i) { // uint64_t prefix = prefixes[i]; // async::bin_values256_atomic<T>( // stream, // values, // histogram, // bit_offset, // prefix, // 256, // block dim // params determined empirically by benchmarks // 68 * 4, // grid dim // 1, // num sh histo // unary_functor, // bin_index_transform, // delayed_memory_resource); // cuda::enqueue_launch(kernel::k_select_radix_from_histogram<IH, 256>, // stream, // cuda::make_launch_config(1, 256), // histogram.data(), // bit_offset, // prefix_s.data(), // 
k_s.data()); // // stream.synchronize(); // // prefix = (prefix << 8) | prefix_s[0]; // // k = k_s[0]; // // if (k <= 0) { // // return {prefix, bit_offset + 8}; // // } // } // return {prefixes[3], sizeof(T) * 8}; // HACK END } namespace async { template <typename T, class MemoryResource> void k_largest_values_abs_radix_atomic_devicehisto_with_ptr( cuda::stream_t& stream, gsl_lite::span<const T> values, uint64_t* prefix, // only output int* bit_offset, //only output int k, MemoryResource& delayed_memory_resource) { auto unary_functor = [] __device__(T x) { using std::abs; return abs(x); }; using IH = int; constexpr int histogram_length = 256; auto bin_index_transform = [] __device__(IH i) { return histogram_length - i - 1; }; auto tmp = make_not_a_vector<IH>(histogram_length, delayed_memory_resource); auto tmp1 = make_not_a_vector<int>(1, delayed_memory_resource); auto k_s = tmp1.to_span(); gsl_lite::span<int> bit_offset_s({bit_offset, 1}); gsl_lite::span<uint64_t> prefix_s({prefix, 1}); auto histogram = tmp.to_span(); for (int bit_offset = 0; bit_offset < int(sizeof(T) * 8); bit_offset += 8) { async::bin_values256_atomic_with_ptr<T>( stream, values, histogram, bit_offset, prefix_s.data(), k_s.data(), k, bit_offset == 0, 256, // block dim // params determined empirically by benchmarks 68 * 4, // grid dim 1, // num sh histo unary_functor, bin_index_transform, delayed_memory_resource); auto flag_v = makeshift::expand(bit_offset == 0, MAKESHIFT_CONSTVAL(std::array{0, 1})); std::visit( [&](auto flag) { cuda::enqueue_launch( kernel:: k_select_radix_from_histogram_with_ptr<IH, 256, flag>, stream, cuda::make_launch_config(1, 256), histogram.data(), bit_offset_s.data(), prefix_s.data(), k_s.data(), k); }, flag_v); } } } // namespace async template <typename T, class MemoryResource> std::tuple<uint64_t, int> k_largest_values_abs_radix_with_cub( cuda::stream_t& stream, gsl_lite::span<const T> values, int k, MemoryResource& delayed_memory_resource) { constexpr int 
histogram_length = 256; auto tmp = make_not_a_vector<int>(histogram_length, delayed_memory_resource); auto histogram = tmp.to_span(); using I = typename thrustshift::make_uintegral_of_equal_size<T>::type; uint64_t prefix = 0; I lower_level = 0; // inclusive I upper_level = std::numeric_limits<I>::max(); // exclusive const int N = values.size(); for (int bit_offset = 0; bit_offset < int(sizeof(T) * 8); bit_offset += 8) { auto sample_iterator = thrust::make_transform_iterator( values.data(), [prefix, bit_offset] __device__(const T& x) { using std::abs; const T abs_x = abs(x); const I i = *reinterpret_cast<I*>((void*) (&abs_x)); return i; }); async::bin_values_into_histogram(stream, sample_iterator, histogram.begin(), histogram_length, lower_level, upper_level, N, delayed_memory_resource); stream.synchronize(); int acc = histogram[histogram_length - 1]; int acc_prev = 0; for (int i = histogram_length - 2; i >= 0; --i) { acc += histogram[i]; if (acc >= k) { prefix = (prefix << 8) | uint64_t(i); if (acc == k) { // all values with this prefix and larger are included return {prefix, bit_offset + 8}; } k = k - acc_prev; break; } acc_prev = acc; } lower_level = prefix << (sizeof(I) * 8 - (bit_offset + 8)); upper_level = (prefix + 1) << (sizeof(I) * 8 - (bit_offset + 8)); } return {prefix, sizeof(T) * 8}; } // selected_values.size() == values.size() because CUB might select more values, if e.g. 
values are all equal template <typename T, class MemoryResource> void select_k_largest_values_abs(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<T> selected_values, gsl_lite::span<int> selected_indices, int k, MemoryResource& delayed_memory_resource) { const auto tup = thrustshift::k_largest_values_abs_radix<T>( stream, values, k, delayed_memory_resource); const auto prefix = std::get<0>(tup); const auto bit_offset = std::get<1>(tup); auto select_op = [prefix, bit_offset] __device__(const thrust::tuple<T, int>& tup) { using std::abs; auto x = abs(thrust::get<0>(tup)); using I = typename thrustshift::make_uintegral_of_equal_size<T>::type; const I i = *reinterpret_cast<I*>((void*) (&x)); return (i >> sizeof(I) * 8 - bit_offset) >= static_cast<I>(prefix); }; auto tmp = make_not_a_vector<int>(1, delayed_memory_resource); auto num_selected = tmp.to_span(); async::select_if_with_index(stream, values, selected_values, selected_indices, num_selected.data(), select_op, delayed_memory_resource); stream.synchronize(); } template <typename T, class MemoryResource> void select_k_largest_values_abs_atomic( cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<T> selected_values, gsl_lite::span<int> selected_indices, int k, MemoryResource& delayed_memory_resource) { const auto tup = thrustshift::k_largest_values_abs_radix_atomic<T>( stream, values, k, delayed_memory_resource); const auto prefix = std::get<0>(tup); const auto bit_offset = std::get<1>(tup); auto select_op = [prefix, bit_offset] __device__(const thrust::tuple<T, int>& tup) { using std::abs; auto x = abs(thrust::get<0>(tup)); using I = typename thrustshift::make_uintegral_of_equal_size<T>::type; const I i = *reinterpret_cast<I*>((void*) (&x)); return (i >> sizeof(I) * 8 - bit_offset) >= static_cast<I>(prefix); }; auto tmp = make_not_a_vector<int>(1, delayed_memory_resource); auto num_selected = tmp.to_span(); async::select_if_with_index(stream, values, selected_values, 
selected_indices, num_selected.data(), select_op, delayed_memory_resource); stream.synchronize(); } // selected_values.size() == values.size() because CUB might select more values, if e.g. values are all equal template <typename T, class MemoryResource> void select_k_largest_values_abs_with_cub( cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<T> selected_values, gsl_lite::span<int> selected_indices, int k, MemoryResource& delayed_memory_resource) { const auto tup = thrustshift::k_largest_values_abs_radix_with_cub<T>( stream, values, k, delayed_memory_resource); const auto prefix = std::get<0>(tup); const auto bit_offset = std::get<1>(tup); auto select_op = [prefix, bit_offset] __device__(const thrust::tuple<T, int>& tup) { using std::abs; auto x = abs(thrust::get<0>(tup)); using I = typename thrustshift::make_uintegral_of_equal_size<T>::type; const I i = *reinterpret_cast<I*>((void*) (&x)); return (i >> sizeof(I) * 8 - bit_offset) >= static_cast<I>(prefix); }; auto tmp = make_not_a_vector<int>(1, delayed_memory_resource); auto num_selected = tmp.to_span(); async::select_if_with_index(stream, values, selected_values, selected_indices, num_selected.data(), select_op, delayed_memory_resource); stream.synchronize(); } namespace cooperative { template <typename T, class MemoryResource> void select_k_largest_values_abs(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<T> selected_values, gsl_lite::span<int> selected_indices, int k, MemoryResource& delayed_memory_resource) { constexpr int histogram_length = 256; constexpr int block_dim = 256; constexpr int grid_dim = 2 * 68; constexpr int num_sh_histograms = 1; auto tmp = make_not_a_vector<int>(3, delayed_memory_resource); auto tmp1 = make_not_a_vector<uint64_t>(1, delayed_memory_resource); auto tmp2 = make_not_a_vector<unsigned>(histogram_length * grid_dim, delayed_memory_resource); auto num_selected = tmp.to_span().first(1); auto bit_offset_s = tmp.to_span().subspan(1, 1); 
auto prefix_s = tmp1.to_span().first(1); auto histograms = tmp2.to_span(); auto k_s = tmp.to_span().subspan(2, 1); k_s[0] = k; auto c = cuda::make_launch_config(grid_dim, block_dim); c.block_cooperation = true; const int N = values.size(); cuda::enqueue_launch( kernel:: k_select_radix<T, unsigned, block_dim, grid_dim, num_sh_histograms>, stream, c, values.data(), N, histograms.data(), bit_offset_s.data(), prefix_s.data(), k_s.data()); stream.synchronize(); const auto prefix = prefix_s[0]; const auto bit_offset = bit_offset_s[0]; auto select_op = [prefix, bit_offset] __device__(const thrust::tuple<T, int>& tup) { using std::abs; auto x = abs(thrust::get<0>(tup)); using I = typename thrustshift::make_uintegral_of_equal_size<T>::type; const I i = *reinterpret_cast<I*>((void*) (&x)); return (i >> sizeof(I) * 8 - bit_offset) >= static_cast<I>(prefix); }; async::select_if_with_index(stream, values, selected_values, selected_indices, num_selected.data(), select_op, delayed_memory_resource); stream.synchronize(); } template <typename T, class MemoryResource> void select_k_largest_values_abs2(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<T> selected_values, gsl_lite::span<int> selected_indices, int k, MemoryResource& delayed_memory_resource) { constexpr int histogram_length = 256; constexpr int block_dim = 256; constexpr int grid_dim = 2 * 68; constexpr int num_sh_histograms = 1; auto tmp = make_not_a_vector<int>(3, delayed_memory_resource); auto tmp1 = make_not_a_vector<uint64_t>(1, delayed_memory_resource); auto tmp2 = make_not_a_vector<unsigned>(histogram_length * grid_dim * 4, delayed_memory_resource); auto num_selected = tmp.to_span().first(1); auto bit_offset_s = tmp.to_span().subspan(1, 1); auto prefix_s = tmp1.to_span().first(1); auto histograms = tmp2.to_span(); auto k_s = tmp.to_span().subspan(2, 1); k_s[0] = k; auto c = cuda::make_launch_config(grid_dim, block_dim); c.block_cooperation = true; const int N = values.size(); 
cuda::enqueue_launch(kernel::k_select_radix2<T, unsigned, block_dim, grid_dim, num_sh_histograms>, stream, c, values.data(), N, histograms.data(), bit_offset_s.data(), prefix_s.data(), k); stream.synchronize(); const auto prefix = prefix_s[0]; const auto bit_offset = bit_offset_s[0]; auto select_op = [prefix, bit_offset] __device__(const thrust::tuple<T, int>& tup) { using std::abs; auto x = abs(thrust::get<0>(tup)); using I = typename thrustshift::make_uintegral_of_equal_size<T>::type; const I i = *reinterpret_cast<I*>((void*) (&x)); return (i >> sizeof(I) * 8 - bit_offset) >= static_cast<I>(prefix); }; async::select_if_with_index(stream, values, selected_values, selected_indices, num_selected.data(), select_op, delayed_memory_resource); stream.synchronize(); } } // namespace cooperative namespace dynamic_parallelism { template <typename T, class MemoryResource> std::tuple<uint64_t, int> k_largest_values_abs_radix_atomic_binning( cuda::stream_t& stream, gsl_lite::span<const T> values, int k, bool nosync, MemoryResource& delayed_memory_resource) { constexpr int histogram_length = 256; constexpr int block_dim = 256; constexpr int child_grid_dim = 68 * 4; constexpr int num_sh_histograms = 1; using IH = int; auto tmp = make_not_a_vector<int>(1, delayed_memory_resource); auto tmp1 = make_not_a_vector<uint64_t>(1, delayed_memory_resource); auto tmp2 = make_not_a_vector<IH>(histogram_length, delayed_memory_resource); auto bit_offset_s = tmp.to_span(); auto prefix_s = tmp1.to_span(); auto histogram = tmp2.to_span(); auto c = cuda::make_launch_config(1, block_dim); const int N = values.size(); cuda::enqueue_launch( kernel::k_select_radix_dynamic_parallelism_atomic_binning< T, IH, block_dim, num_sh_histograms, child_grid_dim>, stream, c, values.data(), N, histogram.data(), bit_offset_s.data(), prefix_s.data(), k); // Alternatively give CUB select if a lambda, which accesses the values, which are already on the GPU if (!nosync) { stream.synchronize(); return {prefix_s[0], 
bit_offset_s[0]}; } else { return {0, 0}; } } template <typename T, class MemoryResource> void select_k_largest_values_abs(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<T> selected_values, gsl_lite::span<int> selected_indices, int k, MemoryResource& delayed_memory_resource) { constexpr int histogram_length = 256; constexpr int block_dim = 256; constexpr int num_sh_histograms = 1; constexpr int num_gl_histograms = 1 * 68; using IH = int; auto tmp = make_not_a_vector<int>(3, delayed_memory_resource); auto tmp1 = make_not_a_vector<uint64_t>(1, delayed_memory_resource); auto tmp2 = make_not_a_vector<IH>(histogram_length * num_gl_histograms, delayed_memory_resource); auto num_selected = tmp.to_span().first(1); auto bit_offset_s = tmp.to_span().subspan(1, 1); auto prefix_s = tmp1.to_span().first(1); auto histograms = tmp2.to_span(); // auto k_s = tmp.to_span().subspan(2, 1); // k_s[0] = k; auto c = cuda::make_launch_config(1, block_dim); const int N = values.size(); cuda::enqueue_launch( kernel::k_select_radix_dynamic_parallelism<T, IH, block_dim, num_sh_histograms, num_gl_histograms>, stream, c, values.data(), N, histograms.data(), bit_offset_s.data(), prefix_s.data(), k); stream.synchronize(); const auto prefix = prefix_s[0]; const auto bit_offset = bit_offset_s[0]; auto select_op = [prefix, bit_offset] __device__(const thrust::tuple<T, int>& tup) { using std::abs; auto x = abs(thrust::get<0>(tup)); using I = typename thrustshift::make_uintegral_of_equal_size<T>::type; const I i = *reinterpret_cast<I*>((void*) (&x)); return (i >> sizeof(I) * 8 - bit_offset) >= static_cast<I>(prefix); }; async::select_if_with_index(stream, values, selected_values, selected_indices, num_selected.data(), select_op, delayed_memory_resource); stream.synchronize(); } } // namespace dynamic_parallelism namespace async { template <typename T, class MemoryResource> void select_k_largest_values_abs(cuda::stream_t& stream, gsl_lite::span<const T> values, gsl_lite::span<T> 
selected_values, gsl_lite::span<int> selected_indices, int k, MemoryResource& delayed_memory_resource) { auto tmp0 = make_not_a_vector<uint64_t>(1, delayed_memory_resource); auto prefix_s = tmp0.to_span(); auto tmp1 = make_not_a_vector<int>(1, delayed_memory_resource); auto bit_offset_s = tmp1.to_span(); uint64_t* prefix_ptr = prefix_s.data(); int* bit_offset_ptr = bit_offset_s.data(); async::k_largest_values_abs_radix_atomic_devicehisto_with_ptr<T>( stream, values, prefix_ptr, bit_offset_ptr, k, delayed_memory_resource); auto select_op = [prefix_ptr, bit_offset_ptr] __device__( const thrust::tuple<T, int>& tup) { using std::abs; auto x = abs(thrust::get<0>(tup)); using I = typename thrustshift::make_uintegral_of_equal_size<T>::type; const I i = *reinterpret_cast<I*>((void*) (&x)); // No performance effects measured when we load these values here from // global memory. Probably they end up in the caches and can be loaded fast. uint64_t prefix = *prefix_ptr; int bit_offset = *bit_offset_ptr; // uint64_t prefix = 1259902258; // int bit_offset = 32; return (i >> sizeof(I) * 8 - bit_offset) >= static_cast<I>(prefix); }; auto tmp = make_not_a_vector<int>(1, delayed_memory_resource); auto num_selected = tmp.to_span(); async::select_if_with_index(stream, values, selected_values, selected_indices, num_selected.data(), select_op, delayed_memory_resource); } } // namespace async } // namespace thrustshift
{ "alphanum_fraction": 0.5631301674, "avg_line_length": 32.0322971336, "ext": "h", "hexsha": "71e9ed42d5b33eee9b230af8ae096525042ad232", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "763805f862e3121374286c927dd6949960bffb84", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "pauleonix/thrustshift", "max_forks_repo_path": "include/thrustshift/k-selection.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "763805f862e3121374286c927dd6949960bffb84", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "pauleonix/thrustshift", "max_issues_repo_path": "include/thrustshift/k-selection.h", "max_line_length": 126, "max_stars_count": null, "max_stars_repo_head_hexsha": "763805f862e3121374286c927dd6949960bffb84", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "pauleonix/thrustshift", "max_stars_repo_path": "include/thrustshift/k-selection.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 18471, "size": 79344 }
/* permutation/gsl_permutation.h
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 2004, 2007 Brian Gough
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __GSL_PERMUTATION_H__
#define __GSL_PERMUTATION_H__

/* Resolve GSL_FUN to the proper linkage specifier: plain `extern` for
 * static builds, dllexport/dllimport when building/consuming a Windows
 * DLL (MSVC). */
#if !defined( GSL_FUN )
#  if !defined( GSL_DLL )
#    define GSL_FUN extern
#  elif defined( BUILD_GSL_DLL )
#    define GSL_FUN extern __declspec(dllexport)
#  else
#    define GSL_FUN extern __declspec(dllimport)
#  endif
#endif

#include <stdlib.h>
#include <gsl/gsl_types.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_inline.h>
#include <gsl/gsl_check_range.h>

/* Wrap the declarations in `extern "C"` when compiled as C++. */
#undef __BEGIN_DECLS
#undef __END_DECLS
#ifdef __cplusplus
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
#else
# define __BEGIN_DECLS /* empty */
# define __END_DECLS /* empty */
#endif

__BEGIN_DECLS

/* A permutation: `data` holds `size` indices (per the GSL manual, a
 * rearrangement of the integers 0 .. size-1). */
struct gsl_permutation_struct
{
  size_t size;
  size_t *data;
};

typedef struct gsl_permutation_struct gsl_permutation;

/* Allocation: _alloc leaves the data uninitialized, _calloc returns the
 * identity permutation; _init resets to identity, _free releases. */
GSL_FUN gsl_permutation *gsl_permutation_alloc (const size_t n);
GSL_FUN gsl_permutation *gsl_permutation_calloc (const size_t n);
GSL_FUN void gsl_permutation_init (gsl_permutation * p);
GSL_FUN void gsl_permutation_free (gsl_permutation * p);
GSL_FUN int gsl_permutation_memcpy (gsl_permutation * dest, const gsl_permutation * src);

/* Binary and formatted I/O of the index array. */
GSL_FUN int gsl_permutation_fread (FILE * stream, gsl_permutation * p);
GSL_FUN int gsl_permutation_fwrite (FILE * stream, const gsl_permutation * p);
GSL_FUN int gsl_permutation_fscanf (FILE * stream, gsl_permutation * p);
GSL_FUN int gsl_permutation_fprintf (FILE * stream, const gsl_permutation * p, const char *format);

/* Accessors. */
GSL_FUN size_t gsl_permutation_size (const gsl_permutation * p);
GSL_FUN size_t * gsl_permutation_data (const gsl_permutation * p);

/* Element swap and consistency check. */
GSL_FUN int gsl_permutation_swap (gsl_permutation * p, const size_t i, const size_t j);
GSL_FUN int gsl_permutation_valid (const gsl_permutation * p);

/* Transformations and combination of permutations. */
GSL_FUN void gsl_permutation_reverse (gsl_permutation * p);
GSL_FUN int gsl_permutation_inverse (gsl_permutation * inv, const gsl_permutation * p);
GSL_FUN int gsl_permutation_next (gsl_permutation * p);
GSL_FUN int gsl_permutation_prev (gsl_permutation * p);
GSL_FUN int gsl_permutation_mul (gsl_permutation * p, const gsl_permutation * pa, const gsl_permutation * pb);

/* Conversion between linear and canonical cycle representations, and
 * cycle/inversion counting. */
GSL_FUN int gsl_permutation_linear_to_canonical (gsl_permutation * q, const gsl_permutation * p);
GSL_FUN int gsl_permutation_canonical_to_linear (gsl_permutation * p, const gsl_permutation * q);
GSL_FUN size_t gsl_permutation_inversions (const gsl_permutation * p);
GSL_FUN size_t gsl_permutation_linear_cycles (const gsl_permutation * p);
GSL_FUN size_t gsl_permutation_canonical_cycles (const gsl_permutation * q);

GSL_FUN INLINE_DECL size_t gsl_permutation_get (const gsl_permutation * p, const size_t i);

#ifdef HAVE_INLINE

/* Inline element access with optional bounds checking: returns 0 and calls
 * the GSL error handler if `i` is out of range. */
INLINE_FUN
size_t
gsl_permutation_get (const gsl_permutation * p, const size_t i)
{
#if GSL_RANGE_CHECK
  if (GSL_RANGE_COND(i >= p->size))
    {
      GSL_ERROR_VAL ("index out of range", GSL_EINVAL, 0);
    }
#endif
  return p->data[i];
}

#endif /* HAVE_INLINE */

__END_DECLS

#endif /* __GSL_PERMUTATION_H__ */
{ "alphanum_fraction": 0.7761866879, "avg_line_length": 33.972972973, "ext": "h", "hexsha": "c6a6f1668ab8e38882835d2c4341086fbe416913", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-10-14T12:45:35.000Z", "max_forks_repo_forks_event_min_datetime": "2020-10-14T12:45:35.000Z", "max_forks_repo_head_hexsha": "df1bbf6bea0b87b8c7c9a99dce213fdc249118f2", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "zzpwahaha/Chimera-Control-Trim", "max_forks_repo_path": "Chimera/3rd_Party/GSL_MSVC/gsl/gsl_permutation.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "df1bbf6bea0b87b8c7c9a99dce213fdc249118f2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "zzpwahaha/Chimera-Control-Trim", "max_issues_repo_path": "Chimera/3rd_Party/GSL_MSVC/gsl/gsl_permutation.h", "max_line_length": 110, "max_stars_count": 1, "max_stars_repo_head_hexsha": "df1bbf6bea0b87b8c7c9a99dce213fdc249118f2", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "zzpwahaha/Chimera-Control-Trim", "max_stars_repo_path": "Chimera/3rd_Party/GSL_MSVC/gsl/gsl_permutation.h", "max_stars_repo_stars_event_max_datetime": "2020-09-28T08:20:20.000Z", "max_stars_repo_stars_event_min_datetime": "2020-09-28T08:20:20.000Z", "num_tokens": 1010, "size": 3771 }
#include <stdio.h> #include <glib.h> #include <gsl/gsl_matrix.h> #define MATRIX_SIZE 3 static double d_matrix[MATRIX_SIZE][MATRIX_SIZE] = { {1., 4., 2.}, {-1., -2., 1.}, {3., 20., 19.}, }; static double d_vector[] = {8., 3., 71.}; int main(void) { printf("Gauss.\n\n"); // Alloc matrix`s gsl_matrix *matrix = gsl_matrix_alloc(MATRIX_SIZE, MATRIX_SIZE); // gsl_matrix *orig_matrix = gsl_matrix_alloc(MATRIX_SIZE, MATRIX_SIZE); gsl_vector *vector = gsl_vector_alloc(MATRIX_SIZE); // gsl_vector *orig_vector = gsl_vector_alloc(MATRIX_SIZE); gsl_vector *result = gsl_vector_alloc(MATRIX_SIZE); // Init matrix for (size_t i = 0; i < MATRIX_SIZE; i++) { for (size_t j = 0; j < MATRIX_SIZE; j++) { gsl_matrix_set(matrix, i, j, d_matrix[i][j]); // gsl_matrix_set(orig_matrix, i, j, d_matrix[i][j]); } } // Init vector for (size_t j = 0; j < MATRIX_SIZE; j++) { gsl_vector_set(vector, j, d_vector[j]); // gsl_vector_set(orig_vector, j, d_vector[j]); } printf("First matrix row:\n"); for (size_t j = 0; j < MATRIX_SIZE; j++) { double el = gsl_matrix_get(matrix, 0, j); printf("%zu - %f\n", j, el); } printf("\n"); // Gauss // Forward // Steps by equations size_t swap_counter = 0; for (size_t step = 0; step < MATRIX_SIZE - 1; step++) { // Walk by matrix rows for (size_t eq_idx = step + 1; eq_idx < MATRIX_SIZE; eq_idx++) { // Multiplier { // Get vector column from submatrix size_t subcol_size = MATRIX_SIZE - eq_idx; gsl_vector_view subcol = gsl_matrix_subcolumn(matrix, eq_idx, eq_idx, subcol_size); // Find max idx gsl_vector *subcol_copy = gsl_vector_alloc(subcol_size); gsl_vector_memcpy(subcol_copy, &subcol.vector); for (size_t i = 0; i < subcol_size; i++) { gsl_vector_set(subcol_copy, i, abs(gsl_vector_get(&subcol.vector, i))); } size_t eq_max_idx = gsl_vector_max_index(&subcol.vector) + eq_idx; // swap rows double cell = gsl_matrix_get(matrix, eq_max_idx, eq_idx); if (cell == 0) { goto err; } else if (eq_idx != eq_max_idx) { gsl_matrix_swap_rows(matrix, eq_idx, eq_max_idx); 
gsl_vector_swap_elements(vector, eq_idx, eq_max_idx); // gsl_matrix_swap_rows(orig_matrix, eq_idx, eq_max_idx); // gsl_vector_swap_elements(orig_vector, eq_idx, eq_max_idx); swap_counter++; } } double multiplier = gsl_matrix_get(matrix, eq_idx, step) / gsl_matrix_get(matrix, step, step); gsl_matrix_set(matrix, eq_idx, step, 0); // Update vector value double vector_val = gsl_vector_get(vector, eq_idx) - multiplier * gsl_vector_get(vector, step); gsl_vector_set(vector, eq_idx, vector_val); // Walk by eq cells for (size_t col = step + 1; col < MATRIX_SIZE; col++) { double cell_val = gsl_matrix_get(matrix, eq_idx, col) - multiplier * gsl_matrix_get(matrix, step, col); gsl_matrix_set(matrix, eq_idx, col, cell_val); } } } // /Forward // det double det = 1; for (size_t i = 0; i < MATRIX_SIZE; i++) { det *= gsl_matrix_get(matrix, i, i); } if (swap_counter % 2 != 0) { det *= -1; } printf("det = %f\n\n", det); // /det // Back for (ssize_t eq_idx = MATRIX_SIZE - 1; eq_idx >= 0; eq_idx--) { //1 double sum = 0; for (size_t col = eq_idx + 1; col <= MATRIX_SIZE - 1; col++) { // 2 sum += gsl_matrix_get(matrix, eq_idx, col) * gsl_vector_get(result, col); } gsl_vector_set(result, eq_idx, (gsl_vector_get(vector, eq_idx) - sum) / gsl_matrix_get(matrix, eq_idx, eq_idx)); } // /Back // /Gauss printf("Result:\n"); for (size_t i = 0; i < MATRIX_SIZE; i++) { printf("%zu - %f\n", i, gsl_vector_get(result, i)); } printf("\nCheck:\n"); for (size_t row = 0; row < MATRIX_SIZE; row++) { double sum = 0; for (size_t col = 0; col < MATRIX_SIZE; col++) { sum += d_matrix[row][col] * gsl_vector_get(result, col); } printf("%f = %f\n", sum, d_vector[row]); } exit(EXIT_SUCCESS); err: fprintf(stderr, "No decision.\n"); exit(EXIT_FAILURE); }
{ "alphanum_fraction": 0.6065109696, "avg_line_length": 25.5361445783, "ext": "c", "hexsha": "1405dfa17bce22819eff719bef72dddb013ebec2", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1b9e14a4a4774b317b96cb99fb5354f45938a870", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "unixs/numerical-la-gauss", "max_forks_repo_path": "src/gauss.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "1b9e14a4a4774b317b96cb99fb5354f45938a870", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "unixs/numerical-la-gauss", "max_issues_repo_path": "src/gauss.c", "max_line_length": 116, "max_stars_count": null, "max_stars_repo_head_hexsha": "1b9e14a4a4774b317b96cb99fb5354f45938a870", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "unixs/numerical-la-gauss", "max_stars_repo_path": "src/gauss.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1245, "size": 4239 }
/** \file AMDiS.h */ #pragma once // std c++ headers #ifndef _USE_MATH_DEFINES #define _USE_MATH_DEFINES #endif #include <cmath> #include <cstdint> // AMDiS includes #include "AMDiS.hpp" #include "MTL4Types.hpp" #include "AdaptInfo.hpp" #include "AdaptInstationary.hpp" #include "AdaptStationary.hpp" #include "Assembler.hpp" #include "BasisFunction.hpp" #include "Boundary.hpp" #include "Boundary.hpp" #include "BoundaryCondition.hpp" #include "BoundaryManager.hpp" #include "CoarseningManager.hpp" #include "CoarseningManager1d.hpp" #include "CoarseningManager2d.hpp" #include "CoarseningManager3d.hpp" #include "CouplingTimeInterface.hpp" #include "CouplingIterationInterface.hpp" #include "CreatorInterface.hpp" #include "CreatorMap.hpp" #include "Debug.hpp" #include "DOFAdmin.hpp" #include "DOFContainer.hpp" #include "DOFIndexed.hpp" #include "DOFIterator.hpp" #include "DOFMatrix.hpp" #include "DOFVector.hpp" #include "DOFVectorOperations.hpp" #include "DirichletBC.hpp" // #include "DualTraverse.hpp" #include "ElInfo.hpp" #include "ElInfo1d.hpp" #include "ElInfo2d.hpp" #include "ElInfo3d.hpp" #include "Element.hpp" #include "ElementDofIterator.hpp" #include "FiniteElemSpace.hpp" #include "FirstOrderTerm.hpp" #include "FixVec.hpp" #include "Flag.hpp" #include "Global.hpp" #include "Initfile.hpp" #include "Lagrange.hpp" #include "LeafData.hpp" #include "Line.hpp" #include "MacroElement.hpp" #include "Marker.hpp" #include "MatrixVector.hpp" #include "MatrixVectorOperations.hpp" #include "Mesh.hpp" #include "MeshStructure.hpp" #include "ComponentTraverseInfo.hpp" #include "Operator.hpp" #include "OperatorTerm.hpp" #include "Parametric.hpp" #include "PeriodicBC.hpp" #include "ProblemStat.hpp" #include "ProblemInstat.hpp" #include "ProblemTimeInterface.hpp" #include "ProblemStatBase.hpp" #include "SecondOrderTerm.hpp" #include "StandardProblemIteration.hpp" #include "Projection.hpp" #include "QPsiPhi.hpp" #include "Quadrature.hpp" #include "RCNeighbourList.hpp" #include 
"RefinementManager.hpp" #include "RefinementManager1d.hpp" #include "RefinementManager2d.hpp" #include "RefinementManager3d.hpp" #include "RobinBC.hpp" #include "SurfaceOperator.hpp" #include "SurfaceQuadrature.hpp" #include "SystemVector.hpp" #include "Tetrahedron.hpp" #include "Traverse.hpp" #include "Traits.hpp" #include "Triangle.hpp" #include "VertexVector.hpp" #include "ZeroOrderTerm.hpp" #include "est/Estimator.hpp" // #include "io/ArhReader.hpp" // #include "io/Arh2Reader.hpp" #include "io/Arh3Reader.hpp" // #include "io/ArhWriter.hpp" // #include "io/Arh2Writer.hpp" #include "io/Arh3Writer.hpp" #include "io/DataCollector.hpp" #include "io/FileWriter.hpp" #include "io/GNUPlotWriter.hpp" #include "io/GridWriter.hpp" #include "io/MacroWriter.hpp" #include "io/PngWriter.hpp" #include "io/PovrayWriter.hpp" #include "io/Spreadsheet.hpp" #include "io/ValueReader.hpp" #include "io/ValueWriter.hpp" #include "io/VtkWriter.hpp" #include "io/VtkVectorWriter.hpp" //#include "io/VtkReader.h" #include "io/Reader.hpp" #include "io/Writer.hpp" // #include "nonlin/ProblemNonLin.h" // #include "nonlin/NonLinSolver.h" #include "solver/ITL_Preconditioner.hpp" #include "solver/ITL_Solver.hpp" #include "solver/LinearSolverInterface.hpp" // #include "time/RosenbrockAdaptInstationary.h" // #include "time/RosenbrockStationary.h" #if HAVE_PARALLEL_DOMAIN_AMDIS #include "parallel/InteriorBoundary.hpp" #include "parallel/MpiHelper.hpp" #include "parallel/ParallelDebug.hpp" #include "parallel/StdMpi.hpp" #include "parallel/ParallelProblemStat.hpp" #if !HAVE_PARALLEL_MTL4 #include "parallel/PetscSolver.hpp" #include "parallel/PetscSolverNavierStokes.hpp" #endif #endif #if HAVE_PETSC #include <petsc.h> #endif
{ "alphanum_fraction": 0.7757185066, "avg_line_length": 25.675862069, "ext": "h", "hexsha": "5db64cfc3e922aba6711cff7c61eb7c7c2d1f90c", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "53c45c81a65752a8fafbb54f9ae6724a86639dcd", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "spraetor/amdis2", "max_forks_repo_path": "src/AMDiS.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "53c45c81a65752a8fafbb54f9ae6724a86639dcd", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "spraetor/amdis2", "max_issues_repo_path": "src/AMDiS.h", "max_line_length": 48, "max_stars_count": 2, "max_stars_repo_head_hexsha": "53c45c81a65752a8fafbb54f9ae6724a86639dcd", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "spraetor/amdis2", "max_stars_repo_path": "src/AMDiS.h", "max_stars_repo_stars_event_max_datetime": "2021-01-03T07:26:27.000Z", "max_stars_repo_stars_event_min_datetime": "2018-07-04T16:44:04.000Z", "num_tokens": 963, "size": 3723 }
/**************************************************************************/
/* This is the main simulation routine. It initializes either from given */
/* inputs or from a checkpoint, runs the simulation, and assembles the   */
/* final results.                                                         */
/*                                                                        */
/* Returns the simulation time reached (tSimEnd); returns -1.0 for an    */
/* invalid interpOrder, or -LARGE if setup fails.                        */
/**************************************************************************/

#include "init.h"
#include "vader.h"
#include "setup.h"
#include <string.h>
#include <gsl/gsl_errno.h>

double
vader(
      /* Restart checkpoint name (empty string for new start) */
      const char *restart_file,
      /* Time parameters */
      const double tStart, const double tEnd,
      /* Equation of state parameters */
      const bool eos_func, const double gamma_val,
      const double delta_val,
      /* Dimensionless viscosity parameters */
      const bool alpha_func, const double alpha_val,
      /* Inner boundary condition parameters */
      const pres_bc_type ibc_pres, const enth_bc_type ibc_enth,
      const bool ibc_func, const double ibc_pres_val,
      const double ibc_enth_val,
      /* Outer boundary condition parameters */
      const pres_bc_type obc_pres, const enth_bc_type obc_enth,
      const bool obc_func, const double obc_pres_val,
      const double obc_enth_val,
      /* Source function parameters */
      const bool massSrc_func, const double massSrc_val,
      const bool intEnSrc_func, const double intEnSrc_val,
      /* Control and method parameters */
      const double dtStart, const double dtMin,
      const double dtTol, const double errTol,
      const double maxDtIncrease, const unsigned long maxIter,
      const unsigned long interpOrder, const unsigned long maxStep,
      const bool useBE, const bool preTimestep_func,
      const bool postTimestep_func, const unsigned long verbosity,
      /* Output control parameters */
      const unsigned long nSave, const double *tSave,
      const unsigned long nUserOut, const bool *userOutCum,
      const bool *writeCheckpoint, const char *checkname,
      const bool userReadCheckpoint, const bool userWriteCheckpoint,
      /* Computational grid (leave as NULL for a restart) */
      grid **grd_ptr,
      /* Starting data (leave as NULL for a restart) */
      double **col_init, double **pres_init, double **eInt_init,
      /* User-defined extra parameters */
      void *params,
      /* Diagnostic outputs */
      unsigned long *nStep, unsigned long *nIter,
      unsigned long *nFail,
      /* Output values; all the doubles should be passed in set to
         NULL, and will be allocated appropriately */
      unsigned long *nOut, double **tOut_ptr,
      double **colOut_ptr, double **presOut_ptr,
      double **eIntOut_ptr, double **mBndOut_ptr,
      double **eBndOut_ptr, double **mSrcOut_ptr,
      double **eSrcOut_ptr, double **userOut_ptr
#ifdef TESTING_MODE
      /* Parameters used in code tests */
      , double *residSum, unsigned long *iterStep,
      double *driverTime, double *advanceTime,
      double *nextIterTime, double *userTime
#endif
      ) {

  /* Local state: setup status, working data pointers, output block
     pointers, and timestep bookkeeping. */
  setup_status set_stat;
  gsl_error_handler_t *gslerr;
  double *col, *pres, *eInt;
  double *tOut, *colOut, *presOut, *eIntOut, *mBndOut, *eBndOut,
    *mSrcOut, *eSrcOut, *userOut;
  const double *tSave_tmp;
  const bool *writeCheckpoint_tmp, *userOutCum_tmp;
  grid *grd;
  unsigned long nOutMax, savePtr, chkNum;
  wksp *w;
  bool writeOut, restart;
  double t, dt, tSimEnd;

  /* Turn off GSL error handling, since we'll handle errors manually */
  gslerr = gsl_set_error_handler_off();

  /* Sanity check on the interpolation order */
  if ((interpOrder < 1) || (interpOrder > 3)) {
    fprintf(stderr,
	    "vader: error: interpOrder must be 1, 2, or 3\n");
    return(-1.0);
  }

  /* Do the initial setup; a restart is requested by a non-NULL,
     non-empty checkpoint file name. */
  if (!restart_file) restart = false;
  else if (strlen(restart_file) == 0) restart = false;
  else restart = true;
  if (!restart) {

    /* We're starting new, so the initial data and grid should already
       be present, so make our working pointers point to them */
    grd = *grd_ptr;
    col = *col_init;
    pres = *pres_init;
    if (eos_func) eInt = *eInt_init; else eInt = NULL;

    /* Initialize time and counters */
    t = tStart;
    *nStep = 1;
    *nIter = *nFail = 0;
    nOutMax = nSave;
    *nOut = savePtr = chkNum = 0;

    /* Call the setup routine; this stores the first outputs if
       necessary and computes the first time step */
    set_stat =
      setup_new(tStart, tEnd,
		eos_func, gamma_val, delta_val,
		alpha_func, alpha_val,
		ibc_pres, ibc_enth, ibc_func, ibc_pres_val, ibc_enth_val,
		obc_pres, obc_enth, obc_func, obc_pres_val, obc_enth_val,
		massSrc_func, massSrc_val,
		intEnSrc_func, intEnSrc_val,
		dtStart, dtMin, dtTol, errTol, maxDtIncrease,
		maxIter, interpOrder, maxStep, useBE,
		userWriteCheckpoint,
		verbosity,
		nSave, tSave, nUserOut,
		writeCheckpoint, checkname,
		grd, &w, col, pres, eInt, params,
		tOut_ptr, colOut_ptr, presOut_ptr, eIntOut_ptr,
		mBndOut_ptr, eBndOut_ptr, mSrcOut_ptr, eSrcOut_ptr,
		userOut_ptr,
		&dt, nOut, &savePtr, &writeOut, &chkNum
#ifdef TESTING_MODE
		, residSum, iterStep, advanceTime, nextIterTime,
		userTime
#endif
		);
    if (set_stat != GOOD_START) return -LARGE;

  } else {

    /* Set up from the checkpoint */
    set_stat =
      setup_checkpoint(restart_file, tStart, tEnd,
		       eos_func, massSrc_func, intEnSrc_func,
		       userReadCheckpoint,
		       verbosity, nSave, tSave, nUserOut, userOutCum,
		       &grd, &col, &pres, &eInt,
		       nStep, nIter, nFail,
		       tOut_ptr, colOut_ptr, presOut_ptr, eIntOut_ptr,
		       mBndOut_ptr, eBndOut_ptr, mSrcOut_ptr,
		       eSrcOut_ptr, userOut_ptr,
		       params, &w, &t, &dt,
		       nOut, &savePtr, &nOutMax, &writeOut, &chkNum);
    if (set_stat != GOOD_START) return -LARGE;

    /* Point inputs to allocated memory */
    *grd_ptr = grd;
    *col_init = col;
    *pres_init = pres;
    if (eos_func) *eInt_init = eInt;
  }

  /* Set pointers to output blocks and output controls; note that some
     of the control flags might be NULL, and we have to make sure to
     preserve those values, because they are significant in driver */
  tOut = *tOut_ptr;
  colOut = *colOut_ptr;
  presOut = *presOut_ptr;
  if (eos_func) eIntOut = *eIntOut_ptr; else eIntOut = NULL;
  mBndOut = *mBndOut_ptr;
  eBndOut = *eBndOut_ptr;
  if (massSrc_func) mSrcOut = *mSrcOut_ptr; else mSrcOut = NULL;
  if (massSrc_func || intEnSrc_func) eSrcOut = *eSrcOut_ptr;
  else eSrcOut = NULL;
  if (nUserOut > 0) userOut = *userOut_ptr; else userOut = NULL;
  /* The *_tmp pointers are advanced past the outputs already saved
     (savePtr of them), so driver sees only the remaining entries. */
  if (tSave) tSave_tmp = tSave + savePtr; else tSave_tmp = NULL;
  if (writeCheckpoint) writeCheckpoint_tmp = writeCheckpoint + savePtr;
  else writeCheckpoint_tmp = NULL;
  if (userOutCum) userOutCum_tmp = userOutCum + savePtr;
  else userOutCum_tmp = NULL;

  /* Now call the driver routine to run the simulation */
  tSimEnd =
    driver(t, tEnd,
	   eos_func, gamma_val, delta_val,
	   alpha_func, alpha_val,
	   ibc_pres, ibc_enth, ibc_func, ibc_pres_val, ibc_enth_val,
	   obc_pres, obc_enth, obc_func, obc_pres_val, obc_enth_val,
	   massSrc_func, massSrc_val,
	   intEnSrc_func, intEnSrc_val,
	   dt, dtMin, dtTol, errTol, maxDtIncrease,
	   maxIter, interpOrder, maxStep, useBE,
	   preTimestep_func, postTimestep_func,
	   verbosity,
	   nOutMax - *nOut, tSave_tmp, nUserOut, userOutCum_tmp,
	   writeCheckpoint_tmp, checkname, userWriteCheckpoint,
	   writeOut, chkNum,
	   grd, w, col, pres, eInt, params,
	   nStep, nIter, nFail,
	   nOut, tOut, colOut, presOut, eIntOut,
	   mBndOut, eBndOut, mSrcOut, eSrcOut,
	   userOut
#ifdef TESTING_MODE
	   , residSum, iterStep, driverTime, advanceTime,
	   nextIterTime, userTime
#endif
	   );

  /* If we didn't use up all the output slots because the calculation
     terminated early, free unneeded memory */
  if (*nOut < nOutMax) {
    outputResize(*nOut, eos_func, massSrc_func, intEnSrc_func,
		 nUserOut, grd,
		 tOut_ptr, colOut_ptr, presOut_ptr, eIntOut_ptr,
		 mBndOut_ptr, eBndOut_ptr, mSrcOut_ptr, eSrcOut_ptr,
		 userOut_ptr);
  }

  /* Free the workspace */
  wkspFree(w);

  /* Return */
  return tSimEnd;
}
{ "alphanum_fraction": 0.6637941498, "avg_line_length": 35.6666666667, "ext": "c", "hexsha": "2389ade5b8081c5216fdeeee9d8a563ee917d57f", "lang": "C", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-11-20T02:11:17.000Z", "max_forks_repo_forks_event_min_datetime": "2021-11-19T04:41:37.000Z", "max_forks_repo_head_hexsha": "646b3136c39da7152c82a032f8151555ec1e3d44", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "franciscaconcha/amuse-vader", "max_forks_repo_path": "src/amuse/community/vader/src/vader.c", "max_issues_count": null, "max_issues_repo_head_hexsha": "646b3136c39da7152c82a032f8151555ec1e3d44", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "franciscaconcha/amuse-vader", "max_issues_repo_path": "src/amuse/community/vader/src/vader.c", "max_line_length": 76, "max_stars_count": null, "max_stars_repo_head_hexsha": "646b3136c39da7152c82a032f8151555ec1e3d44", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "franciscaconcha/amuse-vader", "max_stars_repo_path": "src/amuse/community/vader/src/vader.c", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2307, "size": 8239 }
/* interpolation/linear.c
 * 
 * Copyright (C) 1996, 1997, 1998, 1999, 2000 Gerard Jungman
 * 
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or (at
 * your option) any later version.
 * 
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/* Author:  G. Jungman
 *
 * Piecewise-linear interpolation backend for the gsl_interp framework.
 * Fixed here: a stray double semicolon in linear_eval_deriv.
 */

#include <config.h>
#include <stdlib.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_interp.h>

/* Nothing to precompute for linear interpolation; the evaluators work
   directly from the (x, y) arrays. */
static int
linear_init (void * vstate,
             const double x_array[], const double y_array[],
             size_t size)
{
  return GSL_SUCCESS;
}

/* Evaluate the interpolant at x.  Returns GSL_EINVAL (with *y = 0) if
   the bracketing interval is degenerate (x_hi == x_lo). */
static int
linear_eval (const void * vstate,
             const double x_array[], const double y_array[], size_t size,
             double x,
             gsl_interp_accel * a,
             double *y)
{
  double x_lo, x_hi;
  double y_lo, y_hi;
  double dx;
  size_t index;

  /* Locate the interval containing x, using the accelerator cache
     when one is supplied. */
  if (a != 0)
    {
      index = gsl_interp_accel_find (a, x_array, size, x);
    }
  else
    {
      index = gsl_interp_bsearch (x_array, x, 0, size - 1);
    }

  /* evaluate */
  x_lo = x_array[index];
  x_hi = x_array[index + 1];
  y_lo = y_array[index];
  y_hi = y_array[index + 1];
  dx = x_hi - x_lo;
  if (dx > 0.0)
    {
      *y = y_lo + (x - x_lo) / dx * (y_hi - y_lo);
      return GSL_SUCCESS;
    }
  else
    {
      *y = 0.0;
      return GSL_EINVAL;
    }
}

/* First derivative: the slope of the bracketing segment. */
static int
linear_eval_deriv (const void * vstate,
                   const double x_array[], const double y_array[], size_t size,
                   double x,
                   gsl_interp_accel * a,
                   double *dydx)
{
  double x_lo, x_hi;
  double y_lo, y_hi;
  double dx;
  double dy;
  size_t index;

  if (a != 0)
    {
      index = gsl_interp_accel_find (a, x_array, size, x);
    }
  else
    {
      index = gsl_interp_bsearch (x_array, x, 0, size - 1);
    }

  /* evaluate */
  x_lo = x_array[index];
  x_hi = x_array[index + 1];
  y_lo = y_array[index];
  y_hi = y_array[index + 1];
  dx = x_hi - x_lo;
  dy = y_hi - y_lo;
  if (dx > 0.0)
    {
      *dydx = dy / dx;   /* fixed: original had a stray ';;' here */
      return GSL_SUCCESS;
    }
  else
    {
      *dydx = 0.0;
      return GSL_EINVAL;
    }
}

/* Second derivative of a piecewise-linear function is identically 0. */
static int
linear_eval_deriv2 (const void * vstate,
                    const double x_array[], const double y_array[], size_t size,
                    double x,
                    gsl_interp_accel * a,
                    double *y_pp)
{
  *y_pp = 0.0;
  return GSL_SUCCESS;
}

/* Integrate the interpolant over [a, b] by summing trapezoids, with
   the two end intervals clipped to the integration limits. */
static int
linear_eval_integ (const void * vstate,
                   const double x_array[], const double y_array[], size_t size,
                   gsl_interp_accel * acc,
                   double a, double b,
                   double * result)
{
  size_t i, index_a, index_b;

  if (acc != 0)
    {
      index_a = gsl_interp_accel_find (acc, x_array, size, a);
      index_b = gsl_interp_accel_find (acc, x_array, size, b);
    }
  else
    {
      index_a = gsl_interp_bsearch (x_array, a, 0, size - 1);
      index_b = gsl_interp_bsearch (x_array, b, 0, size - 1);
    }

  /* endpoints span more than one interval */
  *result = 0.0;

  /* interior intervals */
  for(i=index_a; i<=index_b; i++) {
    const double x_hi = x_array[i + 1];
    const double x_lo = x_array[i];
    const double y_lo = y_array[i];
    const double y_hi = y_array[i + 1];
    const double dx = x_hi - x_lo;
    if(dx != 0.0) {
      if (i == index_a || i == index_b)
        {
          /* partial interval: clip integration limits to [a, b] */
          double x1 = (i == index_a) ? a : x_lo;
          double x2 = (i == index_b) ? b : x_hi;
          const double D = (y_hi-y_lo)/dx;
          *result += (x2-x1) * (y_lo + 0.5*D*((x2-x_lo)+(x1-x_lo)));
        }
      else
        {
          /* full interval: plain trapezoid */
          *result += 0.5 * dx * (y_lo + y_hi);
        }
    }
  }

  return GSL_SUCCESS;
}

/* Method table registered with the gsl_interp framework; the minimum
   number of points for linear interpolation is 2. */
static const gsl_interp_type linear_type = 
{
  "linear", 
  2,
  NULL, /* alloc, not applicable */
  &linear_init,
  &linear_eval,
  &linear_eval_deriv,
  &linear_eval_deriv2,
  &linear_eval_integ,
  NULL, /* free, not applicable */
};

const gsl_interp_type * gsl_interp_linear = &linear_type;
{ "alphanum_fraction": 0.5737165423, "avg_line_length": 22.9045226131, "ext": "c", "hexsha": "65ca83ea50a1750ad23cbb8d2e01490e6c8eb9a6", "lang": "C", "max_forks_count": 14, "max_forks_repo_forks_event_max_datetime": "2020-03-12T12:31:25.000Z", "max_forks_repo_forks_event_min_datetime": "2015-07-21T04:47:52.000Z", "max_forks_repo_head_hexsha": "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "Brian-ning/HMNE", "max_forks_repo_path": "Source/BaselineMethods/MNE/C++/gsl-2.4/interpolation/linear.c", "max_issues_count": 6, "max_issues_repo_head_hexsha": "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e", "max_issues_repo_issues_event_max_datetime": "2019-12-22T00:00:16.000Z", "max_issues_repo_issues_event_min_datetime": "2019-12-16T17:41:24.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "Brian-ning/HMNE", "max_issues_repo_path": "Source/BaselineMethods/MNE/C++/gsl-2.4/interpolation/linear.c", "max_line_length": 81, "max_stars_count": 14, "max_stars_repo_head_hexsha": "2c2e7c85f8414cb0e654cb82e9686cce5e75c63a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ielomariala/Hex-Game", "max_stars_repo_path": "gsl-2.6/interpolation/linear.c", "max_stars_repo_stars_event_max_datetime": "2021-06-10T11:31:28.000Z", "max_stars_repo_stars_event_min_datetime": "2015-12-18T18:09:25.000Z", "num_tokens": 1311, "size": 4558 }
#pragma once #define STRICT #define NOMINMAX #include <phnt/phnt_windows.h> #include <phnt/phnt.h> #pragma comment( lib, "ntdll.lib" ) #include <delayimp.h> #ifndef _delayimp_h #define _delayimp_h #endif #pragma comment( lib, "delayimp.lib" ) EXTERN_C const IMAGE_DOS_HEADER __ImageBase; #include <stdlib.h> #include <stdio.h> #include <strsafe.h> #include <stdint.h> #include <string> #include <filesystem> namespace fs = std::filesystem; #include <codecvt> #include <gsl/gsl> #include <wil/token_helpers.h> #include <wil/resource.h> #define DETOURS_INTERNAL #include <detours/detours.h> #define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_DEBUG #define SPDLOG_WCHAR_TO_UTF8_SUPPORT #include <spdlog/spdlog.h> #include <spdlog/sinks/msvc_sink.h> #include <spdlog/fmt/bin_to_hex.h>
{ "alphanum_fraction": 0.7676508344, "avg_line_length": 19.9743589744, "ext": "h", "hexsha": "797cdcb6a5370199ccc922f174f069faad51a6e2", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-12-22T00:15:04.000Z", "max_forks_repo_forks_event_min_datetime": "2021-12-22T00:15:04.000Z", "max_forks_repo_head_hexsha": "321608bb86714ee5137823ade21a81f4e0058c09", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "LowerCode/bnspatch", "max_forks_repo_path": "src/client/pch.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "321608bb86714ee5137823ade21a81f4e0058c09", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "LowerCode/bnspatch", "max_issues_repo_path": "src/client/pch.h", "max_line_length": 46, "max_stars_count": 1, "max_stars_repo_head_hexsha": "321608bb86714ee5137823ade21a81f4e0058c09", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "LowerCode/bnspatch", "max_stars_repo_path": "src/client/pch.h", "max_stars_repo_stars_event_max_datetime": "2021-03-11T17:41:11.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-11T17:41:11.000Z", "num_tokens": 207, "size": 779 }
// Utility header: tracing/debug macros, GSL-backed random number
// helpers, and a seeded, reproducible sequence generator.
#ifndef TOOLS_H
#define TOOLS_H

#include <iostream>
#include <string>
#include <vector>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <math.h>

using namespace std;

//required for TRACE / DEBUG
extern int indentLevel;
extern string BLANKLINE;

//how many blanks to indent
#define INDENTATION 2

// Indentation helpers: slice a prefix of BLANKLINE proportional to the
// current nesting level; *_INC / *_DEC also adjust the level.
#define INDENT BLANKLINE.substr(0,(indentLevel)*INDENTATION)
#define INDENT_INC BLANKLINE.substr(0,(indentLevel++)*INDENTATION)
#define INDENT_DEC BLANKLINE.substr(0,(--indentLevel)*INDENTATION)

// Classic function-style macros; arguments may be evaluated twice.
#define MAX(x,y) ((x) > (y) ? (x) : (y))
#define ABS(x) ((x)>0 ? (x) : -(x))

// OUTPUT* print to stdout only when compiled with -DSTDOUT; otherwise
// they expand to a no-op string expression.
#ifdef STDOUT
#define OUTPUT(x) cout << INDENT << (x) <<"\n";
#define OUTPUT1(x,arg1) cout << INDENT << (x) <<arg1<<"\n";
#define OUTPUT2(x,arg1,arg2) cout << INDENT << (x) <<arg1<<" / " <<arg2<<"\n";
#else
#define OUTPUT(x) ""
#define OUTPUT1(x,arg1) ""
#define OUTPUT2(x,arg1,arg2) ""
#endif //STDOUT

// TRACE_IN/TRACE_OUT log function entry/exit with increasing/decreasing
// indentation when compiled with -DTRACE.
#ifdef TRACE
#define TRACE_IN(x) cout << INDENT_INC << "***TRACE_IN: "<< (x) <<"()\n";
#define TRACE_IN1(x,arg1) cout << INDENT_INC << "***TRACE_IN: "<< (x) <<"("<<arg1<<")\n";
#define TRACE_OUT(x,ok) cout << INDENT_DEC << "***TRACE_OUT: "<< (x) <<"() = " << ok << "\n";
#else
#define TRACE_IN(x) ""
#define TRACE_IN1(x,arg1) ""
#define TRACE_OUT(x,ok) ""
#endif //TRACE

// Verbose trace variant, enabled with -DTRACEV.
#ifdef TRACEV
#define TRACEV_IN(x) cout << INDENT_INC << "***TRACE_IN: "<< (x) <<"()\n";
#define TRACEV_IN1(x,arg1) cout << INDENT_INC << "***TRACE_IN: "<< (x) <<"("<<arg1<<")\n";
#define TRACEV_OUT(x,ok) cout << INDENT_DEC << "***TRACE_OUT: "<< (x) <<"() = " << ok << "\n";
#else
#define TRACEV_IN(x) ""
#define TRACEV_IN1(x,arg1) ""
#define TRACEV_OUT(x,ok) ""
#endif //TRACEV

// Debug printing, enabled with -DDBUG.
#ifdef DBUG
#define DEBUG(x) cout << INDENT << "***DEBUG: "<< (x) <<"\n";
#define DEBUG1(x,arg1) cout << INDENT << "***DEBUG: "<< (x) <<arg1<<"\n";
#define DEBUG2(x,arg1,arg2) cout << INDENT << "***DEBUG: "<< (x) <<arg1<<" / " <<arg2<<"\n";
#else
#define DEBUG(x) ""
#define DEBUG1(x,arg1) ""
#define DEBUG2(x,arg1,arg2) ""
#endif //DBUG

//verbose debug information 
//mainly exact positions (quite big with dim=30)
#ifdef DBUGV
#define DEBUGV(x) cout << "***DEBUG: "<< (x) <<"\n";
#define DEBUGV1(x,arg1) cout << "***DEBUG: "<< (x) <<arg1<<"\n";
#define DEBUGV2(x,arg1,arg2) cout << "***DEBUG: "<< (x) <<arg1<<" / " <<arg2<<"\n";
#else
#define DEBUGV(x) ""
#define DEBUGV1(x,arg1) ""
#define DEBUGV2(x,arg1,arg2) ""
#endif //DBUGV

///Little helper function calculating the maximum height
/**of a regularly built pyramid of
*@param minDegree minimum degree*/
int getMaxHeight(int minDegree, int swarmsize);

//fix the seed for the global RNG
void fixSeed(unsigned seed);

///Free the RNG ressources
void freeRng();

///Return gaussian distributed double from [0;1)
double randDoubleGaussian(double sigma);

///Return double from [0;1)
double randDouble();

///Wrapper to GSL Function call
/**Returns double from range [min,max)*/
double randDoubleRange(double min, double max);

///Wrapper to GSL Function call
/**Returns int from range [min,max]*/
int randIntRange(int min, int max);

// Format a vector of doubles as a printable string.
string printVec(const vector<double>&);

// Euclidean (L2) distance between two equal-length vectors.
double euclideanDistance(const vector<double>& v1, const vector<double>& v2);

///Generate a reproducable sequence
/**The seed is passed and thus the sequence is repeatable*/
class SequenceGenerator {

public:

  ///Pass the seed
  SequenceGenerator(unsigned seed_in);

  ///Destructor
  ~SequenceGenerator();

  ///Return exponentially distributed random variable, with mean mu
  double nextDoubleExponential(double mu);

  ///Wrapper to GSL Function call
  /**Returns double from range [min,max)*/
  double nextDoubleRange(double min, double max);

  ///Wrapper to GSL Function call
  /**Returns int from range [min,max]*/
  int nextIntRange(int min, int max);

private:

  ///The random number generator
  gsl_rng* randGen;

  ///The random seed defining the sequence
  unsigned seed;
};

#endif //ifndef TOOLS_H
{ "alphanum_fraction": 0.6458752515, "avg_line_length": 26.8648648649, "ext": "h", "hexsha": "b31fe39815d4ce2d492bffff84e5389fdc513642", "lang": "C", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "746b2f8eb8eeab27e0af515aa129ad8a00b035e5", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "andrejadd/ABC-bee-opt", "max_forks_repo_path": "HPSO/tools.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "746b2f8eb8eeab27e0af515aa129ad8a00b035e5", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "andrejadd/ABC-bee-opt", "max_issues_repo_path": "HPSO/tools.h", "max_line_length": 94, "max_stars_count": null, "max_stars_repo_head_hexsha": "746b2f8eb8eeab27e0af515aa129ad8a00b035e5", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "andrejadd/ABC-bee-opt", "max_stars_repo_path": "HPSO/tools.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1098, "size": 3976 }
#pragma once #include <gsl/gsl_math.h> #include <gsl/gsl_linalg.h> #include "../vector.h" namespace gsl_wrapper::bits { class MatrixRow { public: // Consructor MatrixRow(gsl_vector_view view); // Operators operator ::gsl_wrapper::Vector() const; auto operator[](const size_t index) -> double &; auto operator[](const size_t index) const -> const double &; private: gsl_vector_view m_view; }; inline MatrixRow::MatrixRow(gsl_vector_view view) : m_view{view} { } inline MatrixRow::operator ::gsl_wrapper::Vector() const { auto space = gsl_vector_calloc(m_view.vector.size); gsl_vector_memcpy(space, &m_view.vector); return ::gsl_wrapper::Vector(space); } inline auto MatrixRow::operator[](const size_t index) -> double & { return *gsl_vector_ptr(&m_view.vector, index); } inline auto MatrixRow::operator[](const size_t index) const -> const double & { return *gsl_vector_const_ptr(&m_view.vector, index); } }
{ "alphanum_fraction": 0.6776119403, "avg_line_length": 20.9375, "ext": "h", "hexsha": "25badfaec83094536c0d3a968cd0a52e0f0d1cc4", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2022-03-10T09:06:07.000Z", "max_forks_repo_forks_event_min_datetime": "2022-03-10T09:06:07.000Z", "max_forks_repo_head_hexsha": "0c9c4edfe751474edbf1a9a23075762cc3362cc1", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "Szynkaa/gsl_cpp_wrapper", "max_forks_repo_path": "include/gsl_wrapper/bits/matrix-view.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "0c9c4edfe751474edbf1a9a23075762cc3362cc1", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "Szynkaa/gsl_cpp_wrapper", "max_issues_repo_path": "include/gsl_wrapper/bits/matrix-view.h", "max_line_length": 79, "max_stars_count": 1, "max_stars_repo_head_hexsha": "0c9c4edfe751474edbf1a9a23075762cc3362cc1", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "Szynkaa/gsl_cpp_wrapper", "max_stars_repo_path": "include/gsl_wrapper/bits/matrix-view.h", "max_stars_repo_stars_event_max_datetime": "2022-03-09T14:35:36.000Z", "max_stars_repo_stars_event_min_datetime": "2022-03-09T14:35:36.000Z", "num_tokens": 263, "size": 1005 }
#include <stdio.h> #include <gsl/gsl_vector.h> int main (void) { int i; gsl_vector * v = gsl_vector_alloc (10); { FILE * f = fopen ("test.dat", "r"); gsl_vector_fscanf (f, v); fclose (f); } for (i = 0; i < 10; i++) { printf ("%g\n", gsl_vector_get(v, i)); } gsl_vector_free (v); return 0; }
{ "alphanum_fraction": 0.5219941349, "avg_line_length": 14.2083333333, "ext": "c", "hexsha": "203986b27e445f9bdaee4b51c252c3a3ca3aebc3", "lang": "C", "max_forks_count": 40, "max_forks_repo_forks_event_max_datetime": "2022-03-03T23:23:37.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-26T15:31:16.000Z", "max_forks_repo_head_hexsha": "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "Brian-ning/HMNE", "max_forks_repo_path": "Source/BaselineMethods/MNE/C++/gsl-2.4/doc/examples/vectorr.c", "max_issues_count": 12, "max_issues_repo_head_hexsha": "1b4ee4c146f526ea6e2f4f8607df7e9687204a9e", "max_issues_repo_issues_event_max_datetime": "2022-03-13T03:54:24.000Z", "max_issues_repo_issues_event_min_datetime": "2020-12-15T08:30:19.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "Brian-ning/HMNE", "max_issues_repo_path": "Source/BaselineMethods/MNE/C++/gsl-2.4/doc/examples/vectorr.c", "max_line_length": 44, "max_stars_count": 64, "max_stars_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "manggoguy/parsec-modified", "max_stars_repo_path": "pkgs/libs/gsl/src/doc/examples/vectorr.c", "max_stars_repo_stars_event_max_datetime": "2022-03-24T13:26:53.000Z", "max_stars_repo_stars_event_min_datetime": "2015-03-06T00:30:56.000Z", "num_tokens": 116, "size": 341 }
/* rng/random.c * * Copyright (C) 1996, 1997, 1998, 1999, 2000 James Theiler, Brian Gough * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <config.h> #include <stdlib.h> #include <gsl/gsl_rng.h> /* This file provides support for random() generators. There are three versions in widespread use today, - The original BSD version, e.g. on SunOS 4.1 and FreeBSD. - The Linux libc5 version, which differs from the BSD version in its seeding procedure, possibly due to the introduction of a typo in the multiplier. - The GNU glibc2 version, which has a new (and better) seeding procedure. They all produce different numbers, due to the different seeding algorithms, but the algorithm for the generator is the same in each case.
*/

/*
 * Implementation notes (review):
 *
 * Below are forward declarations for the generator kernels, the seeding
 * routines in three flavours (bsd, libc5, glibc2), and the state structs.
 * random8 keeps a single word of state; random32, random64, random128 and
 * random256 keep 7, 15, 31 and 63 words of additive-feedback state plus
 * the two lag indices i and j used by random_get.
 *
 * The seeding flavours differ as follows:
 *  - bsd_initialize uses the multiplier 1103515245 (the same constant
 *    that random8_get uses in its recurrence);
 *  - libc5_initialize deliberately uses 1103515145 -- reproducing the
 *    multiplier typo in Linux libc5's seeding described in the file
 *    header above; do NOT "correct" this constant;
 *  - glibc2_initialize uses a 16807 / 127773 / 2836 recurrence
 *    (Schrage-style multiplication, avoiding overflow).
 *
 * Every *_get_double scales the 31-bit output by 1/2147483648.0 (1/2^31).
 * Each per-size _set routine initializes the lag indices (i = 3 or 1,
 * j = 0 depending on table size) and then discards 10*n initial outputs
 * as warm-up.
 */
static inline long int random_get (int * i, int * j, int n, long int * x); static inline unsigned long int random8_get (void *vstate); static inline unsigned long int random32_get (void *vstate); static inline unsigned long int random64_get (void *vstate); static inline unsigned long int random128_get (void *vstate); static inline unsigned long int random256_get (void *vstate); static double random8_get_double (void *vstate); static double random32_get_double (void *vstate); static double random64_get_double (void *vstate); static double random128_get_double (void *vstate); static double random256_get_double (void *vstate); static void random8_glibc2_set (void *state, unsigned long int s); static void random32_glibc2_set (void *state, unsigned long int s); static void random64_glibc2_set (void *state, unsigned long int s); static void random128_glibc2_set (void *state, unsigned long int s); static void random256_glibc2_set (void *state, unsigned long int s); static void random8_libc5_set (void *state, unsigned long int s); static void random32_libc5_set (void *state, unsigned long int s); static void random64_libc5_set (void *state, unsigned long int s); static void random128_libc5_set (void *state, unsigned long int s); static void random256_libc5_set (void *state, unsigned long int s); static void random8_bsd_set (void *state, unsigned long int s); static void random32_bsd_set (void *state, unsigned long int s); static void random64_bsd_set (void *state, unsigned long int s); static void random128_bsd_set (void *state, unsigned long int s); static void random256_bsd_set (void *state, unsigned long int s); static void bsd_initialize (long int * x, int n, unsigned long int s); static void libc5_initialize (long int * x, int n, unsigned long int s); static void glibc2_initialize (long int * x, int n, unsigned long int s); typedef struct { long int x; } random8_state_t; typedef struct { int i, j; long int x[7]; } random32_state_t; typedef struct { int i, j; long
int x[15]; } random64_state_t; typedef struct { int i, j; long int x[31]; } random128_state_t; typedef struct { int i, j; long int x[63]; } random256_state_t; static inline unsigned long int random8_get (void *vstate) { random8_state_t *state = (random8_state_t *) vstate; state->x = (1103515245 * state->x + 12345) & 0x7fffffffUL; return state->x; } static inline long int random_get (int * i, int * j, int n, long int * x) { long int k ; x[*i] += x[*j] ; k = (x[*i] >> 1) & 0x7FFFFFFF ; (*i)++ ; if (*i == n) *i = 0 ; (*j)++ ; if (*j == n) *j = 0 ; return k ; } static inline unsigned long int random32_get (void *vstate) { random32_state_t *state = (random32_state_t *) vstate; unsigned long int k = random_get (&state->i, &state->j, 7, state->x) ; return k ; } static inline unsigned long int random64_get (void *vstate) { random64_state_t *state = (random64_state_t *) vstate; long int k = random_get (&state->i, &state->j, 15, state->x) ; return k ; } static inline unsigned long int random128_get (void *vstate) { random128_state_t *state = (random128_state_t *) vstate; unsigned long int k = random_get (&state->i, &state->j, 31, state->x) ; return k ; } static inline unsigned long int random256_get (void *vstate) { random256_state_t *state = (random256_state_t *) vstate; long int k = random_get (&state->i, &state->j, 63, state->x) ; return k ; } static double random8_get_double (void *vstate) { return random8_get (vstate) / 2147483648.0 ; } static double random32_get_double (void *vstate) { return random32_get (vstate) / 2147483648.0 ; } static double random64_get_double (void *vstate) { return random64_get (vstate) / 2147483648.0 ; } static double random128_get_double (void *vstate) { return random128_get (vstate) / 2147483648.0 ; } static double random256_get_double (void *vstate) { return random256_get (vstate) / 2147483648.0 ; } static void random8_bsd_set (void *vstate, unsigned long int s) { random8_state_t *state = (random8_state_t *) vstate; if (s == 0) s = 1;
state->x = s; } static void random32_bsd_set (void *vstate, unsigned long int s) { random32_state_t *state = (random32_state_t *) vstate; int i; bsd_initialize (state->x, 7, s) ; state->i = 3; state->j = 0; for (i = 0 ; i < 10 * 7 ; i++) random32_get (state) ; } static void random64_bsd_set (void *vstate, unsigned long int s) { random64_state_t *state = (random64_state_t *) vstate; int i; bsd_initialize (state->x, 15, s) ; state->i = 1; state->j = 0; for (i = 0 ; i < 10 * 15 ; i++) random64_get (state) ; } static void random128_bsd_set (void *vstate, unsigned long int s) { random128_state_t *state = (random128_state_t *) vstate; int i; bsd_initialize (state->x, 31, s) ; state->i = 3; state->j = 0; for (i = 0 ; i < 10 * 31 ; i++) random128_get (state) ; } static void random256_bsd_set (void *vstate, unsigned long int s) { random256_state_t *state = (random256_state_t *) vstate; int i; bsd_initialize (state->x, 63, s) ; state->i = 1; state->j = 0; for (i = 0 ; i < 10 * 63 ; i++) random256_get (state) ; } static void bsd_initialize (long int * x, int n, unsigned long int s) { int i; if (s == 0) s = 1 ; x[0] = s; for (i = 1 ; i < n ; i++) x[i] = 1103515245 * x[i-1] + 12345 ; } static void libc5_initialize (long int * x, int n, unsigned long int s) { int i; if (s == 0) s = 1 ; x[0] = s; for (i = 1 ; i < n ; i++) x[i] = 1103515145 * x[i-1] + 12345 ; } static void glibc2_initialize (long int * x, int n, unsigned long int s) { int i; if (s == 0) s = 1 ; x[0] = s; for (i = 1 ; i < n ; i++) { const long int h = s / 127773; const long int t = 16807 * (s - h * 127773) - h * 2836; if (t < 0) { s = t + 2147483647 ; } else { s = t ; } x[i] = s ; } } static void random8_glibc2_set (void *vstate, unsigned long int s) { random8_state_t *state = (random8_state_t *) vstate; if (s == 0) s = 1; state->x = s; } static void random32_glibc2_set (void *vstate, unsigned long int s) { random32_state_t *state = (random32_state_t *) vstate; int i; glibc2_initialize (state->x, 7, s) ; state->i =
3; state->j = 0; for (i = 0 ; i < 10 * 7 ; i++) random32_get (state) ; } static void random64_glibc2_set (void *vstate, unsigned long int s) { random64_state_t *state = (random64_state_t *) vstate; int i; glibc2_initialize (state->x, 15, s) ; state->i = 1; state->j = 0; for (i = 0 ; i < 10 * 15 ; i++) random64_get (state) ; } static void random128_glibc2_set (void *vstate, unsigned long int s) { random128_state_t *state = (random128_state_t *) vstate; int i; glibc2_initialize (state->x, 31, s) ; state->i = 3; state->j = 0; for (i = 0 ; i < 10 * 31 ; i++) random128_get (state) ; } static void random256_glibc2_set (void *vstate, unsigned long int s) { random256_state_t *state = (random256_state_t *) vstate; int i; glibc2_initialize (state->x, 63, s) ; state->i = 1; state->j = 0; for (i = 0 ; i < 10 * 63 ; i++) random256_get (state) ; } static void random8_libc5_set (void *vstate, unsigned long int s) { random8_state_t *state = (random8_state_t *) vstate; if (s == 0) s = 1; state->x = s; } static void random32_libc5_set (void *vstate, unsigned long int s) { random32_state_t *state = (random32_state_t *) vstate; int i; libc5_initialize (state->x, 7, s) ; state->i = 3; state->j = 0; for (i = 0 ; i < 10 * 7 ; i++) random32_get (state) ; } static void random64_libc5_set (void *vstate, unsigned long int s) { random64_state_t *state = (random64_state_t *) vstate; int i; libc5_initialize (state->x, 15, s) ; state->i = 1; state->j = 0; for (i = 0 ; i < 10 * 15 ; i++) random64_get (state) ; } static void random128_libc5_set (void *vstate, unsigned long int s) { random128_state_t *state = (random128_state_t *) vstate; int i; libc5_initialize (state->x, 31, s) ; state->i = 3; state->j = 0; for (i = 0 ; i < 10 * 31 ; i++) random128_get (state) ; } static void random256_libc5_set (void *vstate, unsigned long int s) { random256_state_t *state = (random256_state_t *) vstate; int i; libc5_initialize (state->x, 63, s) ; state->i = 1; state->j = 0; for (i = 0 ; i < 10 * 63 ; i++)
random256_get (state) ; } static const gsl_rng_type random_glibc2_type = {"random-glibc2", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random128_state_t), &random128_glibc2_set, &random128_get, &random128_get_double}; static const gsl_rng_type random8_glibc2_type = {"random8-glibc2", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random8_state_t), &random8_glibc2_set, &random8_get, &random8_get_double}; static const gsl_rng_type random32_glibc2_type = {"random32-glibc2", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random32_state_t), &random32_glibc2_set, &random32_get, &random32_get_double}; static const gsl_rng_type random64_glibc2_type = {"random64-glibc2", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random64_state_t), &random64_glibc2_set, &random64_get, &random64_get_double}; static const gsl_rng_type random128_glibc2_type = {"random128-glibc2", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random128_state_t), &random128_glibc2_set, &random128_get, &random128_get_double}; static const gsl_rng_type random256_glibc2_type = {"random256-glibc2", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random256_state_t), &random256_glibc2_set, &random256_get, &random256_get_double}; static const gsl_rng_type random_libc5_type = {"random-libc5", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random128_state_t), &random128_libc5_set, &random128_get, &random128_get_double}; static const gsl_rng_type random8_libc5_type = {"random8-libc5", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random8_state_t), &random8_libc5_set, &random8_get, &random8_get_double}; static const gsl_rng_type random32_libc5_type = {"random32-libc5", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random32_state_t), &random32_libc5_set, &random32_get, &random32_get_double}; static const gsl_rng_type random64_libc5_type =
{"random64-libc5", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random64_state_t), &random64_libc5_set, &random64_get, &random64_get_double}; static const gsl_rng_type random128_libc5_type = {"random128-libc5", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random128_state_t), &random128_libc5_set, &random128_get, &random128_get_double}; static const gsl_rng_type random256_libc5_type = {"random256-libc5", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random256_state_t), &random256_libc5_set, &random256_get, &random256_get_double}; static const gsl_rng_type random_bsd_type = {"random-bsd", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random128_state_t), &random128_bsd_set, &random128_get, &random128_get_double}; static const gsl_rng_type random8_bsd_type = {"random8-bsd", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random8_state_t), &random8_bsd_set, &random8_get, &random8_get_double}; static const gsl_rng_type random32_bsd_type = {"random32-bsd", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random32_state_t), &random32_bsd_set, &random32_get, &random32_get_double}; static const gsl_rng_type random64_bsd_type = {"random64-bsd", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random64_state_t), &random64_bsd_set, &random64_get, &random64_get_double}; static const gsl_rng_type random128_bsd_type = {"random128-bsd", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random128_state_t), &random128_bsd_set, &random128_get, &random128_get_double}; static const gsl_rng_type random256_bsd_type = {"random256-bsd", /* name */ 0x7fffffffUL, /* RAND_MAX */ 0, /* RAND_MIN */ sizeof (random256_state_t), &random256_bsd_set, &random256_get, &random256_get_double}; const gsl_rng_type *gsl_rng_random_libc5 = &random_libc5_type; const gsl_rng_type *gsl_rng_random8_libc5 = &random8_libc5_type; const gsl_rng_type
*gsl_rng_random32_libc5 = &random32_libc5_type; const gsl_rng_type *gsl_rng_random64_libc5 = &random64_libc5_type; const gsl_rng_type *gsl_rng_random128_libc5 = &random128_libc5_type; const gsl_rng_type *gsl_rng_random256_libc5 = &random256_libc5_type; const gsl_rng_type *gsl_rng_random_glibc2 = &random_glibc2_type; const gsl_rng_type *gsl_rng_random8_glibc2 = &random8_glibc2_type; const gsl_rng_type *gsl_rng_random32_glibc2 = &random32_glibc2_type; const gsl_rng_type *gsl_rng_random64_glibc2 = &random64_glibc2_type; const gsl_rng_type *gsl_rng_random128_glibc2 = &random128_glibc2_type; const gsl_rng_type *gsl_rng_random256_glibc2 = &random256_glibc2_type; const gsl_rng_type *gsl_rng_random_bsd = &random_bsd_type; const gsl_rng_type *gsl_rng_random8_bsd = &random8_bsd_type; const gsl_rng_type *gsl_rng_random32_bsd = &random32_bsd_type; const gsl_rng_type *gsl_rng_random64_bsd = &random64_bsd_type; const gsl_rng_type *gsl_rng_random128_bsd = &random128_bsd_type; const gsl_rng_type *gsl_rng_random256_bsd = &random256_bsd_type;
{ "alphanum_fraction": 0.6398049375, "avg_line_length": 24.9695585997, "ext": "c", "hexsha": "08697a974e874bbf49e735d6defd5cac5a146d0d", "lang": "C", "max_forks_count": 40, "max_forks_repo_forks_event_max_datetime": "2022-03-03T23:23:37.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-26T15:31:16.000Z", "max_forks_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "manggoguy/parsec-modified", "max_forks_repo_path": "pkgs/libs/gsl/src/rng/random.c", "max_issues_count": 12, "max_issues_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_issues_repo_issues_event_max_datetime": "2022-03-13T03:54:24.000Z", "max_issues_repo_issues_event_min_datetime": "2020-12-15T08:30:19.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "manggoguy/parsec-modified", "max_issues_repo_path": "pkgs/libs/gsl/src/rng/random.c", "max_line_length": 81, "max_stars_count": 64, "max_stars_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "manggoguy/parsec-modified", "max_stars_repo_path": "pkgs/libs/gsl/src/rng/random.c", "max_stars_repo_stars_event_max_datetime": "2022-03-24T13:26:53.000Z", "max_stars_repo_stars_event_min_datetime": "2015-03-06T00:30:56.000Z", "num_tokens": 4880, "size": 16405 }
//-*-C++-*- /*************************************************************************** * * Copyright (C) 2010 by Paul Demorest * Licensed under the Academic Free License version 2.1 * ***************************************************************************/ #ifndef __Pulsar_SplineFit_h #define __Pulsar_SplineFit_h #include "Estimate.h" #include "Reference.h" #include <gsl/gsl_bspline.h> #include <gsl/gsl_matrix.h> //! Spline fitting for smoothing and/or interpolation, using //! GSL's bspline routines. class SplineFit : public Reference::Able { public: //! Default constructor SplineFit (); //! Destructor virtual ~SplineFit (); //! Clear all current data, results void reset(); //! Set the degree of the fit void set_order(int n) { order=n; calculated=false; } //! Get the current degree int get_order() { return order; } //! Set uniform breakpoints to span the data void set_uniform_breaks(int nint); //! Add a data point void add_data(double x, Estimate<double> y); //! Compute the fit using current data void compute(); //! Evaluate the fit solution at the given x double evaluate(double x); //! Evaluate the fit solution's derivative at the given x double evaluate_deriv(double x); //! Get the reduced chi2 of the fit double get_rchi2(); protected: //! The x values for the fit std::vector<double> x; //! The y values/errors for the fit std::vector< Estimate<double> > y; //! The spline breakpoints std::vector<double> bp; //! Spline order (0=const, 3=cubic, etc) int order; //! The fit chi2 double chi2; //! Fit NDOF int ndof; //! Has the fit been calculated? bool calculated; //! The fitted coeffs gsl_vector *coeffs; //! The fit cov matrix gsl_matrix *cov; //! Check if a requested x val is in the fit range bool check_range(double x); //! Check if spline intervals and data make sense void interval_check(bool fix=false); //! Free spline workspaces void free_workspaces(); //! bspline temp space gsl_bspline_workspace *bwork; private: }; #endif
{ "alphanum_fraction": 0.6291012839, "avg_line_length": 20.4174757282, "ext": "h", "hexsha": "2c5daca04db580a5d74c706b194bb5b06b4e91c4", "lang": "C", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-02-13T20:08:14.000Z", "max_forks_repo_forks_event_min_datetime": "2020-02-13T20:08:14.000Z", "max_forks_repo_head_hexsha": "453c4dc05b8e901ea661cd02d4f0a30665dcaf35", "max_forks_repo_licenses": [ "AFL-2.1" ], "max_forks_repo_name": "xuanyuanstar/psrchive_CDFT", "max_forks_repo_path": "Util/genutil/SplineFit.h", "max_issues_count": null, "max_issues_repo_head_hexsha": "453c4dc05b8e901ea661cd02d4f0a30665dcaf35", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "AFL-2.1" ], "max_issues_repo_name": "xuanyuanstar/psrchive_CDFT", "max_issues_repo_path": "Util/genutil/SplineFit.h", "max_line_length": 77, "max_stars_count": null, "max_stars_repo_head_hexsha": "453c4dc05b8e901ea661cd02d4f0a30665dcaf35", "max_stars_repo_licenses": [ "AFL-2.1" ], "max_stars_repo_name": "xuanyuanstar/psrchive_CDFT", "max_stars_repo_path": "Util/genutil/SplineFit.h", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 514, "size": 2103 }