hexsha stringlengths 40 40 | size int64 22 2.4M | ext stringclasses 5
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 260 | max_stars_repo_name stringlengths 5 109 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 9 | max_stars_count float64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 260 | max_issues_repo_name stringlengths 5 109 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 9 | max_issues_count float64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 260 | max_forks_repo_name stringlengths 5 109 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 9 | max_forks_count float64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 22 2.4M | avg_line_length float64 5 169k | max_line_length int64 5 786k | alphanum_fraction float64 0.06 0.95 | matches listlengths 1 11 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
591baf35a6529195580c7cf518ad44edb9211e7f | 8,791 | h | C | zircon/system/ulib/fs/include/fs/vfs.h | OpenTrustGroup/fuchsia | 647e593ea661b8bf98dcad2096e20e8950b24a97 | [
"BSD-3-Clause"
] | 1 | 2019-04-21T18:02:26.000Z | 2019-04-21T18:02:26.000Z | zircon/system/ulib/fs/include/fs/vfs.h | OpenTrustGroup/fuchsia | 647e593ea661b8bf98dcad2096e20e8950b24a97 | [
"BSD-3-Clause"
] | 16 | 2020-09-04T19:01:11.000Z | 2021-05-28T03:23:09.000Z | zircon/system/ulib/fs/include/fs/vfs.h | OpenTrustGroup/fuchsia | 647e593ea661b8bf98dcad2096e20e8950b24a97 | [
"BSD-3-Clause"
] | null | null | null | // Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#pragma once
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <lib/fdio/vfs.h>
#include <fs/locking.h>
#include <zircon/assert.h>
#include <zircon/compiler.h>
#include <zircon/device/vfs.h>
#include <zircon/types.h>
#ifdef __Fuchsia__
#include <lib/async/dispatcher.h>
#include <lib/fdio/io.h>
#include <lib/zx/channel.h>
#include <lib/zx/event.h>
#include <lib/zx/vmo.h>
#include <fbl/intrusive_hash_table.h>
#include <fbl/mutex.h>
#include <fs/client.h>
#include <fs/mount_channel.h>
#include <fs/vnode.h>
#endif // __Fuchsia__
#include <fbl/function.h>
#include <fbl/intrusive_double_list.h>
#include <fbl/macros.h>
#include <fbl/ref_counted.h>
#include <fbl/ref_ptr.h>
#include <fbl/string_piece.h>
#include <fbl/unique_ptr.h>
#include <utility>
namespace fs {
class Connection;
class Vnode;
inline constexpr bool IsVnodeRefOnly(uint32_t flags) { return flags & ZX_FS_FLAG_VNODE_REF_ONLY; }
inline constexpr bool IsWritable(uint32_t flags) { return flags & ZX_FS_RIGHT_WRITABLE; }
inline constexpr bool IsReadable(uint32_t flags) { return flags & ZX_FS_RIGHT_READABLE; }
inline constexpr bool HasAdminRight(uint32_t flags) { return flags & ZX_FS_RIGHT_ADMIN; }
// A storage class for a vdircookie which is passed to Readdir.
// Common vnode implementations may use this struct as scratch
// space, or cast it to an alternative structure of the same
// size (or smaller).
//
// TODO(smklein): To implement seekdir and telldir, the size
// of this vdircookie may need to shrink to a 'long'.
typedef struct vdircookie {
void Reset() { memset(this, 0, sizeof(struct vdircookie)); }
uint64_t n;
void* p;
} vdircookie_t;
// The Vfs object contains global per-filesystem state, which
// may be valid across a collection of Vnodes.
//
// The Vfs object must outlive the Vnodes which it serves.
//
// This class is thread-safe.
class Vfs {
public:
Vfs();
virtual ~Vfs();
// Traverse the path to the target vnode, and create / open it using
// the underlying filesystem functions (lookup, create, open).
//
// If the node represented by |path| contains a remote node,
// set |pathout| to the remaining portion of the path yet to
// be traversed (or ".", if the endpoint of |path| is the mount point),
// and return the node containing the node in |out|.
zx_status_t Open(fbl::RefPtr<Vnode> vn, fbl::RefPtr<Vnode>* out, fbl::StringPiece path,
fbl::StringPiece* pathout, uint32_t flags, uint32_t mode)
FS_TA_EXCLUDES(vfs_lock_);
zx_status_t Unlink(fbl::RefPtr<Vnode> vn, fbl::StringPiece path) FS_TA_EXCLUDES(vfs_lock_);
// Sets whether this file system is read-only.
void SetReadonly(bool value) FS_TA_EXCLUDES(vfs_lock_);
#ifdef __Fuchsia__
// Unmounts the underlying filesystem.
//
// The closure may be invoked before or after |Shutdown| returns.
using ShutdownCallback = fbl::Function<void(zx_status_t status)>;
virtual void Shutdown(ShutdownCallback closure) = 0;
// Identifies if the filesystem is in the process of terminating.
// May be checked by active connections, which, upon reading new
// port packets, should ignore them and close immediately.
virtual bool IsTerminating() const = 0;
void TokenDiscard(zx::event ios_token) FS_TA_EXCLUDES(vfs_lock_);
zx_status_t VnodeToToken(fbl::RefPtr<Vnode> vn, zx::event* ios_token, zx::event* out)
FS_TA_EXCLUDES(vfs_lock_);
zx_status_t Link(zx::event token, fbl::RefPtr<Vnode> oldparent, fbl::StringPiece oldStr,
fbl::StringPiece newStr) FS_TA_EXCLUDES(vfs_lock_);
zx_status_t Rename(zx::event token, fbl::RefPtr<Vnode> oldparent, fbl::StringPiece oldStr,
fbl::StringPiece newStr) FS_TA_EXCLUDES(vfs_lock_);
// Calls readdir on the Vnode while holding the vfs_lock, preventing path
// modification operations for the duration of the operation.
zx_status_t Readdir(Vnode* vn, vdircookie_t* cookie, void* dirents, size_t len,
size_t* out_actual) FS_TA_EXCLUDES(vfs_lock_);
Vfs(async_dispatcher_t* dispatcher);
async_dispatcher_t* dispatcher() { return dispatcher_; }
void SetDispatcher(async_dispatcher_t* dispatcher) { dispatcher_ = dispatcher; }
// Begins serving VFS messages over the specified connection.
zx_status_t ServeConnection(fbl::unique_ptr<Connection> connection) FS_TA_EXCLUDES(vfs_lock_);
// Called by a VFS connection when it is closed remotely.
// The VFS is now responsible for destroying the connection.
void OnConnectionClosedRemotely(Connection* connection) FS_TA_EXCLUDES(vfs_lock_);
// Serves a Vnode over the specified channel (used for creating new filesystems)
zx_status_t ServeDirectory(fbl::RefPtr<Vnode> vn, zx::channel channel, uint32_t rights);
// Convenience wrapper over |ServeDirectory| with maximum default permissions.
zx_status_t ServeDirectory(fbl::RefPtr<Vnode> vn, zx::channel channel) {
return ServeDirectory(vn, std::move(channel), ZX_FS_RIGHTS);
}
// Pins a handle to a remote filesystem onto a vnode, if possible.
zx_status_t InstallRemote(fbl::RefPtr<Vnode> vn, MountChannel h) FS_TA_EXCLUDES(vfs_lock_);
// Create and mount a directory with a provided name
zx_status_t MountMkdir(fbl::RefPtr<Vnode> vn, fbl::StringPiece name, MountChannel h,
uint32_t flags) FS_TA_EXCLUDES(vfs_lock_);
// Unpin a handle to a remote filesystem from a vnode, if one exists.
zx_status_t UninstallRemote(fbl::RefPtr<Vnode> vn, zx::channel* h) FS_TA_EXCLUDES(vfs_lock_);
// Forwards an open request to a remote handle.
// If the remote handle is closed (handing off returns ZX_ERR_PEER_CLOSED),
// it is automatically unmounted.
zx_status_t ForwardOpenRemote(fbl::RefPtr<Vnode> vn, zx::channel channel, fbl::StringPiece path,
uint32_t flags, uint32_t mode) FS_TA_EXCLUDES(vfs_lock_);
// Unpins all remote filesystems in the current filesystem, and waits for the
// response of each one with the provided deadline.
zx_status_t UninstallAll(zx_time_t deadline) FS_TA_EXCLUDES(vfs_lock_);
#endif
protected:
// Whether this file system is read-only.
bool ReadonlyLocked() const FS_TA_REQUIRES(vfs_lock_) { return readonly_; }
private:
// Starting at vnode |vn|, walk the tree described by the path string,
// until either there is only one path segment remaining in the string
// or we encounter a vnode that represents a remote filesystem
//
// On success,
// |out| is the vnode at which we stopped searching.
// |pathout| is the remainder of the path to search.
zx_status_t Walk(fbl::RefPtr<Vnode> vn, fbl::RefPtr<Vnode>* out, fbl::StringPiece path,
fbl::StringPiece* pathout) FS_TA_REQUIRES(vfs_lock_);
zx_status_t OpenLocked(fbl::RefPtr<Vnode> vn, fbl::RefPtr<Vnode>* out, fbl::StringPiece path,
fbl::StringPiece* pathout, uint32_t flags, uint32_t mode)
FS_TA_REQUIRES(vfs_lock_);
bool readonly_{};
#ifdef __Fuchsia__
zx_status_t TokenToVnode(zx::event token, fbl::RefPtr<Vnode>* out) FS_TA_REQUIRES(vfs_lock_);
zx_status_t InstallRemoteLocked(fbl::RefPtr<Vnode> vn, MountChannel h) FS_TA_REQUIRES(vfs_lock_);
zx_status_t UninstallRemoteLocked(fbl::RefPtr<Vnode> vn, zx::channel* h)
FS_TA_REQUIRES(vfs_lock_);
fbl::HashTable<zx_koid_t, std::unique_ptr<VnodeToken>> vnode_tokens_;
// Non-intrusive node in linked list of vnodes acting as mount points
class MountNode final : public fbl::DoublyLinkedListable<fbl::unique_ptr<MountNode>> {
public:
using ListType = fbl::DoublyLinkedList<fbl::unique_ptr<MountNode>>;
constexpr MountNode();
~MountNode();
void SetNode(fbl::RefPtr<Vnode> vn);
zx::channel ReleaseRemote();
bool VnodeMatch(fbl::RefPtr<Vnode> vn) const;
private:
fbl::RefPtr<Vnode> vn_;
};
// The mount list is a global static variable, but it only uses
// constexpr constructors during initialization. As a consequence,
// the .init_array section of the compiled vfs-mount object file is
// empty; "remote_list" is a member of the bss section.
MountNode::ListType remote_list_ FS_TA_GUARDED(vfs_lock_){};
async_dispatcher_t* dispatcher_{};
protected:
// A lock which should be used to protect lookup and walk operations
mtx_t vfs_lock_{};
// Starts tracking the lifetime of the connection.
virtual void RegisterConnection(fbl::unique_ptr<Connection> connection) = 0;
// Stops tracking the lifetime of the connection.
virtual void UnregisterConnection(Connection* connection) = 0;
#endif // ifdef __Fuchsia__
};
} // namespace fs
| 38.89823 | 99 | 0.734615 | [
"object"
] |
591c958ddb98b95018cae2cdc31d7746e3a480b7 | 6,393 | h | C | resonance_audio/base/constants_and_types.h | seba10000/resonance-audio | e1923fe6fe733ae4d3c8460ff883c87e2ad05d6b | [
"Apache-2.0"
] | 396 | 2018-03-14T09:55:52.000Z | 2022-03-27T14:58:38.000Z | resonance_audio/base/constants_and_types.h | seba10000/resonance-audio | e1923fe6fe733ae4d3c8460ff883c87e2ad05d6b | [
"Apache-2.0"
] | 46 | 2018-04-18T17:14:29.000Z | 2022-02-19T21:35:57.000Z | resonance_audio/base/constants_and_types.h | seba10000/resonance-audio | e1923fe6fe733ae4d3c8460ff883c87e2ad05d6b | [
"Apache-2.0"
] | 96 | 2018-03-14T17:20:50.000Z | 2022-03-03T01:12:37.000Z | /*
Copyright 2018 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS-IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef RESONANCE_AUDIO_BASE_CONSTANTS_AND_TYPES_H_
#define RESONANCE_AUDIO_BASE_CONSTANTS_AND_TYPES_H_
#include <cmath>
#include <string> // for size_t
namespace vraudio {
// Sound object / ambisonic source identifier.
typedef int SourceId;
// Invalid source id that can be used to initialize handler variables during
// class construction.
static const SourceId kInvalidSourceId = -1;
// Defines memory alignment of audio buffers. Note that not only the first
// element of the |data_| buffer is memory aligned but also the address of the
// first elements of the |ChannelView|s.
const size_t kMemoryAlignmentBytes = 64;
// Maximum Ambisonic order currently supported in vr audio, equivalent to High
// Quality sound object rendering mode. This number is limited by a) number of
// HRIR data points used in the binaural renderer; b) size of the lookup table
// controlling the angular spread of a sound source in the Ambisonic Lookup
// Table class.
static const int kMaxSupportedAmbisonicOrder = 3;
// Maximum allowed size of internal buffers.
const size_t kMaxSupportedNumFrames = 16384;
// Number of mono channels.
static const size_t kNumMonoChannels = 1;
// Number of stereo channels.
static const size_t kNumStereoChannels = 2;
// Number of surround 5.1 channels.
static const size_t kNumSurroundFiveDotOneChannels = 6;
// Number of surround 7.1 channels.
static const size_t kNumSurroundSevenDotOneChannels = 8;
// Number of first-order ambisonic channels.
static const size_t kNumFirstOrderAmbisonicChannels = 4;
// Number of second-order ambisonic channels.
static const size_t kNumSecondOrderAmbisonicChannels = 9;
// Number of third-order ambisonic channels.
static const size_t kNumThirdOrderAmbisonicChannels = 16;
// Number of first-order ambisonic with non-diegetic stereo channels.
static const size_t kNumFirstOrderAmbisonicWithNonDiegeticStereoChannels = 6;
// Number of second-order ambisonic with non-diegetic stereo channels.
static const size_t kNumSecondOrderAmbisonicWithNonDiegeticStereoChannels = 11;
// Number of third-order ambisonic with non-diegetic stereo channels.
static const size_t kNumThirdOrderAmbisonicWithNonDiegeticStereoChannels = 18;
// Negative 60dB in amplitude.
static const float kNegative60dbInAmplitude = 0.001f;
// Tolerated error margins for floating points.
static const double kEpsilonDouble = 1e-6;
static const float kEpsilonFloat = 1e-6f;
// Inverse square root of two (equivalent to -3dB audio signal attenuation).
static const float kInverseSqrtTwo = 1.0f / std::sqrt(2.0f);
// Square roots.
static const float kSqrtTwo = std::sqrt(2.0f);
static const float kSqrtThree = std::sqrt(3.0f);
// Pi in radians.
static const float kPi = static_cast<float>(M_PI);
// Half pi in radians.
static const float kHalfPi = static_cast<float>(M_PI / 2.0);
// Two pi in radians.
static const float kTwoPi = static_cast<float>(2.0 * M_PI);
// Defines conversion factor from degrees to radians.
static const float kRadiansFromDegrees = static_cast<float>(M_PI / 180.0);
// Defines conversion factor from radians to degrees.
static const float kDegreesFromRadians = static_cast<float>(180.0 / M_PI);
// The negated natural logarithm of 1000.
static const float kNegativeLog1000 = -std::log(1000.0f);
// The lowest octave band for computing room effects.
static const float kLowestOctaveBandHz = 31.25f;
// Number of octave bands in which room effects are computed.
static const size_t kNumReverbOctaveBands = 9;
// Centers of possible frequency bands up 8 kHz.
// ------------------------------------
// Band no. Low Center High [Frequencies in Hz]
// ------------------------------------
// 0 22 31.25 44.2
// 1 44.2 62.5 88.4
// 2 88.4 125 176.8
// 3 176.8 250 353.6
// 4 353.6 500 707.1
// 5 707.1 1000 1414.2
// 6 1414.2 2000 2828.4
// 7 2828.4 4000 5656.9
// 8 5656.9 8000 11313.7
//--------------------------------------
const float kOctaveBandCentres[kNumReverbOctaveBands] = {
31.25f, 62.5f, 125.0f, 250.0f, 500.0f, 1000.0f, 2000.0f, 4000.0f, 8000.0f};
// Number of surfaces in a shoe-box room.
static const size_t kNumRoomSurfaces = 6;
// Speed of sound in air at 20 degrees Celsius in meters per second.
// http://www.sengpielaudio.com/calculator-speedsound.htm
static const float kSpeedOfSound = 343.0f;
// Locations of the stereo virtual loudspeakers in degrees.
static const float kStereoLeftDegrees = 90.0f;
static const float kStereoRightDegrees = -90.0f;
// Conversion factor from seconds to milliseconds.
static const float kMillisecondsFromSeconds = 1000.0f;
// Conversion factor from milliseconds to seconds.
static const float kSecondsFromMilliseconds = 0.001f;
// Conversion factor from seconds to milliseconds.
static const double kMicrosecondsFromSeconds = 1e6;
// Conversion factor from milliseconds to seconds.
static const double kSecondsFromMicroseconds = 1e-6;
// The distance threshold where the near field effect should fade in.
static const float kNearFieldThreshold = 1.0f;
// Minimum allowed distance of a near field sound source used to cap the allowed
// energy boost.
static const float kMinNearFieldDistance = 0.1f;
// Maximum gain applied by Near Field Effect to the mono source signal.
static const float kMaxNearFieldEffectGain = 9.0f;
// Number of samples across which the gain value should be interpolated for
// a unit gain change of 1.0f.
static const size_t kUnitRampLength = 2048;
// Rotation quantization which applies in ambisonic soundfield rotators.
static const float kRotationQuantizationRad = 1.0f * kRadiansFromDegrees;
} // namespace vraudio
#endif // RESONANCE_AUDIO_BASE_CONSTANTS_AND_TYPES_H_
| 36.118644 | 80 | 0.743782 | [
"object"
] |
592d868666b99e0472047d6cf3d920820d2a5cfc | 2,017 | h | C | tools/mosesdecoder-master/moses/TranslationModel/UG/sapt_pscore_logcnt.h | shasha79/nectm | 600044a6fe2c3a73e0d9327bc85883831a26dcae | [
"Apache-2.0"
] | 3 | 2020-02-28T21:42:44.000Z | 2021-03-12T13:56:16.000Z | tools/mosesdecoder-master/moses/TranslationModel/UG/sapt_pscore_logcnt.h | Pangeamt/nectm | 6b84f048698f2530b9fdbb30695f2e2217c3fbfe | [
"Apache-2.0"
] | 2 | 2020-11-06T14:40:10.000Z | 2020-12-29T19:03:11.000Z | tools/mosesdecoder-master/moses/TranslationModel/UG/sapt_pscore_logcnt.h | Pangeamt/nectm | 6b84f048698f2530b9fdbb30695f2e2217c3fbfe | [
"Apache-2.0"
] | 2 | 2020-03-26T16:05:11.000Z | 2020-08-06T16:35:39.000Z | // -*- c++ -*-
// Phrase scorer that rewards the number of phrase pair occurrences in a bitext
// with the asymptotic function x/(j+x) where x > 0 is a function
// parameter that determines the steepness of the rewards curve
// written by Ulrich Germann
#include "sapt_pscore_base.h"
#include <boost/dynamic_bitset.hpp>
namespace sapt {
template<typename Token>
class
PScoreLogCnt : public PhraseScorer<Token>
{
std::string m_specs;
public:
PScoreLogCnt(std::string const specs)
{
this->m_index = -1;
this->m_specs = specs;
if (specs.find("r1") != std::string::npos) // raw source phrase counts
this->m_feature_names.push_back("log-r1");
if (specs.find("s1") != std::string::npos)
this->m_feature_names.push_back("log-s1"); // L1 sample size
if (specs.find("g1") != std::string::npos) // coherent phrases
this->m_feature_names.push_back("log-g1");
if (specs.find("j") != std::string::npos) // joint counts
this->m_feature_names.push_back("log-j");
if (specs.find("r2") != std::string::npos) // raw target phrase counts
this->m_feature_names.push_back("log-r2");
this->m_num_feats = this->m_feature_names.size();
}
bool
isIntegerValued(int i) const { return true; }
void
operator()(Bitext<Token> const& bt,
PhrasePair<Token>& pp,
std::vector<float> * dest = NULL) const
{
if (!dest) dest = &pp.fvals;
assert(pp.raw1);
assert(pp.sample1);
assert(pp.good1);
assert(pp.joint);
assert(pp.raw2);
size_t i = this->m_index;
if (m_specs.find("r1") != std::string::npos)
(*dest)[i++] = log(pp.raw1);
if (m_specs.find("s1") != std::string::npos)
(*dest)[i++] = log(pp.sample1);
if (m_specs.find("g1") != std::string::npos)
(*dest)[i++] = log(pp.good1);
if (m_specs.find("j") != std::string::npos)
(*dest)[i++] = log(pp.joint);
if (m_specs.find("r2") != std::string::npos)
(*dest)[i] = log(pp.raw2);
}
};
} // namespace sapt
| 32.015873 | 79 | 0.616262 | [
"vector"
] |
59335c6c75b1e0403d9a5bfe56ba2395335c46d1 | 3,144 | h | C | src/tracer/include/agz/tracer/core/camera.h | AirGuanZ/Atrc | a0c4bc1b7bb96ddffff8bb1350f88b651b94d993 | [
"MIT"
] | 358 | 2018-11-29T08:15:05.000Z | 2022-03-31T07:48:37.000Z | src/tracer/include/agz/tracer/core/camera.h | happyfire/Atrc | 74cac111e277be53eddea5638235d97cec96c378 | [
"MIT"
] | 23 | 2019-04-06T17:23:58.000Z | 2022-02-08T14:22:46.000Z | src/tracer/include/agz/tracer/core/camera.h | happyfire/Atrc | 74cac111e277be53eddea5638235d97cec96c378 | [
"MIT"
] | 22 | 2019-03-04T01:47:56.000Z | 2022-01-13T06:06:49.000Z | #pragma once
#include <any>
#include <agz/tracer/common.h>
AGZ_TRACER_BEGIN
/**
* @brief result of sampling we
*/
struct CameraSampleWeResult
{
CameraSampleWeResult(
const FVec3 &pos_on_cam,
const FVec3 &pos_to_out,
const FVec3 &nor_on_cam,
const FSpectrum &throughput) noexcept
: pos_on_cam(pos_on_cam),
pos_to_out(pos_to_out),
nor_on_cam(nor_on_cam),
throughput(throughput)
{
}
FVec3 pos_on_cam;
FVec3 pos_to_out;
FVec3 nor_on_cam;
FSpectrum throughput;
};
/**
* @brief pdf of sampling we
*/
struct CameraWePDFResult
{
CameraWePDFResult(real pdf_pos, real pdf_dir) noexcept
: pdf_pos(pdf_pos), pdf_dir(pdf_dir)
{
}
real pdf_pos;
real pdf_dir;
};
/**
* @brief result of eval we
*/
struct CameraEvalWeResult
{
CameraEvalWeResult(
const FSpectrum &we,
const Vec2 &film_coord,
const FVec3 &nor_on_cam) noexcept
: we(we), film_coord(film_coord), nor_on_cam(nor_on_cam)
{
}
FSpectrum we;
Vec2 film_coord;
FVec3 nor_on_cam;
};
inline const CameraEvalWeResult CAMERA_EVAL_WE_RESULT_ZERO =
CameraEvalWeResult({}, {}, {});
/**
* @brief result of sampling camera wi
*/
struct CameraSampleWiResult
{
CameraSampleWiResult(
const FVec3 &pos_on_cam,
const FVec3 &nor_at_pos,
const FVec3 &ref_to_pos,
const FSpectrum &we,
real pdf,
const Vec2 &film_coord) noexcept
: pos_on_cam(pos_on_cam),
nor_at_pos(nor_at_pos),
ref_to_pos(ref_to_pos),
we(we),
pdf(pdf),
film_coord(film_coord)
{
}
FVec3 pos_on_cam; // position on camera lens
FVec3 nor_at_pos; // lens normal
FVec3 ref_to_pos; // from reference point to position on lens
FSpectrum we; // initial importance function
real pdf = 0; // pdf w.r.t. solid angle at ref
Vec2 film_coord; // where on the film does this sample correspond to
};
inline const CameraSampleWiResult CAMERA_SAMPLE_WI_RESULT_INVALID =
CameraSampleWiResult({}, {}, {}, {}, 0, {});
/**
* @brief camera interface
*/
class Camera
{
public:
virtual ~Camera() = default;
/**
* @brief generate a ray
*
* @param film_coord film coordinate. origin is at the left-bottom corner
* and the coordinate range is [0, 1]^2
* @param aperture_sam used to sample the aperture
*/
virtual CameraSampleWeResult sample_we(
const Vec2 &film_coord, const Sample2 &aperture_sam) const noexcept = 0;
/**
* @brief eval we(pos_on_cam -> pos_to_out)
*/
virtual CameraEvalWeResult eval_we(
const FVec3 &pos_on_cam, const FVec3 &pos_to_out) const noexcept = 0;
/**
* @brief pdf of sample_we
*/
virtual CameraWePDFResult pdf_we(
const FVec3 &pos_on_cam, const FVec3 &pos_to_out) const noexcept = 0;
/**
* @brief sample camera wi
*/
virtual CameraSampleWiResult sample_wi(
const FVec3 &ref, const Sample2 &sam) const noexcept = 0;
};
AGZ_TRACER_END
| 22.140845 | 80 | 0.633906 | [
"solid"
] |
593e97e36f7ff43f8bce674c128b4b0eec70c389 | 4,568 | h | C | src/hssh/metrical/mapping/mapping_params.h | anuranbaka/Vulcan | 56339f77f6cf64b5fda876445a33e72cd15ce028 | [
"MIT"
] | 3 | 2020-03-05T23:56:14.000Z | 2021-02-17T19:06:50.000Z | src/hssh/metrical/mapping/mapping_params.h | anuranbaka/Vulcan | 56339f77f6cf64b5fda876445a33e72cd15ce028 | [
"MIT"
] | 1 | 2021-03-07T01:23:47.000Z | 2021-03-07T01:23:47.000Z | src/hssh/metrical/mapping/mapping_params.h | anuranbaka/Vulcan | 56339f77f6cf64b5fda876445a33e72cd15ce028 | [
"MIT"
] | 1 | 2021-03-03T07:54:16.000Z | 2021-03-03T07:54:16.000Z | /* Copyright (C) 2010-2019, The Regents of The University of Michigan.
All rights reserved.
This software was developed as part of the The Vulcan project in the Intelligent Robotics Lab
under the direction of Benjamin Kuipers, kuipers@umich.edu. Use of this code is governed by an
MIT-style License that can be found at "https://github.com/h2ssh/Vulcan".
*/
/**
* \file mapping_params.h
* \author Collin Johnson
*
* Declaration of the params structs needed by the local_metric mapping subsystem.
*/
#ifndef HSSH_LOCAL_METRIC_MAPPING_MAPPING_PARAMS_H
#define HSSH_LOCAL_METRIC_MAPPING_MAPPING_PARAMS_H
#include <cassert>
#include <string>
namespace vulcan
{
namespace utils
{
class ConfigFile;
}
namespace hssh
{
struct laser_scan_rasterizer_params_t
{
float maxLaserDistance;
uint16_t minLaserIntensity;
int8_t occCellCostChange;
int8_t freeCellCostChange;
int8_t initialOccupiedCostChange;
int8_t initialFreeCostChange;
};
struct lpm_params_t
{
int width;
int height;
float scale;
uint8_t maxCellCost;
uint8_t occupiedCellCost;
uint8_t freeCellCost;
};
struct glass_map_builder_params_t
{
float maxLaserRange;
int numAngleBins;
bool shouldFilterDynamic; // flag indicating if the dynamic object filter should be run
bool shouldFilterReflections; // flag indicating if reflections should be removed from the laser scan
bool canSeeCellsFromBothSides; // have the cells represent 180 degrees, not 360 degrees.
// Implemented such that a hit at theta or theta + 180 results in changing the
// count for the angle bin
int hitThreshold; // number of hits in a bin for it to get marked as a hit while building the map
int missThreshold; // number of misses in a bin for it to get marked as a miss while building the map
double minVisibleOccupiedRange; // [radians] minimum visible range for a cell to be considered occupied in the
// flattened map
double minHighlyVisibleRange; // [radians] minimum visible range for a cell to be highly visible in the dynamic
// object filter
uint16_t minHighlyVisibleIntensity; // [sensor-specific counts] minimum intensity reading for a cell to be
// considered highly-visible glass
};
struct mapper_params_t
{
std::string type;
bool shouldBuildGlassMap;
bool shouldUseMovingLaser;
int maxMapWidthMeters;
int maxMapHeightMeters;
float shiftRadius;
float placeBoundaryRadius;
double maxMappingPositionStdDev;
double maxMappingOrientationStdDev;
double minRadiusOfCurvature;
laser_scan_rasterizer_params_t rasterizerParams;
lpm_params_t lpmParams;
glass_map_builder_params_t glassBuilderParams;
};
/**
* load_mapper_params loads the parameters for mapping subsystem of the local_metric layer of the HSSH.
* In addition to the ConfigFile, the heading under which the parameters are located is provided.
*
* \param config ConfigFile containing the values parsed from a .cfg file
* \param heading Heading under which the parameters are located
* \return mapper_params_t created from the config.
*/
mapper_params_t load_mapper_params(const utils::ConfigFile& config, const std::string& heading);
/**
* load_lpm_params loads the parameter set for an LPM. In addition to
* the ConfigFile, the heading under which the parameters are located is provided. It is likely
* that LPMs will exist in multiple modules, so the heading of their parameters may
* need to change.
*
* \param config File with the parameters
* \param heading Heading under which the parameters are located
* \return Parameters for creating an LPM.
*/
lpm_params_t load_lpm_params(const utils::ConfigFile& config, const std::string& heading);
/**
* load_rasterizer_params loads the parameter set for a rasterizer. In addition to
* the ConfigFile, the heading under which the parameters are located is provided.
*
* \param config File with the parameters
* \param heading Heading under which the parameters are located
* \return Parameters for creating a rasterizer.
*/
laser_scan_rasterizer_params_t load_rasterizer_params(const utils::ConfigFile& config, const std::string& heading);
} // namespace hssh
} // namespace vulcan
#endif // HSSH_LOCAL_METRIC_MAPPING_MAPPING_PARAMS_H
| 34.606061 | 119 | 0.720009 | [
"object"
] |
594286ef73d6d11e7869abdebfb24528d99286cf | 7,195 | h | C | es/include/tencentcloud/es/v20180416/model/KibanaNodeInfo.h | TencentCloud/tencentcloud-sdk-cpp-intl-en | 752c031f5ad2c96868183c5931eae3a42dd5ae6c | [
"Apache-2.0"
] | 1 | 2022-01-27T09:27:34.000Z | 2022-01-27T09:27:34.000Z | es/include/tencentcloud/es/v20180416/model/KibanaNodeInfo.h | TencentCloud/tencentcloud-sdk-cpp-intl-en | 752c031f5ad2c96868183c5931eae3a42dd5ae6c | [
"Apache-2.0"
] | null | null | null | es/include/tencentcloud/es/v20180416/model/KibanaNodeInfo.h | TencentCloud/tencentcloud-sdk-cpp-intl-en | 752c031f5ad2c96868183c5931eae3a42dd5ae6c | [
"Apache-2.0"
] | null | null | null | /*
* Copyright (c) 2017-2019 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TENCENTCLOUD_ES_V20180416_MODEL_KIBANANODEINFO_H_
#define TENCENTCLOUD_ES_V20180416_MODEL_KIBANANODEINFO_H_
#include <string>
#include <vector>
#include <map>
#include <tencentcloud/core/utils/rapidjson/document.h>
#include <tencentcloud/core/utils/rapidjson/writer.h>
#include <tencentcloud/core/utils/rapidjson/stringbuffer.h>
#include <tencentcloud/core/AbstractModel.h>
namespace TencentCloud
{
namespace Es
{
namespace V20180416
{
namespace Model
{
/**
* Kibana node information
*/
class KibanaNodeInfo : public AbstractModel
{
public:
KibanaNodeInfo();
~KibanaNodeInfo() = default;
void ToJsonObject(rapidjson::Value &value, rapidjson::Document::AllocatorType& allocator) const;
CoreInternalOutcome Deserialize(const rapidjson::Value &value);
/**
* 获取Kibana node specification
* @return KibanaNodeType Kibana node specification
*/
std::string GetKibanaNodeType() const;
/**
* 设置Kibana node specification
* @param KibanaNodeType Kibana node specification
*/
void SetKibanaNodeType(const std::string& _kibanaNodeType);
/**
     * Determine whether the KibanaNodeType parameter has been set
     * @return whether KibanaNodeType has been set
     */
    bool KibanaNodeTypeHasBeenSet() const;
    /**
     * Get the number of Kibana nodes
     * @return KibanaNodeNum Number of Kibana nodes
     */
    uint64_t GetKibanaNodeNum() const;
    /**
     * Set the number of Kibana nodes
     * @param KibanaNodeNum Number of Kibana nodes
     */
    void SetKibanaNodeNum(const uint64_t& _kibanaNodeNum);
    /**
     * Determine whether the KibanaNodeNum parameter has been set
     * @return whether KibanaNodeNum has been set
     */
    bool KibanaNodeNumHasBeenSet() const;
    /**
     * Get the number of Kibana node's CPUs
     * @return KibanaNodeCpuNum Number of Kibana node's CPUs
     */
    uint64_t GetKibanaNodeCpuNum() const;
    /**
     * Set the number of Kibana node's CPUs
     * @param KibanaNodeCpuNum Number of Kibana node's CPUs
     */
    void SetKibanaNodeCpuNum(const uint64_t& _kibanaNodeCpuNum);
    /**
     * Determine whether the KibanaNodeCpuNum parameter has been set
     * @return whether KibanaNodeCpuNum has been set
     */
    bool KibanaNodeCpuNumHasBeenSet() const;
    /**
     * Get the Kibana node's memory in GB
     * @return KibanaNodeMemSize Kibana node's memory in GB
     */
    uint64_t GetKibanaNodeMemSize() const;
    /**
     * Set the Kibana node's memory in GB
     * @param KibanaNodeMemSize Kibana node's memory in GB
     */
    void SetKibanaNodeMemSize(const uint64_t& _kibanaNodeMemSize);
    /**
     * Determine whether the KibanaNodeMemSize parameter has been set
     * @return whether KibanaNodeMemSize has been set
     */
    bool KibanaNodeMemSizeHasBeenSet() const;
    /**
     * Get the Kibana node's disk type
     * @return KibanaNodeDiskType Kibana node's disk type
     */
    std::string GetKibanaNodeDiskType() const;
    /**
     * Set the Kibana node's disk type
     * @param KibanaNodeDiskType Kibana node's disk type
     */
    void SetKibanaNodeDiskType(const std::string& _kibanaNodeDiskType);
    /**
     * Determine whether the KibanaNodeDiskType parameter has been set
     * @return whether KibanaNodeDiskType has been set
     */
    bool KibanaNodeDiskTypeHasBeenSet() const;
    /**
     * Get the Kibana node's disk size
     * @return KibanaNodeDiskSize Kibana node's disk size
     */
    uint64_t GetKibanaNodeDiskSize() const;
    /**
     * Set the Kibana node's disk size
     * @param KibanaNodeDiskSize Kibana node's disk size
     */
    void SetKibanaNodeDiskSize(const uint64_t& _kibanaNodeDiskSize);
    /**
     * Determine whether the KibanaNodeDiskSize parameter has been set
     * @return whether KibanaNodeDiskSize has been set
     */
    bool KibanaNodeDiskSizeHasBeenSet() const;
private:
    /**
     * Kibana node specification
     */
    std::string m_kibanaNodeType;
    // True once KibanaNodeType has been assigned.
    bool m_kibanaNodeTypeHasBeenSet;
    /**
     * Number of Kibana nodes
     */
    uint64_t m_kibanaNodeNum;
    // True once KibanaNodeNum has been assigned.
    bool m_kibanaNodeNumHasBeenSet;
    /**
     * Number of Kibana node's CPUs
     */
    uint64_t m_kibanaNodeCpuNum;
    // True once KibanaNodeCpuNum has been assigned.
    bool m_kibanaNodeCpuNumHasBeenSet;
    /**
     * Kibana node's memory in GB
     */
    uint64_t m_kibanaNodeMemSize;
    // True once KibanaNodeMemSize has been assigned.
    bool m_kibanaNodeMemSizeHasBeenSet;
    /**
     * Kibana node's disk type
     */
    std::string m_kibanaNodeDiskType;
    // True once KibanaNodeDiskType has been assigned.
    bool m_kibanaNodeDiskTypeHasBeenSet;
    /**
     * Kibana node's disk size
     */
    uint64_t m_kibanaNodeDiskSize;
    // True once KibanaNodeDiskSize has been assigned.
    bool m_kibanaNodeDiskSizeHasBeenSet;
};
}
}
}
}
#endif // !TENCENTCLOUD_ES_V20180416_MODEL_KIBANANODEINFO_H_
| 35.618812 | 116 | 0.479222 | [
"vector",
"model"
] |
0d8f545a64eb5abd750942574f07be3bc7c9e459 | 598 | h | C | sound.h | DMPRO2021-Audio/mcu | 435e2a197e0a9e55e527ed9fab7d0e77ade071e3 | [
"MIT"
] | null | null | null | sound.h | DMPRO2021-Audio/mcu | 435e2a197e0a9e55e527ed9fab7d0e77ade071e3 | [
"MIT"
] | 4 | 2021-09-28T13:14:05.000Z | 2021-11-16T17:24:15.000Z | sound.h | DMPRO2021-Audio/mcu | 435e2a197e0a9e55e527ed9fab7d0e77ade071e3 | [
"MIT"
] | null | null | null | #ifndef SOUNDH
#define SOUNDH
#include <stdint.h>
#include "synth.h"
/* Reference tuning: frequency (Hz) and MIDI note number of concert A. */
#define CONCERT_PITCH 442
#define CONCERT_PITCH_NOTE 69
/* Number of semitones in one octave. */
#define OCTAVE 12
/* Indices into reverb_presets[]; REVERB_PRESET_LEN is the table size. */
#define REVERB_PRESET_LEN 4
#define REVERB_PRESET_OFF 0
#define REVERB_PRESET_SMALL_ROOM 1
#define REVERB_PRESET_LARGE_ROOM 2
#define REVERB_PRESET_HALL 3
/* Reverb configurations, indexed by the REVERB_PRESET_* constants. */
extern const Reverb reverb_presets[REVERB_PRESET_LEN];
/* An instrument definition: a waveform selector plus the envelopes
 * used while a key is pressed and after it is released.
 * (Field semantics are defined by synth.h.) */
typedef struct {
    uint8_t shape;                        /* waveform selector (see synth.h) */
    const EnvelopeStep *press_envelope;   /* envelope while key is held */
    const EnvelopeStep *release_envelope; /* envelope after key release */
} Program;
/* Built-in instrument table and its length. */
extern const Program programs[];
extern const int num_programs;
/* Convert a (possibly fractional) MIDI note number to a frequency value.
 * NOTE(review): the unit/encoding of the returned uint32_t is defined by
 * the implementation - confirm before relying on it. */
uint32_t freq_from_note(float note);
#endif
| 19.290323 | 54 | 0.802676 | [
"shape"
] |
0db69e37a7e5b6fb3dabbdeecb617835b07cbc37 | 1,199 | h | C | ios/chrome/browser/ui/settings/cells/byo_textfield_item.h | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | ios/chrome/browser/ui/settings/cells/byo_textfield_item.h | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | ios/chrome/browser/ui/settings/cells/byo_textfield_item.h | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_BROWSER_UI_SETTINGS_CELLS_BYO_TEXTFIELD_ITEM_H_
#define IOS_CHROME_BROWSER_UI_SETTINGS_CELLS_BYO_TEXTFIELD_ITEM_H_
#import <UIKit/UIKit.h>
#import "ios/chrome/browser/ui/table_view/cells/table_view_item.h"
// Bring-your-own-text-field: Item that hosts a text field provided by the user
// of this class.
// Useful for adding a text field to a table view, where the view may be
// scrolled out of view and back in. By using this object, even if the table
// view cell containing the text field is cleared and reused, the user-entered
// content remains unchanged by using the same text field. Not recommended for
// large models, as the text field is not reused.
@interface BYOTextFieldItem : TableViewItem
// The text field that will be installed in the cell during -configureCell:.
// Held strongly so the same field (and its user-entered text) survives cell
// reuse; the text field is supplied by the user of this class.
@property(nonatomic, strong) UITextField* textField;
@end
// Cell class associated to BYOTextFieldItem.
@interface BYOTextFieldCell : TableViewCell
@end
#endif // IOS_CHROME_BROWSER_UI_SETTINGS_CELLS_BYO_TEXTFIELD_ITEM_H_
| 38.677419 | 79 | 0.794829 | [
"object"
] |
0dbbe35e404f9c2020980da6f831a61d077ee693 | 7,087 | h | C | SourceCode/include/Math/Matrix4X4f.h | LalaChen/SDEngine | ae5931308ae8b02f4237a1e26ef8448f773f9b7a | [
"MIT"
] | null | null | null | SourceCode/include/Math/Matrix4X4f.h | LalaChen/SDEngine | ae5931308ae8b02f4237a1e26ef8448f773f9b7a | [
"MIT"
] | null | null | null | SourceCode/include/Math/Matrix4X4f.h | LalaChen/SDEngine | ae5931308ae8b02f4237a1e26ef8448f773f9b7a | [
"MIT"
] | null | null | null | /*
MIT License
Copyright (c) 2019 Kuan-Chih, Chen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/*! \file Matrix4X4f.h
* \brief 4X4 matrix.
* \author Kuan-Chih, Chen
* \date 2019/04/18
* \copyright MIT License.
*/
#pragma once
#include <string>
#include <glm/glm.hpp>
#include <glm/gtx/matrix_decompose.hpp>
#include "SDEngineMacro.h"
_______________SD_START_MATH_NAMESPACE_______________
class Quaternion;
class Vector3f;
/*! \class Matrix4X4f
* Class Matrix4X4f is used to represent the transformation of objects in 3D space. Please note that
* the Matrix4X4f is column-major. (glm::mat4)
*/
class SDENGINE_CLASS Matrix4X4f
{
public:
    /*! \fn static bool decompose(const Matrix4X4f &i_mat, Vector3f &io_scale, Quaternion &io_rot, Vector3f &io_skew, Vector3f &io_translation, Vector3f &io_prespective);
     * \param [in] i_mat target matrix.
     * \param [inout] io_scale the scale factor decomposed from matrix.
     * \param [inout] io_rot the rotation decomposed from matrix.
     * \param [inout] io_skew the shearing decomposed from matrix.
     * \param [inout] io_translation the translation decomposed from matrix.
     * \param [inout] io_prespective the perspective component decomposed from matrix.
     * \brief decompose matrix to transformation.
     *        NOTE(review): presumably implemented via glm::decompose (this header
     *        includes glm/gtx/matrix_decompose.hpp) - confirm in the implementation.
     * \return true if the decomposition succeeded.
     */
    static bool decompose(const Matrix4X4f &i_mat, Vector3f &io_scale, Quaternion &io_rot, Vector3f &io_skew, Vector3f &io_translation, Vector3f &io_prespective);
public:
    /*! \fn Matrix4X4f();
     * \brief Default constructor. Initialize matrix by identity.
     */
    Matrix4X4f();
    /*! \fn Matrix4X4f(float i_datas[]);
     * \param [in] i_datas array of 16 floats used to initialize the matrix.
     *             NOTE(review): assumed column-major to match glm::mat4 - confirm.
     * \brief array constructor.
     */
    Matrix4X4f(float i_datas[]);
    /*! \fn Matrix4X4f(const Matrix4X4f &i_src);
     * \param [in] i_src target matrix
     * \brief Copy constructor.
     */
    Matrix4X4f(const Matrix4X4f &i_src);
    /*! \fn Matrix4X4f(const glm::mat4 &i_src);
     * \param [in] i_src target matrix
     * \brief Conversion constructor from a raw glm matrix.
     */
    Matrix4X4f(const glm::mat4 &i_src);
    /*! \fn ~Matrix4X4f();
     * \brief Destructor.
     */
    ~Matrix4X4f();
public: //operator
    /*! \fn Matrix4X4f& operator=(const Matrix4X4f &i_src);
     * \param [in] i_src target matrix.
     * \brief Assign operator.
     */
    Matrix4X4f& operator=(const Matrix4X4f &i_src);
    /*! \fn Matrix4X4f& operator*=(const Matrix4X4f &i_src);
     * \param [in] i_src target matrix.
     * \brief *= operator.
     */
    Matrix4X4f& operator*=(const Matrix4X4f &i_src);
    /*! \fn Matrix4X4f operator*(const Matrix4X4f &i_src) const;
     * \param [in] i_src target matrix.
     * \brief * operator.
     */
    Matrix4X4f operator*(const Matrix4X4f &i_src) const;
    /*! \fn Vector3f operator*(const Vector3f &i_src) const;
     * \param [in] i_src target position
     * \brief * operator for transforming the i_src.
     */
    Vector3f operator*(const Vector3f &i_src) const;
    /*! \fn Matrix4X4f inverse() const;
     * \brief return inverse matrix.
     */
    Matrix4X4f inverse() const;
    /*! \fn Matrix4X4f transpose() const;
     * \brief return transpose matrix.
     */
    Matrix4X4f transpose() const;
    //--------------- World Space Using --------------------
    /*! \fn void translate(const Vector3f &i_trans);
     * \param [in] i_trans set translation.
     * \brief translate matrix.
     */
    void translate(const Vector3f &i_trans);
    /*! \fn void rotate(const Quaternion &i_rotate);
     * \param [in] i_rotate set rotate matrix.
     * \brief rotate matrix.
     */
    void rotate(const Quaternion &i_rotate);
    /*! \fn void scale(float i_scale);
     * \param [in] i_scale set uniform scale factor.
     * \brief scale matrix.
     */
    void scale(float i_scale);
    /*! \fn void scale(const Vector3f &i_scale);
     * \param [in] i_scale set scale factor for (x,y,z).
     * \brief scale matrix.
     */
    void scale(const Vector3f &i_scale);
    //----------------- View Space Using ---------------------
    /*! \fn void lookAt(const Vector3f &i_eye, const Vector3f &i_view_center, const Vector3f &i_up);
     * \param [in] i_eye eye position.
     * \param [in] i_view_center set view center.
     * \param [in] i_up view up.
     * \brief set view matrix.
     */
    void lookAt(const Vector3f &i_eye, const Vector3f &i_view_center, const Vector3f &i_up);
    //--------------- Project Space Using --------------------
    /*! \fn void perspective(float i_fovy, float i_aspect, float i_near, float i_far);
     * \param [in] i_fovy vertical field-of-view angle.
     *             NOTE(review): unit (radians vs degrees) depends on the glm
     *             configuration - confirm in the implementation.
     * \param [in] i_aspect set ratio between w and h.
     * \param [in] i_near near plane distance.
     * \param [in] i_far far plane distance.
     * \brief set perspective projection matrix.
     */
    void perspective(float i_fovy, float i_aspect, float i_near, float i_far);
    /*! \fn void ortho(float i_left, float i_right, float i_bottom, float i_top, float i_near, float i_far);
     * \param [in] i_left x left.
     * \param [in] i_right x right.
     * \param [in] i_bottom set y bottom.
     * \param [in] i_top y top.
     * \param [in] i_near set z near.
     * \param [in] i_far set z far.
     * \brief set orthogonal projection matrix.
     */
    void ortho(float i_left, float i_right, float i_bottom, float i_top, float i_near, float i_far);
public:
    /*! \fn std::string ToString() const;
     * \brief Return a string representation of this matrix.
     */
    std::string ToString() const;
    /*! \fn std::string ToFormatString(const std::string &i_mat_name, const std::string &i_prefix) const;
     * \param [in] i_mat_name Name used to label the matrix in the output.
     * \param [in] i_prefix Prefix prepended to each output line.
     * \brief Return a formatted string representation of this matrix.
     */
    std::string ToFormatString(const std::string &i_mat_name, const std::string &i_prefix) const;
    /*! \fn const float* GetDataAddr() const;
     * \brief return raw pointer to the underlying float data.
     */
    const float* GetDataAddr() const;
public:
    /*! \var glm::mat4 m_matrix;
     * \brief matrix data (column-major glm::mat4).
     */
    glm::mat4 m_matrix;
};
________________SD_END_MATH_NAMESPACE________________ | 34.570732 | 162 | 0.658812 | [
"3d"
] |
0dcfb0318d3aecef6024fa88a28e9885fc6ce84b | 1,291 | h | C | src/universe/vehicle/wire/Port.h | Ghimli/new-ospgl | 31bd84e52d954683671211ff16ce8702bdb87312 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | src/universe/vehicle/wire/Port.h | Ghimli/new-ospgl | 31bd84e52d954683671211ff16ce8702bdb87312 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | src/universe/vehicle/wire/Port.h | Ghimli/new-ospgl | 31bd84e52d954683671211ff16ce8702bdb87312 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | #pragma once
#include <stdint.h>
#include <string>
#include "../../../util/Logger.h"
#include <sol.hpp>
class Machine;
class Port;
struct PortValue
{
    // The set of data types a port value can carry.
    enum Type
    {
        NUMBER
    };
    // Storage for the value; the active member is indicated by 'type'.
    union
    {
        double as_number;
    };
    // Discriminator for the union above.
    Type type;
    // Translate a Type to its string name, and a string name back to a Type.
    static std::string get_name(Type type);
    static Type get_type(const std::string& str);
    // Invoke the given lua function for this value on the given port.
    // NOTE(review): exact argument marshalling is defined in the implementation.
    void call_lua(sol::safe_function& func, Port* port);
    // Construct a NUMBER value holding 'v'.
    PortValue(double v);
};
// Outcome of a port operation.
struct PortResult
{
    // Result codes (names are self-describing; see implementation for details).
    enum ResultType
    {
        GOOD,            // Operation succeeded.
        INVALID_TYPE,    // The value's type was not valid for the port.
        PORT_BLOCKED,    // The target port was blocked.
        PORT_NOT_FOUND   // No port matched the given name.
    };
    ResultType result;
};
class Port
{
public:
    // Used on both directions: on output ports it's used for blocking writing,
    // on receiving ports it's used for handling written-to ports
    // (zero-latency input).
    bool blocked;
    // This is lua's way to differentiate ports, so names need to be unique.
    // This is enforced on port creation.
    // (Port names must be unique inside a part's outputs or inputs: you can
    // have two ports named "x" on the same vehicle, and you can even have two
    // ports named "x" on the same part, but they cannot both be input or both
    // be output.)
    std::string name;
    // True if this is an output port, false if it is an input port.
    bool is_output;
    // Only on input ports. Presumably invoked when a value arrives - confirm
    // in the implementation.
    sol::safe_function callback;
    // Only on output ports: the destination ports this port is wired to.
    std::vector<Port*> to;
    // The type of value this port carries (see PortValue::Type).
    PortValue::Type type;
    // The machine this port belongs to.
    Machine* in_machine;
    // Handle a value arriving at this port.
    void receive(PortValue& val);
};
| 15.938272 | 67 | 0.690163 | [
"vector"
] |
0dd6f40a608c7f2826506e1ed925729a59742ea7 | 55,663 | h | C | ThorLabsLTS/Thorlabs.MotionControl.Benchtop.Piezo.h | ag6520/GEECS-Device-Drivers | d83928b2591f5b674a256a0d27d27c04ccd31b08 | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-05-19T22:00:31.000Z | 2020-05-19T22:00:31.000Z | ThorLabsLTS/Thorlabs.MotionControl.Benchtop.Piezo.h | ag6520/GEECS-Device-Drivers | d83928b2591f5b674a256a0d27d27c04ccd31b08 | [
"BSD-3-Clause-LBNL"
] | null | null | null | ThorLabsLTS/Thorlabs.MotionControl.Benchtop.Piezo.h | ag6520/GEECS-Device-Drivers | d83928b2591f5b674a256a0d27d27c04ccd31b08 | [
"BSD-3-Clause-LBNL"
] | null | null | null | // summary: Declares the functions class
// The following ifdef block is the standard way of creating macros which make exporting
// from a DLL simpler. All files within this DLL are compiled with the BENCHPIEZO_EXPORTS
// symbol defined on the command line. This symbol should not be defined on any project
// that uses this DLL. This way any other project whose source files include this file see
// BENCHPIEZO_API functions as being imported from a DLL, whereas this DLL sees symbols
// defined with this macro as being exported.
#pragma once
#ifdef BENCHPIEZODLL_EXPORTS
#define BENCHPIEZO_API __declspec(dllexport)
#else
#define BENCHPIEZO_API __declspec(dllimport)
#endif
#include <OaIdl.h>
/** @defgroup BenchtopPiezo Benchtop Piezo
* This section details the Structures and Functions relavent to the @ref BPC103_page "Benchtop Piezo"<br />
* For an example of how to connect to the device and perform simple operations use the following links:
* <list type=bullet>
* <item> \ref namespaces_bpc_ex_1 "Example of using the Thorlabs.MotionControl.Benchtop.Piezo.DLL from a C or C++ project."<br />
* This requires the DLL to be dynamical linked. </item>
* <item> \ref namespaces_bpc_ex_2 "Example of using the Thorlabs.MotionControl.Benchtop.Piezo.DLL from a C# project"<br />
* This uses Marshalling to load and access the C DLL. </item>
* </list>
* The Thorlabs.MotionControl.Benchtop.Piezo.DLL requires the following DLLs
* <list type=bullet>
* <item> Thorlabs.MotionControl.DeviceManager. </item>
* </list>
* @{
*/
extern "C"
{
/// \cond NOT_MASTER
/// <summary> Values that represent FT_Status. </summary>
typedef enum FT_Status : short
{
FT_OK = 0x00, /// <OK - no error.
FT_InvalidHandle = 0x01, ///<Invalid handle.
FT_DeviceNotFound = 0x02, ///<Device not found.
FT_DeviceNotOpened = 0x03, ///<Device not opened.
FT_IOError = 0x04, ///<I/O error.
FT_InsufficientResources = 0x05, ///<Insufficient resources.
FT_InvalidParameter = 0x06, ///<Invalid parameter.
FT_DeviceNotPresent = 0x07, ///<Device not present.
FT_IncorrectDevice = 0x08 ///<Incorrect device.
} FT_Status;
/// <summary> Values that represent THORLABSDEVICE_API. </summary>
typedef enum MOT_MotorTypes
{
MOT_NotMotor = 0,
MOT_DCMotor = 1,
MOT_StepperMotor = 2,
MOT_BrushlessMotor = 3,
MOT_CustomMotor = 100,
} MOT_MotorTypes;
/// \endcond
	/// <summary> Information about the device generated from serial number. </summary>
#pragma pack(1)
	typedef struct TLI_DeviceInfo
	{
		/// <summary> The device Type ID, see \ref C_DEVICEID_page "Device serial numbers". </summary>
		DWORD typeID;
		/// <summary> The device description. </summary>
		char description[65];
		/// <summary> The device serial number (presumably a NUL terminated 8 character string - confirm). </summary>
		char serialNo[9];
		/// <summary> The USB PID number. </summary>
		DWORD PID;
		/// <summary> <c>true</c> if this object is a type known to the Motion Control software. </summary>
		bool isKnownType;
		/// <summary> The motor type (if a motor).
		/// <list type=table>
		///		<item><term>MOT_NotMotor</term><term>0</term></item>
		///		<item><term>MOT_DCMotor</term><term>1</term></item>
		///		<item><term>MOT_StepperMotor</term><term>2</term></item>
		///		<item><term>MOT_BrushlessMotor</term><term>3</term></item>
		///		<item><term>MOT_CustomMotor</term><term>100</term></item>
		/// </list> </summary>
		MOT_MotorTypes motorType;
		/// <summary> <c>true</c> if the device is a piezo device. </summary>
		bool isPiezoDevice;
		/// <summary> <c>true</c> if the device is a laser. </summary>
		bool isLaser;
		/// <summary> <c>true</c> if the device is a custom type. </summary>
		bool isCustomType;
		/// <summary> <c>true</c> if the device is a rack. </summary>
		bool isRack;
		/// <summary> Defines the number of channels available in this device. </summary>
		short maxChannels;
	} TLI_DeviceInfo;
	/// <summary> Structure containing the Hardware Information. </summary>
	/// <value> Hardware Information retrieved from the device. </value>
	typedef struct TLI_HardwareInformation
	{
		/// <summary> The device serial number. </summary>
		/// <remarks> The device serial number is a serial number,<br />starting with 2 digits representing the device type<br /> and a 6 digit unique value.</remarks>
		DWORD serialNumber;
		/// <summary> The device model number. </summary>
		/// <remarks> The model number uniquely identifies the device type as a string. </remarks>
		char modelNumber[8];
		/// <summary> The device type. </summary>
		/// <remarks> Each device type has a unique Type ID: see \ref C_DEVICEID_page "Device serial numbers" </remarks>
		WORD type;
		/// <summary> The number of channels the device provides. </summary>
		short numChannels;
		/// <summary> The device notes read from the device. </summary>
		char notes[48];
		/// <summary> The device firmware version. </summary>
		DWORD firmwareVersion;
		/// <summary> The device hardware version. </summary>
		WORD hardwareVersion;
		/// <summary> The device dependant data. </summary>
		BYTE deviceDependantData[12];
		/// <summary> The device modification state. </summary>
		WORD modificationState;
	} TLI_HardwareInformation;
/// <summary> The Piezo Control Modes. </summary>
/// \ingroup Common
typedef enum PZ_ControlModeTypes : short
{
PZ_Undefined = 0, ///<Undefined
PZ_OpenLoop = 1, ///<Open Loop mode.
PZ_CloseLoop = 2, ///<Closed Loop mode.
PZ_OpenLoopSmooth = 3, ///<Open Loop mode with smoothing.
PZ_CloseLoopSmooth = 4 ///<Closed Loop mode with smoothing.
} PZ_ControlModeTypes;
/// <summary> The Piezo Input Source Flags. </summary>
/// \ingroup Common
typedef enum PZ_InputSourceFlags : short
{
PZ_SoftwareOnly = 0, ///<Only read input from software.
PZ_ExternalSignal = 0x01, ///<Read input from software and External Signal.
PZ_Potentiometer = 0x02, ///<Read input from software and Potentiometer.
PZ_All = PZ_ExternalSignal | PZ_Potentiometer ///<Read input from all sources.
} PZ_InputSourceFlags;
/// <summary>The Piezo Output LUT Operating Flags. </summary>
/// \ingroup Common
typedef enum PZ_OutputLUTModes : short
{
PZ_Continuous = 0x01, ///<LUT waveform output continuously.
PZ_Fixed = 0x02, ///<LUT waveform output for a Fixed number of cycles.
PZ_OutputTrigEnable = 0x04, ///<Enable Output Triggering.
PZ_InputTrigEnable = 0x08, ///<Enable Input triggering.
PZ_OutputTrigSenseHigh = 0x10, ///<Output trigger sense is high.
PZ_InputTrigSenseHigh = 0x20, ///<Input trigger sense is high.
PZ_OutputGated = 0x40, ///<Output is gated.
PZ_OutputTrigRepeat = 0x80, ///<Output trigger repeats.
} PZ_OutputLUTModes;
	/// <summary> Structure containing feedback loop constants. </summary>
	/// <remarks> Proportional and integral gains used by the feedback loop. </remarks>
	typedef struct PZ_FeedbackLoopConstants
	{
		/// <summary> The proportional term. </summary>
		short proportionalTerm;
		/// <summary> The integral term. </summary>
		short integralTerm;
	} PZ_FeedbackLoopConstants;
	/// <summary> Structure containing LUT output wave parameters. </summary>
	typedef struct PZ_LUTWaveParameters
	{
		/// <summary> Specifies the LUT (waveform) output mode (continuous or fixed) </summary>
		PZ_OutputLUTModes mode;
		/// <summary> Specifies the number of LUT values to output for a single waveform cycle (0 to 7999) </summary>
		short cycleLength;
		/// <summary> Specifies the number of waveform cycles to output. </summary>
		unsigned int numCycles;
		/// <summary> Specifies the delay in milliseconds that the system waits after setting each LUT
		/// output value. </summary>
		unsigned int LUTValueDelay;
		/// <summary> The delay time before the system clocks out the LUT values. </summary>
		unsigned int preCycleDelay;
		/// <summary> The delay time after the system clocks out the LUT values. </summary>
		unsigned int postCycleDelay;
		/// <summary> Zero-based index at which the output trigger is fired.<br />
		///		Requires Triggering is enabled. </summary>
		short outTriggerStart;
		/// <summary> Duration of the output trigger in milliseconds.<br />
		///		Requires Triggering is enabled. </summary>
		unsigned int outTriggerDuration;
		/// <summary> The output trigger repeat interval.<br />
		///		Requires Repeat Triggering is enabled </summary>
		short numOutTriggerRepeat;
	} PZ_LUTWaveParameters;
#pragma pack()
/// <summary> Build the DeviceList. </summary>
/// <remarks> This function builds an internal collection of all devices found on the USB that are not currently open. <br />
/// NOTE, if a device is open, it will not appear in the list until the device has been closed. </remarks>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// \include CodeSnippet_identification.cpp
/// <seealso cref="TLI_GetDeviceListSize()" />
/// <seealso cref="TLI_GetDeviceList(SAFEARRAY** stringsReceiver)" />
/// <seealso cref="TLI_GetDeviceListByType(SAFEARRAY** stringsReceiver, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypes(SAFEARRAY** stringsReceiver, int * typeIDs, int length)" />
/// <seealso cref="TLI_GetDeviceListExt(char *receiveBuffer, DWORD sizeOfBuffer)" />
/// <seealso cref="TLI_GetDeviceListByTypeExt(char *receiveBuffer, DWORD sizeOfBuffer, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypesExt(char *receiveBuffer, DWORD sizeOfBuffer, int * typeIDs, int length)" />
BENCHPIEZO_API short __cdecl TLI_BuildDeviceList(void);
/// <summary> Gets the device list size. </summary>
/// \include CodeSnippet_identification.cpp
/// <returns> Number of devices in device list. </returns>
/// <seealso cref="TLI_BuildDeviceList()" />
/// <seealso cref="TLI_GetDeviceList(SAFEARRAY** stringsReceiver)" />
/// <seealso cref="TLI_GetDeviceListByType(SAFEARRAY** stringsReceiver, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypes(SAFEARRAY** stringsReceiver, int * typeIDs, int length)" />
/// <seealso cref="TLI_GetDeviceListExt(char *receiveBuffer, DWORD sizeOfBuffer)" />
/// <seealso cref="TLI_GetDeviceListByTypeExt(char *receiveBuffer, DWORD sizeOfBuffer, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypesExt(char *receiveBuffer, DWORD sizeOfBuffer, int * typeIDs, int length)" />
BENCHPIEZO_API short __cdecl TLI_GetDeviceListSize();
/// <summary> Get the entire contents of the device list. </summary>
/// <param name="stringsReceiver"> Outputs a SAFEARRAY of strings holding device serial numbers. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// \include CodeSnippet_identification.cpp
/// <seealso cref="TLI_GetDeviceListSize()" />
/// <seealso cref="TLI_BuildDeviceList()" />
/// <seealso cref="TLI_GetDeviceListByType(SAFEARRAY** stringsReceiver, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypes(SAFEARRAY** stringsReceiver, int * typeIDs, int length)" />
/// <seealso cref="TLI_GetDeviceListExt(char *receiveBuffer, DWORD sizeOfBuffer)" />
/// <seealso cref="TLI_GetDeviceListByTypeExt(char *receiveBuffer, DWORD sizeOfBuffer, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypesExt(char *receiveBuffer, DWORD sizeOfBuffer, int * typeIDs, int length)" />
BENCHPIEZO_API short __cdecl TLI_GetDeviceList(SAFEARRAY** stringsReceiver);
/// <summary> Get the contents of the device list which match the supplied typeID. </summary>
/// <param name="stringsReceiver"> Outputs a SAFEARRAY of strings holding device serial numbers. </param>
/// <param name="typeID"> The typeID of devices to match. </param>
/// <param name="typeID">The typeID of devices to match, see \ref C_DEVICEID_page "Device serial numbers". </param>
/// \include CodeSnippet_identification.cpp
/// <seealso cref="TLI_GetDeviceListSize()" />
/// <seealso cref="TLI_BuildDeviceList()" />
/// <seealso cref="TLI_GetDeviceList(SAFEARRAY** stringsReceiver)" />
/// <seealso cref="TLI_GetDeviceListByTypes(SAFEARRAY** stringsReceiver, int * typeIDs, int length)" />
/// <seealso cref="TLI_GetDeviceListExt(char *receiveBuffer, DWORD sizeOfBuffer)" />
/// <seealso cref="TLI_GetDeviceListByTypeExt(char *receiveBuffer, DWORD sizeOfBuffer, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypesExt(char *receiveBuffer, DWORD sizeOfBuffer, int * typeIDs, int length)" />
BENCHPIEZO_API short __cdecl TLI_GetDeviceListByType(SAFEARRAY** stringsReceiver, int typeID);
/// <summary> Get the contents of the device list which match the supplied typeIDs. </summary>
/// <param name="stringsReceiver"> Outputs a SAFEARRAY of strings holding device serial numbers. </param>
/// <param name="typeIDs"> list of typeIDs of devices to be matched, see \ref C_DEVICEID_page "Device serial numbers"</param>
/// <param name="length"> length of type list</param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// \include CodeSnippet_identification.cpp
/// <seealso cref="TLI_GetDeviceListSize()" />
/// <seealso cref="TLI_BuildDeviceList()" />
/// <seealso cref="TLI_GetDeviceList(SAFEARRAY** stringsReceiver)" />
/// <seealso cref="TLI_GetDeviceListByType(SAFEARRAY** stringsReceiver, int typeID)" />
/// <seealso cref="TLI_GetDeviceListExt(char *receiveBuffer, DWORD sizeOfBuffer)" />
/// <seealso cref="TLI_GetDeviceListByTypeExt(char *receiveBuffer, DWORD sizeOfBuffer, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypesExt(char *receiveBuffer, DWORD sizeOfBuffer, int * typeIDs, int length)" />
BENCHPIEZO_API short __cdecl TLI_GetDeviceListByTypes(SAFEARRAY** stringsReceiver, int * typeIDs, int length);
/// <summary> Get the entire contents of the device list. </summary>
/// <param name="receiveBuffer"> a buffer in which to receive the list as a comma separated string. </param>
/// <param name="sizeOfBuffer"> The size of the output string buffer. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// \include CodeSnippet_identification.cpp
/// <seealso cref="TLI_GetDeviceListSize()" />
/// <seealso cref="TLI_BuildDeviceList()" />
/// <seealso cref="TLI_GetDeviceList(SAFEARRAY** stringsReceiver)" />
/// <seealso cref="TLI_GetDeviceListByType(SAFEARRAY** stringsReceiver, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypes(SAFEARRAY** stringsReceiver, int * typeIDs, int length)" />
/// <seealso cref="TLI_GetDeviceListByTypeExt(char *receiveBuffer, DWORD sizeOfBuffer, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypesExt(char *receiveBuffer, DWORD sizeOfBuffer, int * typeIDs, int length)" />
BENCHPIEZO_API short __cdecl TLI_GetDeviceListExt(char *receiveBuffer, DWORD sizeOfBuffer);
/// <summary> Get the contents of the device list which match the supplied typeID. </summary>
/// <param name="receiveBuffer"> a buffer in which to receive the list as a comma separated string. </param>
/// <param name="sizeOfBuffer"> The size of the output string buffer. </param>
/// <param name="typeID"> The typeID of devices to be matched, see \ref C_DEVICEID_page "Device serial numbers"</param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// \include CodeSnippet_identification.cpp
/// <seealso cref="TLI_GetDeviceListSize()" />
/// <seealso cref="TLI_BuildDeviceList()" />
/// <seealso cref="TLI_GetDeviceList(SAFEARRAY** stringsReceiver)" />
/// <seealso cref="TLI_GetDeviceListByType(SAFEARRAY** stringsReceiver, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypes(SAFEARRAY** stringsReceiver, int * typeIDs, int length)" />
/// <seealso cref="TLI_GetDeviceListExt(char *receiveBuffer, DWORD sizeOfBuffer)" />
/// <seealso cref="TLI_GetDeviceListByTypesExt(char *receiveBuffer, DWORD sizeOfBuffer, int * typeIDs, int length)" />
BENCHPIEZO_API short __cdecl TLI_GetDeviceListByTypeExt(char *receiveBuffer, DWORD sizeOfBuffer, int typeID);
/// <summary> Get the contents of the device list which match the supplied typeIDs. </summary>
/// <param name="receiveBuffer"> a buffer in which to receive the list as a comma separated string. </param>
/// <param name="sizeOfBuffer"> The size of the output string buffer. </param>
/// <param name="typeIDs"> list of typeIDs of devices to be matched, see \ref C_DEVICEID_page "Device serial numbers"</param>
/// <param name="length"> length of type list</param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// \include CodeSnippet_identification.cpp
/// <seealso cref="TLI_GetDeviceListSize()" />
/// <seealso cref="TLI_BuildDeviceList()" />
/// <seealso cref="TLI_GetDeviceList(SAFEARRAY** stringsReceiver)" />
/// <seealso cref="TLI_GetDeviceListByType(SAFEARRAY** stringsReceiver, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypes(SAFEARRAY** stringsReceiver, int * typeIDs, int length)" />
/// <seealso cref="TLI_GetDeviceListExt(char *receiveBuffer, DWORD sizeOfBuffer)" />
/// <seealso cref="TLI_GetDeviceListByTypeExt(char *receiveBuffer, DWORD sizeOfBuffer, int typeID)" />
BENCHPIEZO_API short __cdecl TLI_GetDeviceListByTypesExt(char *receiveBuffer, DWORD sizeOfBuffer, int * typeIDs, int length);
/// <summary> Get the device information from the USB port. </summary>
	/// <remarks> The Device Info is read from the USB port, not from the device itself. </remarks>
/// <param name="serialNo"> The serial number of the device. </param>
/// <param name="info"> The <see cref="TLI_DeviceInfo"/> device information. </param>
/// <returns> 1 if successful, 0 if not. </returns>
/// \include CodeSnippet_identification.cpp
/// <seealso cref="TLI_GetDeviceListSize()" />
/// <seealso cref="TLI_BuildDeviceList()" />
/// <seealso cref="TLI_GetDeviceList(SAFEARRAY** stringsReceiver)" />
/// <seealso cref="TLI_GetDeviceListByType(SAFEARRAY** stringsReceiver, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypes(SAFEARRAY** stringsReceiver, int * typeIDs, int length)" />
/// <seealso cref="TLI_GetDeviceListExt(char *receiveBuffer, DWORD sizeOfBuffer)" />
/// <seealso cref="TLI_GetDeviceListByTypeExt(char *receiveBuffer, DWORD sizeOfBuffer, int typeID)" />
/// <seealso cref="TLI_GetDeviceListByTypesExt(char *receiveBuffer, DWORD sizeOfBuffer, int * typeIDs, int length)" />
BENCHPIEZO_API short _cdecl TLI_GetDeviceInfo(char const * serialNo, TLI_DeviceInfo *info);
/// <summary> Open the device for communications. </summary>
/// <param name="serialNo"> The serial no of the controller to be connected. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// \include CodeSnippet_connectionN.cpp
/// <seealso cref="PBC_Close(char const * serialNo)" />
BENCHPIEZO_API short __cdecl PBC_Open(char const * serialNo);
/// <summary> Disconnect and close the device. </summary>
/// <param name="serialNo"> The serial no of the controller to be disconnected. </param>
/// \include CodeSnippet_connectionN.cpp
/// <seealso cref="PBC_Open(char const * serialNo)" />
BENCHPIEZO_API void __cdecl PBC_Close(char const * serialNo);
	/// <summary> Sends a command to the device to make it identify itself. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
BENCHPIEZO_API void __cdecl PBC_Identify(char const * serialNo, short channel);
/// <summary> Tells the device that it is being disconnected. </summary>
/// <remarks> This does not disconnect the communications.<br />
/// To disconnect the communications, call the <see cref="PBC_Close(char const * serialNo)" /> function. </remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
BENCHPIEZO_API short __cdecl PBC_Disconnect(char const * serialNo);
/// <summary> Gets the number of channels available to this device. </summary>
/// <remarks> This function returns the number of available bays, not the number of bays filled.</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <returns> The number of channels available on this device. </returns>
/// \include CodeSnippet_connectionN.cpp
BENCHPIEZO_API int __cdecl PBC_MaxChannelCount(char const * serialNo);
/// <summary> Verifies that the specified channel is valid. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The requested channel (1 to n). </param>
/// <returns> <c>true</c> if the channel is valid. </returns>
/// \include CodeSnippet_connectionN.cpp
BENCHPIEZO_API bool __cdecl PBC_IsChannelValid(char const * serialNo, short channel);
/// <summary> Gets the hardware information from the device. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="modelNo"> Address of a buffer to receive the model number string. Minimum 8 characters </param>
/// <param name="sizeOfModelNo"> The size of the model number buffer, minimum of 8 characters. </param>
/// <param name="type"> Address of a WORD to receive the hardware type number. </param>
/// <param name="numChannels"> Address of a short to receive the number of channels. </param>
/// <param name="notes"> Address of a buffer to receive the notes describing the device. </param>
/// <param name="sizeOfNotes"> The size of the notes buffer, minimum of 48 characters. </param>
/// <param name="firmwareVersion"> Address of a DWORD to receive the firmware version number made up of 4 byte parts. </param>
/// <param name="hardwareVersion"> Address of a WORD to receive the hardware version number. </param>
/// <param name="modificationState"> Address of a WORD to receive the hardware modification state number. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// \include CodeSnippet_identify.cpp
BENCHPIEZO_API short __cdecl PBC_GetHardwareInfo(char const * serialNo, short channel, char * modelNo, DWORD sizeOfModelNo, WORD * type, WORD * numChannels,
char * notes, DWORD sizeOfNotes, DWORD * firmwareVersion, WORD * hardwareVersion, WORD * modificationState);
/// <summary> Gets the hardware information in a block. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="hardwareInfo"> Address of a TLI_HardwareInformation structure to receive the hardware information. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// \include CodeSnippet_identify.cpp
BENCHPIEZO_API short __cdecl PBC_GetHardwareInfoBlock(char const * serialNo, short channel, TLI_HardwareInformation *hardwareInfo);
/// <summary> Gets the number of channels in the device. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <returns> The number of channels. </returns>
BENCHPIEZO_API short __cdecl PBC_GetNumChannels(char const * serialNo);
/// <summary> Gets version number of the device firmware. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <returns> The device firmware version number made up of 4 byte parts. </returns>
/// \include CodeSnippet_identify.cpp
BENCHPIEZO_API DWORD __cdecl PBC_GetFirmwareVersion(char const * serialNo);
/// <summary> Gets version number of the device software. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <returns> The device software version number made up of 4 byte parts. </returns>
/// \include CodeSnippet_identify.cpp
BENCHPIEZO_API DWORD __cdecl PBC_GetSoftwareVersion(char const * serialNo);
/// <summary> Update device with stored settings. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> <c>true</c> if successful, false if not. </returns>
BENCHPIEZO_API bool __cdecl PBC_LoadSettings(char const * serialNo, short channel);
/// <summary> Disable the channel so that motor can be moved by hand. </summary>
/// <remarks> When disabled power is removed from the actuator.</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_EnableChannel(char const * serialNo, short channel)" />
BENCHPIEZO_API short __cdecl PBC_DisableChannel(char const * serialNo, short channel);
/// <summary> Enable channel for computer control. </summary>
/// <remarks> When enabled power is applied to the actuator so it is fixed in position.</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_DisableChannel(char const * serialNo, short channel)" />
BENCHPIEZO_API short __cdecl PBC_EnableChannel(char const * serialNo, short channel);
/// <summary> Registers a callback on the message queue. </summary>
/// <remarks> see \ref C_MESSAGES_page "Device Messages" for details on how to use messages. </remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="functionPointer"> A function pointer to be called whenever messages are received. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
BENCHPIEZO_API short __cdecl PBC_RegisterMessageCallback(char const * serialNo, short channel, void (* functionPointer)());
/// <summary> Gets the MessageQueue size. </summary>
/// <remarks> see \ref C_MESSAGES_page "Device Messages" for details on how to use messages. </remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> number of messages in the queue. </returns>
BENCHPIEZO_API int __cdecl PBC_MessageQueueSize(char const * serialNo, short channel);
/// <summary> Clears the device message queue. </summary>
/// <remarks> see \ref C_MESSAGES_page "Device Messages" for details on how to use messages. </remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
BENCHPIEZO_API short __cdecl PBC_ClearMessageQueue(char const * serialNo, short channel);
/// <summary> Get the next MessageQueue item if it is available. </summary>
/// <remarks> see \ref C_MESSAGES_page "Device Messages" for details on how to use messages. </remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="messageType"> Address of the WORD to receive the message type. </param>
/// <param name="messageID"> Address of the WORD to receive the message ID. </param>
/// <param name="messageData"> Address of the DWORD to receive the messageData. </param>
/// <returns> <c>true</c> if successful, false if not. </returns>
BENCHPIEZO_API bool __cdecl PBC_GetNextMessage(char const * serialNo, short channel, WORD * messageType, WORD * messageID, DWORD *messageData);
/// <summary> Get the next MessageQueue item if it is available. </summary>
/// <remarks> see \ref C_MESSAGES_page "Device Messages" for details on how to use messages. </remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="messageType"> Address of the WORD to receive the message type. </param>
/// <param name="messageID"> Address of the WORD to receive the message ID. </param>
/// <param name="messageData"> Address of the DWORD to receive the messageData. </param>
/// <returns> <c>true</c> if successful, false if not. </returns>
BENCHPIEZO_API bool __cdecl PBC_WaitForMessage(char const * serialNo, short channel, WORD * messageType, WORD * messageID, DWORD *messageData);
/// <summary> Starts the internal polling loop which continuously requests position and status. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="milliseconds"> The milliseconds polling rate. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> <c>true</c> if successful, false if not. </returns>
/// <seealso cref="PBC_StopPolling(char const * serialNo, short channel)" />
/// <seealso cref="PBC_PollingDuration(char const * serialNo, short channel)" />
/// <seealso cref="PBC_RequestStatusBits(char const * serialNo, short channel)" />
/// <seealso cref="PBC_RequestPosition(char const * serialNo, short channel)" />
/// \include CodeSnippet_connectionN.cpp
BENCHPIEZO_API bool __cdecl PBC_StartPolling(char const * serialNo, short channel, int milliseconds);
/// <summary> Gets the polling loop duration. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The time between polls in milliseconds or 0 if polling is not active. </returns>
/// <seealso cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />
/// <seealso cref="PBC_StopPolling(char const * serialNo, short channel)" />
/// \include CodeSnippet_connectionN.cpp
BENCHPIEZO_API long __cdecl PBC_PollingDuration(char const * serialNo, short channel);
/// <summary> Stops the internal polling loop. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <seealso cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />
/// <seealso cref="PBC_PollingDuration(char const * serialNo, short channel)" />
/// \include CodeSnippet_connectionN.cpp
BENCHPIEZO_API void __cdecl PBC_StopPolling(char const * serialNo, short channel);
/// <summary> Gets the time in milliseconds since the last message was received from the device. </summary>
/// <remarks> This can be used to determine whether communications with the device is still good</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="lastUpdateTimeMS"> The time since the last message was received in milliseconds. </param>
/// <returns> True if monitoring is enabled otherwise False. </returns>
/// <seealso cref="PBC_EnableLastMsgTimer(char const * serialNo, short channel, bool enable, __int32 lastMsgTimeout )" />
/// <seealso cref="PBC_HasLastMsgTimerOverrun(char const * serialNo, short channel)" />
/// \include CodeSnippet_connectionMonitoring.cpp
BENCHPIEZO_API bool __cdecl PBC_TimeSinceLastMsgReceived(char const * serialNo, short channel, __int64 &lastUpdateTimeMS );
/// <summary> Enables the last message monitoring timer. </summary>
/// <remarks> This can be used to determine whether communications with the device is still good</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="enable"> True to enable monitoring otherwise False to disable. </param>
/// <param name="lastMsgTimeout"> The last message error timeout in ms. 0 to disable. </param>
/// <seealso cref="PBC_TimeSinceLastMsgReceived(char const * serialNo, short channel, __int64 &lastUpdateTimeMS )" />
/// <seealso cref="PBC_HasLastMsgTimerOverrun(char const * serialNo, short channel)" />
/// \include CodeSnippet_connectionMonitoring.cpp
BENCHPIEZO_API void __cdecl PBC_EnableLastMsgTimer(char const * serialNo, short channel, bool enable, __int32 lastMsgTimeout );
/// <summary> Queries if the time since the last message has exceeded the lastMsgTimeout set by <see cref="PBC_EnableLastMsgTimer(char const * serialNo, short channel, bool enable, __int32 lastMsgTimeout )"/>. </summary>
/// <remarks> This can be used to determine whether communications with the device is still good</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> True if last message timer has elapsed, False if monitoring is not enabled or if time of last message received is less than lastMsgTimeout. </returns>
/// <seealso cref="PBC_TimeSinceLastMsgReceived(char const * serialNo, short channel, __int64 &lastUpdateTimeMS )" />
/// <seealso cref="PBC_EnableLastMsgTimer(char const * serialNo, short channel, bool enable, __int32 lastMsgTimeout )" />
/// \include CodeSnippet_connectionMonitoring.cpp
BENCHPIEZO_API bool __cdecl PBC_HasLastMsgTimerOverrun(char const * serialNo, short channel);
/// <summary> Requests that all settings are download from device. </summary>
/// <remarks> This function requests that the device upload all its settings to the DLL.</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successfully requested. </returns>
BENCHPIEZO_API short __cdecl PBC_RequestSettings(char const * serialNo, short channel);
/// <summary> Requests the status bits and position. </summary>
/// <remarks> This needs to be called to get the device to send its current position and status bits.<br />
/// NOTE this is called automatically if Polling is enabled for the device using <see cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />. </remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successfully requested. </returns>
/// <seealso cref="PBC_RequestStatusBits(char const * serialNo, short channel)" />
/// <seealso cref="PBC_GetPosition(char const * serialNo, short channel)" />
/// <seealso cref="PBC_GetStatusBits(char const * serialNo, short channel)" />
/// <seealso cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />
BENCHPIEZO_API short __cdecl PBC_RequestStatus(char const * serialNo, short channel);
/// <summary> Request the status bits which identify the current device state. </summary>
/// <remarks> This needs to be called to get the device to send its current status bits.<br />
/// NOTE this is called automatically if Polling is enabled for the device using <see cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />. </remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successfully requested. </returns>
/// <seealso cref="PBC_GetStatusBits(char const * serialNo, short channel)" />
/// <seealso cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />
BENCHPIEZO_API short __cdecl PBC_RequestStatusBits(char const * serialNo, short channel);
/// <summary>Get the current status bits. </summary>
/// <remarks> This returns the latest status bits received from the device.<br />
/// To get new status bits, use <see cref="PBC_RequestStatusBits(char const * serialNo, short channel)" /> <br />
/// or use <see cref="PBC_RequestStatus(char const * serialNo, short channel)" />
/// or use the polling functions, <see cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />.</remarks>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="serialNo"> The controller serial no. </param>
/// <returns> The status bits from the device <list type=table>
/// <item><term>0x00000001</term><term>Piezo actuator connected (1=Connected, 0=Not connected).</term></item>
/// <item><term>0x00000002</term><term>For Future Use.</term></item>
/// <item><term>0x00000004</term><term>For Future Use.</term></item>
/// <item><term>0x00000008</term><term>For Future Use.</term></item>
/// <item><term>0x00000010</term><term>Piezo channel has been zeroed (1=Zeroed, 0=Not zeroed).</term></item>
/// <item><term>0x00000020</term><term>Piezo channel is zeroing (1=Zeroing, 0=Not zeroing).</term></item>
/// <item><term>0x00000040</term><term>For Future Use.</term></item>
/// <item><term>0x00000080</term><term>For Future Use.</term></item>
/// <item><term>0x00000100</term><term>Strain gauge feedback connected (1=Connected, 0=Not connected).</term></item>
/// <item><term>0x00000200</term><term>For Future Use.</term></item>
/// <item><term>0x00000400</term><term>Position control mode (1=Closed loop, 0=Open loop).</term></item>
/// <item><term>0x00000800</term><term>For Future Use.</term></item>
/// <item><term>0x00001000</term><term></term></item>
/// <item><term>...</term><term></term></item>
/// <item><term>0x00080000</term><term></term></item>
/// <item><term>0x00100000</term><term>Digital input 1 state (1=Logic High, 0=Logic Low).</term></item>
/// <item><term>0x00200000</term><term>Digital input 2 state (1=Logic High, 0=Logic Low).</term></item>
/// <item><term>0x00400000</term><term>Digital input 3 state (1=Logic High, 0=Logic Low).</term></item>
/// <item><term>0x00800000</term><term>Digital input 4 state (1=Logic High, 0=Logic Low).</term></item>
/// <item><term>0x01000000</term><term>Digital input 5 state (1=Logic High, 0=Logic Low).</term></item>
/// <item><term>0x02000000</term><term>Digital input 6 state (1=Logic High, 0=Logic Low).</term></item>
/// <item><term>0x04000000</term><term>Digital input 7 state (1=Logic High, 0=Logic Low).</term></item>
/// <item><term>0x08000000</term><term>Digital input 8 state (1=Logic High, 0=Logic Low).</term></item>
/// <item><term>0x10000000</term><term>For Future Use.</term></item>
/// <item><term>0x20000000</term><term>Active (1=Indicates Unit Is Active, 0=Not Active).</term></item>
/// <item><term>0x40000000</term><term>For Future Use.</term></item>
/// <item><term>0x80000000</term><term>Channel enabled (1=Enabled, 0=Disabled).</term></item>
/// </list> <remarks> Bits 21 to 28 (Digital Input States) are only applicable if the associated digital input is fitted to your controller - see the relevant handbook for more details. </remarks> </returns>
/// <seealso cref="PBC_RequestStatusBits(char const * serialNo, short channel)" />
/// <seealso cref="PBC_RequestStatus(char const * serialNo, short channel)" />
/// <seealso cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />
BENCHPIEZO_API DWORD __cdecl PBC_GetStatusBits(char const * serialNo, short channel);
/// <summary> Requests the current output voltage or position depending on current mode. </summary>
/// <remarks> This needs to be called to get the device to send its current position.<br />
/// NOTE this is called automatically if Polling is enabled for the device using <see cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />. </remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successfully requested. </returns>
/// <seealso cref="PBC_GetPosition(char const * serialNo, short channel)" />
/// <seealso cref="PBC_StartPolling(char const * serialNo, short channel, int milliseconds)" />
BENCHPIEZO_API short __cdecl PBC_RequestPosition(char const * serialNo, short channel);
/// <summary> Resets all parameters to power-up values. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
BENCHPIEZO_API short __cdecl PBC_ResetParameters(char const * serialNo, short channel);
/// <summary> Sets the voltage output to zero and defines the ensuing actuator position as zero. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
BENCHPIEZO_API short __cdecl PBC_SetZero(char const * serialNo, short channel);
/// <summary> Gets the Position Control Mode. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The control mode <list type=table>
/// <item><term>Open Loop</term><term>1</term></item>
/// <item><term>Closed Loop</term><term>2</term></item>
/// <item><term>Open Loop smoothed</term><term>3</term></item>
/// <item><term>Closed Loop smoothed</term><term>4</term></item>
/// </list> </returns>
/// <seealso cref="PBC_SetPositionControlMode(char const * serialNo, short channel, PZ_ControlModeTypes mode)" />
BENCHPIEZO_API PZ_ControlModeTypes __cdecl PBC_GetPositionControlMode(char const * serialNo, short channel);
/// <summary> Sets the Position Control Mode. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="mode"> The control mode <list type=table>
/// <item><term>Open Loop</term><term>1</term></item>
/// <item><term>Closed Loop</term><term>2</term></item>
/// <item><term>Open Loop smoothed</term><term>3</term></item>
/// <item><term>Closed Loop smoothed</term><term>4</term></item>
/// </list>. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_GetPositionControlMode(char const * serialNo, short channel)" />
BENCHPIEZO_API short __cdecl PBC_SetPositionControlMode(char const * serialNo, short channel, PZ_ControlModeTypes mode);
// Voltage Functions
/// <summary> Gets the maximum output voltage. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The maximum output voltage, 750, 1000 or 1500 (75.0, 100.0, 150.0). </returns>
/// <seealso cref="PBC_SetMaxOutputVoltage(char const * serialNo, short channel, short maxVoltage)" />
BENCHPIEZO_API short __cdecl PBC_GetMaxOutputVoltage(char const * serialNo, short channel);
/// <summary> Sets the maximum output voltage. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="maxVoltage"> The maximum output voltage, 750, 1000 or 1500 (75.0, 100.0, 150.0). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_GetMaxOutputVoltage(char const * serialNo, short channel)" />
BENCHPIEZO_API short __cdecl PBC_SetMaxOutputVoltage(char const * serialNo, short channel, short maxVoltage);
/// <summary> Gets the set Output Voltage. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The voltage as a percentage of MaxOutputVoltage,<br />
/// range -32767 to 32767 equivalent to -100% to 100%. </returns>
/// <seealso cref="PBC_SetOutputVoltage(char const * serialNo, short channel, short volts)" />
/// <seealso cref="PBC_SetMaxOutputVoltage(char const * serialNo, short channel, short eVoltage)" />
/// <seealso cref="PBC_GetMaxOutputVoltage(char const * serialNo, short channel)" />
BENCHPIEZO_API short __cdecl PBC_GetOutputVoltage(char const * serialNo, short channel);
/// <summary> Sets the output voltage. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="volts"> The voltage as a percentage of MaxOutputVoltage,<br />
/// range -32767 to 32767 equivalent to -100% to 100%. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_GetOutputVoltage(char const * serialNo, short channel)" />
/// <seealso cref="PBC_SetMaxOutputVoltage(char const * serialNo, short channel, short eVoltage)" />
/// <seealso cref="PBC_GetMaxOutputVoltage(char const * serialNo, short channel)" />
BENCHPIEZO_API short __cdecl PBC_SetOutputVoltage(char const * serialNo, short channel, short volts);
/// <summary> Gets the control voltage source. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The voltage source. <list type=table>
/// <item><term>Software Only</term><term>0</term></item>
/// <item><term>Software and External</term><term>1</term></item>
/// <item><term>Software and Potentiometer</term><term>2</term></item>
/// <item><term>Software, External and Potentiometer</term><term>3</term></item>
/// </list> </returns>
/// <seealso cref="PBC_SetVoltageSource(char const * serialNo, short channel, PZ_InputSourceFlags source)" />
BENCHPIEZO_API PZ_InputSourceFlags __cdecl PBC_GetVoltageSource(char const * serialNo, short channel);
/// <summary> Sets the control voltage source. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="source"> The voltage source <list type=table>
/// <item><term>Software Only</term><term>0</term></item>
/// <item><term>Software and External</term><term>1</term></item>
/// <item><term>Software and Potentiometer</term><term>2</term></item>
/// <item><term>Software, External and Potentiometer</term><term>3</term></item>
/// </list> </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_GetVoltageSource(char const * serialNo, short channel)" />
BENCHPIEZO_API short __cdecl PBC_SetVoltageSource(char const * serialNo, short channel, PZ_InputSourceFlags source);
/// <summary> Gets the maximum travel of the device. </summary>
/// <remarks> This requires an actuator with built in position sensing</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The distance in steps of 100nm,<br />
/// range 0 to 65535 (10000 is equivalent to 1mm). </returns>
BENCHPIEZO_API WORD __cdecl PBC_GetMaximumTravel(char const * serialNo, short channel);
/// <summary> Gets the position when in closed loop mode. </summary>
/// <remarks> The result is undefined if not in closed loop mode</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The position as a percentage of maximum travel,<br />
/// range -32767 to 32767, equivalent to -100 to 100%. </returns>
/// <seealso cref="PBC_SetPosition(char const * serialNo, short channel, short position)" />
/// <seealso cref="PBC_SetPositionControlMode(char const * serialNo, short channel, PZ_ControlModeTypes mode)" />
/// <seealso cref="PBC_GetPositionControlMode(char const * serialNo, short channel)" />
BENCHPIEZO_API short __cdecl PBC_GetPosition(char const * serialNo, short channel);
/// <summary> Sets the position when in closed loop mode. </summary>
/// <remarks> The command is ignored if not in closed loop mode</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="position"> The position as a percentage of maximum travel,<br />
/// range 0 to 32767, equivalent to 0 to 100%. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_GetPosition(char const * serialNo, short channel)" />
/// <seealso cref="PBC_SetPositionControlMode(char const * serialNo, short channel, PZ_ControlModeTypes mode)" />
/// <seealso cref="PBC_GetPositionControlMode(char const * serialNo, short channel)" />
BENCHPIEZO_API short __cdecl PBC_SetPosition(char const * serialNo, short channel, short position);
/// <summary> Gets the feedback loop parameters. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="proportionalTerm"> The address of the parameter to receive the proportional parameter. </param>
/// <param name="integralTerm"> The address of the parameter to receive the integral parameter. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_SetFeedbackLoopPIconsts(char const * serialNo, short channel, short proportionalTerm, short integralTerm)" />
/// <seealso cref="PBC_GetFeedbackLoopPIconstsBlock(const char * serialNo, short channel, PZ_FeedbackLoopConstants *proportionalAndIntegralConstants)" />
/// <seealso cref="PBC_SetFeedbackLoopPIconstsBlock(const char * serialNo, short channel, PZ_FeedbackLoopConstants *proportionalAndIntegralConstants)" />
BENCHPIEZO_API short __cdecl PBC_GetFeedbackLoopPIconsts(char const * serialNo, short channel, short * proportionalTerm, short * integralTerm);
/// <summary> Sets the feedback loop constants. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="proportionalTerm"> The proportional gain term from 0 to 255. </param>
/// <param name="integralTerm"> The integral gain term from 0 to 255. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_GetFeedbackLoopPIconsts(char const * serialNo, short channel, short * proportionalTerm, short * integralTerm)" />
/// <seealso cref="PBC_GetFeedbackLoopPIconstsBlock(char const * serialNo, short channel, PZ_FeedbackLoopConstants *proportionalAndIntegralConstants)" />
/// <seealso cref="PBC_SetFeedbackLoopPIconstsBlock(const char * serialNo, short channel, PZ_FeedbackLoopConstants *proportionalAndIntegralConstants)" />
BENCHPIEZO_API short __cdecl PBC_SetFeedbackLoopPIconsts(char const * serialNo, short channel, short proportionalTerm, short integralTerm);
/// <summary> Gets the feedback loop constants in a block. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="proportionalAndIntegralConstants"> The address of the PZ_FeedbackLoopConstants to receive the feedback loop constants. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_GetFeedbackLoopPIconsts(char const * serialNo, short channel, short * proportionalTerm, short * integralTerm)" />
/// <seealso cref="PBC_SetFeedbackLoopPIconsts(char const * serialNo, short channel, short proportionalTerm, short integralTerm)" />
/// <seealso cref="PBC_SetFeedbackLoopPIconstsBlock(const char * serialNo, short channel, PZ_FeedbackLoopConstants *proportionalAndIntegralConstants)" />
BENCHPIEZO_API short __cdecl PBC_GetFeedbackLoopPIconstsBlock(const char * serialNo, short channel, PZ_FeedbackLoopConstants *proportionalAndIntegralConstants);
/// <summary> Sets the feedback loop constants in a block. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="proportionalAndIntegralConstants"> The address of the PZ_FeedbackLoopConstants containing the new feedback loop constants. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
/// <seealso cref="PBC_GetFeedbackLoopPIconsts(char const * serialNo, short channel, short * proportionalTerm, short * integralTerm)" />
/// <seealso cref="PBC_SetFeedbackLoopPIconsts(char const * serialNo, short channel, short proportionalTerm, short integralTerm)" />
/// <seealso cref="PBC_GetFeedbackLoopPIconstsBlock(const char * serialNo, short channel, PZ_FeedbackLoopConstants *proportionalAndIntegralConstants)" />
BENCHPIEZO_API short __cdecl PBC_SetFeedbackLoopPIconstsBlock(const char * serialNo, short channel, PZ_FeedbackLoopConstants *proportionalAndIntegralConstants);
/// <summary> Sets the LUT output wave parameters. </summary>
/// <remarks> NOTE, the get function has never been implemented in firmware, so new parameters need to be generated each time.</remarks>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="LUTwaveParams"> Address of the PZ_LUTWaveParameters containing the new LUT wave parameters. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
BENCHPIEZO_API short __cdecl PBC_SetLUTwaveParams(const char * serialNo, short channel, PZ_LUTWaveParameters *LUTwaveParams);
/// <summary> Sets a waveform sample. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <param name="index"> The phase index in waveform where value is to be set. </param>
/// <param name="value"> The voltage or position as a percentage of full scale.<br />
/// range -32767 to 32767, equivalemnt to -100% to 100% of maximum voltage / position. </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
BENCHPIEZO_API short __cdecl PBC_SetLUTwaveSample(char const * serialNo, short channel, short index, WORD value);
/// <summary> Starts the LUT waveform output. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successfully started. </returns>
BENCHPIEZO_API short __cdecl PBC_StartLUTwave(char const * serialNo, short channel);
/// <summary> Stops the LUT waveform output. </summary>
/// <param name="serialNo"> The controller serial no. </param>
/// <param name="channel"> The channel (1 to n). </param>
/// <returns> The error code (see \ref C_DLL_ERRORCODES_page "Error Codes") or zero if successful. </returns>
BENCHPIEZO_API short __cdecl PBC_StopLUTwave(char const * serialNo, short channel);
}
/** @} */ // BenchtopPiezo | 67.799026 | 212 | 0.719742 | [
"object",
"model"
] |
0dd7dad794da43d9488b068c4d661f745c857e54 | 9,158 | h | C | chrome/browser/nearby_sharing/certificates/nearby_share_certificate_manager_impl.h | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575 | 2015-06-18T23:58:20.000Z | 2022-03-23T09:32:39.000Z | chrome/browser/nearby_sharing/certificates/nearby_share_certificate_manager_impl.h | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | chrome/browser/nearby_sharing/certificates/nearby_share_certificate_manager_impl.h | iridium-browser/iridium-browser | 907e31cf5ce5ad14d832796e3a7c11e496828959 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52 | 2015-07-14T10:40:50.000Z | 2022-03-15T01:11:49.000Z | // Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_NEARBY_SHARING_CERTIFICATES_NEARBY_SHARE_CERTIFICATE_MANAGER_IMPL_H_
#define CHROME_BROWSER_NEARBY_SHARING_CERTIFICATES_NEARBY_SHARE_CERTIFICATE_MANAGER_IMPL_H_
#include <memory>
#include <vector>
#include "base/containers/span.h"
#include "base/files/file_path.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
#include "base/optional.h"
#include "base/time/clock.h"
#include "base/time/default_clock.h"
#include "base/timer/timer.h"
#include "chrome/browser/nearby_sharing/certificates/nearby_share_certificate_manager.h"
#include "chrome/browser/nearby_sharing/certificates/nearby_share_certificate_storage.h"
#include "chrome/browser/nearby_sharing/certificates/nearby_share_encrypted_metadata_key.h"
#include "chrome/browser/nearby_sharing/certificates/nearby_share_private_certificate.h"
#include "chrome/browser/nearby_sharing/common/nearby_share_http_result.h"
#include "chrome/browser/nearby_sharing/contacts/nearby_share_contact_manager.h"
#include "chrome/browser/nearby_sharing/local_device_data/nearby_share_local_device_data_manager.h"
#include "chrome/browser/nearby_sharing/proto/rpc_resources.pb.h"
#include "chrome/browser/ui/webui/nearby_share/public/mojom/nearby_share_settings.mojom.h"
class NearbyShareClient;
class NearbyShareClientFactory;
class NearbyShareLocalDeviceDataManager;
class NearbyShareScheduler;
class PrefService;
namespace device {
class BluetoothAdapter;
} // namespace device
namespace leveldb_proto {
class ProtoDatabaseProvider;
} // namespace leveldb_proto
namespace nearbyshare {
namespace proto {
class ListPublicCertificatesResponse;
} // namespace proto
} // namespace nearbyshare
// An implementation of the NearbyShareCertificateManager that handles
// 1) creating, storing, and uploading local device certificates, as well as
// removing expired/revoked local device certificates;
// 2) downloading, storing, and decrypting public certificates from trusted
// contacts, as well as removing expired public certificates.
//
// This implementation destroys and recreates all private certificates if there
// are any changes to the user's contact list or allowlist, or if there are any
// changes to the local device data, such as the device name.
class NearbyShareCertificateManagerImpl
: public NearbyShareCertificateManager,
public NearbyShareContactManager::Observer,
public NearbyShareLocalDeviceDataManager::Observer {
public:
class Factory {
public:
static std::unique_ptr<NearbyShareCertificateManager> Create(
NearbyShareLocalDeviceDataManager* local_device_data_manager,
NearbyShareContactManager* contact_manager,
PrefService* pref_service,
leveldb_proto::ProtoDatabaseProvider* proto_database_provider,
const base::FilePath& profile_path,
NearbyShareClientFactory* client_factory,
const base::Clock* clock = base::DefaultClock::GetInstance());
static void SetFactoryForTesting(Factory* test_factory);
protected:
virtual ~Factory();
virtual std::unique_ptr<NearbyShareCertificateManager> CreateInstance(
NearbyShareLocalDeviceDataManager* local_device_data_manager,
NearbyShareContactManager* contact_manager,
PrefService* pref_service,
leveldb_proto::ProtoDatabaseProvider* proto_database_provider,
const base::FilePath& profile_path,
NearbyShareClientFactory* client_factory,
const base::Clock* clock) = 0;
private:
static Factory* test_factory_;
};
~NearbyShareCertificateManagerImpl() override;
private:
NearbyShareCertificateManagerImpl(
NearbyShareLocalDeviceDataManager* local_device_data_manager,
NearbyShareContactManager* contact_manager,
PrefService* pref_service,
leveldb_proto::ProtoDatabaseProvider* proto_database_provider,
const base::FilePath& profile_path,
NearbyShareClientFactory* client_factory,
const base::Clock* clock);
// NearbyShareCertificateManager:
std::vector<nearbyshare::proto::PublicCertificate>
GetPrivateCertificatesAsPublicCertificates(
nearby_share::mojom::Visibility visibility) override;
void GetDecryptedPublicCertificate(
NearbyShareEncryptedMetadataKey encrypted_metadata_key,
CertDecryptedCallback callback) override;
void DownloadPublicCertificates() override;
void OnStart() override;
void OnStop() override;
base::Optional<NearbySharePrivateCertificate> GetValidPrivateCertificate(
nearby_share::mojom::Visibility visibility) const override;
void UpdatePrivateCertificateInStorage(
const NearbySharePrivateCertificate& private_certificate) override;
// NearbyShareContactManager::Observer:
void OnContactsDownloaded(
const std::set<std::string>& allowed_contact_ids,
const std::vector<nearbyshare::proto::ContactRecord>& contacts,
uint32_t num_unreachable_contacts_filtered_out) override;
void OnContactsUploaded(bool did_contacts_change_since_last_upload) override;
// NearbyShareLocalDeviceDataManager::Observer:
void OnLocalDeviceDataChanged(bool did_device_name_change,
bool did_full_name_change,
bool did_icon_url_change) override;
// Used by the private certificate expiration scheduler to determine the next
// private certificate expiration time. Returns base::Time::Min() if
// certificates are missing. This function never returns base::nullopt.
base::Optional<base::Time> NextPrivateCertificateExpirationTime();
// Used by the public certificate expiration scheduler to determine the next
// public certificate expiration time. Returns base::nullopt if no public
// certificates are present, and no expiration event is scheduled.
base::Optional<base::Time> NextPublicCertificateExpirationTime();
// Invoked by the private certificate expiration scheduler when an expired
// private certificate needs to be removed or if no private certificates exist
// yet. New certificate(s) will be created, and an upload to the Nearby Share
// server will be requested.
void OnPrivateCertificateExpiration();
void FinishPrivateCertificateRefresh(
scoped_refptr<device::BluetoothAdapter> bluetooth_adapter);
// Invoked by the certificate upload scheduler when private certificates need
// to be converted to public certificates and uploaded to the Nearby Share
// server.
void OnLocalDeviceCertificateUploadRequest();
void OnLocalDeviceCertificateUploadFinished(bool success);
// Invoked by the public certificate expiration scheduler when an expired
// public certificate needs to be removed from storage.
void OnPublicCertificateExpiration();
void OnExpiredPublicCertificatesRemoved(bool success);
// Invoked by the certificate download scheduler when the public certificates
// from trusted contacts need to be downloaded from Nearby Share server via
// the ListPublicCertificates RPC.
void OnDownloadPublicCertificatesRequest(
base::Optional<std::string> page_token,
size_t page_number,
size_t certificate_count);
void OnListPublicCertificatesSuccess(
size_t page_number,
size_t certificate_count,
const nearbyshare::proto::ListPublicCertificatesResponse& response);
void OnListPublicCertificatesFailure(size_t page_number,
size_t certificate_count,
NearbyShareHttpError error);
void OnListPublicCertificatesTimeout(size_t page_number,
size_t certificate_count);
void OnPublicCertificatesAddedToStorage(
base::Optional<std::string> page_token,
size_t page_number,
size_t certificate_count,
bool success);
void FinishDownloadPublicCertificates(bool success,
NearbyShareHttpResult http_result,
size_t page_number,
size_t certificate_count);
base::OneShotTimer timer_;
NearbyShareLocalDeviceDataManager* local_device_data_manager_ = nullptr;
NearbyShareContactManager* contact_manager_ = nullptr;
PrefService* pref_service_ = nullptr;
NearbyShareClientFactory* client_factory_ = nullptr;
const base::Clock* clock_;
std::unique_ptr<NearbyShareCertificateStorage> certificate_storage_;
std::unique_ptr<NearbyShareScheduler>
private_certificate_expiration_scheduler_;
std::unique_ptr<NearbyShareScheduler>
public_certificate_expiration_scheduler_;
std::unique_ptr<NearbyShareScheduler>
upload_local_device_certificates_scheduler_;
std::unique_ptr<NearbyShareScheduler> download_public_certificates_scheduler_;
std::unique_ptr<NearbyShareClient> client_;
base::WeakPtrFactory<NearbyShareCertificateManagerImpl> weak_ptr_factory_{
this};
};
#endif // CHROME_BROWSER_NEARBY_SHARING_CERTIFICATES_NEARBY_SHARE_CERTIFICATE_MANAGER_IMPL_H_
| 44.028846 | 99 | 0.775606 | [
"vector"
] |
0dd917bc0fdf9ee4800b516a13c42f26a27a556e | 1,786 | h | C | tf_model.h | wheelswang/tf | 6202fa83879315e5abb97c06ab2d0cd44f981e90 | [
"Apache-2.0"
] | 10 | 2018-09-21T07:28:40.000Z | 2019-08-22T14:16:26.000Z | tf_model.h | wheelswang/tf | 6202fa83879315e5abb97c06ab2d0cd44f981e90 | [
"Apache-2.0"
] | null | null | null | tf_model.h | wheelswang/tf | 6202fa83879315e5abb97c06ab2d0cd44f981e90 | [
"Apache-2.0"
] | 2 | 2019-05-31T08:22:22.000Z | 2019-07-06T02:33:34.000Z | /*
+----------------------------------------------------------------------+
| TF |
+----------------------------------------------------------------------+
| This source file is subject to version 2.0 of the Apache license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.apache.org/licenses/LICENSE-2.0.html |
| If you did not receive a copy of the Apache2.0 license and are unable|
| to obtain it through the world-wide-web, please send a note to |
| wheelswang@gmail.com so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: wheelswang <wheelswang@gmail.com> |
+----------------------------------------------------------------------+
*/
#ifndef TF_MODEL_H
#define TF_MODEL_H
#define TF_MODEL_PROPERTY_NAME_FIELDS "_fields"
#define TF_MODEL_PROPERTY_NAME_DATA "_data"
#define TF_MODEL_VAR_TYPE_INT 1
#define TF_MODEL_VAR_TYPE_DOUBLE 2
#define TF_MODEL_VAR_TYPE_STRING 3
#define TF_MODEL_VAR_TYPE_BOOL 4
#define TF_MODEL_VAR_TYPE_ARRAY 5
#define TF_MODEL_PROPERTY_NAME_VAR_TYPE_INT "INT"
#define TF_MODEL_PROPERTY_NAME_VAR_TYPE_DOUBLE "DOUBLE"
#define TF_MODEL_PROPERTY_NAME_VAR_TYPE_STRING "STRING"
#define TF_MODEL_PROPERTY_NAME_VAR_TYPE_BOOL "BOOL"
#define TF_MODEL_PROPERTY_NAME_VAR_TYPE_ARRAY "ARR"
#define TF_MODEL_DEFAULT_VALUE_CURRENT_TIMESTAMP 1
#define TF_MODEL_PROPERTY_NAME_DEFAULT_VALUE_CURRENT_TIMESTAMP "CURRENT_TIMESTAMP"
zval * tf_model_constructor(zval *model TSRMLS_DC);
#endif | 43.560976 | 83 | 0.583427 | [
"model"
] |
0dd9acc2097949bb97b95c201a6c9105d65e3aa8 | 6,252 | c | C | src/OGLSuperBible/chapter_12/planets/planets.c | apportable/vogl | 4f05918a14ba3c4efc3cbd8b05b6964e625c751b | [
"MIT"
] | 5 | 2016-07-23T00:33:31.000Z | 2020-12-18T09:40:35.000Z | src/OGLSuperBible/chapter_12/planets/planets.c | LunarG/vogl | 172a86d9c4ee08dccbf4e342caa1ba63f1ea2b0e | [
"MIT"
] | null | null | null | src/OGLSuperBible/chapter_12/planets/planets.c | LunarG/vogl | 172a86d9c4ee08dccbf4e342caa1ba63f1ea2b0e | [
"MIT"
] | null | null | null | // Planets.c
// OpenGL SuperBible, 3rd Edition
// Richard S. Wright Jr.
// rwright@starstonesoftware.com
#include "../../common/openglsb.h" // System and OpenGL Stuff
///////////////////////////////
// Define object names
#define SUN 1
#define MERCURY 2
#define VENUS 3
#define EARTH 4
#define MARS 5
///////////////////////////////////////////////////////////
// Just draw a sphere of some given radius
void DrawSphere(float radius)
{
GLUquadricObj *pObj;
pObj = gluNewQuadric();
gluQuadricNormals(pObj, GLU_SMOOTH);
gluSphere(pObj, radius, 26, 13);
gluDeleteQuadric(pObj);
}
///////////////////////////////////////////////////////////
// Called to draw scene
void RenderScene(void)
{
// Clear the window with current clearing color
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Save the matrix state and do the rotations
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
// Translate the whole scene out and into view
glTranslatef(0.0f, 0.0f, -300.0f);
// Initialize the names stack
glInitNames();
glPushName(0);
// Name and draw the Sun
glColor3f(1.0f, 1.0f, 0.0f);
glLoadName(SUN);
DrawSphere(15.0f);
// Draw Mercury
glColor3f(0.5f, 0.0f, 0.0f);
glPushMatrix();
glTranslatef(24.0f, 0.0f, 0.0f);
glLoadName(MERCURY);
DrawSphere(2.0f);
glPopMatrix();
// Draw Venus
glColor3f(0.5f, 0.5f, 1.0f);
glPushMatrix();
glTranslatef(60.0f, 0.0f, 0.0f);
glLoadName(VENUS);
DrawSphere(4.0f);
glPopMatrix();
// Draw the Earth
glColor3f(0.0f, 0.0f, 1.0f);
glPushMatrix();
glTranslatef(100.0f,0.0f,0.0f);
glLoadName(EARTH);
DrawSphere(8.0f);
glPopMatrix();
// Draw Mars
glColor3f(1.0f, 0.0f, 0.0f);
glPushMatrix();
glTranslatef(150.0f, 0.0f, 0.0f);
glLoadName(MARS);
DrawSphere(4.0f);
glPopMatrix();
// Restore the matrix state
glPopMatrix(); // Modelview matrix
glutSwapBuffers();
}
///////////////////////////////////////////////////////////
// Present the information on which planet/sun was selected
// and displayed
void ProcessPlanet(GLuint id)
{
switch(id)
{
case SUN:
glutSetWindowTitle("You clicked on the Sun!");
break;
case MERCURY:
glutSetWindowTitle("You clicked on Mercury!");
break;
case VENUS:
glutSetWindowTitle("You clicked on Venus!");
break;
case EARTH:
glutSetWindowTitle("You clicked on Earth!");
break;
case MARS:
glutSetWindowTitle("You clicked on Mars!");
break;
default:
glutSetWindowTitle("Nothing was clicked on!");
break;
}
}
///////////////////////////////////////////////////////////
// Process the selection, which is triggered by a right mouse
// click at (xPos, yPos).
#define BUFFER_LENGTH 64
void ProcessSelection(int xPos, int yPos)
{
GLfloat fAspect;
// Space for selection buffer
static GLuint selectBuff[BUFFER_LENGTH];
// Hit counter and viewport storage
GLint hits, viewport[4];
// Setup selection buffer
glSelectBuffer(BUFFER_LENGTH, selectBuff);
// Get the viewport
glGetIntegerv(GL_VIEWPORT, viewport);
// Switch to projection and save the matrix
glMatrixMode(GL_PROJECTION);
glPushMatrix();
// Change render mode
glRenderMode(GL_SELECT);
// Establish new clipping volume to be unit cube around
// mouse cursor point (xPos, yPos) and extending two pixels
// in the vertical and horizontal direction
glLoadIdentity();
gluPickMatrix(xPos, viewport[3] - yPos, 2,2, viewport);
// Apply perspective matrix
fAspect = (float)viewport[2] / (float)viewport[3];
gluPerspective(45.0f, fAspect, 1.0, 425.0);
// Draw the scene
RenderScene();
// Collect the hits
hits = glRenderMode(GL_RENDER);
// If a single hit occurred, display the info.
if(hits == 1)
ProcessPlanet(selectBuff[3]);
else
glutSetWindowTitle("Nothing was clicked on!");
// Restore the projection matrix
glMatrixMode(GL_PROJECTION);
glPopMatrix();
// Go back to modelview for normal rendering
glMatrixMode(GL_MODELVIEW);
}
///////////////////////////////////////////////////////////
// Process the mouse click
void MouseCallback(int button, int state, int x, int y)
{
if(button == GLUT_LEFT_BUTTON && state == GLUT_DOWN)
ProcessSelection(x, y);
}
///////////////////////////////////////////////////////////
// This function does any needed initialization on the
// rendering context.
void SetupRC()
{
// Lighting values
GLfloat dimLight[] = { 0.1f, 0.1f, 0.1f, 1.0f };
GLfloat sourceLight[] = { 0.65f, 0.65f, 0.65f, 1.0f };
GLfloat lightPos[] = { 0.0f, 0.0f, 0.0f, 1.0f };
// Light values and coordinates
glEnable(GL_DEPTH_TEST); // Hidden surface removal
glFrontFace(GL_CCW); // Counter clock-wise polygons face out
glEnable(GL_CULL_FACE); // Do not calculate insides
// Enable lighting
glEnable(GL_LIGHTING);
// Setup and enable light 0
glLightfv(GL_LIGHT0, GL_AMBIENT, dimLight);
glLightfv(GL_LIGHT0,GL_DIFFUSE,sourceLight);
glLightfv(GL_LIGHT0,GL_POSITION,lightPos);
glEnable(GL_LIGHT0);
// Enable color tracking
glEnable(GL_COLOR_MATERIAL);
// Set Material properties to follow glColor values
glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE);
// Gray background
glClearColor(0.60f, 0.60f, 0.60f, 1.0f );
}
///////////////////////////////////////////////////////////
// Window changed size, reset viewport and projection
void ChangeSize(int w, int h)
{
GLfloat fAspect;
// Prevent a divide by zero
if(h == 0)
h = 1;
// Set Viewport to window dimensions
glViewport(0, 0, w, h);
// Calculate aspect ratio of the window
fAspect = (GLfloat)w/(GLfloat)h;
// Set the perspective coordinate system
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
// Field of view of 45 degrees, near and far planes 1.0 and 425
gluPerspective(45.0f, fAspect, 1.0, 425.0);
// Modelview matrix reset
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
///////////////////////////////////////////////////////////
// Entry point of the program
int main(int argc, char* argv[])
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowSize(800,600);
glutCreateWindow("Pick a Planet");
glutReshapeFunc(ChangeSize);
glutMouseFunc(MouseCallback);
glutDisplayFunc(RenderScene);
SetupRC();
glutMainLoop();
return 0;
}
| 22.408602 | 64 | 0.646993 | [
"render",
"object"
] |
0ddb0226a071d191978893c2d1752652cb4df0e4 | 2,162 | h | C | src/structures/difference_atom.h | Ezibenroc/satsolver | 5f58b8f9502090f05cbc2351304a289530b74f63 | [
"MIT"
] | 1 | 2017-11-29T00:46:23.000Z | 2017-11-29T00:46:23.000Z | src/structures/difference_atom.h | Ezibenroc/satsolver | 5f58b8f9502090f05cbc2351304a289530b74f63 | [
"MIT"
] | null | null | null | src/structures/difference_atom.h | Ezibenroc/satsolver | 5f58b8f9502090f05cbc2351304a289530b74f63 | [
"MIT"
] | null | null | null | #ifndef STRUCTURES_DIFFERENCE_ATOM_H
#define STRUCTURES_DIFFERENCE_ATOM_H
#include <string>
#include "structures/extended_formula.h"
namespace theorysolver {
class DifferenceAtom;
typedef std::vector<std::shared_ptr<DifferenceAtom>> DA_list;
class DifferenceAtom {
public:
enum Operator {
LOWER, GREATER, LEQ, GEQ, EQUAL, UNEQUAL
};
// Determines whether this variable represents an atom in the original extended formula.
static bool is_atom_variable(std::string variable);
// Determines whether this literal represents an atom in the original formula.
static bool is_atom_literal(const std::map<int, std::string> &literal_to_name, unsigned int literal);
// If literal represents an atom in the original formula, return its id.
static unsigned int atom_id_from_literal(const std::map<int, std::string> &literal_to_name, unsigned int literal);
// Return a variable name that can be used for representing the atom.
static std::string variable_name_from_atom_id(unsigned long int atom_id);
static int literal_from_atom_id(const std::map<std::string, int> &name_to_variable, unsigned int atom_id);
// If literal represents an atom in the original formula, return this atom.
static std::shared_ptr<DifferenceAtom> SPDA_from_literal(const DA_list &literal_to_DA, std::map<int, std::string> &literal_to_name, unsigned int literal);
// If literal represents an atom in the original extended formula, return this atom.
static std::shared_ptr<DifferenceAtom> SPDA_from_variable(const DA_list &literal_to_DA, std::string variable);
// Add a DifferenceAtom and return its id.
static long unsigned int add_DA(DA_list &literal_to_DA, unsigned int i, unsigned int j, Operator op, int n);
unsigned int i, j;
Operator op;
int n;
std::shared_ptr<satsolver::ExtendedFormula> canonical;
DifferenceAtom(unsigned int i, enum Operator op, int n);
DifferenceAtom(unsigned int i, unsigned int j, enum Operator op, int n);
std::string to_string() const;
};
}
#endif
| 41.576923 | 162 | 0.716004 | [
"vector"
] |
0ddc60db0f410931516e3353c2f902c444f84ddc | 1,108 | h | C | src/net/Epoller.h | adlternative/adlServer | 8e57ea6605c844d520477ef5a44fce32b4e2a55f | [
"Apache-2.0"
] | 2 | 2021-03-01T16:43:41.000Z | 2021-03-03T16:54:25.000Z | src/net/Epoller.h | adlternative/adlServer | 8e57ea6605c844d520477ef5a44fce32b4e2a55f | [
"Apache-2.0"
] | null | null | null | src/net/Epoller.h | adlternative/adlServer | 8e57ea6605c844d520477ef5a44fce32b4e2a55f | [
"Apache-2.0"
] | null | null | null | #ifndef EPOLLER_H
#define EPOLLER_H
#include "../base/timeStamp.h"
#include "Channel.h"
#include "EventLoop.h"
#include <boost/noncopyable.hpp>
#include <map>
#include <memory>
#include <sys/epoll.h>
#include <vector>
namespace adl {
class Epoller : boost::noncopyable {
public:
typedef std::vector<Channel *> ChannelList;
Epoller(const std::shared_ptr<EventLoop> &loop);
~Epoller();
timeStamp poll(int timeoutMs, ChannelList *activeChannels);
void updateChannel(Channel *channel);
void removeChannel(Channel *channel);
bool hasChannel(Channel *channel) const;
private:
void fillActiveChannels(int numEvents, ChannelList *activeChannels) const;
void update(int operation, Channel *channel);
void assertInLoopThread() const;
typedef std::map<int, Channel *> ChannelMap; /* {fd,Channel} */
ChannelMap channels_; /* {fd,Channel}字典 */
int epollfd_;
using EventList = std::vector<struct epoll_event>;
EventList events_; /* Epoller上的监听列表 */
std::weak_ptr<EventLoop> ownerLoop_; /* 拥有本 Epoller 的EventLoop */
};
} // namespace adl
#endif
| 28.410256 | 76 | 0.707581 | [
"vector"
] |
0ddf3b569618b01c91536c033996fce9e02a3296 | 922 | h | C | part-3/assignment-3/recursion.h | pablocarracci/programming-abstractions | d3acb52664dc339a2fdb7d6859f275fb5fb619e3 | [
"MIT"
] | null | null | null | part-3/assignment-3/recursion.h | pablocarracci/programming-abstractions | d3acb52664dc339a2fdb7d6859f275fb5fb619e3 | [
"MIT"
] | null | null | null | part-3/assignment-3/recursion.h | pablocarracci/programming-abstractions | d3acb52664dc339a2fdb7d6859f275fb5fb619e3 | [
"MIT"
] | null | null | null | #pragma once
/* Needed for warmup.cpp */
int factorial(int n);
double iterativePower(int base, int exp);
double power(int base, int exp);
/* Needed for balanced.cpp */
#include <string>
bool isBalanced(std::string str);
std::string operatorsFrom(std::string str);
bool operatorsAreMatched(std::string ops);
/* Needed for karel.cpp */
int countRoutes(int street, int avenue);
/* Needed for sierpinski.cpp */
#include "gtypes.h"
#include "gwindow.h"
void fillBlackTriangle(GWindow& window, GPoint one, GPoint two, GPoint three);
int drawSierpinskiTriangle(GWindow& window,
GPoint one, GPoint two, GPoint three,
int order);
void runInteractiveDemo();
/* Needed for merge.cpp */
#include "queue.h"
#include "vector.h"
Queue<int> merge(Queue<int> a, Queue<int> b);
Queue<int> multiMerge(Vector<Queue<int>>& all);
Queue<int> recMultiMerge(Vector<Queue<int>>& all);
| 27.117647 | 78 | 0.690889 | [
"vector"
] |
0dfbda246f1a956bb3ee2e33827ef1fb4a30ed49 | 1,153 | h | C | Readme_Engine/ModuleRenderer3D.h | dafral/Readme_Engine | b815e1a29bfdf5e1acc9ff133d406ac9f12085d4 | [
"MIT"
] | null | null | null | Readme_Engine/ModuleRenderer3D.h | dafral/Readme_Engine | b815e1a29bfdf5e1acc9ff133d406ac9f12085d4 | [
"MIT"
] | null | null | null | Readme_Engine/ModuleRenderer3D.h | dafral/Readme_Engine | b815e1a29bfdf5e1acc9ff133d406ac9f12085d4 | [
"MIT"
] | null | null | null | #pragma once
#include "Module.h"
#include "Globals.h"
#include "glmath.h"
#include "Light.h"
#include <vector>
class GameObject;
#define MAX_LIGHTS 8
class ModuleRenderer3D : public Module
{
public:
ModuleRenderer3D(Application* app, bool start_enabled = true);
~ModuleRenderer3D();
bool Init();
update_status PreUpdate(float dt);
update_status PostUpdate(float dt);
bool CleanUp();
void OnResize(int width, int height);
void SetVsync(bool vsync);
bool GetVsync();
float GetDepthRange();
void SetDepthRange(float new_range);
void SwitchBFCulling();
void SwitchDepthTest();
void SwitchColor();
void SwitchTexture();
void SwitchLights();
void AddObjectToDraw(GameObject* go) { objects_to_draw.push_back(go); };
public:
Light lights[MAX_LIGHTS];
SDL_GLContext context;
mat3x3 NormalMatrix;
mat4x4 ModelMatrix, ViewMatrix, ProjectionMatrix;
bool grid = true;
bool wireframe = false;
bool points = false;
bool bf_culling = false;
bool depth = true;
bool color = true;
bool texture = true;
bool light = true;
float p_color[3];
private:
bool vsync;
float depth_range;
std::vector<GameObject*> objects_to_draw;
}; | 18.901639 | 73 | 0.740676 | [
"vector"
] |
df013266e216e9f19760c6ecceb7877b287a1dba | 262 | h | C | src/example/renderer.h | JohnnyonFlame/texture-atlas | a5d53c985ae099492c199b7d09386434ad857dfd | [
"BSD-2-Clause"
] | 1 | 2021-03-31T17:11:21.000Z | 2021-03-31T17:11:21.000Z | src/example/renderer.h | JohnnyonFlame/texture-atlas | a5d53c985ae099492c199b7d09386434ad857dfd | [
"BSD-2-Clause"
] | null | null | null | src/example/renderer.h | JohnnyonFlame/texture-atlas | a5d53c985ae099492c199b7d09386434ad857dfd | [
"BSD-2-Clause"
] | null | null | null | #ifndef __RENDERER_H__
#define __RENDERER_H__
namespace Renderer
{
void Init();
void Render(int w, int h);
int LoadScene(const std::string &path, const std::string &fullpath);
void Destroy();
}; // namespace Renderer
#endif /* __RENDERER_H__ */ | 21.833333 | 72 | 0.694656 | [
"render"
] |
df01342e68e0d7c6ede14078b478709324be642d | 2,031 | h | C | Common/DataModel/vtkNonOverlappingAMR.h | cjh1/VTK | 92d6c65803d0835ae41766f68e4504fd8e9de9c0 | [
"BSD-3-Clause"
] | null | null | null | Common/DataModel/vtkNonOverlappingAMR.h | cjh1/VTK | 92d6c65803d0835ae41766f68e4504fd8e9de9c0 | [
"BSD-3-Clause"
] | null | null | null | Common/DataModel/vtkNonOverlappingAMR.h | cjh1/VTK | 92d6c65803d0835ae41766f68e4504fd8e9de9c0 | [
"BSD-3-Clause"
] | null | null | null | /*=========================================================================
Program: Visualization Toolkit
Module: vtkNonOverlappingAMR.h
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
// .NAME vtkNonOverlappingAMR.h -- Non-Overlapping AMR
//
// .SECTION Description
// A concrete instance of vtkUniformGridAMR to store uniform grids at different
// levels of resolution that do not overlap with each other.
//
// .SECTION See Also
// vtkUniformGridAMR vtkNonOverlappingAMR
#ifndef VTKNONOVERLAPPINGAMR_H_
#define VTKNONOVERLAPPINGAMR_H_
#include "vtkCommonDataModelModule.h" // For export macro
#include "vtkUniformGridAMR.h"
class VTKCOMMONDATAMODEL_EXPORT vtkNonOverlappingAMR : public vtkUniformGridAMR
{
public:
static vtkNonOverlappingAMR* New();
vtkTypeMacro(vtkNonOverlappingAMR,vtkUniformGridAMR);
void PrintSelf(ostream& os, vtkIndent indent);
// Description:
// Returns object type (see vtkType.h for definitions).
virtual int GetDataObjectType() {return VTK_NON_OVERLAPPING_AMR; }
// Description:
// Shallow/Deep & CopyStructure.
virtual void ShallowCopy(vtkDataObject *src)
{this->Superclass::ShallowCopy(src);}
virtual void DeepCopy(vtkDataObject *src)
{this->Superclass::DeepCopy(src);}
virtual void CopyStructure(vtkCompositeDataSet* input)
{this->Superclass::CopyStructure(input);}
protected:
vtkNonOverlappingAMR();
virtual ~vtkNonOverlappingAMR();
private:
vtkNonOverlappingAMR(const vtkNonOverlappingAMR&); // Not implemented
void operator=(const vtkNonOverlappingAMR&); // Not implemented
};
#endif /* VTKNONOVERLAPPINGAMR_H_ */
| 33.85 | 80 | 0.705071 | [
"object"
] |
df0213233c4837c03b8353d40850e2434cf606e7 | 6,153 | h | C | PhysX-3.2.4_PC_SDK_Core/Samples/SampleFramework/renderer/src/d3d11/D3D11RendererVariableManager.h | JayStilla/AIEPhysics | d3e45c1bbe44987a96ed12781ef9781fba06bcfa | [
"MIT"
] | null | null | null | PhysX-3.2.4_PC_SDK_Core/Samples/SampleFramework/renderer/src/d3d11/D3D11RendererVariableManager.h | JayStilla/AIEPhysics | d3e45c1bbe44987a96ed12781ef9781fba06bcfa | [
"MIT"
] | null | null | null | PhysX-3.2.4_PC_SDK_Core/Samples/SampleFramework/renderer/src/d3d11/D3D11RendererVariableManager.h | JayStilla/AIEPhysics | d3e45c1bbe44987a96ed12781ef9781fba06bcfa | [
"MIT"
] | null | null | null | // This code contains NVIDIA Confidential Information and is disclosed to you
// under a form of NVIDIA software license agreement provided separately to you.
//
// Notice
// NVIDIA Corporation and its licensors retain all intellectual property and
// proprietary rights in and to this software and related documentation and
// any modifications thereto. Any use, reproduction, disclosure, or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA Corporation is strictly prohibited.
//
// ALL NVIDIA DESIGN SPECIFICATIONS, CODE ARE PROVIDED "AS IS.". NVIDIA MAKES
// NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO
// THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT,
// MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE.
//
// Information and code furnished is believed to be accurate and reliable.
// However, NVIDIA Corporation assumes no responsibility for the consequences of use of such
// information or for any infringement of patents or other rights of third parties that may
// result from its use. No license is granted by implication or otherwise under any patent
// or patent rights of NVIDIA Corporation. Details are subject to change without notice.
// This code supersedes and replaces all information previously supplied.
// NVIDIA Corporation products are not authorized for use as critical
// components in life support devices or systems without express written approval of
// NVIDIA Corporation.
//
// Copyright (c) 2008-2014 NVIDIA Corporation. All rights reserved.
#ifndef D3D11_RENDERER_VARIABLE_MANAGER_H
#define D3D11_RENDERER_VARIABLE_MANAGER_H
#include <RendererConfig.h>
#if defined(RENDERER_ENABLE_DIRECT3D11)
#include <RendererMaterial.h>
#include "D3D11RendererMaterial.h"
#include "D3D11RendererTraits.h"
#include "D3D11RendererUtils.h"
#include "D3Dcompiler.h"
#include <set>
#include <string>
#include <limits>
// Enable to check that binding a shared variable by name actually
// finds the specified shared variable
#define RENDERER_ASSERT_SHARED_VARIABLE_EXISTS 0
namespace SampleRenderer
{
static const PxU32 NUM_SHADER_TYPES = D3DTypes::NUM_SHADER_TYPES + RendererMaterial::NUM_PASSES;
// Reflects, stores, and binds shader variables (constant buffers and
// textures) for the D3D11 renderer.  Constant buffers whose names appear in
// the StringSet passed to the constructor are presumably treated as "shared"
// buffers that can be written by name via setSharedVariable() — confirm
// against the .cpp.
class D3D11RendererVariableManager
{
public:
	enum SharedVariableSize
	{
		// Sentinel for setSharedVariable()'s size argument (the variable's
		// declared size is used instead — see internalSetVariable in the .cpp).
		USE_DEFAULT = 0,
	};

	// How constant-buffer contents are pushed to the GPU.
	enum BindMode
	{
		BIND_MAP = 0,       // update via Map/Unmap
		BIND_SUBRESOURCE    // update via UpdateSubresource (default)
	};

public:
	typedef std::set<std::string> StringSet;

	// cbNames: names of the constant buffers to manage as shared buffers.
	D3D11RendererVariableManager(D3D11Renderer& renderer, StringSet& cbNames, BindMode bindMode = BIND_SUBRESOURCE);
	virtual ~D3D11RendererVariableManager(void);

public:
	// Binds the constant buffers previously loaded for pResource to the
	// pipeline stage identified by shaderType (and material pass).
	void bind(const void* pResource, D3DType shaderType, RendererMaterial::Pass pass = RendererMaterial::NUM_PASSES) const;
	// Writes `size` bytes of `data` at byte `offset` into the named variable
	// of the named shared constant buffer.
	void setSharedVariable(const char* sharedBufferName, const char* variableName, const void* data, UINT size = USE_DEFAULT, UINT offset = 0);
	// Reflects a compiled shader blob and records the material's variables.
	void loadVariables(D3D11RendererMaterial* pMaterial, ID3DBlob* pShader, D3DType shaderType, RendererMaterial::Pass pass = RendererMaterial::NUM_PASSES);
	// Reflects a compiled shader blob and records shared variables for pResource.
	void loadSharedVariables(const void* pResource, ID3DBlob* pShader, D3DType shaderType, RendererMaterial::Pass pass = RendererMaterial::NUM_PASSES);
	// Releases the per-resource variable state recorded by the load* calls.
	void unloadVariables(const void* pResource);

	// Implementation classes, defined in the .cpp.
	class D3D11ConstantBuffer;
	class D3D11DataVariable;
	class D3D11TextureVariable;
	class D3D11SharedVariable;

	typedef std::vector<D3D11ConstantBuffer*> ConstantBuffers;
	typedef std::vector<ID3D11Buffer*> D3DBuffers;
	typedef std::vector<D3D11SharedVariable*> Variables;
	typedef std::vector<D3D11RendererMaterial::Variable*> MaterialVariables;
	typedef D3D11StringKey StringKey;
	typedef const void* ResourceKey;
	typedef PxU32 ShaderTypeKey;
	// (buffer name, variable name) -> shared variable lookup key.
	typedef std::pair<StringKey, StringKey> VariableKey;
	typedef std::map<StringKey, D3D11ConstantBuffer*> NameBuffersMap;
	typedef std::map<VariableKey, D3D11SharedVariable*> NameVariablesMap;
	typedef std::map<ResourceKey, ConstantBuffers> ResourceBuffersMap;
	typedef ConstantBuffers::const_iterator CBIterator;

private:
	// Copying is not supported; assignment is a deliberate no-op stub.
	D3D11RendererVariableManager& operator=(const D3D11RendererVariableManager&)
	{
		return *this;
	}

	D3D11ConstantBuffer* loadBuffer(MaterialVariables& variables,
	                                PxU32& variableBufferSize,
	                                ShaderTypeKey typeKey,
	                                ID3D11ShaderReflectionConstantBuffer* pReflectionBuffer,
	                                const D3D11_SHADER_BUFFER_DESC& sbDesc,
	                                const D3D11_BUFFER_DESC& cbDesc);
	D3D11ConstantBuffer* loadSharedBuffer(ShaderTypeKey typeKey,
	                                      ID3D11ShaderReflectionConstantBuffer* pReflectionBuffer,
	                                      const D3D11_SHADER_BUFFER_DESC& sbDesc,
	                                      const D3D11_BUFFER_DESC& cbDesc);
	void loadConstantVariables(const void* pResource,
	                           ID3DBlob* pShader,
	                           ShaderTypeKey typeKey,
	                           ID3D11ShaderReflection* pReflection,
	                           MaterialVariables* pVariables = NULL,
	                           PxU32* pVariableBufferSize = NULL);
	void loadTextureVariables(D3D11RendererMaterial* pMaterial,
	                          ID3DBlob* pShader,
	                          ShaderTypeKey typeKey,
	                          ID3D11ShaderReflection* pReflection);
	void internalSetVariable(D3D11ConstantBuffer* pBuffer, PxU32 offset, const void* data, PxU32 size);

	void updateVariables(const ConstantBuffers*) const;
	void bindVariables(const ConstantBuffers*, bool bFragment) const;

private:
	D3D11Renderer& mRenderer;
	StringSet mSharedBufferNames;          // buffer names passed in as cbNames
	BindMode mBindMode;
	Variables mVariables;
	NameBuffersMap mNameToSharedBuffer;
	NameVariablesMap mNameToSharedVariables;
	// Per shader-stage/pass map from resource to its constant buffers.
	ResourceBuffersMap mResourceToBuffers[NUM_SHADER_TYPES];
};
} // namespace SampleRenderer
#endif // #if defined(RENDERER_ENABLE_DIRECT3D11)
#endif
| 40.215686 | 153 | 0.7242 | [
"vector"
] |
df194bccd4576ed49ace2f476e6b89d87056ba73 | 17,859 | h | C | local_addons/ofxFaceTracker2/libs/dlib/include/dlib/gui_widgets/fonts_abstract.h | yxcde/RTP_MIT_RECODED | 181deb2e3228484fa9d4ed0e6bf3f4a639d99419 | [
"MIT"
] | 2,695 | 2015-01-01T21:13:47.000Z | 2022-03-31T04:45:32.000Z | local_addons/ofxFaceTracker2/libs/dlib/include/dlib/gui_widgets/fonts_abstract.h | yxcde/RTP_MIT_RECODED | 181deb2e3228484fa9d4ed0e6bf3f4a639d99419 | [
"MIT"
] | 208 | 2015-01-23T19:29:07.000Z | 2022-02-08T02:55:17.000Z | local_addons/ofxFaceTracker2/libs/dlib/include/dlib/gui_widgets/fonts_abstract.h | yxcde/RTP_MIT_RECODED | 181deb2e3228484fa9d4ed0e6bf3f4a639d99419 | [
"MIT"
] | 567 | 2015-01-06T19:22:19.000Z | 2022-03-21T17:01:04.000Z | // Copyright (C) 2005 Davis E. King (davis@dlib.net), Nils Labugt, Keita Mochizuki
// License: Boost Software License See LICENSE.txt for the full license.
#undef DLIB_FONTs_ABSTRACT_
#ifdef DLIB_FONTs_ABSTRACT_
#include "../gui_core.h"
#include <string>
#include "../serialize.h"
#include "../unicode.h"
#include <iostream>
namespace dlib
{
// ----------------------------------------------------------------------------------------
class letter
{
    /*!
        WHAT THIS OBJECT REPRESENTS
            This object represents a letter in a font.  It tells you the nominal
            width of the letter and which pixels form the letter.

        THREAD SAFETY
            const versions of this object are thread safe but if you are going to
            be modifying it then you must serialize access to it.
    !*/
public:

    struct point
    {
        /*!
            WHAT THIS OBJECT REPRESENTS
                This object represents one of the pixels of a letter.

                The origin (i.e. (0,0)) of the coordinate plane is at the left
                side of the letter's baseline.  Also note that y is negative when
                above the baseline and positive below (it is zero on the baseline
                itself).

                The x value is positive going to the right and negative to the left.
                The meaning of a negative x value is that any points with a negative
                x value will overlap with the preceding letter.
        !*/

        point (
        );
        /*!
            ensures
                - This constructor does nothing.  The value of x and y
                  are undefined after its execution.
        !*/

        point (
            signed char x_,
            signed char y_
        );
        /*!
            ensures
                - #x == x_
                - #y == y_
        !*/

        signed char x;  // horizontal offset from the baseline origin (see above)
        signed char y;  // vertical offset; negative above the baseline, positive below
    };

    // ---------------------------------

    letter (
    );
    /*!
        ensures
            - #width() == 0
            - #num_of_points() == 0
    !*/

    letter (
        unsigned short width_,
        unsigned short point_count
    );
    /*!
        ensures
            - #width() == width_
            - #num_of_points() == point_count
    !*/

    ~letter(
    );
    /*!
        ensures
            - any resources used by *this have been freed
    !*/

    const unsigned short width (
    ) const;
    /*!
        ensures
            - returns the width reserved for this letter in pixels.  This is the
              number of pixels that are reserved for this letter between adjoining
              letters.  It isn't necessarily the width of the actual letter itself.
              (for example, you can make a letter with a width less than how wide it
              actually is so that it overlaps with its neighbor letters.)
    !*/

    const unsigned short num_of_points (
    ) const;
    /*!
        ensures
            - returns the number of pixels that make up this letter.
    !*/

    point& operator[] (
        unsigned short i
    );
    /*!
        requires
            - i < num_of_points()
        ensures
            - returns a non-const reference to the ith point in this letter.
    !*/

    const point& operator[] (
        unsigned short i
    ) const;
    /*!
        requires
            - i < num_of_points()
        ensures
            - returns a const reference to the ith point in this letter.
    !*/

    void swap (
        letter& item
    );
    /*!
        ensures
            - swaps *this with item
    !*/

private:
    // restricted functions: letter objects are non-copyable
    letter(letter&);        // copy constructor
    letter& operator=(letter&);    // assignment operator
};
inline void swap (
    letter& a,
    letter& b
) { a.swap(b); }
/*!
    provides a global swap function for letter objects
!*/
// ----------------------------------------------------------------------------------------
void serialize (
const letter& item,
std::ostream& out
);
/*!
provides serialization support for letter objects
!*/
void deserialize (
letter& item,
std::istream& in
);
/*!
provides deserialization support for letter objects
!*/
// ----------------------------------------------------------------------------------------
class font
{
    /*!
        WHAT THIS OBJECT REPRESENTS
            This object defines an interface for a font type.  It provides metrics
            for the font and functions to help you draw strings on a canvas object.

        THREAD SAFETY
            All the functions in this class are thread safe.
    !*/

public:

    virtual bool has_character (
        unichar ch
    )const=0;
    /*!
        ensures
            - if (this font has a glyph for the given character) then
                - returns true
            - else
                - returns false
    !*/
    bool has_character(char ch) const { return this->has_character(zero_extend_cast<unichar>(ch)); }
    bool has_character(wchar_t ch) const { return this->has_character(zero_extend_cast<unichar>(ch)); }
    /* These overloads forward through zero_extend_cast so that char and wchar_t
       map to the correct unichar value even when those types are signed. */

    virtual const letter& operator[] (
        unichar ch
    )const=0;
    /*!
        ensures
            - if (has_character(ch) == true) then
                - returns a letter object that tells you how to draw this character.
            - else
                - returns some default glyph for characters that aren't in this font.
    !*/
    const letter& operator[] (char ch) const { return (*this)[zero_extend_cast<unichar>(ch)]; };
    const letter& operator[] (wchar_t ch) const { return (*this)[zero_extend_cast<unichar>(ch)]; };
    /* These overloads forward through zero_extend_cast so that char and wchar_t
       map to the correct unichar value even when those types are signed. */

    virtual const unsigned long height (
    ) const = 0;
    /*!
        ensures
            - returns the height in pixels of the tallest letter in the font
    !*/

    virtual const unsigned long ascender (
    ) const = 0;
    /*!
        ensures
            - returns the height() minus the number of pixels below the baseline used
              by the letter that hangs the lowest below the baseline.
    !*/

    virtual const unsigned long left_overflow (
    ) const = 0;
    /*!
        ensures
            - returns how far outside and to the left of its width a letter
              from this font may set pixels.  (i.e. how many extra pixels to its
              left may a font use)
    !*/

    virtual const unsigned long right_overflow (
    ) const = 0;
    /*!
        ensures
            - returns how far outside and to the right of its width a letter
              from this font may set pixels.  (i.e. how many extra pixels to its
              right may a font use)
    !*/

    template <typename T, typename traits, typename alloc>
    void compute_size (
        const std::basic_string<T,traits,alloc>& str,
        unsigned long& width,
        unsigned long& height,
        typename std::basic_string<T,traits,alloc>::size_type first = 0,
        typename std::basic_string<T,traits,alloc>::size_type last = std::basic_string<T,traits,alloc>::npos
    ) const;
    /*!
        requires
            - if (last != std::basic_string<T,traits,alloc>::npos) then
                - first <= last
                - last < str.size()
        ensures
            - all characters in str with an index < first are ignored by this
              function.
            - if (last != std::basic_string<T,traits,alloc>::npos) then
                - all characters in str with an index > last are ignored by
                  this function.
            - if (str.size() == 0) then
                - #width == 0
                - #height == 0
            - else
                - #width == sum of the widths of the characters in the widest
                  line in str + left_overflow() + right_overflow().
                - #height == (count(str.begin(),str.end(),'\n')+1)*height()
    !*/

    template <typename T, typename traits, typename alloc, typename pixel_type>
    void draw_string (
        const canvas& c,
        const rectangle& rect,
        const std::basic_string<T,traits,alloc>& str,
        const pixel_type& color = rgb_pixel(0,0,0),
        typename std::basic_string<T,traits,alloc>::size_type first = 0,
        typename std::basic_string<T,traits,alloc>::size_type last = std::basic_string<T,traits,alloc>::npos,
        const rectangle area = rectangle(-infinity,-infinity,infinity,infinity)
    ) const;
    /*!
        requires
            - if (last != std::basic_string<T,traits,alloc>::npos) then
                - first <= last
                - last < str.size()
        ensures
            - all characters in str with an index < first are ignored by this
              function.
            - if (last != std::basic_string<T,traits,alloc>::npos) then
                - all characters in str with an index > last are ignored by
                  this function.
            - if (str.size() == 0) then
                - does nothing
            - else
                - draws str on the given canvas at the position defined by rect.
                  Also uses the given pixel colors for the font color.
                - If the string is too big to fit in rect then the right and
                  bottom sides of it will be clipped to make it fit.
                - only the part of the string that is contained inside the area
                  rectangle will be drawn
    !*/

    template <typename T, typename traits, typename alloc>
    const rectangle compute_cursor_rect (
        const rectangle& rect,
        const std::basic_string<T,traits,alloc>& str,
        unsigned long index,
        typename std::basic_string<T,traits,alloc>::size_type first = 0,
        typename std::basic_string<T,traits,alloc>::size_type last = std::basic_string<T,traits,alloc>::npos
    ) const;
    /*!
        requires
            - if (last != std::basic_string<T,traits,alloc>::npos) then
                - first <= last
                - last < str.size()
        ensures
            - the returned rectangle has a width of 1 and a
              height of this->height().
            - computes the location of the cursor that would sit just before
              the character str[index] if str were drawn on the screen by
              draw_string(rect,str,...,first,last).  The cursor location is
              returned in the form of a rectangle.
            - if (index < first) then
                - the returned cursor will be just before the character str[first].
            - if (last != std::basic_string<T,traits,alloc>::npos && index > last) then
                - the returned cursor will be just after the character str[last]
            - if (str.size() == 0) then
                - the returned cursor will be just at the start of the rectangle where
                  str would be drawn if it wasn't empty.
            - if (index > str.size()-1) then
                - the returned cursor will be just after the character str[str.size()-1]
    !*/

    template <typename T, typename traits, typename alloc>
    const unsigned long compute_cursor_pos (
        const rectangle& rect,
        const std::basic_string<T,traits,alloc>& str,
        long x,
        long y,
        typename std::basic_string<T,traits,alloc>::size_type first = 0,
        typename std::basic_string<T,traits,alloc>::size_type last = std::basic_string<T,traits,alloc>::npos
    ) const;
    /*!
        requires
            - if (last != std::basic_string<T,traits,alloc>::npos) then
                - first <= last
                - last < str.size()
        ensures
            - returns a number idx that has the following properties:
                - if (first < str.size()) then
                    - first <= idx
                - else
                    - idx == str.size()
                - if (last != std::basic_string<T,traits,alloc>::npos) then
                    - idx <= last + 1
                - compute_cursor_rect(rect,str,idx,first,last) == the cursor
                  position that is closest to the pixel (x,y)
    !*/

private:
    // restricted functions: font objects are non-copyable
    font(font&);        // copy constructor
    font& operator=(font&);    // assignment operator
};
// ----------------------------------------------------------------------------------------
class default_font : public font
{
    /*!
        WHAT THIS OBJECT REPRESENTS
            This is an implementation of the Helvetica 12 point font.

        THREAD SAFETY
            It is safe to call get_font() and access the returned font from any
            thread and no synchronization is needed as long as it is called
            after the main() function has been entered.
    !*/
public:

    static const shared_ptr_thread_safe<font> get_font(
    );
    /*!
        ensures
            - returns an instance of this font.
        throws
            - std::bad_alloc
                This exception is thrown if there is a problem gathering the needed
                memory for the font object.
    !*/

private:
    // restricted functions: construction is private, so instances can only be
    // obtained via get_font()
    default_font(); // normal constructor
    default_font(default_font&); // copy constructor
    default_font& operator=(default_font&); // assignment operator
};
// ----------------------------------------------------------------------------------------
class bdf_font : public font
{
    /*!
        WHAT THIS OBJECT REPRESENTS
            This is a font object that is capable of loading BDF (Glyph
            Bitmap Distribution Format) font files.

        THREAD SAFETY
            If you only access this object via the functions in the parent class font
            then this object is thread safe.  But if you need to call any of the
            functions introduced in this derived class then you need to serialize
            access to this object while you call these functions.
    !*/
public:

    bdf_font(
        long default_char = -1
    );
    /*!
        ensures
            - for all x:
                - #has_character(x) == false
                  (i.e. this font starts out empty.  You have to call read_bdf_file()
                  to load it with data)
            - if (default_char == -1) then
                - the letter returned by (*this)[ch] for values of
                  ch where has_character(ch) == false will be the
                  default glyph defined in the bdf file.
            - else
                - the letter returned by (*this)[ch] for values of
                  ch where has_character(ch) == false will be the
                  letter (*this)[default_char].
    !*/

    long read_bdf_file(
        std::istream& in,
        unichar max_enc,
        unichar min_enc = 0
    );
    /*!
        ensures
            - attempts to read the font data from the given input stream into
              *this.  The input stream is expected to contain a valid BDF file.
            - reads in characters with encodings in the range min_enc to max_enc
              into this font.  All characters in the font file outside this range
              are ignored.
            - returns the number of characters loaded into this font from the
              given input stream.
    !*/

    void adjust_metrics();
    /*!
        ensures
            - Computes metrics based on actual glyphs loaded, instead of using
              the values in the bdf file header.  (May be useful if loading glyphs
              from more than one file or a small part of a file.)
    !*/

private:
    // restricted functions: bdf_font objects are non-copyable
    bdf_font( bdf_font& );        // copy constructor
    bdf_font& operator=( bdf_font& );    // assignment operator
};
// ----------------------------------------------------------------------------------------
const shared_ptr_thread_safe<font> get_native_font(
);
/*!
ensures
- returns a font object that uses the local font
!*/
// ----------------------------------------------------------------------------------------
}
#endif // DLIB_FONTs_ABSTRACT_
| 36.225152 | 113 | 0.490789 | [
"object"
] |
df4910ac490f505e6a356c870d183d28ded158df | 1,428 | h | C | gcc-build/i686-pc-linux-gnu/libjava/java/util/PropertyPermission.h | giraffe/jrate | 764bbf973d1de4e38f93ba9b9c7be566f1541e16 | [
"Xnet",
"Linux-OpenIB",
"X11"
] | 1 | 2021-06-15T05:43:22.000Z | 2021-06-15T05:43:22.000Z | gcc-build/i686-pc-linux-gnu/libjava/java/util/PropertyPermission.h | giraffe/jrate | 764bbf973d1de4e38f93ba9b9c7be566f1541e16 | [
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null | gcc-build/i686-pc-linux-gnu/libjava/java/util/PropertyPermission.h | giraffe/jrate | 764bbf973d1de4e38f93ba9b9c7be566f1541e16 | [
"Xnet",
"Linux-OpenIB",
"X11"
] | null | null | null | // DO NOT EDIT THIS FILE - it is machine generated -*- c++ -*-
#ifndef __java_util_PropertyPermission__
#define __java_util_PropertyPermission__
#pragma interface
#include <java/security/BasicPermission.h>
#include <gcj/array.h>
extern "Java"
{
namespace java
{
namespace security
{
class PermissionCollection;
class Permission;
}
}
};
// CNI proxy for java.util.PropertyPermission (machine generated — see the
// notice at the top of this header).
class ::java::util::PropertyPermission : public ::java::security::BasicPermission
{
public:
  PropertyPermission (::java::lang::String *, ::java::lang::String *);
private:
  // Parses the action string into the `actions` bit field — presumably
  // "read"/"write"; confirm against java/util/PropertyPermission.java.
  void setActions (::java::lang::String *);
  // Java serialization hooks (paired with serialPersistentFields below).
  void readObject (::java::io::ObjectInputStream *);
  void writeObject (::java::io::ObjectOutputStream *);
public:
  jboolean implies (::java::security::Permission *);
  jboolean equals (::java::lang::Object *);
  jint hashCode ();
  ::java::lang::String *getActions ();
  ::java::security::PermissionCollection *newPermissionCollection ();
private:
  static JArray< ::java::io::ObjectStreamField *> *serialPersistentFields;
  static const jlong serialVersionUID = 885438825399942851LL;
  // Action bit values — presumably combined into `actions` below.
  static const jint READ = 1L;
  static const jint WRITE = 2L;
public: // actually package-private
  jint __attribute__((aligned(__alignof__( ::java::security::BasicPermission )))) actions;
private:
  static JArray< ::java::lang::String *> *actionStrings;
public:
  static ::java::lang::Class class$;
};
| 27.461538 | 91 | 0.712885 | [
"object"
] |
df5588eb003a61a181aa1e26a09a0960082eef4a | 4,004 | h | C | compiler/parser/token.h | cosocaf/Pick | e21a3ff204e8cf60a40985ebda73d90f087205a9 | [
"MIT"
] | 6 | 2021-05-17T14:03:18.000Z | 2021-08-06T11:43:12.000Z | compiler/parser/token.h | cosocaf/Pick | e21a3ff204e8cf60a40985ebda73d90f087205a9 | [
"MIT"
] | null | null | null | compiler/parser/token.h | cosocaf/Pick | e21a3ff204e8cf60a40985ebda73d90f087205a9 | [
"MIT"
] | null | null | null | #ifndef PICKC_PARSER_TOKEN_H_
#define PICKC_PARSER_TOKEN_H_
#include <vector>
#include <string>
#include <fstream>
#include "utils/result.h"
#include "utils/option.h"
namespace pickc::parser
{
enum struct TokenKind
{
DefKeyword, // def
MutKeyword, // mut
FnKeyword, // fn
ClassKeyword, // class
TypeKeyword, // type
ExternKeyword, // extern
ImportKeyword, // import
ReturnKeyword, // return
IfKeyword, // if
ElseKeyword, // else
WhileKeyword, // while
ForKeyword, // for
LoopKeyword, // loop
BreakKeyword, // break
ContinueKeyword, // continue
PubKeyword, // pub
PriKeyword, // pri
ConstructKeyword, // construct
DestructKeyword, // destruct
I8Keyword, // i8
I16Keyword, // i16
I32Keyword, // i32
I64Keyword, // i64
U8Keyword, // u8
U16Keyword, // u16
U32Keyword, // u32
U64Keyword, // u64
F32Keyword, // f32
F64Keyword, // f64
VoidKeyword, // void
BoolKeyword, // bool
CharKeyword, // char
PtrKeyword, // ptr
LParen, // (
RParen, // )
LBrace, // {
RBrase, // }
LBracket, // [
RBracket, // ]
Semicolon, // ;
Plus, // +
Minus, // -
Asterisk, // *
Slash, // /
Percent, // %
Inc, // ++
Dec, // --
BitAnd, // &
BitOr, // |
BitXor, // ^
BitNot, // ~
LShift, // <<
RShift, // >>
Asign, // =
AddAsign, // +=
SubAsign, // -=
MulAsign, // *=
DivAsign, // /=
ModAsign, // %=
BitAndAsign, // &=
BitOrAsign, // |=
BitXorAsign, // ^=
LShiftAsign, // <<=
RShiftAsign, // >>=
LogicalAnd, // &&
LogicalOr, // ||
LogicalNot, // !
Equal, // ==
NotEqual, // !=
LessThan, // <
LessEqual, // <=
GreaterThan, // >
GreaterEqual, // >=
Dot, // .
Range, // ..
Colon, // :
Scope, // ::
Comma, // ,
Copy, // @
LineComment, // //
Integer, // [0-9]+
I8, // [0-9]+i8
I16, // [0-9]+i16
I32, // [0-9]+i32
I64, // [0-9]+i64
U8, // [0-9]+u8
U16, // [0-9]+u16
U32, // [0-9]+u32
U64, // [0-9]+u64
Float, // [0-9].[0-9]
F32, // [0-9].[0-9]+f32
F64, // [0-9].[0-9]+f64
Bool, // true|false
Null, // null
Char, // 'x'
String, // "xxx"
This, // this
Identify, // xxx
};
  // A single lexical token together with its location in the source file.
  struct Token
  {
    TokenKind kind;       // category of this token
    std::string value;    // raw source text of the token
    size_t line;          // source line the token appears on
    size_t letter;        // character position within that line (base index not
                          // established in this header — see Tokenizer.cpp)
    Token(TokenKind kind, const std::string& value, size_t line, size_t letter);
    Token(const Token& token);
    Token(Token&& token);
    Token& operator=(const Token& token);
    Token& operator=(Token&& token);
  };
  // The result of tokenizing one file: the token stream plus the raw file
  // contents, kept so diagnostics can quote the offending source line.
  struct TokenSequence
  {
    std::string file;                     // path of the tokenized file
    std::vector<std::string> rawFileData; // raw file contents (presumably one
                                          // entry per line — see toOutputString)
    std::vector<Token> tokens;            // tokens in source order
    // Renders the given source line for diagnostic output.
    std::string toOutputString(size_t line) const;
  };
  // Reads a source file from disk and converts it into a TokenSequence.
  class Tokenizer
  {
    TokenSequence sequence;           // output accumulated during tokenize()
    std::vector<std::string> errors;  // error messages collected while scanning
    std::ifstream stream;             // input file stream opened by the constructor
    bool done;                        // NOTE(review): presumably guards against
                                      // tokenizing twice — confirm in the .cpp
    // Attempts to extract the next token from str, tracking line/letter
    // positions.  Exact Ok(None)/Err semantics are defined in the .cpp.
    Result<Option<Token>, std::string> findToken(std::string& str, size_t& line, size_t& letter);
  public:
    // Opens the file at the given path for tokenizing.
    Tokenizer(const std::string& path);
    // Tokenizes the whole file; returns the token sequence on success, or the
    // list of accumulated error messages on failure.
    Result<TokenSequence, std::vector<std::string>> tokenize();
  };
}
#endif // PICKC_PARSER_TOKEN_H_ | 27.424658 | 97 | 0.433816 | [
"vector"
] |
df5710721415ecc552e508c0286c5300fdcb5b70 | 1,266 | h | C | include/arch/x86/msr.h | chen-png/zephyr | 11e91a83c1f6923a0078834404e238e5b7df3693 | [
"Apache-2.0"
] | 2 | 2019-11-20T19:45:53.000Z | 2019-11-20T19:46:12.000Z | include/arch/x86/msr.h | chen-png/zephyr | 11e91a83c1f6923a0078834404e238e5b7df3693 | [
"Apache-2.0"
] | 2 | 2019-03-19T13:49:56.000Z | 2019-03-20T10:23:41.000Z | include/arch/x86/msr.h | chen-png/zephyr | 11e91a83c1f6923a0078834404e238e5b7df3693 | [
"Apache-2.0"
] | 5 | 2019-05-09T10:32:26.000Z | 2020-01-14T01:05:55.000Z | /*
* Copyright (c) 2019 Intel Corp.
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_MSR_H_
#define ZEPHYR_INCLUDE_ARCH_X86_MSR_H_
/*
* Model specific registers (MSR). Access with z_x86_msr_read/write().
*/
#define X86_SPEC_CTRL_MSR 0x0048
#define X86_SPEC_CTRL_MSR_IBRS BIT(0)
#define X86_SPEC_CTRL_MSR_SSBD BIT(2)
#define X86_APIC_BASE_MSR 0x001b
#define X86_APIC_BASE_MSR_X2APIC BIT(10)
#define X86_MTRR_DEF_TYPE_MSR 0x02ff
#define X86_MTRR_DEF_TYPE_MSR_ENABLE BIT(11)
#define X86_X2APIC_BASE_MSR 0x0800 /* 0x0800-0x0BFF -> x2APIC */
#ifndef _ASMLANGUAGE
#ifdef __cplusplus
extern "C" {
#endif
/*
* z_x86_msr_write() is shared between 32- and 64-bit implementations, but
* due to ABI differences with long return values, z_x86_msr_read() is not.
*/
static inline void z_x86_msr_write(unsigned int msr, u64_t data)
{
u32_t high = data >> 32;
u32_t low = data & 0xFFFFFFFF;
__asm__ volatile ("wrmsr" : : "c"(msr), "a"(low), "d"(high));
}
#ifndef CONFIG_X86_LONGMODE
/* Read a 64-bit value from a model specific register.
 *
 * The "=A" constraint collects the RDMSR result from the EDX:EAX pair,
 * which is why this helper is only built for 32-bit mode (see the
 * CONFIG_X86_LONGMODE guard around it).
 */
static inline u64_t z_x86_msr_read(unsigned int msr)
{
	u64_t value;

	__asm__ volatile ("rdmsr" : "=A"(value) : "c"(msr));

	return value;
}
#endif
#ifdef __cplusplus
}
#endif
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_MSR_H_ */
| 20.419355 | 75 | 0.738547 | [
"model"
] |
df624a98c42b0e361190a0f2758f86b8061c016d | 4,175 | h | C | demos/TaakLabyrinth/src/Scene/game_scene.h | kodjodegbey/gba-sprite-engine | 3f480a824320dee01fb94d98b75761a591b86327 | [
"MIT"
] | null | null | null | demos/TaakLabyrinth/src/Scene/game_scene.h | kodjodegbey/gba-sprite-engine | 3f480a824320dee01fb94d98b75761a591b86327 | [
"MIT"
] | null | null | null | demos/TaakLabyrinth/src/Scene/game_scene.h | kodjodegbey/gba-sprite-engine | 3f480a824320dee01fb94d98b75761a591b86327 | [
"MIT"
] | null | null | null | //
// Created by Wouter Groeneveld on 28/07/18.
//
#ifndef GBA_SPRITE_ENGINE_PROJECT_GAME_H
#define GBA_SPRITE_ENGINE_PROJECT_GAME_H
#include <libgba-sprite-engine/sprites/sprite.h>
#include <libgba-sprite-engine/sprites/affine_sprite.h>
#include <libgba-sprite-engine/scene.h>
#include <libgba-sprite-engine/background/background.h>
#include "../gameMap/GameMap.h"
#include "../Model/BonusModel.h"
#include "../Model/BomModel.h"
#include "list"
#include "../Model/speler.h"
#include "start_scene.h"
#include <iostream>
using namespace std ;
#define factor 8
class Game : public Scene {
private:
std::unique_ptr<Sprite> bomSprite1;
std::unique_ptr<Sprite> bomSprite2;
std::unique_ptr<Sprite> bomSprite3;
std::unique_ptr<Sprite> bonusSprite1;
std::unique_ptr<Sprite> bonusSprite2;
std::unique_ptr<Sprite> bonusSprite3;
std::unique_ptr<Sprite> bonusSprite4;
std::unique_ptr<Sprite> box;
std::unique_ptr<Sprite> spelerSprite;
std::unique_ptr<Background> bg;
GameMap gameMap ;
Richting richting;
int score =100 ;
bool dood=false;
int spelerX=0;
int spelerY = 0;
int levens=3;
int scrollX;
int scrollY;
int aantalMunten=4;
int keuzeSpeler=0;
int boxGeraakt =1;
bool tegengenMunt3 =false;
bool isText=false;
public:
Game(std::shared_ptr<GBAEngine> engine,int keuze) : Scene(engine),keuzeSpeler(keuze) {}
std::vector<Sprite *> sprites() override;
std::vector<Background *> backgrounds() override;
void load() override;
void tick(u16 keys) override;
void resetGame() ;
void tegenBom(){
if(spelerSprite->collidesWith(*bomSprite1)){
bomSprite1->moveTo(-100,10);
if(score>0){
score-= (rand()%(15 + 1) );
}
if(levens>0){
levens--;
}else{
dood=true;
}
}else if(spelerSprite->collidesWith(*bomSprite2)){
bomSprite2->moveTo(-150,10);
if(score>0){
score-= (rand()%(15 + 1) );
}
if(levens>0){
levens--;
}else{
dood=true;
}
}else if(spelerSprite->collidesWith(*bomSprite3)){
bomSprite3->moveTo(-200,10);
if(score>0){
score-= (rand()%(15 + 1) );
}
if(levens>0){
levens--;
}else{
dood=true;
}
}
}
void tegenMunt(){
if(spelerSprite->collidesWith(*bonusSprite1)){
bonusSprite1->moveTo(-100,20);
score+= (rand()%(15 + 1) );
aantalMunten -=1;
}else if(spelerSprite->collidesWith(*bonusSprite2)){
bonusSprite2->moveTo(-100,40);
score+= (rand()%(15 + 1) );
aantalMunten -=1;
}else if(spelerSprite->collidesWith(*bonusSprite3)){
bonusSprite3->moveTo(-100,60);
score+= (rand()%(15 + 1) );
tegengenMunt3=true;
aantalMunten -=1;
}else if(spelerSprite->collidesWith(*bonusSprite4)){
bonusSprite4->moveTo(-100,80);
score+= (rand()%(15 + 1) );
aantalMunten -=1;
}
}
void restart(){
bomSprite1->moveTo(16,45);
bomSprite2->moveTo(140,40);
bomSprite3->moveTo(115,45);
bonusSprite1->moveTo(120,140);
bonusSprite2->moveTo(200,115);
bonusSprite3->moveTo(68,30);
bonusSprite4->moveTo(50,120);
spelerSprite->moveTo(100,100);
spelerX=spelerSprite->getX();
spelerY=spelerSprite->getY();
box->moveTo(200,140);
}
void spelerOpScherm(){
if(spelerSprite->getX()<=8 ){
spelerX=9;
}else if(spelerSprite->getX()>=GBA_SCREEN_WIDTH-16){
spelerX=GBA_SCREEN_WIDTH-20;
}
if(spelerSprite->getY()<=22 ){
spelerY=24;
}else if(spelerSprite->getY()>=GBA_SCREEN_HEIGHT-16){
spelerY=GBA_SCREEN_HEIGHT-32;
}
}
void tegenBox();
};
#endif //GBA_SPRITE_ENGINE_FLYING_STUFF_SCENE_H
| 26.592357 | 91 | 0.56479 | [
"vector",
"model"
] |
df629c08b5e7f0f1ad3883d507bed54c28de8dc5 | 16,357 | h | C | tmc3/geometry_octree.h | kento-Y/mpeg-pcc-tmc13 | bff871d2a6d85568420d0c059b21bfedfb107687 | [
"BSD-3-Clause"
] | 110 | 2019-07-03T08:15:18.000Z | 2022-03-23T07:43:45.000Z | tmc3/geometry_octree.h | kento-Y/mpeg-pcc-tmc13 | bff871d2a6d85568420d0c059b21bfedfb107687 | [
"BSD-3-Clause"
] | null | null | null | tmc3/geometry_octree.h | kento-Y/mpeg-pcc-tmc13 | bff871d2a6d85568420d0c059b21bfedfb107687 | [
"BSD-3-Clause"
] | 44 | 2019-07-18T12:20:38.000Z | 2022-03-18T14:29:43.000Z | /* The copyright in this software is being made available under the BSD
* Licence, included below. This software may be subject to other third
* party and contributor rights, including patent rights, and no such
* rights are granted under this licence.
*
* Copyright (c) 2017-2018, ISO/IEC
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of the ISO/IEC nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <cstdint>
#include "DualLutCoder.h"
#include "PCCMath.h"
#include "PCCPointSet.h"
#include "entropy.h"
#include "geometry_params.h"
#include "hls.h"
#include "quantization.h"
#include "ringbuf.h"
#include "tables.h"
namespace pcc {
//============================================================================
const int MAX_NUM_DM_LEAF_POINTS = 2;
//============================================================================
// A single node of the geometry octree, as carried on the encoder/decoder
// traversal fifo.
struct PCCOctree3Node {
  // 3D position of the current node's origin (local x,y,z = 0).
  Vec3<int32_t> pos;

  // Range of point indexes spanned by node
  uint32_t start;
  uint32_t end;

  // The current node's number of siblings plus one.
  // ie, the number of child nodes present in this node's parent.
  uint8_t numSiblingsPlus1;

  // The occupancy map used describing the current node and its siblings.
  uint8_t siblingOccupancy;

  // Indicates that the current node qualifies for IDCM (direct coding mode).
  bool idcmEligible;

  // The qp used for geometry quantisation.
  // NB: this qp value always uses a step size doubling interval of 8 qps
  int8_t qp;

  // Index of the laser associated with this node (angular coding mode);
  // 255 marks "not yet assigned".
  uint8_t laserIndex = 255;
};
//============================================================================
// Planar-mode state for a single octree node.
struct OctreeNodePlanar {
  // planar; first bit for x, second bit for y, third bit for z
  // planarPossible: axes that may still be planar (all set initially)
  uint8_t planarPossible = 7;
  // planePosBits: per-axis plane position (0 = low plane, 1 = high plane)
  uint8_t planePosBits = 0;
  // planarMode: per-axis flag indicating the node is planar in that axis
  uint8_t planarMode = 0;
};
//---------------------------------------------------------------------------
int neighPatternFromOccupancy(int pos, int occupancy);
//---------------------------------------------------------------------------
uint8_t mapGeometryOccupancy(uint8_t occupancy, uint8_t neighPattern);
uint8_t mapGeometryOccupancyInv(uint8_t occupancy, uint8_t neighPattern);
//---------------------------------------------------------------------------
// Determine if a node is a leaf node based on size.
// A node with all dimension = 0 is a leaf node.
// NB: some dimensions may be less than zero if coding of that dimension
// has already terminated.
inline bool
isLeafNode(const Vec3<int>& sizeLog2)
{
  // A node is a leaf only when coding has terminated in all three axes.
  for (int axis = 0; axis < 3; axis++) {
    if (sizeLog2[axis] > 0)
      return false;
  }
  return true;
}
//---------------------------------------------------------------------------
// Generates an idcm enable mask
uint32_t mkIdcmEnableMask(const GeometryParameterSet& gps);
//---------------------------------------------------------------------------
// Determine if direct coding is permitted.
// If tool is enabled:
// - Block must not be near the bottom of the tree
// - The parent / grandparent are sparsely occupied
inline bool
isDirectModeEligible(
  int intensity,
  int nodeSizeLog2,
  int nodeNeighPattern,
  const PCCOctree3Node& node,
  const PCCOctree3Node& child)
{
  // Direct coding never applies to small nodes, whatever the mode.
  const bool nodeBigEnough = nodeSizeLog2 >= 2;

  switch (intensity) {
  case 1:
    // Strictest criterion: isolated child with a sparse parent/grandparent.
    return nodeBigEnough && nodeNeighPattern == 0
      && child.numSiblingsPlus1 == 1 && node.numSiblingsPlus1 <= 2;

  case 2:
    // Only require an unoccupied neighbourhood.
    return nodeBigEnough && nodeNeighPattern == 0;

  case 3:
    // This is basically unconditionally enabled.
    // If a node is that is IDCM-eligible is not coded with IDCM and has only
    // one child, then it is likely that the child would also not be able to
    // be coded with IDCM (eg, it still contains > 2 unique points).
    return nodeBigEnough && child.numSiblingsPlus1 > 1;

  default:
    // intensity == 0 (disabled) or any unrecognised value.
    return false;
  }
}
//---------------------------------------------------------------------------
// Select the neighbour pattern reduction table according to GPS config.
inline const uint8_t*
neighPattern64toR1(const GeometryParameterSet& gps)
{
  // A larger neighbour availability volume permits the finer 9-entry
  // reduction table; otherwise fall back to the 6-entry table.
  return (gps.neighbour_avail_boundary_log2_minus1 > 0) ? kNeighPattern64to9
                                                        : kNeighPattern64to6;
}
//---------------------------------------------------------------------------
// Bank of adaptive binary context models used for bitwise occupancy coding.
// Adjacent context indices share a model: the index is divided by
// 2**kCtxFactorShift, so the 256-entry array is addressed in groups of 8.
struct CtxModelOctreeOccupancy {
  AdaptiveBitModelFast contexts[256];
  static const int kCtxFactorShift = 3;

  // Map a raw context index to its (shared) context model.
  AdaptiveBitModelFast& operator[](int idx)
  {
    return contexts[idx >> kCtxFactorShift];
  }
};
//---------------------------------------------------------------------------
// Encapsulates the derivation of ctxIdx for occupancy coding.
// Encapsulates the derivation of ctxIdx for occupancy coding.
class CtxMapOctreeOccupancy {
public:
  // Context index state, one table per occupancy bit (b0..b7).
  // NB: the differing table lengths follow the amount of context available
  // when each bit is coded -- see the implementation for the derivation.
  struct CtxIdxMap {
    uint8_t b0[9];
    uint8_t b1[18];
    uint8_t b2[35];
    uint8_t b3[68];
    uint8_t b4[69];
    uint8_t b5[134];
    uint8_t b6[135];
    uint8_t b7[136];
  };

  CtxMapOctreeOccupancy();
  CtxMapOctreeOccupancy(const CtxMapOctreeOccupancy&);
  CtxMapOctreeOccupancy(CtxMapOctreeOccupancy&&);
  CtxMapOctreeOccupancy& operator=(const CtxMapOctreeOccupancy&);
  CtxMapOctreeOccupancy& operator=(CtxMapOctreeOccupancy&&);

  // Access the context index table for a given occupancy bit position.
  const uint8_t* operator[](int bit) const { return b[bit]; }

  uint8_t* operator[](int bit) { return b[bit]; }

  // return *ctxIdx and update *ctxIdx according to bit
  static uint8_t evolve(bool bit, uint8_t* ctxIdx);

private:
  // Owned storage for all of the per-bit tables.
  std::unique_ptr<CtxIdxMap> map;

  // Pointers to the start of each per-bit table inside |map|.
  std::array<uint8_t*, 8> b;
};
//----------------------------------------------------------------------------
inline uint8_t
CtxMapOctreeOccupancy::evolve(bool bit, uint8_t* ctxIdx)
{
  // The caller codes with the pre-update context index, so remember it.
  const uint8_t previous = *ctxIdx;

  // Move the state towards 255 on a one, towards 0 on a zero.  The step
  // size depends on how close the state already is to the boundary.
  if (bit) {
    *ctxIdx = previous + kCtxMapOctreeOccupancyDelta[(255 - previous) >> 4];
  } else {
    *ctxIdx = previous - kCtxMapOctreeOccupancyDelta[previous >> 4];
  }

  return previous;
}
//---------------------------------------------------------------------------
// generate an array of node sizes according to subsequent qtbt decisions
std::vector<Vec3<int>> mkQtBtNodeSizeList(
const GeometryParameterSet& gps,
const QtBtParameters& qtbt,
const GeometryBrickHeader& gbh);
//---------------------------------------------------------------------------
inline Vec3<int>
qtBtChildSize(const Vec3<int>& nodeSizeLog2, const Vec3<int>& childSizeLog2)
{
  // For each split axis, the child occupies the bit at its own size; axes
  // that are not split in this qt/bt step contribute nothing.
  Vec3<int> childBitPos = 0;
  for (int axis = 0; axis < 3; axis++) {
    const bool axisIsSplit = childSizeLog2[axis] != nodeSizeLog2[axis];
    childBitPos[axis] = axisIsSplit ? (1 << childSizeLog2[axis]) : 0;
  }
  return childBitPos;
}
//---------------------------------------------------------------------------
inline int
nonSplitQtBtAxes(const Vec3<int>& nodeSizeLog2, const Vec3<int>& childSizeLog2)
{
  // Build a 3-bit mask with bit 2 = x, bit 1 = y, bit 0 = z; a set bit
  // marks an axis that is NOT split at this qt/bt step.
  int axesMask = 0;
  for (int axis = 0; axis < 3; axis++) {
    if (nodeSizeLog2[axis] == childSizeLog2[axis])
      axesMask |= 4 >> axis;
  }
  return axesMask;
}
//============================================================================
// Scales quantized positions used internally in angular coding.
//
// NB: this is not used to scale output positions since generated positions
// are not clipped to node boundaries.
//
// NB: there are two different position representations used in the codec:
// ppppppssssss = original position
// ppppppqqqq00 = pos, (quantisation) node size aligned -> use scaleNs()
// 00ppppppqqqq = pos, effective node size aligned -> use scaleEns()
// where p are unquantised bits, q are quantised bits, and 0 are zero bits.
class OctreeAngPosScaler {
  QuantizerGeom _quant;
  Vec3<uint32_t> _mask;
  int _qp;

public:
  // \param qp            geometry qp (0 => lossless, no scaling performed)
  // \param quantMaskBits per-axis mask selecting the quantised low-order
  //                      bits of a node-size aligned position
  //
  // NB: the initialiser list is written in member declaration order
  // (_quant, _mask, _qp); members are always initialised in declaration
  // order, so a mismatched list is misleading and triggers -Wreorder.
  OctreeAngPosScaler(int qp, const Vec3<uint32_t>& quantMaskBits)
    : _quant(qp), _mask(quantMaskBits), _qp(qp)
  {}

  // Scale an effectiveNodeSize aligned position as the k-th position component.
  int scaleEns(int k, int pos) const;

  // Scale an effectiveNodeSize aligned position.
  Vec3<int> scaleEns(Vec3<int> pos) const;

  // Scale a NodeSize aligned position.
  Vec3<int> scaleNs(Vec3<int> pos) const;
};
//----------------------------------------------------------------------------
inline int
OctreeAngPosScaler::scaleEns(int k, int pos) const
{
  // qp == 0 => lossless coding: positions are already at full precision.
  if (!_qp)
    return pos;

  int shiftBits = QuantizerGeom::qpShift(_qp);
  // Split the position into its quantised low-order part (selected by the
  // node-size mask, realigned to effective-node-size by the shift) and the
  // remaining unquantised high-order part.
  int lowPart = pos & (_mask[k] >> shiftBits);
  int highPart = pos ^ lowPart;
  // Only the quantised part is scaled; the high part is restored to
  // node-size alignment by shifting back up.
  int lowPartScaled = _quant.scale(lowPart);

  return (highPart << shiftBits) + lowPartScaled;
}
//----------------------------------------------------------------------------
inline Vec3<int32_t>
OctreeAngPosScaler::scaleEns(Vec3<int32_t> pos) const
{
  // Lossless coding: nothing to scale.
  if (!_qp)
    return pos;

  // Scale each position component independently.
  Vec3<int32_t> scaled = pos;
  for (int axis = 0; axis < 3; axis++)
    scaled[axis] = scaleEns(axis, pos[axis]);

  return scaled;
}
//----------------------------------------------------------------------------
inline Vec3<int32_t>
OctreeAngPosScaler::scaleNs(Vec3<int32_t> pos) const
{
  // Lossless coding (qp == 0): positions pass through unchanged.
  if (_qp == 0)
    return pos;

  // Drop the node-size alignment padding, then scale the result as an
  // effective-node-size aligned position.
  const int shiftBits = QuantizerGeom::qpShift(_qp);
  return scaleEns(pos >> shiftBits);
}
//============================================================================
// Per-laser azimuthal (phi) step sizes and their fixed-point reciprocals,
// used by the angular coding mode.
class AzimuthalPhiZi {
public:
  // \param numLasers number of lasers; defines the length of both tables
  // \param numPhi    per-laser number of azimuthal steps per turn.
  //                  Each entry is used as a divisor and must be non-zero.
  AzimuthalPhiZi(int numLasers, const std::vector<int>& numPhi)
    : _delta(numLasers), _invDelta(numLasers)
  {
    for (int laserIndex = 0; laserIndex < numLasers; laserIndex++) {
      constexpr int k2pi = 6588397;  // 2**20 * 2 * pi
      // Fixed-point (2**20 scaled) width of one azimuthal step.
      _delta[laserIndex] = k2pi / numPhi[laserIndex];
      // Reciprocal of the step width, scaled by 2**30, so that users can
      // replace a division by delta with a multiply and shift.
      _invDelta[laserIndex] =
        int64_t((int64_t(numPhi[laserIndex]) << 30) / k2pi);
    }
  }

  // Accessors.  NB: the previous top-level const on these by-value return
  // types was meaningless and has been removed.
  int delta(size_t idx) const { return _delta[idx]; }
  int64_t invDelta(size_t idx) const { return _invDelta[idx]; }

private:
  std::vector<int> _delta;
  std::vector<int64_t> _invDelta;
};
//============================================================================
// Buffer of recently observed planar information, indexed by position, used
// when predicting the plane position of subsequent nodes.
struct OctreePlanarBuffer {
  // Number of bits used to index a buffer row (the "c" coordinate).
  static constexpr unsigned numBitsC = 14;
  // Number of position bits stored in each element (the "a"/"b" coordinate).
  static constexpr unsigned numBitsAb = 5;
  // Number of elements per row.
  static constexpr unsigned rowSize = 1;

  static_assert(numBitsC >= 0 && numBitsC <= 32, "0 <= numBitsC <= 32");
  static_assert(numBitsAb >= 0 && numBitsAb <= 32, "0 <= numBitsAb <= 32");
  static_assert(rowSize > 0, "rowSize must be greater than 0");

  // Low-order position bits below shiftAb are discarded by maskAb.
  static constexpr unsigned shiftAb = 3;
  static constexpr int maskAb = ((1 << numBitsAb) - 1) << shiftAb;
  static constexpr int maskC = (1 << numBitsC) - 1;

// Packed layout to minimise the footprint of the (potentially large) buffer.
#pragma pack(push)
#pragma pack(1)
  struct Elmt {
    // maximum of two position components
    unsigned int pos : numBitsAb;

    // -2: not used, -1: not planar, 0: plane 0, 1: plane 1
    // (2-bit signed field: representable values are exactly -2..1)
    int planeIdx : 2;
  };
#pragma pack(pop)

  typedef Elmt Row[rowSize];

  OctreePlanarBuffer();
  OctreePlanarBuffer(const OctreePlanarBuffer& rhs);
  OctreePlanarBuffer(OctreePlanarBuffer&& rhs);
  ~OctreePlanarBuffer();

  OctreePlanarBuffer& operator=(const OctreePlanarBuffer& rhs);
  OctreePlanarBuffer& operator=(OctreePlanarBuffer&& rhs);

  // (Re)allocate the backing store; one row count per dimension.
  void resize(Vec3<int> numBufferRows);
  void clear();

  // Access to a particular buffer column (dimension)
  Row* getBuffer(int dim) { return _col[dim]; }

private:
  // Backing storage for the underlying buffer
  std::vector<Elmt> _buf;
  // Base pointers for the first, second and third position components.
  std::array<Row*, 3> _col = {{nullptr, nullptr, nullptr}};
};
//============================================================================
// Adaptive state for planar coding mode, updated as nodes are (de)coded.
struct OctreePlanarState {
  OctreePlanarState(const GeometryParameterSet&);
  OctreePlanarState(const OctreePlanarState&);
  OctreePlanarState(OctreePlanarState&&);
  OctreePlanarState& operator=(const OctreePlanarState&);
  OctreePlanarState& operator=(OctreePlanarState&&);

  // True when the planar buffer (prediction from previously coded nodes)
  // is in use.
  bool _planarBufferEnabled;
  OctreePlanarBuffer _planarBuffer;

  // Per-axis adaptive rate estimates; maintained by updateRate() and
  // presumably compared against _rateThreshold in isEligible() -- see the
  // implementation for the exact adaptation rule.
  std::array<int, 3> _rate{{128 * 8, 128 * 8, 128 * 8}};

  // Adaptive estimate of local occupancy density.
  int _localDensity = 1024 * 4;

  // Per-axis eligibility thresholds.
  std::array<int, 3> _rateThreshold;

  void initPlanes(const Vec3<int>& planarDepth);
  void updateRate(int occupancy, int numSiblings);
  void isEligible(bool eligible[3]);
};
// determine if a 222 block is planar
void setPlanesFromOccupancy(int occupancy, OctreeNodePlanar& planar);
int maskPlanarX(const OctreeNodePlanar& planar);
int maskPlanarY(const OctreeNodePlanar& planar);
int maskPlanarZ(const OctreeNodePlanar& planar);
void maskPlanar(OctreeNodePlanar& planar, int mask[3], int codedAxes);
int determineContextAngleForPlanar(
PCCOctree3Node& node,
const Vec3<int>& nodeSizeLog2,
const Vec3<int>& angularOrigin,
const int* zLaser,
const int* thetaLaser,
const int numLasers,
int deltaAngle,
const AzimuthalPhiZi& phiZi,
int* phiBuffer,
int* contextAnglePhiX,
int* contextAnglePhiY,
Vec3<uint32_t> quantMasks);
//----------------------------------------------------------------------------
int findLaser(point_t point, const int* thetaList, const int numTheta);
//============================================================================
// The complete set of entropy coding contexts used by the geometry octree
// coder.  Shared between encoder and decoder; reset() reinitialises all
// models to their default state.
class GeometryOctreeContexts {
public:
  void reset();

protected:
  AdaptiveBitModel _ctxSingleChild;

  // Duplicate point count coding.
  AdaptiveBitModel _ctxDupPointCntGt0;
  AdaptiveBitModel _ctxDupPointCntGt1;
  AdaptiveBitModel _ctxDupPointCntEgl;

  AdaptiveBitModel _ctxBlockSkipTh;
  AdaptiveBitModel _ctxNumIdcmPointsGt1;
  AdaptiveBitModel _ctxSameZ;

  // IDCM unordered
  AdaptiveBitModel _ctxSameBitHighx[5];
  AdaptiveBitModel _ctxSameBitHighy[5];
  AdaptiveBitModel _ctxSameBitHighz[5];

  // residual laser index
  AdaptiveBitModel _ctxThetaRes[3];
  AdaptiveBitModel _ctxThetaResSign;
  AdaptiveBitModel _ctxThetaResExp;

  // Per-node geometry qp offset coding.
  AdaptiveBitModel _ctxQpOffsetAbsGt0;
  AdaptiveBitModel _ctxQpOffsetSign;
  AdaptiveBitModel _ctxQpOffsetAbsEgl;

  // for planar mode xyz
  AdaptiveBitModel _ctxPlanarMode[3];
  AdaptiveBitModel _ctxPlanarPlaneLastIndex[3][3][4];
  AdaptiveBitModel _ctxPlanarPlaneLastIndexZ[3];
  AdaptiveBitModel _ctxPlanarPlaneLastIndexAngular[4];
  AdaptiveBitModel _ctxPlanarPlaneLastIndexAngularIdcm[4];
  AdaptiveBitModel _ctxPlanarPlaneLastIndexAngularPhi[8];
  AdaptiveBitModel _ctxPlanarPlaneLastIndexAngularPhiIDCM[8];

  // For bitwise occupancy coding
  CtxModelOctreeOccupancy _ctxOccupancy;
  CtxMapOctreeOccupancy _ctxIdxMaps[18];

  // For bytewise occupancy coding
  DualLutCoder<true> _bytewiseOccupancyCoder[10];
};
//----------------------------------------------------------------------------
inline void
GeometryOctreeContexts::reset()
{
  // Reinitialise every context model by destroying the object in place and
  // re-running the default constructor via placement new.  This avoids
  // hand-writing a reset for each of the many members, but assumes default
  // construction cannot throw (a throwing constructor would leave the
  // object destroyed).
  this->~GeometryOctreeContexts();
  new (this) GeometryOctreeContexts;
}
//============================================================================
// :: octree encoder exposing internal ringbuffer
void encodeGeometryOctree(
const OctreeEncOpts& opt,
const GeometryParameterSet& gps,
GeometryBrickHeader& gbh,
PCCPointSet3& pointCloud,
GeometryOctreeContexts& ctxtMem,
std::vector<std::unique_ptr<EntropyEncoder>>& arithmeticEncoders,
pcc::ringbuf<PCCOctree3Node>* nodesRemaining);
void decodeGeometryOctree(
const GeometryParameterSet& gps,
const GeometryBrickHeader& gbh,
int skipLastLayers,
PCCPointSet3& pointCloud,
GeometryOctreeContexts& ctxtMem,
EntropyDecoder& arithmeticDecoder,
pcc::ringbuf<PCCOctree3Node>* nodesRemaining);
//============================================================================
} // namespace pcc
| 30.573832 | 80 | 0.637709 | [
"geometry",
"vector",
"3d"
] |
df665dbf2e65207539d8b0a4ade3ad934879480e | 376 | h | C | Friendly/Library/Layers/Presentation/Views/Footer/Event/FREventCollectionCellFooter.h | zetplaner/Friendly | b5e97a17123f40b870282e5b48361aed2d3c403a | [
"MIT"
] | null | null | null | Friendly/Library/Layers/Presentation/Views/Footer/Event/FREventCollectionCellFooter.h | zetplaner/Friendly | b5e97a17123f40b870282e5b48361aed2d3c403a | [
"MIT"
] | null | null | null | Friendly/Library/Layers/Presentation/Views/Footer/Event/FREventCollectionCellFooter.h | zetplaner/Friendly | b5e97a17123f40b870282e5b48361aed2d3c403a | [
"MIT"
] | null | null | null | //
// FREventCollectionCellFooter.h
// Friendly
//
// Created by Sergey Borichev on 14.03.16.
// Copyright © 2016 TecSynt. All rights reserved.
//
@class FREventCollectionCellFooterViewModel;
@interface FREventCollectionCellFooter : UIView
@property (nonatomic, strong) UIButton* joinButton;
- (void)updateWithModel:(FREventCollectionCellFooterViewModel*)model;
@end
| 20.888889 | 69 | 0.773936 | [
"model"
] |
df6f726c8c9b21be3d14d5045e1bdda74a08fc10 | 13,847 | c | C | buildHdpUtil.c | kishwarshafin/signalAlign | c9b7b9232ef6fb76aa427670981c969b887f4860 | [
"MIT"
] | null | null | null | buildHdpUtil.c | kishwarshafin/signalAlign | c9b7b9232ef6fb76aa427670981c969b887f4860 | [
"MIT"
] | null | null | null | buildHdpUtil.c | kishwarshafin/signalAlign | c9b7b9232ef6fb76aa427670981c969b887f4860 | [
"MIT"
] | null | null | null | // Utility that builds a Nanopore HDP from an alignment, allows for experimenting with hyperparameters
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include "pairwiseAligner.h"
#include "continuousHmm.h"
// Write the command line help text to stderr and terminate the process with
// a non-zero exit status.  NB: this function does not return.
void usage() {
    // Help text, one fprintf-sized chunk per entry, emitted in order.
    static const char *const helpLines[] = {
        "\n\tbuildHdpUtil - make a new hierarchical Dirichlet process model from an alignment file\n\n",
        "--help: display this message and exit\n",
        "--verbose: enable verbose Gibbs sampling output\n",
        "--oneD: flag for 1-D reads, only make a 'template' model\n",
        "-a: kmer length to use in the model (usually 5 or 6)\n",
        "-p: HDP type. Enum for the type of model to build (alphabet the model uses)\n",
        "\tMost people will use 10, 11, 12, or 13\n",
        "\t\t0: single level fixed (ACEGOT)\n",
        "\t\t1: single level prior (ACEGOT)\n",
        "\t\t2: multiset fixed (ACEGOT)\n",
        "\t\t3: multiset prior (ACEGOT)\n",
        "\t\t4: composiition fixed (ACEGOT)\n",
        "\t\t5: composiition prior (ACEGOT)\n",
        "\t\t6: middle nucleodides fixed (ACEGOT)\n",
        "\t\t7: middle nucleodides prior (ACEGOT)\n",
        "\t\t8: group multiset fixed (ACEGOT)\n",
        "\t\t9: group multiset prior (ACEGOT)\n",
        "\t\t10: single level prior (ACEGT)\n",
        "\t\t11: multiset prior (ACEGT)\n",
        "\t\t12: single level prior (ecoli) (ACEGIT)\n",
        "\t\t13: multiset prior (ecoli) (ACEGIT)\n",
        "-T: template lookup table (signalAlign HMM format)\n",
        "-C: complement lookup table (signalAlign HMM format)\n",
        "-l: alignments to take k-mer assignments from\n",
        "-v: template HDP output file path\n",
        "-w: complement HDP output file path\n",
        "-n: number of samples\n",
        "-I: burn in, samples to discard at the start\n",
        "-t: thinning\n",
        "-B: base gamma\n",
        "-M: middle gamma\n",
        "-L: leaf gamma\n",
        "-g: base gamma alpha\n",
        "-r: base gamma beta\n",
        "-j: middle gamma alpha\n",
        "-y: middle gamma beta\n",
        "-i: leaf gamma alpha\n",
        "-u: leaf gamma beta\n",
        "-s: sampling grid start\n",
        "-e: sampling grid end\n",
        "-k: sampling grid length\n",
    };

    for (size_t i = 0; i < sizeof(helpLines) / sizeof(helpLines[0]); i++) {
        fprintf(stderr, "%s", helpLines[i]);
    }

    exit(1);
}
// Report the requested build configuration on stderr before any work starts.
// The alignment file line is only printed when a file was supplied, and the
// complement line only for two-direction (2D) reads.
void printStartMessage(int64_t hdpType, char *alignmentsFile, char *templateHdpOutfile, char *complementHdpOutfile, bool twoD) {
    fprintf(stderr, "Building Nanopore HDP\n");
    fprintf(stderr, "Making HDP type %" PRId64 "\n", hdpType);

    if (alignmentsFile != NULL)
        fprintf(stderr, "Using alignment from %s\n", alignmentsFile);

    fprintf(stderr, "Putting template here: %s\n", templateHdpOutfile);

    if (twoD)
        fprintf(stderr, "Putting complement here: %s\n", complementHdpOutfile);
}
// Load a serialised NanoporeHDP, fold in the k-mer/event assignments from an
// expectations file, re-run Gibbs sampling and serialise the updated HDP.
//
// nHdpFile:         path to the serialised NanoporeHDP to update
// expectationsFile: expectations file in signalAlign HMM format
// nHdpOutFile:      destination path for the updated, serialised HDP
// nbSamples/burnIn/thinning: Gibbs sampling schedule
// verbose:          enable verbose sampling output
void updateHdpFromAssignments(const char *nHdpFile, const char *expectationsFile, const char *nHdpOutFile,
                              int64_t nbSamples, int64_t burnIn, int64_t thinning, bool verbose) {
    NanoporeHDP *nHdp = deserialize_nhdp(nHdpFile);

    // NOTE(review): loading the HMM appears to attach its assignments to
    // nHdp as a side effect (the Hmm itself is destroyed immediately) --
    // confirm against hdpHmm_loadFromFile in continuousHmm.c.
    Hmm *hdpHmm = hdpHmm_loadFromFile(expectationsFile, threeStateHdp, nHdp);
    hmmContinuous_destruct(hdpHmm, hdpHmm->type);

    // Fixed log message: the original format string was missing spaces
    // ("...samples %"PRId64"burn in..."), which ran the numbers into the
    // following words.
    fprintf(stderr, "signalAlign - Running Gibbs on HDP doing %" PRId64 " samples, %" PRId64 " burn in, %" PRId64 " thinning\n",
            nbSamples, burnIn, thinning);
    execute_nhdp_gibbs_sampling(nHdp, nbSamples, burnIn, thinning, verbose);
    finalize_nhdp_distributions(nHdp);

    fprintf(stderr, "signalAlign - Serializing HDP to %s\n", nHdpOutFile);
    serialize_nhdp(nHdp, nHdpOutFile);
    destroy_nanopore_hdp(nHdp);
}
// Entry point: parse command line options, then build (or update) a
// NanoporeHDP from a k-mer assignment alignment file.
int main(int argc, char *argv[]) {
    // Parsed options.  hdpType == -1 marks "not specified".
    int64_t hdpType = -1;
    char *templateLookupTable = NULL;
    char *complementLookupTable = NULL;
    char *alignmentsFile = NULL;
    char *templateHdpOutfile = NULL;
    char *complementHdpOutfile = NULL;

    // Sampling schedule and model size options.  These were previously left
    // uninitialised, so omitting any of the corresponding flags passed
    // indeterminate values to the HDP construction routines (undefined
    // behaviour).  Zero-initialise them instead.
    int64_t nbSamples = 0, burnIn = 0, thinning = 0, samplingGridLength = 0, kmerLength = 0, j = 0;
    bool verbose = FALSE;
    bool twoD = TRUE;

    // Hyperparameters: either fixed gamma values, or (alpha, beta) pairs of
    // the gamma distributions they are sampled from.  NULL_HYPERPARAMETER
    // marks "not supplied".
    double baseGamma = NULL_HYPERPARAMETER;
    double middleGamma = NULL_HYPERPARAMETER;
    double leafGamma = NULL_HYPERPARAMETER;

    double baseGammaAlpha = NULL_HYPERPARAMETER;
    double baseGammaBeta = NULL_HYPERPARAMETER;

    double middleGammaAlpha = NULL_HYPERPARAMETER;
    double middleGammaBeta = NULL_HYPERPARAMETER;

    double leafGammaAlpha = NULL_HYPERPARAMETER;
    double leafGammaBeta = NULL_HYPERPARAMETER;

    // Grid over which hyperparameter posteriors are evaluated.
    // Zero-initialised for the same reason as the options above.
    double samplingGridStart = 0.0, samplingGridEnd = 0.0;

    int key;

    // Option parsing loop; getopt_long returns -1 when no options remain.
    while (1) {
        static struct option long_options[] = {
                {"help",                    no_argument,        0,  'h'},
                {"verbose",                 no_argument,        0,  'o'},
                {"oneD",                    no_argument,        0,  'q'},
                {"kmerLength",              required_argument,  0,  'a'},
                {"HdpType",                 required_argument,  0,  'p'},
                {"templateLookupTable",     required_argument,  0,  'T'},
                {"complementLookupTable",   required_argument,  0,  'C'},
                {"alignments",              required_argument,  0,  'l'},
                {"templateHdp",             required_argument,  0,  'v'},
                {"complementHdp",           required_argument,  0,  'w'},
                {"nbSamples",               required_argument,  0,  'n'},
                {"burnIn",                  required_argument,  0,  'I'},
                {"thinning",                required_argument,  0,  't'},
                {"baseGamma",               required_argument,  0,  'B'},
                {"middleGamma",             required_argument,  0,  'M'},
                {"leafGamma",               required_argument,  0,  'L'},
                {"baseGammaAlpha",          required_argument,  0,  'g'},
                {"baseGammaBeta",           required_argument,  0,  'r'},
                {"middleGammaAlpha",        required_argument,  0,  'j'},
                {"middleGammaBeta",         required_argument,  0,  'y'},
                {"leafGammaAlpha",          required_argument,  0,  'i'},
                {"leafGammaBeta",           required_argument,  0,  'u'},
                {"samplingGridStart",       required_argument,  0,  's'},
                {"samplingGridEnd",         required_argument,  0,  'e'},
                {"samplingGridLength",      required_argument,  0,  'k'},
                {0, 0, 0, 0} };

        int option_index = 0;

        key = getopt_long(argc, argv, "h:a:o:q:p:T:C:l:v:w:n:I:t:B:M:L:g:r:j:y:i:u:s:e:k:",
                          long_options, &option_index);

        if (key == -1) {
            //usage();
            break;
        }
        switch (key) {
            case 'h':
                usage();
                return 1;
            case 'a':
                j = sscanf(optarg, "%" PRIi64 "", &kmerLength);
                assert(j == 1);
                assert(kmerLength > 0);
                break;
            case 'p':
                j = sscanf(optarg, "%" PRIi64 "", &hdpType);
                assert (j == 1);
                break;
            case 'T':
                templateLookupTable = stString_copy(optarg);
                break;
            case 'C':
                complementLookupTable = stString_copy(optarg);
                break;
            case 'l':
                alignmentsFile = stString_copy(optarg);
                break;
            case 'v':
                templateHdpOutfile = stString_copy(optarg);
                break;
            case 'w':
                complementHdpOutfile = stString_copy(optarg);
                break;
            case 'n':
                j = sscanf(optarg, "%" PRIi64 "", &nbSamples);
                assert (j == 1);
                assert (nbSamples >= 0);
                break;
            case 'I':
                j = sscanf(optarg, "%" PRIi64 "", &burnIn);
                assert (j == 1);
                assert (burnIn >= 0);
                break;
            case 't':
                j = sscanf(optarg, "%" PRIi64 "", &thinning);
                assert (j == 1);
                assert (thinning >= 0);
                break;
            case 'q':
                twoD = FALSE;
                break;
            case 'o':
                verbose = TRUE;
                break;
            case 'B':
                j = sscanf(optarg, "%lf", &baseGamma);
                assert (j == 1);
                assert (baseGamma >= 0);
                break;
            case 'M':
                j = sscanf(optarg, "%lf", &middleGamma);
                assert (j == 1);
                assert (middleGamma >= 0);
                break;
            case 'L':
                j = sscanf(optarg, "%lf", &leafGamma);
                assert (j == 1);
                assert (leafGamma >= 0);
                break;
            case 'g':
                j = sscanf(optarg, "%lf", &baseGammaAlpha);
                assert (j == 1);
                assert (baseGammaAlpha >= 0);
                break;
            case 'r':
                j = sscanf(optarg, "%lf", &baseGammaBeta);
                assert (j == 1);
                assert (baseGammaBeta >= 0);
                break;
            case 'j':
                j = sscanf(optarg, "%lf", &middleGammaAlpha);
                assert (j == 1);
                assert (middleGammaAlpha >= 0);
                break;
            case 'y':
                j = sscanf(optarg, "%lf", &middleGammaBeta);
                assert (j == 1);
                assert (middleGammaBeta >= 0);
                break;
            case 'i':
                j = sscanf(optarg, "%lf", &leafGammaAlpha);
                assert (j == 1);
                assert (leafGammaAlpha >= 0);
                break;
            case 'u':
                j = sscanf(optarg, "%lf", &leafGammaBeta);
                assert (j == 1);
                assert (leafGammaBeta >= 0);
                break;
            case 's':
                j = sscanf(optarg, "%lf", &samplingGridStart);
                assert (j == 1);
                break;
            case 'e':
                j = sscanf(optarg, "%lf", &samplingGridEnd);
                assert (j == 1);
                break;
            case 'k':
                j = sscanf(optarg, "%" PRIi64 "", &samplingGridLength);
                assert (j == 1);
                assert (samplingGridLength >= 0);
                break;
            default:
                usage();
                return 1;
        }
    }
    // j only carries sscanf return values checked by assert; keep NDEBUG
    // builds free of unused-variable warnings.
    (void) j;

    // Output locations and lookup tables are mandatory (complement versions
    // only for 2D reads).
    if ((templateHdpOutfile == NULL) || (complementHdpOutfile == NULL && twoD)) {
        st_errAbort("[buildHdpUtil] ERROR: Need to specify where to put the HDP files");
    }
    if ((templateLookupTable == NULL) || (complementLookupTable == NULL && twoD)) {
        st_errAbort("[buildHdpUtil] ERROR: Need lookup tables");
    }

    printStartMessage(hdpType, alignmentsFile, templateHdpOutfile, complementHdpOutfile, twoD);

    if (alignmentsFile == NULL) st_errAbort("[buildHdpUtil] Need to provide build alignment (assignments)");

    // option for building from alignment
    if (twoD) {
        if (!((hdpType >= 0) && (hdpType <= 13))) {
            st_errAbort("Invalid HDP type");
        }
        NanoporeHdpType type = (NanoporeHdpType) hdpType;
        nanoporeHdp_buildNanoporeHdpFromAlignment(type, kmerLength,
                                                  templateLookupTable, complementLookupTable, alignmentsFile,
                                                  templateHdpOutfile, complementHdpOutfile,
                                                  nbSamples, burnIn, thinning, verbose,
                                                  baseGamma, middleGamma, leafGamma,
                                                  baseGammaAlpha, baseGammaBeta,
                                                  middleGammaAlpha, middleGammaBeta,
                                                  leafGammaAlpha, leafGammaBeta,
                                                  samplingGridStart, samplingGridEnd, samplingGridLength);
    } else {
        if (hdpType != singleLevelPrior2 && hdpType != multisetPrior2 &&
            hdpType != singleLevelFixedCanonical && hdpType != singleLevelFixedM6A) {
            // Fixed format-string bug: hdpType is an int64_t, so printing it
            // with "%i" was undefined behaviour; use PRIi64 instead.
            st_errAbort("Invalid HDP type for 1D %" PRIi64 "", hdpType);
        }
        NanoporeHdpType type = (NanoporeHdpType) hdpType;
        nanoporeHdp_buildOneDHdpFromAlignment(type, kmerLength,
                                              templateLookupTable,
                                              alignmentsFile,
                                              templateHdpOutfile,
                                              nbSamples, burnIn, thinning, verbose,
                                              baseGamma, middleGamma, leafGamma,
                                              baseGammaAlpha, baseGammaBeta,
                                              middleGammaAlpha, middleGammaBeta,
                                              leafGammaAlpha, leafGammaBeta,
                                              samplingGridStart, samplingGridEnd, samplingGridLength);
    }
    return 0;
}
| 45.104235 | 128 | 0.507258 | [
"model"
] |
df6fa3fe28b41f63f8f10643cdc93a35b75f7359 | 23,491 | h | C | inc/ubsmear/Helpers/UBMatrixHelper.h | a-d-smith/ubsmear | bbe42165cd8aad45ac654559cb37661d197d26db | [
"MIT"
] | 3 | 2020-11-18T21:35:08.000Z | 2020-11-28T23:13:08.000Z | inc/ubsmear/Helpers/UBMatrixHelper.h | a-d-smith/ubsmear | bbe42165cd8aad45ac654559cb37661d197d26db | [
"MIT"
] | 2 | 2020-12-01T18:16:49.000Z | 2021-04-15T15:13:22.000Z | inc/ubsmear/Helpers/UBMatrixHelper.h | a-d-smith/ubsmear | bbe42165cd8aad45ac654559cb37661d197d26db | [
"MIT"
] | 1 | 2021-11-06T15:22:59.000Z | 2021-11-06T15:22:59.000Z | #ifndef UBSMEAR_UBSMEAR_HELPERS_UBMATRIXHELPER
#define UBSMEAR_UBSMEAR_HELPERS_UBMATRIXHELPER
#include "ubsmear/Objects/UBMatrix.h"
#include <stdexcept>
#include <cmath>
#include <algorithm>
namespace ubsmear
{
/**
* @brief A helper class for common tasks in linear algebra
*/
class UBMatrixHelper
{
    public:
        /**
        *  @brief Get a matrix of the specified dimensions with zero for every element
        *
        *  @param nRows the number of rows
        *  @param nCols the number of columns
        *
        *  @return the zero matrix
        */
        static UBMatrix GetZeroMatrix(const size_t nRows, const size_t nCols);

        /**
        *  @brief Get a unit square matrix of the specified size
        *
        *  @param size the size of the matrix (both rows and columns)
        *
        *  @return the unit matrix
        */
        static UBMatrix GetUnitMatrix(const size_t size);

        /**
        *  @brief Get a diagonal matrix (all off-diagonals are zero) with diagonal entries given by the elements of the input column vector
        *
        *  @param columnVector the input column vector which defines the diagonals of the output matrix
        *
        *  @return the diagonal matrix
        */
        static UBMatrix GetDiagonalMatrix(const UBMatrix &columnVector);

        /**
        *  @brief Get a (square) Givens rotation matrix of the specified size, which acts to rotate by a specified angle in the plane
        *         corresponding to the specified row and column indices
        *
        *  @param size the size of the matrix (both rows and columns)
        *  @param rowIndex the row index (first axis of the plane of rotation)
        *  @param columnIndex the column index (second axis of the plane of rotation)
        *  @param angle the angle through which to rotate (radians)
        *
        *  @return the rotation matrix
        */
        static UBMatrix GetGivensRotationMatrix(const size_t size, const size_t rowIndex, const size_t columnIndex, const float angle);

        /**
        *  @brief Get the matrix whose columns are the eigenvectors of the input matrix. The eigenvectors are normalised to unity.
        *         This method is an implementation of the Jacobi eigenvalue algorithm for real and symmetric matrices.
        *
        *  @param matrix the input real symmetric matrix
        *  @param precision the maximum allowed RMS of the off-diagonal elements of the matrix when transformed by the matrix of eigenvectors
        *
        *  @return a pair, first is the column vector of eigenvalues, second is the matrix of eigenvectors
        */
        static std::pair<UBMatrix, UBMatrix> GetEigenDecomposition(const UBMatrix &matrix, const float precision);

        /**
        *  @brief Reconstruct a real symmetric matrix from it's eigenvectors and eigenvalues.
        *
        *  @param eigenvalues the input column vector of eigenvalues
        *  @param eigenvectorMatrix the input matrix whose columns are the eigenvectors
        *
        *  @return the real symmetric matrix which has the supplied eigenvalues and eigenvectors
        */
        static UBMatrix GetMatrixFromEigenDecomposition(const UBMatrix &eigenvalues, const UBMatrix &eigenvectorMatrix);

        /**
        *  @brief Reconstruct a real symmetric matrix from a partial set of it's eigenvectors and eigenvalues.
        *
        *  @param eigenvalues the input column vector of L eigenvalues
        *  @param eigenvectorMatrix the input (N x L) matrix of eigenvectors, whose columns correspond to the eigenvectors
        *
        *  @return the (N x N) real symmetric matrix which has the supplied eigenvalues and eigenvectors
        */
        static UBMatrix GetMatrixFromPartialEigenDecomposition(const UBMatrix &eigenvalues, const UBMatrix &eigenvectorMatrix);

        /**
        *  @brief Check if the input matrix is square
        *
        *  @param matrix the input matrix
        *
        *  @return boolean, true if square
        */
        static bool IsSquare(const UBMatrix &matrix);

        /**
        *  @brief Check if the input matrix is symmetric
        *
        *  @param matrix the input matrix
        *
        *  @return boolean, true if symmetric
        */
        static bool IsSymmetric(const UBMatrix &matrix);

    private:

        /**
        *  @brief Get the row and column of the off-diagonal element of the input matrix with the largest absolute value
        *
        *  @param matrix the input matrix
        *
        *  @return a pair (first = row index, second = column index), giving the index of the largest off-diagonal matrix element
        */
        static std::pair<size_t, size_t> GetLargestOffDiagonalElement(const UBMatrix &matrix);

        /**
        *  @brief Get the sum of the squares of the off-diagonal terms in the input matrix
        *
        *  @param matrix the input matrix
        *
        *  @return the off-diagonal squared sum
        */
        static float GetOffDiagonalSquaredSum(const UBMatrix &matrix);
};
// -----------------------------------------------------------------------------------------------------------------------------------------
// -----------------------------------------------------------------------------------------------------------------------------------------
inline UBMatrix UBMatrixHelper::GetZeroMatrix(const size_t nRows, const size_t nCols)
{
    // Construct directly from an nRows*nCols element vector of zeros
    return UBMatrix(std::vector<float>(nRows * nCols, 0.f), nRows, nCols);
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline UBMatrix UBMatrixHelper::GetUnitMatrix(const size_t size)
{
    // Lay out the identity in a flat element vector: ones on the diagonal
    // (flat index i*(size + 1)), zeros everywhere else
    std::vector<float> elements(size * size, 0.f);
    for (size_t i = 0; i < size; ++i)
        elements.at(i * (size + 1)) = 1.f;

    return UBMatrix(elements, size, size);
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline UBMatrix UBMatrixHelper::GetDiagonalMatrix(const UBMatrix &columnVector)
{
    // Only a column vector (exactly one column) can define a diagonal
    if (columnVector.GetColumns() != 1u)
        throw std::invalid_argument("UBMatrixHelper::GetDiagonalMatrix - Input matrix has more than one column, it should be a column vector");

    // The output is square with one row/column per input entry
    const auto size = columnVector.GetRows();

    // Fill a flat element vector: the i-th diagonal sits at flat index
    // i*(size + 1); all other entries remain zero
    std::vector<float> elements(size * size, 0.f);
    for (size_t row = 0; row < size; ++row)
        elements.at(row * (size + 1)) = columnVector.At(row, 0);

    return UBMatrix(elements, size, size);
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline UBMatrix UBMatrixHelper::GetGivensRotationMatrix(const size_t size, const size_t rowIndex, const size_t columnIndex, const float angle)
{
    // Validate the requested plane of rotation
    if (rowIndex >= size)
        throw std::out_of_range("UBMatrixHelper::GetGivensRotationMatrix - supplied row index is larger than the supplied matrix size");

    if (columnIndex >= size)
        throw std::out_of_range("UBMatrixHelper::GetGivensRotationMatrix - supplied column index is larger than the supplied matrix size");

    if (rowIndex == columnIndex)
        throw std::invalid_argument("UBMatrixHelper::GetGivensRotationMatrix - supplied row and column indices are equal");

    // Precompute the rotation components
    const auto cosTheta = std::cos(angle);
    const auto sinTheta = std::sin(angle);

    // Start from the identity and overwrite the four entries lying in the
    // (rowIndex, columnIndex) plane
    auto rotation = UBMatrixHelper::GetUnitMatrix(size);
    rotation.SetElement(rowIndex, rowIndex, cosTheta);
    rotation.SetElement(rowIndex, columnIndex, -sinTheta);
    rotation.SetElement(columnIndex, rowIndex, +sinTheta);
    rotation.SetElement(columnIndex, columnIndex, cosTheta);

    return rotation;
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline std::pair<UBMatrix, UBMatrix> UBMatrixHelper::GetEigenDecomposition(const UBMatrix &matrix, const float precision)
{
    // Here we implement the Jacobi eigenvalue algorithm. A summary of this algorithm is described below.
    //
    // The intention of this algorithm is to perform an eigenvalue decomposition, i.e. to express the input matrix, M in diagonal form: M = Q D Q^-1
    //   - D = diagonal matrix whose elements are the eigenvalues of M
    //   - Q = matrix whose columns are the eigenvectors of M
    //   - Q^-1 = the inverse matrix of Q
    //
    // If M is a real symmetric matrix (which we insist to be true), then the matrix of eigenvectors, Q, is an orthogonal matrix: Q^-1 = Q^T
    //   - Q^T = the transpose of the matrix Q (switching rows for columns)
    //
    // The Jacobi eigenvalue algorithm is iterative. Each iteration (labelled with n) results in a new matrix D_n. This matrix is formed by
    // applying a rotation matrix, R_n, to the outcome of the previous iteration: D_n = R_n D_n-1 R_n^T
    //   - D_n = the outcome of the nth iteration of the algorithm
    //   - D_n-1 = the outcome of the (n-1)th iteration of the algorithm
    //   - R_n = the rotation matrix used in the nth iteration
    //   - R_n^T = the transpose of the rotation matrix, R_n
    //   - D_0 = M, the algorithm is seeded with the input matrix
    //
    // Note that rotation matrices are orthogonal, and the matrix product of two orthogonal matrices is itself orthogonal. Hence, the matrix
    // product: (R_1 R_2 R_3 ... R_N) is an orthogonal matrix. The aim of the algorithm is to apply successive rotations such that D_n
    // converges toward a diagonal matrix. After N iterations, if the sequence has converged, then we can make the approximation:
    //   - D = D_N
    //   - Q^T = R_N ... R_3 R_2 R_1
    //
    // In particular, R_n is chosen to be a Givens rotation matrix (i.e. a rotation in a plane defined by two coordinate axes). The plane is
    // chosen to correspond to the off-diagonal element of D_n-1 with the largest absolute value (known as the pivot). The angle of rotation
    // is chosen such that this pivot element becomes zero after the rotation. The angle is given by:
    //
    //   tan(2 theta) = 2 [ij] / ([jj] - [ii])
    //   - theta = angle of rotation
    //   - [ij]  = the value of D_n-1 in the ith row and jth column. i and j are the row and column of the pivot.
    //
    // To determine if D_n has converged to a diagonal matrix, we consider the sum of the squares of its off-diagonal entries, X_n^2. If the
    // matrix has size s, then there are s(s-1) off-diagonal elements. We define: S = s(s-1)/2, where a series of S rotations is known as a
    // "sweep". One can show that after S rotations, we have:
    //
    //   X_{n+S} <= r X_n
    //   - X_n     = square-root of the sum of the squares of the off-diagonal elements of D_n
    //   - X_{n+S} = the above, after a further S iterations of the algorithm
    //   - r       = (1 - 1/S)^(S/2) = the worst-case-scenario linear convergence rate after one sweep
    //   - For large matrices, r tends to e^(-1/2), which is approximately 0.61
    //   - S       = half of the number of off-diagonal elements
    // Insist that the input matrix is symmetric (required so that Q^-1 = Q^T holds)
    if (!UBMatrixHelper::IsSymmetric(matrix))
        throw std::invalid_argument("UBMatrixHelper::GetEigenDecomposition - Input matrix isn't symmetric");
    // Get the size of the matrix (it is square)
    const auto size = matrix.GetRows();
    // If the size is one (i.e. the matrix is just a scalar), then the answer is trivial
    if (size == 1)
    {
        return std::pair<UBMatrix, UBMatrix> (
            {{ matrix.At(0, 0) }, 1, 1}, // Eigenvalue is the entry of the matrix
            {{ 1.f }, 1, 1}              // Normalised eigenvector is just the number 1.0
        );
    }
    // Determine the maximum number of iterations we should use - start by assuming one
    size_t maxIterations = 1;
    float squaredSumTarget = -std::numeric_limits<float>::max(); // The target squared sum of the off-diagonals at the desired precision
    // ATTN for size 1, the answer is trivial. For size 2, only one iteration is required to get the answer precisely
    if (size > 2)
    {
        // Get the sweep length
        const size_t sweep = size * (size - 1) / 2;
        // Get the initial squared sum of the off-diagonals
        const auto squaredSumInitial = UBMatrixHelper::GetOffDiagonalSquaredSum(matrix);
        // Get the off-diagonal squared sum we would expect at the target precision
        squaredSumTarget = 2 * sweep * std::pow(precision, 2.f);
        // ATTN if we are already within the desired precision, then just run one iteration
        if (squaredSumInitial > squaredSumTarget && squaredSumInitial > std::numeric_limits<float>::epsilon())
        {
            // Get the worst-case-scenario convergence rate for a single sweep
            const auto convergenceRate = std::pow((1.f - (1.f / static_cast<float>(sweep))), static_cast<float>(sweep) / 2.f);
            // Work out how many iterations we would need to reach the desired precision in the worst-case scenario
            maxIterations = std::ceil(sweep * std::log(squaredSumTarget/squaredSumInitial) / std::log(convergenceRate));
        }
    }
    // Make a copy of the input matrix which will converge to be diagonal
    auto diagMatrix = matrix;
    // Make a matrix to hold the total transformation we applied to the matrix to make it diagonal (start with a unit matrix)
    auto transformationMatrix = UBMatrixHelper::GetUnitMatrix(size);
    // Repeat for each iteration of the algorithm
    for (size_t i = 0; i < maxIterations; ++i)
    {
        // Identify the pivot: the off-diagonal element with the largest absolute value
        const auto &[pivotRow, pivotCol] = UBMatrixHelper::GetLargestOffDiagonalElement(diagMatrix);
        // Get the angle through which we should rotate (atan2 handles the [jj] == [ii] case safely)
        const auto valueRowCol = diagMatrix.At(pivotRow, pivotCol);
        const auto valueRowRow = diagMatrix.At(pivotRow, pivotRow);
        const auto valueColCol = diagMatrix.At(pivotCol, pivotCol);
        const auto theta = std::atan2(2 * valueRowCol, valueColCol - valueRowRow) / 2;
        // Get the rotation matrix and its transpose
        const auto rotationMatrix = UBMatrixHelper::GetGivensRotationMatrix(size, pivotRow, pivotCol, theta);
        const auto rotationMatrixTranspose = rotationMatrix.GetTranspose();
        // Apply the rotation to both the converging matrix and the accumulated transformation
        diagMatrix = rotationMatrix * diagMatrix * rotationMatrixTranspose;
        transformationMatrix = rotationMatrix * transformationMatrix;
        // Get the squared sum of the off-diagonals after this iteration
        const auto squaredSum = UBMatrixHelper::GetOffDiagonalSquaredSum(diagMatrix);
        // If we have reached the desired precision, then stop
        if (squaredSum < squaredSumTarget)
            break;
    }
    // ATTN the Jacobi eigenvalue algorithm is now complete!
    //   - The diagonals of diagMatrix are the eigenvalues
    //   - The transpose of transformationMatrix is the matrix whose columns are the eigenvectors
    //
    // Below we put this information into a reproducible form, i.e. we sort the eigenvalues and normalise the eigenvectors
    // Extract the eigenvalues from the diagonalised matrix along with their indices
    std::vector< std::pair<size_t, float> > eigenvalueIndexPairs;
    for (size_t i = 0; i < size; ++i)
        eigenvalueIndexPairs.emplace_back(i, diagMatrix.At(i, i));
    // Sort the eigenvalues in descending order of their signed value (largest first)
    // NOTE this is a comparison of the signed values, not of the absolute magnitudes
    std::sort(eigenvalueIndexPairs.begin(), eigenvalueIndexPairs.end(), [] (const auto &a, const auto &b) { return a.second > b.second; });
    // Get the elements of the matrix whose rows are the eigenvectors of the input matrix - sorted by eigenvalue and normalised to unity
    std::vector<float> eigenvectorMatrixElements;
    std::vector<float> eigenvalues;
    for (const auto &[iRow, eigenvalue] : eigenvalueIndexPairs)
    {
        // ATTN we have a choice of the sign of the normalisation
        // Here we choose the sign such that the largest element of the eigenvector is positive
        float maxValue = -std::numeric_limits<float>::max();
        int sign = 1;
        float norm2 = 0.f;
        for (size_t iCol = 0; iCol < size; ++iCol)
        {
            // Sum the squares of the elements of the eigenvector
            const auto value = transformationMatrix.At(iRow, iCol);
            norm2 += std::pow(value, 2.f);
            // Get the sign of the largest element of the eigenvector
            if (std::abs(value) >= maxValue)
            {
                maxValue = std::abs(value);
                sign = (value > 0.f) ? 1 : -1;
            }
        }
        // A (near-)zero norm would mean the accumulated rotations produced a degenerate row - treat this as a logic error
        if (norm2 <= std::numeric_limits<float>::epsilon())
            throw std::logic_error("UBMatrixHelper::GetEigenDecomposition - Found an eigenvector with invalid norm");
        const auto norm = std::pow(norm2, 0.5f) * sign;
        // Normalise the elements of the eigenvector and store them
        for (size_t iCol = 0; iCol < size; ++iCol)
            eigenvectorMatrixElements.push_back(transformationMatrix.At(iRow, iCol) / norm);
        // Store the current eigenvalue
        eigenvalues.push_back(eigenvalue);
    }
    // Return the output matrices as a pair: (column vector of eigenvalues, matrix whose columns are the eigenvectors)
    return std::pair<UBMatrix, UBMatrix> (
        { eigenvalues, size, 1 },
        UBMatrix(eigenvectorMatrixElements, size, size).GetTranspose()
    );
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline UBMatrix UBMatrixHelper::GetMatrixFromEigenDecomposition(const UBMatrix &eigenvalues, const UBMatrix &eigenvectorMatrix)
{
    // Sanity-check the shapes of the inputs: a column vector of eigenvalues...
    if (eigenvalues.GetColumns() != 1u)
        throw std::invalid_argument("UBMatrixHelper::GetMatrixFromEigenDecomposition - input eigenvalues is not a column vector");
    // ... and a square eigenvector matrix with one eigenvector per eigenvalue
    const auto nEigenvalues = eigenvalues.GetRows();
    if (eigenvectorMatrix.GetRows() != nEigenvalues)
        throw std::invalid_argument("UBMatrixHelper::GetMatrixFromEigenDecomposition - the number of rows of the input eigenvector matrix, doesn't match the number of supplied eigenvalues");
    if (eigenvectorMatrix.GetColumns() != nEigenvalues)
        throw std::invalid_argument("UBMatrixHelper::GetMatrixFromEigenDecomposition - the number of columns of the input eigenvector matrix, doesn't match the number of supplied eigenvalues");
    // A full (square) set of eigenvectors is just a special case of the partial reconstruction
    return UBMatrixHelper::GetMatrixFromPartialEigenDecomposition(eigenvalues, eigenvectorMatrix);
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline UBMatrix UBMatrixHelper::GetMatrixFromPartialEigenDecomposition(const UBMatrix &eigenvalues, const UBMatrix &eigenvectorMatrix)
{
    // The eigenvalues must arrive as a column vector
    if (eigenvalues.GetColumns() != 1u)
        throw std::invalid_argument("UBMatrixHelper::GetMatrixFromPartialEigenDecomposition - input eigenvalues is not a column vector");
    // There must be exactly one eigenvector (column) per supplied eigenvalue...
    const auto nEigenvalues = eigenvalues.GetRows();
    if (eigenvectorMatrix.GetColumns() != nEigenvalues)
        throw std::invalid_argument("UBMatrixHelper::GetMatrixFromPartialEigenDecomposition - the number of columns of the input eigenvector matrix, doesn't match the number of supplied eigenvalues");
    // ... and each eigenvector must live in a space of at least that dimension
    const auto size = eigenvectorMatrix.GetRows();
    if (size < nEigenvalues)
        throw std::invalid_argument("UBMatrixHelper::GetMatrixFromPartialEigenDecomposition - the number of rows of the input eigenvector matrix is smaller than the number of columns");
    // Reconstruct the matrix as V D V^T, with D the diagonal matrix of eigenvalues
    const auto eigenvalueMatrix = UBMatrixHelper::GetDiagonalMatrix(eigenvalues);
    // ATTN this assumes the output matrix is real and symmetric, so that the inverse of the
    // matrix of eigenvectors equals its transpose; this does not hold for a general matrix
    return (eigenvectorMatrix * eigenvalueMatrix * eigenvectorMatrix.GetTranspose());
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline std::pair<size_t, size_t> UBMatrixHelper::GetLargestOffDiagonalElement(const UBMatrix &matrix)
{
    // Scan every off-diagonal entry and remember the position of the one with the
    // largest absolute value (ties resolve to the last entry visited)
    std::pair<size_t, size_t> bestIndex;
    float bestValue = -std::numeric_limits<float>::max();
    bool haveCandidate = false;
    for (size_t row = 0; row < matrix.GetRows(); ++row)
    {
        for (size_t col = 0; col < matrix.GetColumns(); ++col)
        {
            // Only off-diagonal entries are of interest
            if (row == col)
                continue;
            // Ignore anything strictly smaller than the current best
            const auto absValue = std::abs(matrix.At(row, col));
            if (absValue < bestValue)
                continue;
            haveCandidate = true;
            bestValue = absValue;
            bestIndex = {row, col};
        }
    }
    // A matrix with no off-diagonal entries (e.g. 1x1) has no answer to give
    if (!haveCandidate)
        throw std::logic_error("UBMatrixHelper::GetLargestOffDiagonalElement - Largest off-diagonal element doesn't exist");
    return bestIndex;
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline float UBMatrixHelper::GetOffDiagonalSquaredSum(const UBMatrix &matrix)
{
    // Returns the sum of the squares of all off-diagonal elements. This is used as the
    // convergence metric of the Jacobi eigenvalue algorithm (it is zero for a diagonal matrix).
    float sum = 0.f;
    for (size_t iRow = 0; iRow < matrix.GetRows(); ++iRow)
    {
        for (size_t iCol = 0; iCol < matrix.GetColumns(); ++iCol)
        {
            // Skip diagonal entries
            if (iRow == iCol)
                continue;
            // Square via direct multiplication rather than std::pow(x, 2.f):
            // cheaper (no transcendental-function call) and exactly rounded
            const auto value = matrix.At(iRow, iCol);
            sum += value * value;
        }
    }
    return sum;
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline bool UBMatrixHelper::IsSquare(const UBMatrix &matrix)
{
    // A matrix is square when its row and column counts agree
    const auto nRows = matrix.GetRows();
    const auto nCols = matrix.GetColumns();
    return nRows == nCols;
}
// -----------------------------------------------------------------------------------------------------------------------------------------
inline bool UBMatrixHelper::IsSymmetric(const UBMatrix &matrix)
{
    // Symmetry requires a square matrix to begin with
    if (!UBMatrixHelper::IsSquare(matrix))
        return false;
    // Compare each strictly-lower-triangle entry with its mirror image across the
    // diagonal; any pair differing by at least machine epsilon breaks the symmetry
    for (size_t row = 1; row < matrix.GetRows(); ++row)
    {
        for (size_t col = 0; col < row; ++col)
        {
            const auto mismatch = std::abs(matrix.At(row, col) - matrix.At(col, row));
            if (mismatch >= std::numeric_limits<float>::epsilon())
                return false;
        }
    }
    return true;
}
} // namespace ubsmear
#endif
| 45.088292 | 200 | 0.623473 | [
"vector",
"transform"
] |
df7100d94caa658f00406bdc91957d88e37ba6c0 | 1,833 | h | C | src/core/rnn/regress/Reshaper_v1.h | arjun-k-r/Sibyl | ae2edbb09f58e8b1cef79470e8ca9c02c244fdcb | [
"Apache-2.0"
] | 34 | 2017-03-01T05:49:17.000Z | 2022-01-01T15:30:06.000Z | src/core/rnn/regress/Reshaper_v1.h | arjun-k-r/Sibyl | ae2edbb09f58e8b1cef79470e8ca9c02c244fdcb | [
"Apache-2.0"
] | 1 | 2018-12-19T17:02:52.000Z | 2018-12-19T17:02:52.000Z | src/core/rnn/regress/Reshaper_v1.h | junosan/Sibyl | ae2edbb09f58e8b1cef79470e8ca9c02c244fdcb | [
"Apache-2.0"
] | 24 | 2017-09-19T01:51:50.000Z | 2022-02-04T19:53:16.000Z | /*
Copyright 2017 Hosang Yoon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef SIBYL_RESHAPER_V1_H_
#define SIBYL_RESHAPER_V1_H_
#include "../Reshaper.h"
#include <array>
#include <vector>
#include <map>
namespace sibyl
{
// Concrete Reshaper that converts between the trading framework's native data
// (ItemState / Reward) and the flat float vectors consumed/produced by the
// neural network. The exact encoding is defined in the implementation file.
class Reshaper_v1 : public Reshaper
{
public:
    // Constructs the reshaper; the remaining arguments wire it to the owning
    // TradeDataSet, the list of data files, and the member function used to
    // read a raw data file.
    Reshaper_v1(unsigned long maxGTck_, // this will be ignored and overwritten
                TradeDataSet *pTradeDataSet_,
                std::vector<std::string> *pFileList_,
                const unsigned long (TradeDataSet::* ReadRawFile_)(std::vector<FLOAT>&, CSTR&));
    // Reads reshaper parameters from the given configuration file
    // (presumably the b_th/s_th thresholds below -- confirm in the .cpp)
    void ReadConfig(CSTR &filename) override;
    /* raw -> fractal */
    /* sibyl -> fractal */
    // Encodes a market ItemState into the network input vector `vec`
    void State2VecIn(FLOAT *vec, const ItemState &state) override;
    /* ref -> fractal */
    // Encodes a reference Reward into the network target vector `vec`
    void Reward2VecOut(FLOAT *vec, const Reward &reward, CSTR &code) override;
    /* fractal -> sibyl */
    // Decodes the network output vector `vec` back into a Reward
    void VecOut2Reward(Reward &reward, const FLOAT *vec, CSTR &code) override;
private:
    double b_th, s_th; // thresholds (names suggest buy/sell) -- values assigned elsewhere; TODO confirm
    // Per-item memory carried across successive calls for the same item code
    struct ItemMem {
        FLOAT initPr;                     // initial price -- presumably captured from the first state; confirm
        std::array<PQ, idx::szTb> lastTb; // last observed price/quantity table entries -- TODO confirm semantics
        std::vector<double> idleG;        // per-time-step values consumed via `cursor` -- TODO confirm semantics
        std::size_t cursor; // points at idx for current time in idleG
        ItemMem() : initPr(0.0f), lastTb{}, cursor(0) {}
    };
    std::map<STR, ItemMem> items; // keyed by item code (presumably the `code` argument above)
};
}
#endif /* SIBYL_RESHAPER_V1_H_ */ | 28.640625 | 96 | 0.666121 | [
"vector"
] |
df7257a63a1ab1e6b6167ad77b9b9c3bc85b23fc | 3,238 | h | C | Samples/2.0/Tutorials/Tutorial_Terrain/include/Terra/Hlms/PbsListener/OgreHlmsPbsTerraShadows.h | Stazer/ogre-next | 4d3aa2aedf4575ac8a9e32a4e2af859562752d51 | [
"MIT"
] | 25 | 2020-04-15T16:59:45.000Z | 2022-02-09T00:07:34.000Z | Samples/2.0/Tutorials/Tutorial_Terrain/include/Terra/Hlms/PbsListener/OgreHlmsPbsTerraShadows.h | Stazer/ogre-next | 4d3aa2aedf4575ac8a9e32a4e2af859562752d51 | [
"MIT"
] | 510 | 2020-04-20T23:26:31.000Z | 2022-03-31T14:33:36.000Z | Samples/2.0/Tutorials/Tutorial_Terrain/include/Terra/Hlms/PbsListener/OgreHlmsPbsTerraShadows.h | Stazer/ogre-next | 4d3aa2aedf4575ac8a9e32a4e2af859562752d51 | [
"MIT"
] | 30 | 2020-05-22T17:41:38.000Z | 2022-02-28T17:07:19.000Z | /*
-----------------------------------------------------------------------------
This source file is part of OGRE
(Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.ogre3d.org/
Copyright (c) 2000-2014 Torus Knot Software Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
*/
#ifndef _OgreHlmsPbsTerraShadows_
#define _OgreHlmsPbsTerraShadows_
#include "OgreGpuProgram.h"
#include "OgreHlmsListener.h"
namespace Ogre
{
class Terra;
    /// Hlms listener that hooks Terra terrain shadow information into the PBS
    /// material system: it participates in shader-cache creation, pass-hash
    /// preparation and pass-buffer filling (the concrete behaviour lives in
    /// the implementation file).
    class HlmsPbsTerraShadows : public HlmsListener
    {
    protected:
        Terra *mTerra;                              ///< Terrain to expose; installed via setTerra()
        HlmsSamplerblock const *mTerraSamplerblock; ///< Samplerblock for the terrain texture -- presumably; confirm in .cpp
#if OGRE_DEBUG_MODE
        SceneManager *mSceneManager; ///< Only compiled in for debug builds (likely for sanity checks -- confirm)
#endif
    public:
        HlmsPbsTerraShadows();
        ~HlmsPbsTerraShadows();
        /// Sets the terrain whose shadow information should be made available to PBS.
        void setTerra( Terra *terra );
        /// HlmsListener hook: notified whenever a shader cache entry is created.
        virtual void shaderCacheEntryCreated( const String &shaderProfile,
                                              const HlmsCache *hlmsCacheEntry,
                                              const HlmsCache &passCache,
                                              const HlmsPropertyVec &properties,
                                              const QueuedRenderable &queuedRenderable );
        /// HlmsListener hook: contributes this listener's state to the per-pass hash.
        virtual void preparePassHash( const CompositorShadowNode *shadowNode,
                                      bool casterPass, bool dualParaboloid,
                                      SceneManager *sceneManager, Hlms *hlms );
        /// HlmsListener hook: number of extra bytes this listener needs in the pass buffer.
        virtual uint32 getPassBufferSize( const CompositorShadowNode *shadowNode, bool casterPass,
                                          bool dualParaboloid, SceneManager *sceneManager ) const;
        /// HlmsListener hook: writes this listener's data at passBufferPtr and returns
        /// the (presumably advanced) write pointer.
        virtual float* preparePassBuffer( const CompositorShadowNode *shadowNode, bool casterPass,
                                          bool dualParaboloid, SceneManager *sceneManager,
                                          float *passBufferPtr );
        /// HlmsListener hook: called when the active Hlms type changes for a command buffer.
        virtual void hlmsTypeChanged( bool casterPass, CommandBuffer *commandBuffer,
                                      const HlmsDatablock *datablock );
    };
    /// Hlms property key(s) used by the terra-shadow listener; TerraEnabled is
    /// presumably the shader property toggling the Terra code path -- its value
    /// is defined in the implementation file.
    struct PbsTerraProperty
    {
        static const IdString TerraEnabled;
    };
}
#endif
| 39.487805 | 98 | 0.624768 | [
"object"
] |
df7c097d0097f84d5d98feab6d65eac24dfbda3c | 188,194 | h | C | Translator_file/deal.II/matrix_free/evaluation_kernels.h | jiaqiwang969/deal.ii-course-practice | 0da5ad1537d8152549d8a0e4de5872efe7619c8a | [
"MIT"
] | null | null | null | Translator_file/deal.II/matrix_free/evaluation_kernels.h | jiaqiwang969/deal.ii-course-practice | 0da5ad1537d8152549d8a0e4de5872efe7619c8a | [
"MIT"
] | null | null | null | Translator_file/deal.II/matrix_free/evaluation_kernels.h | jiaqiwang969/deal.ii-course-practice | 0da5ad1537d8152549d8a0e4de5872efe7619c8a | [
"MIT"
] | null | null | null | //include/deal.II-translator/matrix_free/evaluation_kernels_0.txt
// ---------------------------------------------------------------------
//
// Copyright (C) 2017 - 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------
#ifndef dealii_matrix_free_evaluation_kernels_h
#define dealii_matrix_free_evaluation_kernels_h
#include <deal.II/base/config.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/vectorization.h>
#include <deal.II/matrix_free/dof_info.h>
#include <deal.II/matrix_free/evaluation_flags.h>
#include <deal.II/matrix_free/shape_info.h>
#include <deal.II/matrix_free/tensor_product_kernels.h>
#include <deal.II/matrix_free/type_traits.h>
DEAL_II_NAMESPACE_OPEN
// forward declaration
template <int, typename, bool, typename>
class FEEvaluationBaseData;
namespace internal
{
  // Select evaluator type from element shape function type. The boolean
  // template argument `is_long` is instantiated further below as
  // (fe_degree + n_q_points_1d > 4), i.e. it distinguishes small from large
  // 1D kernel sizes when picking the EvaluatorVariant.
  template <MatrixFreeFunctions::ElementType element, bool is_long>
  struct EvaluatorSelector
  {};
  // General tensor-product elements always use the generic kernels.
  template <bool is_long>
  struct EvaluatorSelector<MatrixFreeFunctions::tensor_general, is_long>
  {
    static const EvaluatorVariant variant = evaluate_general;
  };
  // Symmetric elements: symmetry-exploiting kernel for small 1D sizes...
  template <>
  struct EvaluatorSelector<MatrixFreeFunctions::tensor_symmetric, false>
  {
    static const EvaluatorVariant variant = evaluate_symmetric;
  };
  // ...and the even-odd decomposition for larger ones.
  template <>
  struct EvaluatorSelector<MatrixFreeFunctions::tensor_symmetric, true>
  {
    static const EvaluatorVariant variant = evaluate_evenodd;
  };
  // Truncated tensor products are handled by the generic kernels.
  template <bool is_long>
  struct EvaluatorSelector<MatrixFreeFunctions::truncated_tensor, is_long>
  {
    static const EvaluatorVariant variant = evaluate_general;
  };
  // Symmetric elements with an additional constant mode (FE_Q_DG0-type, see
  // the special-case handling in FEEvaluationImpl::evaluate): generic kernels
  // for small sizes...
  template <>
  struct EvaluatorSelector<MatrixFreeFunctions::tensor_symmetric_plus_dg0,
                           false>
  {
    static const EvaluatorVariant variant = evaluate_general;
  };
  // ...and even-odd for larger ones.
  template <>
  struct EvaluatorSelector<MatrixFreeFunctions::tensor_symmetric_plus_dg0, true>
  {
    static const EvaluatorVariant variant = evaluate_evenodd;
  };
  // Collocation elements (nodes coinciding with quadrature points) always use
  // the even-odd decomposition.
  template <bool is_long>
  struct EvaluatorSelector<MatrixFreeFunctions::tensor_symmetric_collocation,
                           is_long>
  {
    static const EvaluatorVariant variant = evaluate_evenodd;
  };
  /**
   * This struct performs the evaluation of function values, gradients and
   * Hessians for tensor-product finite elements. The operation is used for
   * both the symmetric and the non-symmetric case, applying different
   * 'values'/'gradients' kernels along the individual coordinate directions.
   * The apply functions for values are provided through one of the
   * EvaluatorTensorProduct template classes, which in turn are selected from
   * the MatrixFreeFunctions::ElementType template argument.
   *
   * There are two specialized implementation classes,
   * FEEvaluationImplCollocation (for Gauss-Lobatto elements where the nodal
   * points and the quadrature points coincide and the 'values' operation is
   * the identity) and FEEvaluationImplTransformToCollocation (which can
   * transform to a collocation space and then use the identity in those
   * spaces), both of which allow for shorter code.
   */
  template <MatrixFreeFunctions::ElementType type,
            int dim,
            int fe_degree,
            int n_q_points_1d,
            typename Number>
  struct FEEvaluationImpl
  {
    // Evaluates the solution coefficients `values_dofs_actual` at the
    // quadrature points, filling `values_quad`, `gradients_quad` and/or
    // `hessians_quad` as requested by `evaluation_flag`; `scratch_data`
    // provides temporary storage for the direction-by-direction sweeps.
    static void
    evaluate(const unsigned int                            n_components,
             const EvaluationFlags::EvaluationFlags        evaluation_flag,
             const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
             const Number *                                values_dofs_actual,
             Number *                                      values_quad,
             Number *                                      gradients_quad,
             Number *                                      hessians_quad,
             Number *                                      scratch_data);
    // Transpose operation of evaluate(): tests the quadrature-point data in
    // `values_quad`/`gradients_quad` by the shape functions and writes the
    // result into `values_dofs_actual` (accumulating or overwriting,
    // depending on `add_into_values_array`).
    static void
    integrate(const unsigned int                            n_components,
              const EvaluationFlags::EvaluationFlags        integration_flag,
              const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
              Number *                                      values_dofs_actual,
              Number *                                      values_quad,
              Number *                                      gradients_quad,
              Number *                                      scratch_data,
              const bool add_into_values_array);
  };
  /**
   * Specialization for MatrixFreeFunctions::tensor_none, which cannot make
   * use of the sum-factorization kernels.
   */
  template <int dim, int fe_degree, int n_q_points_1d, typename Number>
  struct FEEvaluationImpl<MatrixFreeFunctions::tensor_none,
                          dim,
                          fe_degree,
                          n_q_points_1d,
                          Number>
  {
    // Same interface as the primary template: evaluates values, gradients
    // and/or Hessians at the quadrature points as requested by
    // `evaluation_flag`.
    static void
    evaluate(const unsigned int                            n_components,
             const EvaluationFlags::EvaluationFlags        evaluation_flag,
             const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
             const Number *                                values_dofs_actual,
             Number *                                      values_quad,
             Number *                                      gradients_quad,
             Number *                                      hessians_quad,
             Number *                                      scratch_data);
    // Transpose of evaluate(): integrates quadrature-point data back into the
    // coefficient array (accumulating or overwriting, depending on
    // `add_into_values_array`).
    static void
    integrate(const unsigned int                            n_components,
              const EvaluationFlags::EvaluationFlags        integration_flag,
              const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
              Number *                                      values_dofs_actual,
              Number *                                      values_quad,
              Number *                                      gradients_quad,
              Number *                                      scratch_data,
              const bool add_into_values_array);
  };
  // Generic sum-factorization evaluation: applies the 1D kernels direction by
  // direction, using the scratch arrays temp1/temp2 for intermediate results.
  template <MatrixFreeFunctions::ElementType type,
            int dim,
            int fe_degree,
            int n_q_points_1d,
            typename Number>
  inline void
  FEEvaluationImpl<type, dim, fe_degree, n_q_points_1d, Number>::evaluate(
    const unsigned int                            n_components,
    const EvaluationFlags::EvaluationFlags        evaluation_flag,
    const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
    const Number *                                values_dofs_actual,
    Number *                                      values_quad,
    Number *                                      gradients_quad,
    Number *                                      hessians_quad,
    Number *                                      scratch_data)
  {
    // nothing was requested, so there is nothing to compute
    if (evaluation_flag == EvaluationFlags::nothing)
      return;
    // pick the kernel variant for this element type and feed the evaluator
    // with the matching (possibly even-odd-compressed) 1D shape data
    const EvaluatorVariant variant =
      EvaluatorSelector<type, (fe_degree + n_q_points_1d > 4)>::variant;
    using Eval = EvaluatorTensorProduct<variant,
                                        dim,
                                        fe_degree + 1,
                                        n_q_points_1d,
                                        Number>;
    Eval eval(variant == evaluate_evenodd ?
                shape_info.data.front().shape_values_eo :
                shape_info.data.front().shape_values,
              variant == evaluate_evenodd ?
                shape_info.data.front().shape_gradients_eo :
                shape_info.data.front().shape_gradients,
              variant == evaluate_evenodd ?
                shape_info.data.front().shape_hessians_eo :
                shape_info.data.front().shape_hessians,
              shape_info.data.front().fe_degree + 1,
              shape_info.data.front().n_q_points_1d);
    // size of one intermediate array; zero signals run-time-sized kernels
    const unsigned int temp_size =
      Eval::n_rows_of_product == numbers::invalid_unsigned_int ?
        0 :
        (Eval::n_rows_of_product > Eval::n_columns_of_product ?
           Eval::n_rows_of_product :
           Eval::n_columns_of_product);
    // carve the two intermediate arrays temp1/temp2 out of scratch_data
    Number *temp1 = scratch_data;
    Number *temp2;
    if (temp_size == 0)
      {
        temp2 = temp1 + std::max(Utilities::fixed_power<dim>(
                                   shape_info.data.front().fe_degree + 1),
                                 Utilities::fixed_power<dim>(
                                   shape_info.data.front().n_q_points_1d));
      }
    else
      {
        temp2 = temp1 + temp_size;
      }
    const unsigned int n_q_points =
      temp_size == 0 ? shape_info.n_q_points : Eval::n_columns_of_product;
    const unsigned int dofs_per_comp =
      (type == MatrixFreeFunctions::truncated_tensor) ?
        Utilities::fixed_power<dim>(shape_info.data.front().fe_degree + 1) :
        shape_info.dofs_per_component_on_cell;
    // for truncated tensor products, expand the compressed coefficient list
    // into the full (degree+1)^dim tensor-product layout, padding with zeros
    const Number *values_dofs = values_dofs_actual;
    if (type == MatrixFreeFunctions::truncated_tensor)
      {
        Number *values_dofs_tmp =
          scratch_data + 2 * (std::max(shape_info.dofs_per_component_on_cell,
                                       shape_info.n_q_points));
        const int degree =
          fe_degree != -1 ? fe_degree : shape_info.data.front().fe_degree;
        for (unsigned int c = 0; c < n_components; ++c)
          for (int i = 0, count_p = 0, count_q = 0;
               i < (dim > 2 ? degree + 1 : 1);
               ++i)
            {
              for (int j = 0; j < (dim > 1 ? degree + 1 - i : 1); ++j)
                {
                  for (int k = 0; k < degree + 1 - j - i;
                       ++k, ++count_p, ++count_q)
                    values_dofs_tmp[c * dofs_per_comp + count_q] =
                      values_dofs_actual
                        [c * shape_info.dofs_per_component_on_cell + count_p];
                  for (int k = degree + 1 - j - i; k < degree + 1;
                       ++k, ++count_q)
                    values_dofs_tmp[c * dofs_per_comp + count_q] = Number();
                }
              for (int j = degree + 1 - i; j < degree + 1; ++j)
                for (int k = 0; k < degree + 1; ++k, ++count_q)
                  values_dofs_tmp[c * dofs_per_comp + count_q] = Number();
            }
        values_dofs = values_dofs_tmp;
      }
    // dimension-specific sweeps; intermediate results are reused between the
    // value/gradient/Hessian computations where possible
    switch (dim)
      {
        case 1:
          for (unsigned int c = 0; c < n_components; c++)
            {
              if (evaluation_flag & EvaluationFlags::values)
                eval.template values<0, true, false>(values_dofs, values_quad);
              if (evaluation_flag & EvaluationFlags::gradients)
                eval.template gradients<0, true, false>(values_dofs,
                                                        gradients_quad);
              if (evaluation_flag & EvaluationFlags::hessians)
                eval.template hessians<0, true, false>(values_dofs,
                                                       hessians_quad);
              // advance the next component in 1D array
              values_dofs += dofs_per_comp;
              values_quad += n_q_points;
              gradients_quad += n_q_points;
              hessians_quad += n_q_points;
            }
          break;
        case 2:
          for (unsigned int c = 0; c < n_components; c++)
            {
              // grad x
              if (evaluation_flag & EvaluationFlags::gradients)
                {
                  eval.template gradients<0, true, false>(values_dofs, temp1);
                  eval.template values<1, true, false>(temp1, gradients_quad);
                }
              if (evaluation_flag & EvaluationFlags::hessians)
                {
                  // grad xy
                  if (!(evaluation_flag & EvaluationFlags::gradients))
                    eval.template gradients<0, true, false>(values_dofs, temp1);
                  eval.template gradients<1, true, false>(temp1,
                                                          hessians_quad +
                                                            2 * n_q_points);
                  // grad xx
                  eval.template hessians<0, true, false>(values_dofs, temp1);
                  eval.template values<1, true, false>(temp1, hessians_quad);
                }
              // grad y
              eval.template values<0, true, false>(values_dofs, temp1);
              if (evaluation_flag & EvaluationFlags::gradients)
                eval.template gradients<1, true, false>(temp1,
                                                        gradients_quad +
                                                          n_q_points);
              // grad yy
              if (evaluation_flag & EvaluationFlags::hessians)
                eval.template hessians<1, true, false>(temp1,
                                                       hessians_quad +
                                                         n_q_points);
              // val: can use values applied in x
              if (evaluation_flag & EvaluationFlags::values)
                eval.template values<1, true, false>(temp1, values_quad);
              // advance to the next component in 1D array
              values_dofs += dofs_per_comp;
              values_quad += n_q_points;
              gradients_quad += 2 * n_q_points;
              hessians_quad += 3 * n_q_points;
            }
          break;
        case 3:
          for (unsigned int c = 0; c < n_components; c++)
            {
              if (evaluation_flag & EvaluationFlags::gradients)
                {
                  // grad x
                  eval.template gradients<0, true, false>(values_dofs, temp1);
                  eval.template values<1, true, false>(temp1, temp2);
                  eval.template values<2, true, false>(temp2, gradients_quad);
                }
              if (evaluation_flag & EvaluationFlags::hessians)
                {
                  // grad xz
                  if (!(evaluation_flag & EvaluationFlags::gradients))
                    {
                      eval.template gradients<0, true, false>(values_dofs,
                                                              temp1);
                      eval.template values<1, true, false>(temp1, temp2);
                    }
                  eval.template gradients<2, true, false>(temp2,
                                                          hessians_quad +
                                                            4 * n_q_points);
                  // grad xy
                  eval.template gradients<1, true, false>(temp1, temp2);
                  eval.template values<2, true, false>(temp2,
                                                       hessians_quad +
                                                         3 * n_q_points);
                  // grad xx
                  eval.template hessians<0, true, false>(values_dofs, temp1);
                  eval.template values<1, true, false>(temp1, temp2);
                  eval.template values<2, true, false>(temp2, hessians_quad);
                }
              // grad y
              eval.template values<0, true, false>(values_dofs, temp1);
              if (evaluation_flag & EvaluationFlags::gradients)
                {
                  eval.template gradients<1, true, false>(temp1, temp2);
                  eval.template values<2, true, false>(temp2,
                                                       gradients_quad +
                                                         n_q_points);
                }
              if (evaluation_flag & EvaluationFlags::hessians)
                {
                  // grad yz
                  if (!(evaluation_flag & EvaluationFlags::gradients))
                    eval.template gradients<1, true, false>(temp1, temp2);
                  eval.template gradients<2, true, false>(temp2,
                                                          hessians_quad +
                                                            5 * n_q_points);
                  // grad yy
                  eval.template hessians<1, true, false>(temp1, temp2);
                  eval.template values<2, true, false>(temp2,
                                                       hessians_quad +
                                                         n_q_points);
                }
              // grad z: can use the values applied in x direction stored in
              // temp1
              eval.template values<1, true, false>(temp1, temp2);
              if (evaluation_flag & EvaluationFlags::gradients)
                eval.template gradients<2, true, false>(temp2,
                                                        gradients_quad +
                                                          2 * n_q_points);
              // grad zz: can use the values applied in x and y direction stored
              // in temp2
              if (evaluation_flag & EvaluationFlags::hessians)
                eval.template hessians<2, true, false>(temp2,
                                                       hessians_quad +
                                                         2 * n_q_points);
              // val: can use the values applied in x & y direction stored in
              // temp2
              if (evaluation_flag & EvaluationFlags::values)
                eval.template values<2, true, false>(temp2, values_quad);
              // advance to the next component in 1D array
              values_dofs += dofs_per_comp;
              values_quad += n_q_points;
              gradients_quad += 3 * n_q_points;
              hessians_quad += 6 * n_q_points;
            }
          break;
        default:
          AssertThrow(false, ExcNotImplemented());
      }
    // case additional dof for FE_Q_DG0: add values; gradients and second
    // derivatives evaluate to zero
    if (type == MatrixFreeFunctions::tensor_symmetric_plus_dg0 &&
        (evaluation_flag & EvaluationFlags::values))
      {
        // rewind the pointers that were advanced per component above
        values_quad -= n_components * n_q_points;
        values_dofs -= n_components * dofs_per_comp;
        for (unsigned int c = 0; c < n_components; ++c)
          for (unsigned int q = 0; q < shape_info.n_q_points; ++q)
            values_quad[c * shape_info.n_q_points + q] +=
              values_dofs[(c + 1) * shape_info.dofs_per_component_on_cell - 1];
      }
  }
  // Sum-factorized integration, the transpose of evaluate(): tests the data
  // in values_quad / gradients_quad with the shape functions and accumulates
  // the result into values_dofs_actual, one 1D contraction per coordinate
  // direction. temp1/temp2 (carved out of scratch_data) hold intermediate
  // states between the directional contractions; the <dir, false, add>
  // template arguments of the evaluator select direction, integration mode,
  // and whether to add into or overwrite the destination.
  template <MatrixFreeFunctions::ElementType type,
            int dim,
            int fe_degree,
            int n_q_points_1d,
            typename Number>
  inline void
  FEEvaluationImpl<type, dim, fe_degree, n_q_points_1d, Number>::integrate(
    const unsigned int                     n_components,
    const EvaluationFlags::EvaluationFlags integration_flag,
    const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
    Number *                               values_dofs_actual,
    Number *                               values_quad,
    Number *                               gradients_quad,
    Number *                               scratch_data,
    const bool                             add_into_values_array)
  {
    // Use the even-odd decomposition of the 1D matrices when the degree is
    // large enough for it to pay off (same selection as in evaluate()).
    const EvaluatorVariant variant =
      EvaluatorSelector<type, (fe_degree + n_q_points_1d > 4)>::variant;
    using Eval = EvaluatorTensorProduct<variant,
                                        dim,
                                        fe_degree + 1,
                                        n_q_points_1d,
                                        Number>;
    Eval eval(variant == evaluate_evenodd ?
                shape_info.data.front().shape_values_eo :
                shape_info.data.front().shape_values,
              variant == evaluate_evenodd ?
                shape_info.data.front().shape_gradients_eo :
                shape_info.data.front().shape_gradients,
              variant == evaluate_evenodd ?
                shape_info.data.front().shape_hessians_eo :
                shape_info.data.front().shape_hessians,
              shape_info.data.front().fe_degree + 1,
              shape_info.data.front().n_q_points_1d);

    // Size of the largest intermediate array; zero signals run-time sizes
    // (fe_degree == -1), in which case the buffers are sized from shape_info.
    const unsigned int temp_size =
      Eval::n_rows_of_product == numbers::invalid_unsigned_int ?
        0 :
        (Eval::n_rows_of_product > Eval::n_columns_of_product ?
           Eval::n_rows_of_product :
           Eval::n_columns_of_product);
    Number *temp1 = scratch_data;
    Number *temp2;
    if (temp_size == 0)
      {
        temp2 = temp1 + std::max(Utilities::fixed_power<dim>(
                                   shape_info.data.front().fe_degree + 1),
                                 Utilities::fixed_power<dim>(
                                   shape_info.data.front().n_q_points_1d));
      }
    else
      {
        temp2 = temp1 + temp_size;
      }

    const unsigned int n_q_points =
      temp_size == 0 ? shape_info.n_q_points : Eval::n_columns_of_product;
    // For truncated tensor products the integration works on the full
    // tensor-product space; the compression back to the actual dof layout
    // happens at the end of this function.
    const unsigned int dofs_per_comp =
      (type == MatrixFreeFunctions::truncated_tensor) ?
        Utilities::fixed_power<dim>(shape_info.data.front().fe_degree + 1) :
        shape_info.dofs_per_component_on_cell;
    // expand dof_values to tensor product for truncated tensor products
    Number *values_dofs =
      (type == MatrixFreeFunctions::truncated_tensor) ?
        scratch_data + 2 * (std::max(shape_info.dofs_per_component_on_cell,
                                     shape_info.n_q_points)) :
        values_dofs_actual;

    switch (dim)
      {
        case 1:
          for (unsigned int c = 0; c < n_components; c++)
            {
              if (integration_flag & EvaluationFlags::values)
                {
                  // The 'false' overwrite variant avoids reading values_dofs
                  // when it does not yet contain valid data.
                  if (add_into_values_array == false)
                    eval.template values<0, false, false>(values_quad,
                                                          values_dofs);
                  else
                    eval.template values<0, false, true>(values_quad,
                                                         values_dofs);
                }
              if (integration_flag & EvaluationFlags::gradients)
                {
                  // Add onto the values contribution if one was written (or
                  // if the caller requested accumulation).
                  if (integration_flag & EvaluationFlags::values ||
                      add_into_values_array == true)
                    eval.template gradients<0, false, true>(gradients_quad,
                                                            values_dofs);
                  else
                    eval.template gradients<0, false, false>(gradients_quad,
                                                             values_dofs);
                }

              // advance to the next component in 1D array
              values_dofs += dofs_per_comp;
              values_quad += n_q_points;
              gradients_quad += n_q_points;
            }
          break;

        case 2:
          for (unsigned int c = 0; c < n_components; c++)
            {
              if ((integration_flag & EvaluationFlags::values) &&
                  !(integration_flag & EvaluationFlags::gradients))
                {
                  // Values only: contract y then x.
                  eval.template values<1, false, false>(values_quad, temp1);
                  if (add_into_values_array == false)
                    eval.template values<0, false, false>(temp1, values_dofs);
                  else
                    eval.template values<0, false, true>(temp1, values_dofs);
                }
              if (integration_flag & EvaluationFlags::gradients)
                {
                  // y-gradient contribution (fused with values if requested)
                  eval.template gradients<1, false, false>(gradients_quad +
                                                             n_q_points,
                                                           temp1);
                  if (integration_flag & EvaluationFlags::values)
                    eval.template values<1, false, true>(values_quad, temp1);
                  if (add_into_values_array == false)
                    eval.template values<0, false, false>(temp1, values_dofs);
                  else
                    eval.template values<0, false, true>(temp1, values_dofs);
                  // x-gradient contribution, added on top
                  eval.template values<1, false, false>(gradients_quad, temp1);
                  eval.template gradients<0, false, true>(temp1, values_dofs);
                }

              // advance to the next component in 1D array
              values_dofs += dofs_per_comp;
              values_quad += n_q_points;
              gradients_quad += 2 * n_q_points;
            }
          break;

        case 3:
          for (unsigned int c = 0; c < n_components; c++)
            {
              if ((integration_flag & EvaluationFlags::values) &&
                  !(integration_flag & EvaluationFlags::gradients))
                {
                  // Values only: contract z, then y, then x.
                  eval.template values<2, false, false>(values_quad, temp1);
                  eval.template values<1, false, false>(temp1, temp2);
                  if (add_into_values_array == false)
                    eval.template values<0, false, false>(temp2, values_dofs);
                  else
                    eval.template values<0, false, true>(temp2, values_dofs);
                }
              if (integration_flag & EvaluationFlags::gradients)
                {
                  // z-gradient (fused with values), then y-gradient, then
                  // x-gradient, each added into the common values_dofs sum.
                  eval.template gradients<2, false, false>(gradients_quad +
                                                             2 * n_q_points,
                                                           temp1);
                  if (integration_flag & EvaluationFlags::values)
                    eval.template values<2, false, true>(values_quad, temp1);
                  eval.template values<1, false, false>(temp1, temp2);
                  eval.template values<2, false, false>(gradients_quad +
                                                          n_q_points,
                                                        temp1);
                  eval.template gradients<1, false, true>(temp1, temp2);
                  if (add_into_values_array == false)
                    eval.template values<0, false, false>(temp2, values_dofs);
                  else
                    eval.template values<0, false, true>(temp2, values_dofs);
                  eval.template values<2, false, false>(gradients_quad, temp1);
                  eval.template values<1, false, false>(temp1, temp2);
                  eval.template gradients<0, false, true>(temp2, values_dofs);
                }

              // advance to the next component in 1D array
              values_dofs += dofs_per_comp;
              values_quad += n_q_points;
              gradients_quad += 3 * n_q_points;
            }
          break;

        default:
          AssertThrow(false, ExcNotImplemented());
      }

    // case FE_Q_DG0: add values, gradients and second derivatives are zero
    if (type == MatrixFreeFunctions::tensor_symmetric_plus_dg0)
      {
        // Rewind the component pointers and sum all quadrature values into
        // the single extra constant-mode dof per component.
        values_dofs -= n_components * dofs_per_comp -
                       shape_info.dofs_per_component_on_cell + 1;
        values_quad -= n_components * n_q_points;
        if (integration_flag & EvaluationFlags::values)
          for (unsigned int c = 0; c < n_components; ++c)
            {
              values_dofs[0] = values_quad[0];
              for (unsigned int q = 1; q < shape_info.n_q_points; ++q)
                values_dofs[0] += values_quad[q];
              values_dofs += dofs_per_comp;
              values_quad += n_q_points;
            }
        else
          {
            // No values integrated: the constant mode receives zero.
            for (unsigned int c = 0; c < n_components; ++c)
              values_dofs[c * shape_info.dofs_per_component_on_cell] = Number();
            values_dofs += n_components * shape_info.dofs_per_component_on_cell;
          }
      }

    if (type == MatrixFreeFunctions::truncated_tensor)
      {
        // Compress the full tensor-product result back into the truncated
        // (total-degree) dof numbering of the actual element.
        values_dofs -= dofs_per_comp * n_components;
        const int degree =
          fe_degree != -1 ? fe_degree : shape_info.data.front().fe_degree;
        for (unsigned int c = 0; c < n_components; ++c)
          for (int i = 0, count_p = 0, count_q = 0;
               i < (dim > 2 ? degree + 1 : 1);
               ++i)
            {
              for (int j = 0; j < (dim > 1 ? degree + 1 - i : 1); ++j)
                {
                  for (int k = 0; k < degree + 1 - j - i;
                       ++k, ++count_p, ++count_q)
                    values_dofs_actual[c *
                                         shape_info.dofs_per_component_on_cell +
                                       count_p] =
                      values_dofs[c * dofs_per_comp + count_q];
                  count_q += j + i;
                }
              count_q += i * (degree + 1);
            }
      }
  }
template <int dim, int fe_degree, int n_q_points_1d, typename Number>
inline void
FEEvaluationImpl<
MatrixFreeFunctions::tensor_none,
dim,
fe_degree,
n_q_points_1d,
Number>::evaluate(const unsigned int n_components,
const EvaluationFlags::EvaluationFlags evaluation_flag,
const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
const Number *values_dofs_actual,
Number * values_quad,
Number * gradients_quad,
Number * hessians_quad,
Number * scratch_data)
{
(void)scratch_data;
const unsigned int n_dofs = shape_info.dofs_per_component_on_cell;
const unsigned int n_q_points = shape_info.n_q_points;
using Eval =
EvaluatorTensorProduct<evaluate_general, 1, 0, 0, Number, Number>;
if (evaluation_flag & EvaluationFlags::values)
{
const auto shape_values = shape_info.data.front().shape_values.data();
auto values_quad_ptr = values_quad;
auto values_dofs_actual_ptr = values_dofs_actual;
Eval eval(shape_values, nullptr, nullptr, n_dofs, n_q_points);
for (unsigned int c = 0; c < n_components; ++c)
{
eval.template values<0, true, false>(values_dofs_actual_ptr,
values_quad_ptr);
values_quad_ptr += n_q_points;
values_dofs_actual_ptr += n_dofs;
}
}
if (evaluation_flag & EvaluationFlags::gradients)
{
const auto shape_gradients =
shape_info.data.front().shape_gradients.data();
auto gradients_quad_ptr = gradients_quad;
auto values_dofs_actual_ptr = values_dofs_actual;
for (unsigned int c = 0; c < n_components; ++c)
{
for (unsigned int d = 0; d < dim; ++d)
{
Eval eval(nullptr,
shape_gradients + n_q_points * n_dofs * d,
nullptr,
n_dofs,
n_q_points);
eval.template gradients<0, true, false>(values_dofs_actual_ptr,
gradients_quad_ptr);
gradients_quad_ptr += n_q_points;
}
values_dofs_actual_ptr += n_dofs;
}
}
if (evaluation_flag & EvaluationFlags::hessians)
{
Assert(false, ExcNotImplemented());
(void)hessians_quad;
}
}
template <int dim, int fe_degree, int n_q_points_1d, typename Number>
inline void
FEEvaluationImpl<
MatrixFreeFunctions::tensor_none,
dim,
fe_degree,
n_q_points_1d,
Number>::integrate(const unsigned int n_components,
const EvaluationFlags::EvaluationFlags integration_flag,
const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
Number * values_dofs_actual,
Number * values_quad,
Number * gradients_quad,
Number * scratch_data,
const bool add_into_values_array)
{
(void)scratch_data;
const unsigned int n_dofs = shape_info.dofs_per_component_on_cell;
const unsigned int n_q_points = shape_info.n_q_points;
using Eval =
EvaluatorTensorProduct<evaluate_general, 1, 0, 0, Number, Number>;
if (integration_flag & EvaluationFlags::values)
{
const auto shape_values = shape_info.data.front().shape_values.data();
auto values_quad_ptr = values_quad;
auto values_dofs_actual_ptr = values_dofs_actual;
Eval eval(shape_values, nullptr, nullptr, n_dofs, n_q_points);
for (unsigned int c = 0; c < n_components; ++c)
{
if (add_into_values_array == false)
eval.template values<0, false, false>(values_quad_ptr,
values_dofs_actual_ptr);
else
eval.template values<0, false, true>(values_quad_ptr,
values_dofs_actual_ptr);
values_quad_ptr += n_q_points;
values_dofs_actual_ptr += n_dofs;
}
}
if (integration_flag & EvaluationFlags::gradients)
{
const auto shape_gradients =
shape_info.data.front().shape_gradients.data();
auto gradients_quad_ptr = gradients_quad;
auto values_dofs_actual_ptr = values_dofs_actual;
for (unsigned int c = 0; c < n_components; ++c)
{
for (unsigned int d = 0; d < dim; ++d)
{
Eval eval(nullptr,
shape_gradients + n_q_points * n_dofs * d,
nullptr,
n_dofs,
n_q_points);
if ((add_into_values_array == false &&
(integration_flag & EvaluationFlags::values) == false) &&
d == 0)
eval.template gradients<0, false, false>(
gradients_quad_ptr, values_dofs_actual_ptr);
else
eval.template gradients<0, false, true>(
gradients_quad_ptr, values_dofs_actual_ptr);
gradients_quad_ptr += n_q_points;
}
values_dofs_actual_ptr += n_dofs;
}
}
}
  /**
   * This struct implements the change between two different bases. This is
   * an ingredient in the FEEvaluationImplTransformToCollocation class where
   * we first transform to the appropriate basis in which we can compute the
   * derivatives through collocation techniques.
   *
   * This class allows for dimension-independent application of the
   * operation, implemented by template recursion. It has been tested up to
   * 6 space dimensions.
   */
  template <EvaluatorVariant variant,
            EvaluatorQuantity quantity,
            int dim,
            int basis_size_1,
            int basis_size_2,
            typename Number,
            typename Number2>
  struct FEEvaluationImplBasisChange
  {
    static_assert(basis_size_1 == 0 || basis_size_1 <= basis_size_2,
                  "The second dimension must not be smaller than the first");

    /**
     * This applies the transformation that contracts over the rows of the
     * coefficient array, generating values along the columns of the
     * coefficient array.
     *
     * @param n_components The number of vector components.
     * @param transformation_matrix The coefficient matrix handed in as a
     * vector, using @p basis_size_1 rows and @p basis_size_2 columns if
     * interpreted as a matrix.
     * @param values_in The array of the input of size basis_size_1^dim. It
     * may alias with values_out.
     * @param values_out The array of size basis_size_2^dim where the results
     * of the transformation are stored. It may alias with the values_in
     * array.
     * @param basis_size_1_variable In case the template argument
     * basis_size_1 is zero, the size of the first basis can alternatively be
     * passed in as a run-time argument. For efficiency reasons, the template
     * argument takes precedence in case it is nonzero.
     * @param basis_size_2_variable In case the template argument
     * basis_size_1 is zero, the size of the second basis can alternatively
     * be passed in as a run-time argument.
     */
#ifndef DEBUG
    DEAL_II_ALWAYS_INLINE
#endif
    static void
    do_forward(
      const unsigned int n_components,
      const AlignedVector<Number2> &transformation_matrix,
      const Number *                values_in,
      Number *                      values_out,
      const unsigned int basis_size_1_variable = numbers::invalid_unsigned_int,
      const unsigned int basis_size_2_variable = numbers::invalid_unsigned_int)
    {
      Assert(
        basis_size_1 != 0 || basis_size_1_variable <= basis_size_2_variable,
        ExcMessage("The second dimension must not be smaller than the first"));

      Assert(quantity == EvaluatorQuantity::value, ExcInternalError());

      // we do recursion until dim==1 or dim==2 and we have
      // basis_size_1==basis_size_2. The latter optimization increases
      // optimization possibilities for the compiler but does only work for
      // aliased pointers if the sizes are equal.
      constexpr int next_dim =
        (dim > 2 ||
         ((basis_size_1 == 0 || basis_size_2 > basis_size_1) && dim > 1)) ?
          dim - 1 :
          dim;

      EvaluatorTensorProduct<variant,
                             dim,
                             basis_size_1,
                             (basis_size_1 == 0 ? 0 : basis_size_2),
                             Number,
                             Number2>
                         eval_val(transformation_matrix,
                                  AlignedVector<Number2>(),
                                  AlignedVector<Number2>(),
                                  basis_size_1_variable,
                                  basis_size_2_variable);
      const unsigned int np_1 =
        basis_size_1 > 0 ? basis_size_1 : basis_size_1_variable;
      const unsigned int np_2 =
        basis_size_1 > 0 ? basis_size_2 : basis_size_2_variable;
      Assert(np_1 > 0 && np_1 != numbers::invalid_unsigned_int,
             ExcMessage("Cannot transform with 0-point basis"));
      Assert(np_2 > 0 && np_2 != numbers::invalid_unsigned_int,
             ExcMessage("Cannot transform with 0-point basis"));

      // run loop backwards to ensure correctness if values_in aliases with
      // values_out in case with basis_size_1 < basis_size_2
      values_in = values_in + n_components * Utilities::fixed_power<dim>(np_1);
      values_out =
        values_out + n_components * Utilities::fixed_power<dim>(np_2);
      for (unsigned int c = n_components; c != 0; --c)
        {
          values_in -= Utilities::fixed_power<dim>(np_1);
          values_out -= Utilities::fixed_power<dim>(np_2);
          if (next_dim < dim)
            // recurse over the slices of the highest dimension, again
            // backwards so that aliased in/out arrays stay consistent
            for (unsigned int q = np_1; q != 0; --q)
              FEEvaluationImplBasisChange<
                variant,
                quantity,
                next_dim,
                basis_size_1,
                basis_size_2,
                Number,
                Number2>::do_forward(1,
                                     transformation_matrix,
                                     values_in +
                                       (q - 1) *
                                         Utilities::fixed_power<next_dim>(np_1),
                                     values_out +
                                       (q - 1) *
                                         Utilities::fixed_power<next_dim>(np_2),
                                     basis_size_1_variable,
                                     basis_size_2_variable);

          // the recursion stops if dim==1 or if dim==2 and
          // basis_size_1==basis_size_2 (the latter is used because the
          // compiler generates nicer code)
          if (basis_size_1 > 0 && basis_size_2 == basis_size_1 && dim == 2)
            {
              eval_val.template values<0, true, false>(values_in, values_out);
              eval_val.template values<1, true, false>(values_out, values_out);
            }
          else if (dim == 1)
            eval_val.template values<dim - 1, true, false>(values_in,
                                                           values_out);
          else
            eval_val.template values<dim - 1, true, false>(values_out,
                                                           values_out);
        }
    }

    /**
     * This applies the transformation that contracts over the columns of the
     * coefficient array, generating values along the rows of the coefficient
     * array.
     *
     * @param n_components The number of vector components.
     * @param transformation_matrix The coefficient matrix handed in as a
     * vector, using @p basis_size_1 rows and @p basis_size_2 columns if
     * interpreted as a matrix.
     * @param add_into_result Defines whether the result should be added into
     * the array @p values_out (if true) or overwrite the previous content.
     * The result is undefined in case values_in and values_out point to the
     * same array and @p add_into_result is true, in which case an exception
     * is thrown.
     * @param values_in The array of the input of size basis_size_2^dim. It
     * may alias with values_out. Note that the previous content of
     * @p values_in is overwritten within the function.
     * @param values_out The array of size basis_size_1^dim where the results
     * of the transformation are stored. It may alias with the @p values_in
     * array.
     * @param basis_size_1_variable In case the template argument
     * basis_size_1 is zero, the size of the first basis can alternatively be
     * passed in as a run-time argument. For efficiency reasons, the template
     * argument takes precedence in case it is nonzero.
     * @param basis_size_2_variable In case the template argument
     * basis_size_1 is zero, the size of the second basis can alternatively
     * be passed in as a run-time argument.
     */
#ifndef DEBUG
    DEAL_II_ALWAYS_INLINE
#endif
    static void
    do_backward(
      const unsigned int n_components,
      const AlignedVector<Number2> &transformation_matrix,
      const bool                    add_into_result,
      Number *                      values_in,
      Number *                      values_out,
      const unsigned int basis_size_1_variable = numbers::invalid_unsigned_int,
      const unsigned int basis_size_2_variable = numbers::invalid_unsigned_int)
    {
      Assert(
        basis_size_1 != 0 || basis_size_1_variable <= basis_size_2_variable,
        ExcMessage("The second dimension must not be smaller than the first"));
      Assert(add_into_result == false || values_in != values_out,
             ExcMessage(
               "Input and output cannot alias with each other when "
               "adding the result of the basis change to existing data"));

      Assert(quantity == EvaluatorQuantity::value ||
               quantity == EvaluatorQuantity::hessian,
             ExcInternalError());

      // recurse until dim==1, or dim==2 with equal basis sizes (see
      // do_forward for the rationale)
      constexpr int next_dim =
        (dim > 2 ||
         ((basis_size_1 == 0 || basis_size_2 > basis_size_1) && dim > 1)) ?
          dim - 1 :
          dim;

      EvaluatorTensorProduct<variant,
                             dim,
                             basis_size_1,
                             (basis_size_1 == 0 ? 0 : basis_size_2),
                             Number,
                             Number2>
                         eval_val(transformation_matrix,
                                  transformation_matrix,
                                  transformation_matrix,
                                  basis_size_1_variable,
                                  basis_size_2_variable);
      const unsigned int np_1 =
        basis_size_1 > 0 ? basis_size_1 : basis_size_1_variable;
      const unsigned int np_2 =
        basis_size_1 > 0 ? basis_size_2 : basis_size_2_variable;
      Assert(np_1 > 0 && np_1 != numbers::invalid_unsigned_int,
             ExcMessage("Cannot transform with 0-point basis"));
      Assert(np_2 > 0 && np_2 != numbers::invalid_unsigned_int,
             ExcMessage("Cannot transform with 0-point basis"));

      for (unsigned int c = 0; c < n_components; ++c)
        {
          // contract in the highest dimension first (in place), then either
          // add into or overwrite values_out in the innermost dimension
          if (basis_size_1 > 0 && basis_size_2 == basis_size_1 && dim == 2)
            {
              if (quantity == EvaluatorQuantity::value)
                eval_val.template values<1, false, false>(values_in, values_in);
              else
                eval_val.template hessians<1, false, false>(values_in,
                                                            values_in);

              if (add_into_result)
                {
                  if (quantity == EvaluatorQuantity::value)
                    eval_val.template values<0, false, true>(values_in,
                                                             values_out);
                  else
                    eval_val.template hessians<0, false, true>(values_in,
                                                               values_out);
                }
              else
                {
                  if (quantity == EvaluatorQuantity::value)
                    eval_val.template values<0, false, false>(values_in,
                                                              values_out);
                  else
                    eval_val.template hessians<0, false, false>(values_in,
                                                                values_out);
                }
            }
          else
            {
              if (dim == 1 && add_into_result)
                {
                  if (quantity == EvaluatorQuantity::value)
                    eval_val.template values<0, false, true>(values_in,
                                                             values_out);
                  else
                    eval_val.template hessians<0, false, true>(values_in,
                                                               values_out);
                }
              else if (dim == 1)
                {
                  if (quantity == EvaluatorQuantity::value)
                    eval_val.template values<0, false, false>(values_in,
                                                              values_out);
                  else
                    eval_val.template hessians<0, false, false>(values_in,
                                                                values_out);
                }
              else
                {
                  if (quantity == EvaluatorQuantity::value)
                    eval_val.template values<dim - 1, false, false>(values_in,
                                                                    values_in);
                  else
                    eval_val.template hessians<dim - 1, false, false>(
                      values_in, values_in);
                }
            }
          if (next_dim < dim)
            for (unsigned int q = 0; q < np_1; ++q)
              FEEvaluationImplBasisChange<variant,
                                          quantity,
                                          next_dim,
                                          basis_size_1,
                                          basis_size_2,
                                          Number,
                                          Number2>::
                do_backward(1,
                            transformation_matrix,
                            add_into_result,
                            values_in +
                              q * Utilities::fixed_power<next_dim>(np_2),
                            values_out +
                              q * Utilities::fixed_power<next_dim>(np_1),
                            basis_size_1_variable,
                            basis_size_2_variable);

          values_in += Utilities::fixed_power<dim>(np_2);
          values_out += Utilities::fixed_power<dim>(np_1);
        }
    }

    /**
     * This operation applies a mass-matrix-like operation, consisting of a
     * do_forward() operation, multiplication by the coefficients in the
     * quadrature points, and the do_backward() operation.
     *
     * @param n_components The number of vector components.
     * @param transformation_matrix The coefficient matrix handed in as a
     * vector, using @p basis_size_1 rows and @p basis_size_2 columns if
     * interpreted as a matrix.
     * @param coefficients The array of coefficients by which the result is
     * multiplied. Its length must be either basis_size_2^dim or
     * n_components*basis_size_2^dim.
     * @param values_in The array of the input of size basis_size_2^dim. It
     * may alias with values_out.
     * @param scratch_data Array to hold temporary data during the operation.
     * Must be of length basis_size_2^dim.
     * @param values_out The array of size basis_size_1^dim where the results
     * of the transformation are stored. It may alias with the values_in
     * array.
     */
    static void
    do_mass(const unsigned int n_components,
            const AlignedVector<Number2> &transformation_matrix,
            const AlignedVector<Number> & coefficients,
            const Number *                values_in,
            Number *                      scratch_data,
            Number *                      values_out)
    {
      constexpr int next_dim = dim > 1 ? dim - 1 : dim;
      // when both bases have the same size, the result array can serve as
      // the intermediate buffer directly
      Number *      my_scratch =
        basis_size_1 != basis_size_2 ? scratch_data : values_out;

      const unsigned int size_per_component = Utilities::pow(basis_size_2, dim);
      Assert(coefficients.size() == size_per_component ||
               coefficients.size() == n_components * size_per_component,
             ExcDimensionMismatch(coefficients.size(), size_per_component));
      // stride 0 means the same coefficient array is reused for all
      // components
      const unsigned int stride =
        coefficients.size() == size_per_component ? 0 : 1;

      // forward transform over all but the last dimension
      for (unsigned int q = basis_size_1; q != 0; --q)
        FEEvaluationImplBasisChange<
          variant,
          EvaluatorQuantity::value,
          next_dim,
          basis_size_1,
          basis_size_2,
          Number,
          Number2>::do_forward(n_components,
                               transformation_matrix,
                               values_in +
                                 (q - 1) *
                                   Utilities::pow(basis_size_1, dim - 1),
                               my_scratch +
                                 (q - 1) *
                                   Utilities::pow(basis_size_2, dim - 1));
      EvaluatorTensorProduct<variant,
                             dim,
                             basis_size_1,
                             basis_size_2,
                             Number,
                             Number2>
                         eval_val(transformation_matrix);
      // process the last dimension line by line, multiplying by the
      // quadrature-point coefficients in between forward and backward pass
      const unsigned int n_inner_blocks =
        (dim > 1 && basis_size_2 < 10) ? basis_size_2 : 1;
      const unsigned int n_blocks = Utilities::pow(basis_size_2, dim - 1);
      for (unsigned int ii = 0; ii < n_blocks; ii += n_inner_blocks)
        for (unsigned int c = 0; c < n_components; ++c)
          {
            for (unsigned int i = ii; i < ii + n_inner_blocks; ++i)
              eval_val.template values_one_line<dim - 1, true, false>(
                my_scratch + i, my_scratch + i);
            for (unsigned int q = 0; q < basis_size_2; ++q)
              for (unsigned int i = ii; i < ii + n_inner_blocks; ++i)
                my_scratch[i + q * n_blocks + c * size_per_component] *=
                  coefficients[i + q * n_blocks +
                               c * stride * size_per_component];
            for (unsigned int i = ii; i < ii + n_inner_blocks; ++i)
              eval_val.template values_one_line<dim - 1, false, false>(
                my_scratch + i, my_scratch + i);
          }
      // backward transform over the remaining dimensions
      for (unsigned int q = 0; q < basis_size_1; ++q)
        FEEvaluationImplBasisChange<
          variant,
          EvaluatorQuantity::value,
          next_dim,
          basis_size_1,
          basis_size_2,
          Number,
          Number2>::do_backward(n_components,
                                transformation_matrix,
                                false,
                                my_scratch +
                                  q * Utilities::pow(basis_size_2, dim - 1),
                                values_out +
                                  q * Utilities::pow(basis_size_1, dim - 1));
    }
  };
  /**
   * This struct performs the evaluation of function values, gradients and
   * Hessians for tensor-product finite elements. This is a specialization
   * for elements where the nodal points coincide with the quadrature points,
   * such as FE_Q shape functions on Gauss-Lobatto support points integrated
   * with Gauss-Lobatto quadrature. The assumption of this class is that the
   * shape 'values' operation is the identity, which allows us to write
   * shorter code.
   *
   * In the literature, this form of evaluation is often called spectral
   * evaluation, spectral collocation, or simply collocation, meaning the
   * same location for shape functions and evaluation space (quadrature
   * points).
   */
  template <int dim, int fe_degree, typename Number>
  struct FEEvaluationImplCollocation
  {
    // Evaluate values/gradients/Hessians (selected by evaluation_flag) from
    // the dof coefficients into the quadrature-point arrays.
    static void
    evaluate(const unsigned int n_components,
             const EvaluationFlags::EvaluationFlags evaluation_flag,
             const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
             const Number *                                values_dofs,
             Number *                                      values_quad,
             Number *                                      gradients_quad,
             Number *                                      hessians_quad,
             Number *                                      scratch_data);

    // Integrate (test and sum) quadrature-point data back into dof values;
    // add_into_values_array selects accumulation vs. overwrite.
    static void
    integrate(const unsigned int n_components,
              const EvaluationFlags::EvaluationFlags integration_flag,
              const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
              Number *                                      values_dofs,
              Number *                                      values_quad,
              Number *                                      gradients_quad,
              Number *                                      scratch_data,
              const bool add_into_values_array);
  };
  // Collocation evaluation: shape values are the identity, so values at the
  // quadrature points are a plain copy of the dof coefficients; gradients
  // and Hessians apply the even-odd collocation derivative matrices.
  template <int dim, int fe_degree, typename Number>
  inline void
  FEEvaluationImplCollocation<dim, fe_degree, Number>::evaluate(
    const unsigned int                     n_components,
    const EvaluationFlags::EvaluationFlags evaluation_flag,
    const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
    const Number *                                values_dofs,
    Number *                                      values_quad,
    Number *                                      gradients_quad,
    Number *                                      hessians_quad,
    Number *)
  {
    AssertDimension(
      shape_info.data.front().shape_gradients_collocation_eo.size(),
      (fe_degree + 2) / 2 * (fe_degree + 1));

    EvaluatorTensorProduct<evaluate_evenodd,
                           dim,
                           fe_degree + 1,
                           fe_degree + 1,
                           Number>
                           eval(AlignedVector<Number>(),
                                shape_info.data.front().shape_gradients_collocation_eo,
                                shape_info.data.front().shape_hessians_collocation_eo);
    constexpr unsigned int n_q_points = Utilities::pow(fe_degree + 1, dim);

    for (unsigned int c = 0; c < n_components; c++)
      {
        // values: identity in collocation space, just copy
        if (evaluation_flag & EvaluationFlags::values)
          for (unsigned int i = 0; i < n_q_points; ++i)
            values_quad[i] = values_dofs[i];
        // gradients are also needed as input for the mixed second
        // derivatives below, hence computed for hessians too
        if (evaluation_flag &
            (EvaluationFlags::gradients | EvaluationFlags::hessians))
          {
            eval.template gradients<0, true, false>(values_dofs,
                                                    gradients_quad);
            if (dim > 1)
              eval.template gradients<1, true, false>(values_dofs,
                                                      gradients_quad +
                                                        n_q_points);
            if (dim > 2)
              eval.template gradients<2, true, false>(values_dofs,
                                                      gradients_quad +
                                                        2 * n_q_points);
          }
        if (evaluation_flag & EvaluationFlags::hessians)
          {
            // layout: the dim diagonal entries (xx, yy, zz) come first,
            // followed by the mixed derivatives (xy, xz, yz), the latter
            // computed by differentiating the already-computed gradients
            eval.template hessians<0, true, false>(values_dofs, hessians_quad);
            if (dim > 1)
              {
                // apply d/dy to the x-gradients to get d^2/dxdy
                eval.template gradients<1, true, false>(gradients_quad,
                                                        hessians_quad +
                                                          dim * n_q_points);

                // compute only the unique yy Hessian here
                eval.template hessians<1, true, false>(values_dofs,
                                                       hessians_quad +
                                                         n_q_points);
              }
            if (dim > 2)
              {
                // d/dz of x-gradients -> xz, d/dz of y-gradients -> yz
                eval.template gradients<2, true, false>(gradients_quad,
                                                        hessians_quad +
                                                          4 * n_q_points);
                eval.template gradients<2, true, false>(
                  gradients_quad + n_q_points, hessians_quad + 5 * n_q_points);

                // zz Hessian
                eval.template hessians<2, true, false>(values_dofs,
                                                       hessians_quad +
                                                         2 * n_q_points);
              }
            hessians_quad += (dim * (dim + 1)) / 2 * n_q_points;
          }
        gradients_quad += dim * n_q_points;
        values_quad += n_q_points;
        values_dofs += n_q_points;
      }
  }
  // Collocation integration (transpose of evaluate()): values are summed by
  // an identity/copy, gradients are tested with the transposed collocation
  // derivative matrices and accumulated on top of the values contribution.
  template <int dim, int fe_degree, typename Number>
  inline void
  FEEvaluationImplCollocation<dim, fe_degree, Number>::integrate(
    const unsigned int                     n_components,
    const EvaluationFlags::EvaluationFlags integration_flag,
    const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
    Number *                                      values_dofs,
    Number *                                      values_quad,
    Number *                                      gradients_quad,
    Number *,
    const bool add_into_values_array)
  {
    AssertDimension(
      shape_info.data.front().shape_gradients_collocation_eo.size(),
      (fe_degree + 2) / 2 * (fe_degree + 1));

    EvaluatorTensorProduct<evaluate_evenodd,
                           dim,
                           fe_degree + 1,
                           fe_degree + 1,
                           Number>
                           eval(AlignedVector<Number>(),
                                shape_info.data.front().shape_gradients_collocation_eo,
                                shape_info.data.front().shape_hessians_collocation_eo);
    constexpr unsigned int n_q_points = Utilities::pow(fe_degree + 1, dim);

    for (unsigned int c = 0; c < n_components; c++)
      {
        // values: identity operation, copy or accumulate directly
        if (integration_flag & EvaluationFlags::values)
          {
            if (add_into_values_array == false)
              for (unsigned int i = 0; i < n_q_points; ++i)
                values_dofs[i] = values_quad[i];
            else
              for (unsigned int i = 0; i < n_q_points; ++i)
                values_dofs[i] += values_quad[i];
          }
        if (integration_flag & EvaluationFlags::gradients)
          {
            // the first direction may only overwrite values_dofs if no
            // values contribution was written above and no accumulation
            // was requested; all further directions always add
            if (integration_flag & EvaluationFlags::values ||
                add_into_values_array == true)
              eval.template gradients<0, false, true>(gradients_quad,
                                                      values_dofs);
            else
              eval.template gradients<0, false, false>(gradients_quad,
                                                       values_dofs);
            if (dim > 1)
              eval.template gradients<1, false, true>(gradients_quad +
                                                        n_q_points,
                                                      values_dofs);
            if (dim > 2)
              eval.template gradients<2, false, true>(gradients_quad +
                                                        2 * n_q_points,
                                                      values_dofs);
          }
        gradients_quad += dim * n_q_points;
        values_quad += n_q_points;
        values_dofs += n_q_points;
      }
  }
  /**
   * This struct performs the evaluation of function values, gradients and
   * Hessians for tensor-product finite elements. This is a specialization
   * for basis functions symmetric about the midpoint 0.5 of the unit
   * interval with the same number of quadrature points as degrees of
   * freedom. In that case, we can first transform the basis to one that has
   * the nodal points in the quadrature points (i.e., the collocation space)
   * and then perform the evaluation of the first and second derivatives in
   * this transformed space, using the identity operation for the shape
   * values.
   */
  template <int dim, int fe_degree, int n_q_points_1d, typename Number>
  struct FEEvaluationImplTransformToCollocation
  {
    // Transform to collocation space, then evaluate the quantities selected
    // by evaluation_flag there.
    static void
    evaluate(const unsigned int n_components,
             const EvaluationFlags::EvaluationFlags evaluation_flag,
             const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
             const Number *                                values_dofs,
             Number *                                      values_quad,
             Number *                                      gradients_quad,
             Number *                                      hessians_quad,
             Number *                                      scratch_data);

    // Integrate in collocation space, then transform the result back to the
    // original basis; add_into_values_array selects accumulation.
    static void
    integrate(const unsigned int n_components,
              const EvaluationFlags::EvaluationFlags evaluation_flag,
              const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
              Number *                                      values_dofs,
              Number *                                      values_quad,
              Number *                                      gradients_quad,
              Number *                                      scratch_data,
              const bool add_into_values_array);
  };
  // Evaluation via basis change: first transform the dof values to the
  // collocation space (values_quad then holds the collocation coefficients,
  // which are simultaneously the function values at the quadrature points),
  // then compute derivatives there with the collocation kernels.
  template <int dim, int fe_degree, int n_q_points_1d, typename Number>
  inline void
  FEEvaluationImplTransformToCollocation<
    dim,
    fe_degree,
    n_q_points_1d,
    Number>::evaluate(const unsigned int n_components,
                      const EvaluationFlags::EvaluationFlags evaluation_flag,
                      const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
                      const Number *values_dofs,
                      Number *      values_quad,
                      Number *gradients_quad,
                      Number *hessians_quad,
                      Number *)
  {
    Assert(n_q_points_1d > fe_degree,
           ExcMessage("You lose information when going to a collocation space "
                      "of lower degree, so the evaluation results would be "
                      "wrong. Thus, this class does not permit the desired "
                      "operation."));
    constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, dim);

    for (unsigned int c = 0; c < n_components; c++)
      {
        // transform from the original basis to the collocation basis; the
        // result lands directly in values_quad
        FEEvaluationImplBasisChange<
          evaluate_evenodd,
          EvaluatorQuantity::value,
          dim,
          (fe_degree >= n_q_points_1d ? n_q_points_1d : fe_degree + 1),
          n_q_points_1d,
          Number,
          Number>::do_forward(1,
                              shape_info.data.front().shape_values_eo,
                              values_dofs,
                              values_quad);

        // apply derivatives in the collocation space
        if (evaluation_flag &
            (EvaluationFlags::gradients | EvaluationFlags::hessians))
          FEEvaluationImplCollocation<dim, n_q_points_1d - 1, Number>::evaluate(
            1,
            evaluation_flag &
              (EvaluationFlags::gradients | EvaluationFlags::hessians),
            shape_info,
            values_quad,
            nullptr,
            gradients_quad,
            hessians_quad,
            nullptr);

        values_dofs += shape_info.dofs_per_component_on_cell;
        values_quad += n_q_points;
        gradients_quad += dim * n_q_points;
        hessians_quad += (dim * (dim + 1)) / 2 * n_q_points;
      }
  }
  // Integration via basis change (transpose of evaluate()): first integrate
  // the gradient contributions in the collocation space (adding onto the
  // values in values_quad when those are requested, too), then transform the
  // combined result back to the original basis.
  template <int dim, int fe_degree, int n_q_points_1d, typename Number>
  inline void
  FEEvaluationImplTransformToCollocation<
    dim,
    fe_degree,
    n_q_points_1d,
    Number>::integrate(const unsigned int n_components,
                       const EvaluationFlags::EvaluationFlags integration_flag,
                       const MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
                       Number *values_dofs,
                       Number *values_quad,
                       Number *gradients_quad,
                       Number *,
                       const bool add_into_values_array)
  {
    Assert(n_q_points_1d > fe_degree,
           ExcMessage("You lose information when going to a collocation space "
                      "of lower degree, so the evaluation results would be "
                      "wrong. Thus, this class does not permit the desired "
                      "operation."));
    AssertDimension(
      shape_info.data.front().shape_gradients_collocation_eo.size(),
      (n_q_points_1d + 1) / 2 * n_q_points_1d);
    constexpr unsigned int n_q_points = Utilities::pow(n_q_points_1d, dim);

    for (unsigned int c = 0; c < n_components; c++)
      {
        // apply derivatives in collocation space
        if (integration_flag & EvaluationFlags::gradients)
          // the gradient result is accumulated into values_quad when the
          // values flag is set, so a single backward transform suffices
          FEEvaluationImplCollocation<dim, n_q_points_1d - 1, Number>::
            integrate(1,
                      integration_flag & EvaluationFlags::gradients,
                      shape_info,
                      values_quad,
                      nullptr,
                      gradients_quad,
                      nullptr,
                      /*add_into_values_array=*/ integration_flag &
                        EvaluationFlags::values);

        // transform back to the original space
        FEEvaluationImplBasisChange<
          evaluate_evenodd,
          EvaluatorQuantity::value,
          dim,
          (fe_degree >= n_q_points_1d ? n_q_points_1d : fe_degree + 1),
          n_q_points_1d,
          Number,
          Number>::do_backward(1,
                               shape_info.data.front().shape_values_eo,
                               add_into_values_array,
                               values_quad,
                               values_dofs);
        gradients_quad += dim * n_q_points;
        values_quad += n_q_points;
        values_dofs += shape_info.dofs_per_component_on_cell;
      }
  }
  /**
   * This class chooses an appropriate evaluation strategy based on the
   * template parameters and the shape_info variable which contains runtime
   * parameters for the strategy underlying FEEvaluation::evaluate(), i.e.,
   * this calls internal::FEEvaluationImpl::evaluate(),
   * internal::FEEvaluationImplCollocation::evaluate() or
   * internal::FEEvaluationImplTransformToCollocation::evaluate() with
   * appropriate template parameters. In case the template parameters
   * fe_degree and n_q_points_1d contain valid information (i.e.,
   * fe_degree > -1 and n_q_points_1d > 0), we simply pass these values to
   * the respective template specializations. Otherwise, we perform a
   * runtime matching of the runtime parameters to find the correct
   * specialization. This matching currently supports
   * $0\leq fe\_degree \leq 9$ and $degree+1\leq n\_q\_points\_1d\leq
   * fe\_degree+2$.
   */
// Dispatcher for cell evaluation: selects the kernel implementation that
// matches the element type stored in shape_info (collocation fast path,
// transform-to-collocation, or one of the generic tensor-product kernels).
template <int dim, typename Number>
struct FEEvaluationImplEvaluateSelector
{
  // Transforms values_dofs_actual into values/gradients/hessians at the
  // quadrature points according to evaluation_flag.
  // NOTE(review): this run() always returns false here; the tensor_none
  // branch of the face selectors returns true — presumably a "handled"
  // flag consumed by the caller; confirm the exact semantics there.
  template <int fe_degree, int n_q_points_1d>
  static bool
  run(const unsigned int                                      n_components,
      const EvaluationFlags::EvaluationFlags                  evaluation_flag,
      const internal::MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
      Number *values_dofs_actual,
      Number *values_quad,
      Number *gradients_quad,
      Number *hessians_quad,
      Number *scratch_data)
  {
    // We enable a transformation to collocation for derivatives if it gives
    // correct results (first condition), if it is the most efficient choice
    // in terms of operation counts (second condition) and if we were able to
    // initialize the fields in shape_info.templates.h from the polynomials
    // (third condition).
    static constexpr bool use_collocation =
      n_q_points_1d > fe_degree && n_q_points_1d <= 3 * fe_degree / 2 + 1 &&
      n_q_points_1d < 200;

    // Fast path: degree+1 == n_q_points_1d and nodes coincide with the
    // quadrature points, so no basis change is needed at all.
    if (fe_degree >= 0 && fe_degree + 1 == n_q_points_1d &&
        shape_info.element_type ==
          internal::MatrixFreeFunctions::tensor_symmetric_collocation)
      {
        internal::FEEvaluationImplCollocation<dim, fe_degree, Number>::
          evaluate(n_components,
                   evaluation_flag,
                   shape_info,
                   values_dofs_actual,
                   values_quad,
                   gradients_quad,
                   hessians_quad,
                   scratch_data);
      }
    // '<=' on type means tensor_symmetric or tensor_symmetric_hermite, see
    // shape_info.h for more details
    else if (fe_degree >= 0 && use_collocation &&
             shape_info.element_type <=
               internal::MatrixFreeFunctions::tensor_symmetric)
      {
        internal::FEEvaluationImplTransformToCollocation<
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::evaluate(n_components,
                            evaluation_flag,
                            shape_info,
                            values_dofs_actual,
                            values_quad,
                            gradients_quad,
                            hessians_quad,
                            scratch_data);
      }
    // Generic even-odd tensor-product kernel for symmetric shape data.
    else if (fe_degree >= 0 &&
             shape_info.element_type <=
               internal::MatrixFreeFunctions::tensor_symmetric)
      {
        internal::FEEvaluationImpl<
          internal::MatrixFreeFunctions::tensor_symmetric,
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::evaluate(n_components,
                            evaluation_flag,
                            shape_info,
                            values_dofs_actual,
                            values_quad,
                            gradients_quad,
                            hessians_quad,
                            scratch_data);
      }
    // Symmetric tensor product augmented by a constant (DG0) function.
    else if (shape_info.element_type ==
             internal::MatrixFreeFunctions::tensor_symmetric_plus_dg0)
      {
        internal::FEEvaluationImpl<
          internal::MatrixFreeFunctions::tensor_symmetric_plus_dg0,
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::evaluate(n_components,
                            evaluation_flag,
                            shape_info,
                            values_dofs_actual,
                            values_quad,
                            gradients_quad,
                            hessians_quad,
                            scratch_data);
      }
    // Truncated tensor product (e.g. FE_DGP-type bases).
    else if (shape_info.element_type ==
             internal::MatrixFreeFunctions::truncated_tensor)
      {
        internal::FEEvaluationImpl<
          internal::MatrixFreeFunctions::truncated_tensor,
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::evaluate(n_components,
                            evaluation_flag,
                            shape_info,
                            values_dofs_actual,
                            values_quad,
                            gradients_quad,
                            hessians_quad,
                            scratch_data);
      }
    // No tensor-product structure available: dense application.
    else if (shape_info.element_type ==
             internal::MatrixFreeFunctions::tensor_none)
      {
        internal::FEEvaluationImpl<internal::MatrixFreeFunctions::tensor_none,
                                   dim,
                                   fe_degree,
                                   n_q_points_1d,
                                   Number>::evaluate(n_components,
                                                     evaluation_flag,
                                                     shape_info,
                                                     values_dofs_actual,
                                                     values_quad,
                                                     gradients_quad,
                                                     hessians_quad,
                                                     scratch_data);
      }
    // Fallback: general (non-symmetric) tensor product.
    else
      {
        internal::FEEvaluationImpl<
          internal::MatrixFreeFunctions::tensor_general,
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::evaluate(n_components,
                            evaluation_flag,
                            shape_info,
                            values_dofs_actual,
                            values_quad,
                            gradients_quad,
                            hessians_quad,
                            scratch_data);
      }
    return false;
  }
};
/**
 * This class chooses an appropriate evaluation strategy based on the
 * template parameters and the shape_info variable, which contains the
 * run-time parameters underlying FEEvaluation::integrate(). In other words,
 * this class calls internal::FEEvaluationImpl::integrate(),
 * internal::FEEvaluationImplCollocation::integrate() or
 * internal::FEEvaluationImplTransformToCollocation::integrate() with the
 * appropriate template parameters. If the template parameters fe_degree and
 * n_q_points_1d contain valid information (i.e., fe_degree > -1 and
 * n_q_points_1d > 0), we simply pass these values on to the respective
 * template specialization. Otherwise, we perform a run-time matching of the
 * run-time parameters to find the correct specialization. This matching
 * currently supports $0\leq fe\_degree \leq 9$ and $degree+1\leq
 * n\_q\_points\_1d\leq fe\_degree+2$.
 *
 */
// Dispatcher for cell integration: the transpose counterpart of
// FEEvaluationImplEvaluateSelector, selecting the integrate() kernel that
// matches the element type stored in shape_info.
template <int dim, typename Number>
struct FEEvaluationImplIntegrateSelector
{
  // Tests values/gradients at quadrature points against the basis and
  // accumulates into values_dofs_actual; when sum_into_values_array is
  // true, results are added to the existing content instead of
  // overwriting it.
  template <int fe_degree, int n_q_points_1d>
  static bool
  run(const unsigned int                                      n_components,
      const EvaluationFlags::EvaluationFlags                  integration_flag,
      const internal::MatrixFreeFunctions::ShapeInfo<Number> &shape_info,
      Number *   values_dofs_actual,
      Number *   values_quad,
      Number *   gradients_quad,
      Number *   scratch_data,
      const bool sum_into_values_array)
  {
    // We enable a transformation to collocation for derivatives if it gives
    // correct results (first condition), if it is the most efficient choice
    // in terms of operation counts (second condition) and if we were able to
    // initialize the fields in shape_info.templates.h from the polynomials
    // (third condition).
    constexpr bool use_collocation = n_q_points_1d > fe_degree &&
                                     n_q_points_1d <= 3 * fe_degree / 2 + 1 &&
                                     n_q_points_1d < 200;

    // Fast path: nodes coincide with quadrature points, no basis change.
    if (fe_degree >= 0 && fe_degree + 1 == n_q_points_1d &&
        shape_info.element_type ==
          internal::MatrixFreeFunctions::tensor_symmetric_collocation)
      {
        internal::FEEvaluationImplCollocation<dim, fe_degree, Number>::
          integrate(n_components,
                    integration_flag,
                    shape_info,
                    values_dofs_actual,
                    values_quad,
                    gradients_quad,
                    scratch_data,
                    sum_into_values_array);
      }
    // '<=' on type means tensor_symmetric or tensor_symmetric_hermite, see
    // shape_info.h for more details
    else if (fe_degree >= 0 && use_collocation &&
             shape_info.element_type <=
               internal::MatrixFreeFunctions::tensor_symmetric)
      {
        internal::FEEvaluationImplTransformToCollocation<
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::integrate(n_components,
                             integration_flag,
                             shape_info,
                             values_dofs_actual,
                             values_quad,
                             gradients_quad,
                             scratch_data,
                             sum_into_values_array);
      }
    // Generic even-odd tensor-product kernel for symmetric shape data.
    else if (fe_degree >= 0 &&
             shape_info.element_type <=
               internal::MatrixFreeFunctions::tensor_symmetric)
      {
        internal::FEEvaluationImpl<
          internal::MatrixFreeFunctions::tensor_symmetric,
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::integrate(n_components,
                             integration_flag,
                             shape_info,
                             values_dofs_actual,
                             values_quad,
                             gradients_quad,
                             scratch_data,
                             sum_into_values_array);
      }
    // Symmetric tensor product augmented by a constant (DG0) function.
    else if (shape_info.element_type ==
             internal::MatrixFreeFunctions::tensor_symmetric_plus_dg0)
      {
        internal::FEEvaluationImpl<
          internal::MatrixFreeFunctions::tensor_symmetric_plus_dg0,
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::integrate(n_components,
                             integration_flag,
                             shape_info,
                             values_dofs_actual,
                             values_quad,
                             gradients_quad,
                             scratch_data,
                             sum_into_values_array);
      }
    // Truncated tensor product (e.g. FE_DGP-type bases).
    else if (shape_info.element_type ==
             internal::MatrixFreeFunctions::truncated_tensor)
      {
        internal::FEEvaluationImpl<
          internal::MatrixFreeFunctions::truncated_tensor,
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::integrate(n_components,
                             integration_flag,
                             shape_info,
                             values_dofs_actual,
                             values_quad,
                             gradients_quad,
                             scratch_data,
                             sum_into_values_array);
      }
    // No tensor-product structure available: dense application.
    else if (shape_info.element_type ==
             internal::MatrixFreeFunctions::tensor_none)
      {
        internal::FEEvaluationImpl<internal::MatrixFreeFunctions::tensor_none,
                                   dim,
                                   fe_degree,
                                   n_q_points_1d,
                                   Number>::integrate(n_components,
                                                      integration_flag,
                                                      shape_info,
                                                      values_dofs_actual,
                                                      values_quad,
                                                      gradients_quad,
                                                      scratch_data,
                                                      sum_into_values_array);
      }
    // Fallback: general (non-symmetric) tensor product.
    else
      {
        internal::FEEvaluationImpl<
          internal::MatrixFreeFunctions::tensor_general,
          dim,
          fe_degree,
          n_q_points_1d,
          Number>::integrate(n_components,
                             integration_flag,
                             shape_info,
                             values_dofs_actual,
                             values_quad,
                             gradients_quad,
                             scratch_data,
                             sum_into_values_array);
      }
    return false;
  }
};
// Tensor-product kernels acting on data already interpolated to a face:
// evaluate_in_face() transforms face-local coefficients to values/gradients
// at face quadrature points, integrate_in_face() is the transpose.
// The values_dofs array holds 2 * size_deg entries per component: the first
// size_deg entries are the function values on the face, the second size_deg
// entries feed the derivative in the face-normal direction (see the
// values_dofs + size_deg accesses below).
template <bool symmetric_evaluate,
          int  dim,
          int  fe_degree,
          int  n_q_points_1d,
          typename Number>
struct FEFaceEvaluationImpl
{
  // We enable a transformation to collocation for derivatives if it gives
  // correct results (first two conditions), if it is the most efficient
  // choice in terms of operation counts (third condition) and if we were
  // able to initialize the fields in shape_info.templates.h from the
  // polynomials (fourth condition).
  static constexpr bool use_collocation =
    symmetric_evaluate &&
    n_q_points_1d > fe_degree && n_q_points_1d <= 3 * fe_degree / 2 + 1 &&
    n_q_points_1d < 200;

  // Evaluate values and/or tangential+normal gradients at the face
  // quadrature points. subface_index selects the sub-face interpolation
  // matrices for hanging-node faces; an index >=
  // GeometryInfo<dim>::max_children_per_cell means "no subface" (full face).
  static void
  evaluate_in_face(const unsigned int                            n_components,
                   const MatrixFreeFunctions::ShapeInfo<Number> &data,
                   Number *           values_dofs,
                   Number *           values_quad,
                   Number *           gradients_quad,
                   Number *           scratch_data,
                   const bool         evaluate_val,
                   const bool         evaluate_grad,
                   const unsigned int subface_index)
  {
    // Pick the 1d matrices for the two in-face directions: even-odd
    // compressed data in the symmetric case, otherwise either the full-face
    // or the subface-restricted matrices (subface_index % 2 for the first
    // direction, subface_index / 2 for the second).
    const AlignedVector<Number> &val1 =
      symmetric_evaluate ?
        data.data.front().shape_values_eo :
        (subface_index >= GeometryInfo<dim>::max_children_per_cell ?
           data.data.front().shape_values :
           data.data.front().values_within_subface[subface_index % 2]);
    const AlignedVector<Number> &val2 =
      symmetric_evaluate ?
        data.data.front().shape_values_eo :
        (subface_index >= GeometryInfo<dim>::max_children_per_cell ?
           data.data.front().shape_values :
           data.data.front().values_within_subface[subface_index / 2]);
    const AlignedVector<Number> &grad1 =
      symmetric_evaluate ?
        data.data.front().shape_gradients_eo :
        (subface_index >= GeometryInfo<dim>::max_children_per_cell ?
           data.data.front().shape_gradients :
           data.data.front().gradients_within_subface[subface_index % 2]);
    const AlignedVector<Number> &grad2 =
      symmetric_evaluate ?
        data.data.front().shape_gradients_eo :
        (subface_index >= GeometryInfo<dim>::max_children_per_cell ?
           data.data.front().shape_gradients :
           data.data.front().gradients_within_subface[subface_index / 2]);

    // (dim-1)-dimensional tensor-product evaluator over the face.
    using Eval =
      internal::EvaluatorTensorProduct<symmetric_evaluate ?
                                         internal::evaluate_evenodd :
                                         internal::evaluate_general,
                                       dim - 1,
                                       fe_degree + 1,
                                       n_q_points_1d,
                                       Number>;
    Eval eval1(val1,
               grad1,
               AlignedVector<Number>(),
               data.data.front().fe_degree + 1,
               data.data.front().n_q_points_1d);
    Eval eval2(val2,
               grad2,
               AlignedVector<Number>(),
               data.data.front().fe_degree + 1,
               data.data.front().n_q_points_1d);

    // Number of face dofs and face quadrature points; the run-time values
    // are used when fe_degree == -1 (degree not known at compile time).
    const unsigned int size_deg =
      fe_degree > -1 ?
        Utilities::pow(fe_degree + 1, dim - 1) :
        (dim > 1 ?
           Utilities::fixed_power<dim - 1>(data.data.front().fe_degree + 1) :
           1);

    const unsigned int n_q_points = fe_degree > -1 ?
                                      Utilities::pow(n_q_points_1d, dim - 1) :
                                      data.n_q_points_face;

    // Values only: one 1d pass per in-face direction.
    if (evaluate_grad == false)
      for (unsigned int c = 0; c < n_components; ++c)
        {
          switch (dim)
            {
              case 3:
                eval1.template values<0, true, false>(values_dofs,
                                                      values_quad);
                eval2.template values<1, true, false>(values_quad,
                                                      values_quad);
                break;
              case 2:
                eval1.template values<0, true, false>(values_dofs,
                                                      values_quad);
                break;
              case 1:
                values_quad[0] = values_dofs[0];
                break;
              default:
                Assert(false, ExcNotImplemented());
            }
          values_dofs += 2 * size_deg;
          values_quad += n_q_points;
        }
    else
      for (unsigned int c = 0; c < n_components; ++c)
        {
          switch (dim)
            {
              case 3:
                if (use_collocation)
                  {
                    // First go to quadrature (collocation) points, then
                    // differentiate there with the collocation derivative.
                    eval1.template values<0, true, false>(values_dofs,
                                                          values_quad);
                    eval1.template values<1, true, false>(values_quad,
                                                          values_quad);
                    internal::EvaluatorTensorProduct<
                      internal::evaluate_evenodd,
                      dim - 1,
                      n_q_points_1d,
                      n_q_points_1d,
                      Number>
                      eval_grad(
                        AlignedVector<Number>(),
                        data.data.front().shape_gradients_collocation_eo,
                        AlignedVector<Number>());
                    eval_grad.template gradients<0, true, false>(
                      values_quad, gradients_quad);
                    eval_grad.template gradients<1, true, false>(
                      values_quad, gradients_quad + n_q_points);
                  }
                else
                  {
                    // d/dx1 then d/dx2 of the in-face directions; the value
                    // interpolation is reused for the values output.
                    eval1.template gradients<0, true, false>(values_dofs,
                                                             scratch_data);
                    eval2.template values<1, true, false>(scratch_data,
                                                          gradients_quad);
                    eval1.template values<0, true, false>(values_dofs,
                                                          scratch_data);
                    eval2.template gradients<1, true, false>(scratch_data,
                                                             gradients_quad +
                                                               n_q_points);
                    if (evaluate_val == true)
                      eval2.template values<1, true, false>(scratch_data,
                                                            values_quad);
                  }
                // Normal derivative from the second block of values_dofs.
                eval1.template values<0, true, false>(values_dofs + size_deg,
                                                      scratch_data);
                eval2.template values<1, true, false>(
                  scratch_data, gradients_quad + (dim - 1) * n_q_points);
                break;
              case 2:
                eval1.template values<0, true, false>(values_dofs + size_deg,
                                                      gradients_quad +
                                                        (dim - 1) *
                                                          n_q_points);
                eval1.template gradients<0, true, false>(values_dofs,
                                                         gradients_quad);
                if (evaluate_val == true)
                  eval1.template values<0, true, false>(values_dofs,
                                                        values_quad);
                break;
              case 1:
                values_quad[0]    = values_dofs[0];
                gradients_quad[0] = values_dofs[1];
                break;
              default:
                AssertThrow(false, ExcNotImplemented());
            }
          values_dofs += 2 * size_deg;
          values_quad += n_q_points;
          gradients_quad += dim * n_q_points;
        }
  }

  // Transpose of evaluate_in_face(): tests quadrature-point data against
  // the face basis and writes the result back into values_dofs.
  static void
  integrate_in_face(const unsigned int                            n_components,
                    const MatrixFreeFunctions::ShapeInfo<Number> &data,
                    Number *           values_dofs,
                    Number *           values_quad,
                    Number *           gradients_quad,
                    Number *           scratch_data,
                    const bool         integrate_val,
                    const bool         integrate_grad,
                    const unsigned int subface_index)
  {
    // Same 1d matrix selection as in evaluate_in_face().
    const AlignedVector<Number> &val1 =
      symmetric_evaluate ?
        data.data.front().shape_values_eo :
        (subface_index >= GeometryInfo<dim>::max_children_per_cell ?
           data.data.front().shape_values :
           data.data.front().values_within_subface[subface_index % 2]);
    const AlignedVector<Number> &val2 =
      symmetric_evaluate ?
        data.data.front().shape_values_eo :
        (subface_index >= GeometryInfo<dim>::max_children_per_cell ?
           data.data.front().shape_values :
           data.data.front().values_within_subface[subface_index / 2]);
    const AlignedVector<Number> &grad1 =
      symmetric_evaluate ?
        data.data.front().shape_gradients_eo :
        (subface_index >= GeometryInfo<dim>::max_children_per_cell ?
           data.data.front().shape_gradients :
           data.data.front().gradients_within_subface[subface_index % 2]);
    const AlignedVector<Number> &grad2 =
      symmetric_evaluate ?
        data.data.front().shape_gradients_eo :
        (subface_index >= GeometryInfo<dim>::max_children_per_cell ?
           data.data.front().shape_gradients :
           data.data.front().gradients_within_subface[subface_index / 2]);

    using Eval =
      internal::EvaluatorTensorProduct<symmetric_evaluate ?
                                         internal::evaluate_evenodd :
                                         internal::evaluate_general,
                                       dim - 1,
                                       fe_degree + 1,
                                       n_q_points_1d,
                                       Number>;
    // NOTE(review): the third constructor argument (hessian slot) is val1
    // here but an empty vector in evaluate_in_face() — presumably unused by
    // the kernels invoked below; confirm against EvaluatorTensorProduct.
    Eval eval1(val1,
               grad1,
               val1,
               data.data.front().fe_degree + 1,
               data.data.front().n_q_points_1d);
    Eval eval2(val2,
               grad2,
               val1,
               data.data.front().fe_degree + 1,
               data.data.front().n_q_points_1d);

    const unsigned int size_deg =
      fe_degree > -1 ?
        Utilities::pow(fe_degree + 1, dim - 1) :
        (dim > 1 ?
           Utilities::fixed_power<dim - 1>(data.data.front().fe_degree + 1) :
           1);

    const unsigned int n_q_points = fe_degree > -1 ?
                                      Utilities::pow(n_q_points_1d, dim - 1) :
                                      data.n_q_points_face;

    // Values only: transpose 1d passes, inner direction first.
    if (integrate_grad == false)
      for (unsigned int c = 0; c < n_components; ++c)
        {
          switch (dim)
            {
              case 3:
                eval2.template values<1, false, false>(values_quad,
                                                       values_quad);
                eval1.template values<0, false, false>(values_quad,
                                                       values_dofs);
                break;
              case 2:
                eval1.template values<0, false, false>(values_quad,
                                                       values_dofs);
                break;
              case 1:
                values_dofs[0] = values_quad[0];
                break;
              default:
                Assert(false, ExcNotImplemented());
            }
          values_dofs += 2 * size_deg;
          values_quad += n_q_points;
        }
    else
      for (unsigned int c = 0; c < n_components; ++c)
        {
          switch (dim)
            {
              case 3:
                // Normal-derivative contribution into the second block of
                // values_dofs.
                eval2.template values<1, false, false>(gradients_quad +
                                                         2 * n_q_points,
                                                       gradients_quad +
                                                         2 * n_q_points);
                eval1.template values<0, false, false>(
                  gradients_quad + 2 * n_q_points, values_dofs + size_deg);
                if (use_collocation)
                  {
                    internal::EvaluatorTensorProduct<
                      internal::evaluate_evenodd,
                      dim - 1,
                      n_q_points_1d,
                      n_q_points_1d,
                      Number>
                      eval_grad(
                        AlignedVector<Number>(),
                        data.data.front().shape_gradients_collocation_eo,
                        AlignedVector<Number>());
                    // Accumulate tangential-gradient tests into the value
                    // slot at the collocation points, then transform back.
                    if (integrate_val)
                      eval_grad.template gradients<1, false, true>(
                        gradients_quad + n_q_points, values_quad);
                    else
                      eval_grad.template gradients<1, false, false>(
                        gradients_quad + n_q_points, values_quad);
                    eval_grad.template gradients<0, false, true>(
                      gradients_quad, values_quad);
                    eval1.template values<1, false, false>(values_quad,
                                                           values_quad);
                    eval1.template values<0, false, false>(values_quad,
                                                           values_dofs);
                  }
                else
                  {
                    if (integrate_val)
                      {
                        eval2.template values<1, false, false>(values_quad,
                                                               scratch_data);
                        eval2.template gradients<1, false, true>(
                          gradients_quad + n_q_points, scratch_data);
                      }
                    else
                      eval2.template gradients<1, false, false>(
                        gradients_quad + n_q_points, scratch_data);
                    eval1.template values<0, false, false>(scratch_data,
                                                           values_dofs);
                    eval2.template values<1, false, false>(gradients_quad,
                                                           scratch_data);
                    eval1.template gradients<0, false, true>(scratch_data,
                                                             values_dofs);
                  }
                break;
              case 2:
                eval1.template values<0, false, false>(
                  gradients_quad + n_q_points, values_dofs + size_deg);
                eval1.template gradients<0, false, false>(gradients_quad,
                                                          values_dofs);
                if (integrate_val == true)
                  eval1.template values<0, false, true>(values_quad,
                                                        values_dofs);
                break;
              case 1:
                values_dofs[0] = values_quad[0];
                values_dofs[1] = gradients_quad[0];
                break;
              default:
                AssertThrow(false, ExcNotImplemented());
            }
          values_dofs += 2 * size_deg;
          values_quad += n_q_points;
          gradients_quad += dim * n_q_points;
        }
  }
};
// Interpolates cell data to a face (do_evaluate == true) or distributes
// face data back into cell data (do_evaluate == false) by applying the 1d
// face restriction in the direction normal to the face. The recursion over
// the face_direction template parameter selects the normal direction at
// compile time.
template <int dim, int fe_degree, typename Number, bool lex_faces = false>
struct FEFaceNormalEvaluationImpl
{
  // Interpolate the cell dof values onto the face (values and, if
  // do_gradients is set, the normal-derivative data).
  template <bool do_evaluate, bool add_into_output>
  static void
  interpolate(const unsigned int                            n_components,
              const MatrixFreeFunctions::ShapeInfo<Number> &data,
              const Number *                                input,
              Number *                                      output,
              const bool                                    do_gradients,
              const unsigned int                            face_no)
  {
    Assert(static_cast<unsigned int>(fe_degree) ==
               data.data.front().fe_degree ||
             fe_degree == -1,
           ExcInternalError());

    interpolate_generic<do_evaluate, add_into_output>(
      n_components,
      input,
      output,
      do_gradients,
      face_no,
      data.data.front().fe_degree + 1,
      data.data.front().shape_data_on_face,
      data.dofs_per_component_on_cell,
      2 * data.dofs_per_component_on_face);
  }

  /**
   * Interpolate the values on the cell quadrature points onto a face.
   *
   */
  template <bool do_evaluate, bool add_into_output>
  static void
  interpolate_quadrature(const unsigned int n_components,
                         const MatrixFreeFunctions::ShapeInfo<Number> &data,
                         const Number *     input,
                         Number *           output,
                         const bool         do_gradients,
                         const unsigned int face_no)
  {
    Assert(static_cast<unsigned int>(fe_degree + 1) ==
               data.data.front().quadrature.size() ||
             fe_degree == -1,
           ExcInternalError());

    interpolate_generic<do_evaluate, add_into_output>(
      n_components,
      input,
      output,
      do_gradients,
      face_no,
      data.data.front().quadrature.size(),
      data.data.front().quadrature_data_on_face,
      data.n_q_points,
      data.n_q_points_face);
  }

private:
  // Recursive worker: when the compile-time face_direction matches the
  // run-time normal direction (face_no / 2), apply the 1d face restriction;
  // otherwise recurse with the next direction. shape_data[face_no % 2]
  // distinguishes the two faces orthogonal to the same axis.
  template <bool do_evaluate, bool add_into_output, int face_direction = 0>
  static void
  interpolate_generic(const unsigned int n_components,
                      const Number *     input,
                      Number *           output,
                      const bool         do_gradients,
                      const unsigned int face_no,
                      const unsigned int n_points_1d,
                      const std::array<AlignedVector<Number>, 2> &shape_data,
                      const unsigned int dofs_per_component_on_cell,
                      const unsigned int dofs_per_component_on_face)
  {
    if (face_direction == face_no / 2)
      {
        internal::EvaluatorTensorProduct<internal::evaluate_general,
                                         dim,
                                         fe_degree + 1,
                                         0,
                                         Number>
          evalf(shape_data[face_no % 2],
                AlignedVector<Number>(),
                AlignedVector<Number>(),
                n_points_1d,
                0);

        // Direction of the copy depends on evaluate vs. distribute.
        const unsigned int in_stride  = do_evaluate ?
                                          dofs_per_component_on_cell :
                                          dofs_per_component_on_face;
        const unsigned int out_stride = do_evaluate ?
                                          dofs_per_component_on_face :
                                          dofs_per_component_on_cell;

        for (unsigned int c = 0; c < n_components; c++)
          {
            // The fourth template argument (1 vs 0) selects whether the
            // normal-derivative data is transferred alongside the values.
            if (do_gradients)
              evalf.template apply_face<face_direction,
                                        do_evaluate,
                                        add_into_output,
                                        1,
                                        lex_faces>(input, output);
            else
              evalf.template apply_face<face_direction,
                                        do_evaluate,
                                        add_into_output,
                                        0,
                                        lex_faces>(input, output);
            input += in_stride;
            output += out_stride;
          }
      }
    else if (face_direction < dim)
      {
        // std::min keeps the recursion depth bounded at dim - 1 so the
        // template instantiation terminates.
        interpolate_generic<do_evaluate,
                            add_into_output,
                            std::min(face_direction + 1, dim - 1)>(
          n_components,
          input,
          output,
          do_gradients,
          face_no,
          n_points_1d,
          shape_data,
          dofs_per_component_on_cell,
          dofs_per_component_on_face);
      }
  }
};
// Internal helper for reading data: generic fallback that copies one entry
// per SIMD lane. Works for any vectorized type exposing a static size()
// and operator[].
template <typename VectorizedArrayType, typename Number2>
void
do_vectorized_read(const Number2 *src_ptr, VectorizedArrayType &dst)
{
  const unsigned int n_lanes = VectorizedArrayType::size();
  for (unsigned int lane = 0; lane < n_lanes; ++lane)
    dst[lane] = src_ptr[lane];
}
// internal helper function for reading data; specialized version where we
// can use a dedicated load function
template <typename Number, unsigned int width>
void
do_vectorized_read(const Number *src_ptr, VectorizedArray<Number, width> &dst)
{
  // VectorizedArray provides a dedicated SIMD load from src_ptr.
  dst.load(src_ptr);
}
// Internal helper for reading data: generic indexed gather that fetches one
// indirectly-addressed entry per SIMD lane.
template <typename VectorizedArrayType, typename Number2>
void
do_vectorized_gather(const Number2 *      src_ptr,
                     const unsigned int * indices,
                     VectorizedArrayType &dst)
{
  const unsigned int n_lanes = VectorizedArrayType::size();
  for (unsigned int lane = 0; lane < n_lanes; ++lane)
    dst[lane] = src_ptr[indices[lane]];
}
// internal helper function for reading data; specialized version where we
// can use a dedicated gather function
template <typename Number, unsigned int width>
void
do_vectorized_gather(const Number *      src_ptr,
                     const unsigned int *indices,
                     VectorizedArray<Number, width> &dst)
{
  // VectorizedArray provides a dedicated SIMD gather of src_ptr[indices[v]].
  dst.gather(src_ptr, indices);
}
// Internal helper for writing data: generic fallback that accumulates the
// vectorized source into consecutive destination entries, lane by lane.
template <typename VectorizedArrayType, typename Number2>
void
do_vectorized_add(const VectorizedArrayType src, Number2 *dst_ptr)
{
  const unsigned int n_lanes = VectorizedArrayType::size();
  for (unsigned int lane = 0; lane < n_lanes; ++lane)
    dst_ptr[lane] += src[lane];
}
// internal helper function for reading data; specialized version where we
// can use a dedicated load function
template <typename Number, unsigned int width>
void
do_vectorized_add(const VectorizedArray<Number, width> src, Number *dst_ptr)
{
VectorizedArray<Number, width> tmp;
tmp.load(dst_ptr);
(tmp + src).store(dst_ptr);
}
// Internal helper for writing data: generic indexed scatter-add that
// accumulates each SIMD lane into an indirectly-addressed destination entry.
template <typename VectorizedArrayType, typename Number2>
void
do_vectorized_scatter_add(const VectorizedArrayType src,
                          const unsigned int *      indices,
                          Number2 *                 dst_ptr)
{
  const unsigned int n_lanes = VectorizedArrayType::size();
  for (unsigned int lane = 0; lane < n_lanes; ++lane)
    dst_ptr[indices[lane]] += src[lane];
}
// internal helper function for reading data; specialized version where we
// can use a dedicated gather function
template <typename Number, unsigned int width>
void
do_vectorized_scatter_add(const VectorizedArray<Number, width> src,
                          const unsigned int *                 indices,
                          Number *                             dst_ptr)
{
#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS < 512
  // No hardware scatter below AVX-512: plain scalar loop.
  for (unsigned int v = 0; v < width; ++v)
    dst_ptr[indices[v]] += src[v];
#else
  // Gather current values, add, scatter back with SIMD instructions.
  // NOTE(review): assumes indices are pairwise distinct within one call,
  // otherwise the gather/add/scatter loses duplicate contributions —
  // confirm callers guarantee this.
  VectorizedArray<Number, width> tmp;
  tmp.gather(dst_ptr, indices);
  (tmp + src).scatter(indices, dst_ptr);
#endif
}
template <typename Number>
void
adjust_for_face_orientation(const unsigned int dim,
const unsigned int n_components,
const unsigned int face_orientation,
const Table<2, unsigned int> &orientation_map,
const bool integrate,
const bool values,
const bool gradients,
const unsigned int n_q_points,
Number * tmp_values,
Number * values_quad,
Number * gradients_quad)
{
Assert(face_orientation, ExcInternalError());
const unsigned int *orientation = &orientation_map[face_orientation][0];
for (unsigned int c = 0; c < n_components; ++c)
{
if (values == true)
{
if (integrate)
for (unsigned int q = 0; q < n_q_points; ++q)
tmp_values[q] = values_quad[c * n_q_points + orientation[q]];
else
for (unsigned int q = 0; q < n_q_points; ++q)
tmp_values[orientation[q]] = values_quad[c * n_q_points + q];
for (unsigned int q = 0; q < n_q_points; ++q)
values_quad[c * n_q_points + q] = tmp_values[q];
}
if (gradients == true)
for (unsigned int d = 0; d < dim; ++d)
{
if (integrate)
for (unsigned int q = 0; q < n_q_points; ++q)
tmp_values[q] =
gradients_quad[(c * dim + d) * n_q_points + orientation[q]];
else
for (unsigned int q = 0; q < n_q_points; ++q)
tmp_values[orientation[q]] =
gradients_quad[(c * dim + d) * n_q_points + q];
for (unsigned int q = 0; q < n_q_points; ++q)
gradients_quad[(c * dim + d) * n_q_points + q] = tmp_values[q];
}
}
}
// Face-evaluation dispatcher: handles elements without tensor structure
// directly via dense kernels, otherwise interpolates the cell dofs to the
// face and forwards to FEFaceEvaluationImpl::evaluate_in_face().
// Returns true when the tensor_none path handled everything, false
// otherwise.
template <int dim, typename VectorizedArrayType>
struct FEFaceEvaluationImplEvaluateSelector
{
  template <int fe_degree, int n_q_points_1d>
  static bool
  run(const unsigned int n_components,
      const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
      const VectorizedArrayType *   values_array,
      VectorizedArrayType *         values_quad,
      VectorizedArrayType *         gradients_quad,
      VectorizedArrayType *         scratch_data,
      const bool                    evaluate_values,
      const bool                    evaluate_gradients,
      const unsigned int            face_no,
      const unsigned int            subface_index,
      const unsigned int            face_orientation,
      const Table<2, unsigned int> &orientation_map)
  {
    // Dense path for elements without tensor-product structure.
    if (data.element_type == MatrixFreeFunctions::tensor_none)
      {
        const unsigned int n_dofs     = data.dofs_per_component_on_cell;
        const unsigned int n_q_points = data.n_q_points_faces[face_no];
        // Bind by reference: data.data.front() yields a reference, and the
        // previous by-value copy duplicated all shape-data tables on every
        // call.
        const auto &shape_info = data.data.front();

        using Eval = EvaluatorTensorProduct<evaluate_general,
                                            1,
                                            0,
                                            0,
                                            VectorizedArrayType,
                                            VectorizedArrayType>;

        if (evaluate_values)
          {
            const auto shape_values =
              &shape_info.shape_values_face(face_no, face_orientation, 0);

            auto values_quad_ptr        = values_quad;
            auto values_dofs_actual_ptr = values_array;

            Eval eval(shape_values, nullptr, nullptr, n_dofs, n_q_points);

            for (unsigned int c = 0; c < n_components; ++c)
              {
                eval.template values<0, true, false>(values_dofs_actual_ptr,
                                                     values_quad_ptr);

                values_quad_ptr += n_q_points;
                values_dofs_actual_ptr += n_dofs;
              }
          }

        if (evaluate_gradients)
          {
            auto gradients_quad_ptr     = gradients_quad;
            auto values_dofs_actual_ptr = values_array;

            std::array<const VectorizedArrayType *, dim> shape_gradients;
            for (unsigned int d = 0; d < dim; ++d)
              shape_gradients[d] = &shape_info.shape_gradients_face(
                face_no, face_orientation, d, 0);

            for (unsigned int c = 0; c < n_components; ++c)
              {
                for (unsigned int d = 0; d < dim; ++d)
                  {
                    Eval eval(nullptr,
                              shape_gradients[d],
                              nullptr,
                              n_dofs,
                              n_q_points);

                    eval.template gradients<0, true, false>(
                      values_dofs_actual_ptr, gradients_quad_ptr);

                    gradients_quad_ptr += n_q_points;
                  }
                values_dofs_actual_ptr += n_dofs;
              }
          }

        return true;
      }

    constexpr unsigned int static_dofs_per_face =
      fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
                       numbers::invalid_unsigned_int;
    const unsigned int dofs_per_face =
      fe_degree > -1 ?
        static_dofs_per_face :
        Utilities::pow(data.data.front().fe_degree + 1, dim - 1);

    // Step 1: interpolate the cell dofs to the face (values + normal data).
    VectorizedArrayType *temp1 = scratch_data;

    FEFaceNormalEvaluationImpl<dim, fe_degree, VectorizedArrayType>::
      template interpolate<true, false>(
        n_components, data, values_array, temp1, evaluate_gradients, face_no);

    const unsigned int n_q_points_1d_actual =
      fe_degree > -1 ? n_q_points_1d : 0;

    // Step 2: evaluate within the face; the even-odd (symmetric) kernel is
    // only valid on full faces of symmetric tensor elements.
    // NOTE(review): the integrate selector compares subface_index against
    // GeometryInfo<dim - 1>::max_children_per_cell here — confirm which of
    // the two bounds is intended; they differ for subface indices in
    // [2^(dim-1), 2^dim).
    if (fe_degree > -1 &&
        subface_index >= GeometryInfo<dim>::max_children_per_cell &&
        data.element_type <= MatrixFreeFunctions::tensor_symmetric)
      FEFaceEvaluationImpl<
        true,
        dim,
        fe_degree,
        n_q_points_1d_actual,
        VectorizedArrayType>::evaluate_in_face(n_components,
                                               data,
                                               temp1,
                                               values_quad,
                                               gradients_quad,
                                               scratch_data + 2 *
                                                                n_components *
                                                                dofs_per_face,
                                               evaluate_values,
                                               evaluate_gradients,
                                               subface_index);
    else
      FEFaceEvaluationImpl<
        false,
        dim,
        fe_degree,
        n_q_points_1d_actual,
        VectorizedArrayType>::evaluate_in_face(n_components,
                                               data,
                                               temp1,
                                               values_quad,
                                               gradients_quad,
                                               scratch_data + 2 *
                                                                n_components *
                                                                dofs_per_face,
                                               evaluate_values,
                                               evaluate_gradients,
                                               subface_index);

    // Step 3: reorder quadrature-point data for non-standard orientations.
    if (face_orientation)
      adjust_for_face_orientation(dim,
                                  n_components,
                                  face_orientation,
                                  orientation_map,
                                  false,
                                  evaluate_values,
                                  evaluate_gradients,
                                  data.n_q_points_face,
                                  scratch_data,
                                  values_quad,
                                  gradients_quad);

    return false;
  }
};
// Face-integration dispatcher: the transpose of
// FEFaceEvaluationImplEvaluateSelector. Handles tensor_none elements with
// dense kernels, otherwise integrates within the face and distributes the
// result back to cell dofs via the normal interpolation. Returns true when
// the tensor_none path handled everything, false otherwise.
template <int dim, typename VectorizedArrayType>
struct FEFaceEvaluationImplIntegrateSelector
{
  template <int fe_degree, int n_q_points_1d>
  static bool
  run(const unsigned int n_components,
      const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
      VectorizedArrayType *         values_array,
      VectorizedArrayType *         values_quad,
      VectorizedArrayType *         gradients_quad,
      VectorizedArrayType *         scratch_data,
      const bool                    integrate_values,
      const bool                    integrate_gradients,
      const unsigned int            face_no,
      const unsigned int            subface_index,
      const unsigned int            face_orientation,
      const Table<2, unsigned int> &orientation_map)
  {
    // Dense path for elements without tensor-product structure.
    if (data.element_type == MatrixFreeFunctions::tensor_none)
      {
        const unsigned int n_dofs     = data.dofs_per_component_on_cell;
        const unsigned int n_q_points = data.n_q_points_faces[face_no];
        // Bind by reference: data.data.front() yields a reference, and the
        // previous by-value copy duplicated all shape-data tables on every
        // call.
        const auto &shape_info = data.data.front();

        using Eval = EvaluatorTensorProduct<evaluate_general,
                                            1,
                                            0,
                                            0,
                                            VectorizedArrayType,
                                            VectorizedArrayType>;

        if (integrate_values)
          {
            const auto shape_values =
              &shape_info.shape_values_face(face_no, face_orientation, 0);

            auto values_quad_ptr        = values_quad;
            auto values_dofs_actual_ptr = values_array;

            Eval eval(shape_values, nullptr, nullptr, n_dofs, n_q_points);

            for (unsigned int c = 0; c < n_components; ++c)
              {
                eval.template values<0, false, false>(values_quad_ptr,
                                                      values_dofs_actual_ptr);

                values_quad_ptr += n_q_points;
                values_dofs_actual_ptr += n_dofs;
              }
          }

        if (integrate_gradients)
          {
            auto gradients_quad_ptr     = gradients_quad;
            auto values_dofs_actual_ptr = values_array;

            std::array<const VectorizedArrayType *, dim> shape_gradients;
            for (unsigned int d = 0; d < dim; ++d)
              shape_gradients[d] = &shape_info.shape_gradients_face(
                face_no, face_orientation, d, 0);

            for (unsigned int c = 0; c < n_components; ++c)
              {
                for (unsigned int d = 0; d < dim; ++d)
                  {
                    Eval eval(nullptr,
                              shape_gradients[d],
                              nullptr,
                              n_dofs,
                              n_q_points);

                    // Overwrite on the first write to the dof array if the
                    // values pass did not already initialize it.
                    if ((integrate_values == false) && d == 0)
                      eval.template gradients<0, false, false>(
                        gradients_quad_ptr, values_dofs_actual_ptr);
                    else
                      eval.template gradients<0, false, true>(
                        gradients_quad_ptr, values_dofs_actual_ptr);

                    gradients_quad_ptr += n_q_points;
                  }
                values_dofs_actual_ptr += n_dofs;
              }
          }

        return true;
      }

    // Step 1: undo the face-orientation reordering (inverse of evaluation).
    if (face_orientation)
      adjust_for_face_orientation(dim,
                                  n_components,
                                  face_orientation,
                                  orientation_map,
                                  true,
                                  integrate_values,
                                  integrate_gradients,
                                  data.n_q_points_face,
                                  scratch_data,
                                  values_quad,
                                  gradients_quad);

    constexpr unsigned int static_dofs_per_face =
      fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
                       numbers::invalid_unsigned_int;
    const unsigned int dofs_per_face =
      fe_degree > -1 ?
        static_dofs_per_face :
        Utilities::pow(data.data.front().fe_degree + 1, dim - 1);

    VectorizedArrayType *temp1 = scratch_data;

    const unsigned int n_q_points_1d_actual =
      fe_degree > -1 ? n_q_points_1d : 0;

    // Step 2: integrate within the face.
    // NOTE(review): the evaluate selector compares subface_index against
    // GeometryInfo<dim>::max_children_per_cell here — confirm which of the
    // two bounds is intended; they differ for subface indices in
    // [2^(dim-1), 2^dim).
    if (fe_degree > -1 &&
        subface_index >= GeometryInfo<dim - 1>::max_children_per_cell &&
        data.element_type <= MatrixFreeFunctions::tensor_symmetric)
      FEFaceEvaluationImpl<
        true,
        dim,
        fe_degree,
        n_q_points_1d_actual,
        VectorizedArrayType>::integrate_in_face(n_components,
                                                data,
                                                temp1,
                                                values_quad,
                                                gradients_quad,
                                                scratch_data +
                                                  2 * n_components *
                                                    dofs_per_face,
                                                integrate_values,
                                                integrate_gradients,
                                                subface_index);
    else
      FEFaceEvaluationImpl<
        false,
        dim,
        fe_degree,
        n_q_points_1d_actual,
        VectorizedArrayType>::integrate_in_face(n_components,
                                                data,
                                                temp1,
                                                values_quad,
                                                gradients_quad,
                                                scratch_data +
                                                  2 * n_components *
                                                    dofs_per_face,
                                                integrate_values,
                                                integrate_gradients,
                                                subface_index);

    // Step 3: distribute the face contributions back to the cell dofs.
    FEFaceNormalEvaluationImpl<dim, fe_degree, VectorizedArrayType>::
      template interpolate<false, false>(n_components,
                                         data,
                                         temp1,
                                         values_array,
                                         integrate_gradients,
                                         face_no);
    return false;
  }
};
// Common worker for the combined vector-access + face-interpolation path
// used by both gather_evaluate (read from the global vector, integrate ==
// false) and integrate_scatter (accumulate into the global vector,
// integrate == true). The Processor object supplied by the caller decides
// the transfer direction through its callbacks (value*/hermite_grad*/
// default_operation/in_face_operation); this function only selects which
// of the fast index-storage cases applies and drives the loops.
//
// n_face_orientations is 1 for the ordinary face loop and
// VectorizedArrayType::size() for the element-centric loop (ECL), where
// each vectorization lane may refer to a different cell/face.
//
// Returns true if the global vector was accessed directly by this
// function; false if the caller still has to perform the generic
// read_dof_values()/distribute_local_to_global() fallback.
template <int n_face_orientations, typename Processor>
static bool
fe_face_evaluation_process_and_io(Processor &proc)
{
  // Unpack the processor state into locals to keep the code below terse.
  auto  n_components             = proc.n_components;
  auto  integrate                = proc.integrate;
  auto  global_vector_ptr        = proc.global_vector_ptr;
  auto &sm_ptr                   = proc.sm_ptr;
  auto &data                     = proc.data;
  auto &dof_info                 = proc.dof_info;
  auto  values_quad              = proc.values_quad;
  auto  gradients_quad           = proc.gradients_quad;
  auto  scratch_data             = proc.scratch_data;
  auto  do_values                = proc.do_values;
  auto  do_gradients             = proc.do_gradients;
  auto  active_fe_index          = proc.active_fe_index;
  auto  first_selected_component = proc.first_selected_component;
  auto  cells                    = proc.cells;
  auto  face_nos                 = proc.face_nos;
  auto  subface_index            = proc.subface_index;
  auto  dof_access_index         = proc.dof_access_index;
  auto  face_orientations        = proc.face_orientations;
  auto &orientation_map          = proc.orientation_map;

  // Compile-time properties carried by the Processor type.
  static const int dim       = Processor::dim_;
  static const int fe_degree = Processor::fe_degree_;
  using VectorizedArrayType  = typename Processor::VectorizedArrayType_;
  using Number               = typename Processor::Number_;
  using Number2_             = typename Processor::Number2_;

  const unsigned int cell = cells[0];

  // In the case of integration, we do not need to reshuffle the
  // data at the quadrature points to adjust for the face
  // orientation if the shape functions are nodal at the cell
  // boundaries (and we only requested the integration of the
  // values) or Hermite shape functions are used. These cases are
  // handled later when the values are written back into the
  // global vector.
  if (integrate &&
      (face_orientations[0] > 0 &&
       (subface_index < GeometryInfo<dim>::max_children_per_cell ||
        !(((do_gradients == false &&
            data.data.front().nodal_at_cell_boundaries == true &&
            fe_degree > 0) ||
           (data.element_type ==
              MatrixFreeFunctions::tensor_symmetric_hermite &&
            fe_degree > 1)) &&
          (dof_info.index_storage_variants[dof_access_index][cell] ==
             MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
               interleaved_contiguous ||
           dof_info.index_storage_variants[dof_access_index][cell] ==
             MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
               interleaved_contiguous_strided ||
           dof_info.index_storage_variants[dof_access_index][cell] ==
             MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
               interleaved_contiguous_mixed_strides ||
           dof_info.index_storage_variants[dof_access_index][cell] ==
             MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
               contiguous)))))
    {
      AssertDimension(n_face_orientations, 1);
      adjust_for_face_orientation(dim,
                                  n_components,
                                  face_orientations[0],
                                  orientation_map,
                                  true,
                                  do_values,
                                  do_gradients,
                                  data.n_q_points_face,
                                  scratch_data,
                                  values_quad,
                                  gradients_quad);
    }

  // we know that the gradient weights for the Hermite case on the
  // right (side==1) are the negative from the value at the left
  // (side==0), so we only read out one of them.
  VectorizedArrayType grad_weight =
    (data.data.front().nodal_at_cell_boundaries == true && fe_degree > 1 &&
     data.element_type == MatrixFreeFunctions::tensor_symmetric_hermite) ?
      data.data.front()
        .shape_data_on_face[0][fe_degree + (integrate ?
                                              (2 - (face_nos[0] % 2)) :
                                              (1 + (face_nos[0] % 2)))] :
      VectorizedArrayType(0.0 /*dummy*/ );

  constexpr unsigned int static_dofs_per_component =
    fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim) :
                     numbers::invalid_unsigned_int;
  constexpr unsigned int static_dofs_per_face =
    fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
                     numbers::invalid_unsigned_int;
  const unsigned int dofs_per_face =
    fe_degree > -1 ? static_dofs_per_face :
                     Utilities::pow(data.data.front().fe_degree + 1, dim - 1);

  // Scratch buffer that holds the face-normal-interpolated dof values
  // (and, for Hermite, the normal derivatives in the second half).
  VectorizedArrayType *temp1 = scratch_data;

  // Target for the index-array pointers below when no re-indexing is
  // needed; the pointee is never dereferenced in that case.
  const unsigned int dummy = 0;

  // re-orientation: per-lane pointers into the face-orientation
  // permutation tables (identity represented by &dummy).
  std::array<const unsigned int *, n_face_orientations> orientation = {};
  if (n_face_orientations == 1)
    orientation[0] = (data.data.front().nodal_at_cell_boundaries == true) ?
                       &data.face_orientations[face_orientations[0]][0] :
                       &dummy;
  else
    {
      for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
        {
          // the loop breaks once an invalid_unsigned_int is hit for
          // all cases except the exterior faces in the ECL loop (where
          // some faces might be at the boundaries but others not)
          if (cells[v] == numbers::invalid_unsigned_int)
            continue;

          orientation[v] =
            (data.data.front().nodal_at_cell_boundaries == true) ?
              &data.face_orientations[face_orientations[v]][0] :
              &dummy;
        }
    }

  // face_to_cell_index_hermite: maps a face dof to the pair of cell dofs
  // (value dof, derivative dof) needed for the Hermite fast path.
  std::array<const unsigned int *, n_face_orientations> index_array_hermite =
    {};
  if (n_face_orientations == 1)
    index_array_hermite[0] =
      (data.data.front().nodal_at_cell_boundaries == true && fe_degree > 1 &&
       data.element_type == MatrixFreeFunctions::tensor_symmetric_hermite) ?
        &data.face_to_cell_index_hermite(face_nos[0], 0) :
        &dummy;

  if (n_face_orientations > 1 &&
      data.data.front().nodal_at_cell_boundaries == true && fe_degree > 1 &&
      data.element_type == MatrixFreeFunctions::tensor_symmetric_hermite)
    {
      for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
        {
          if (cells[v] == numbers::invalid_unsigned_int)
            continue;

          // In the ECL case every lane may sit on a different face, so
          // the Hermite gradient weight must be filled per lane.
          grad_weight[v] =
            data.data.front().shape_data_on_face
              [0][fe_degree + (integrate ? (2 - (face_nos[v] % 2)) :
                                           (1 + (face_nos[v] % 2)))][v];

          index_array_hermite[v] =
            &data.face_to_cell_index_hermite(face_nos[v], 0);
        }
    }

  // face_to_cell_index_nodal: maps a face dof to the corresponding cell
  // dof for elements that are nodal at the cell boundaries.
  std::array<const unsigned int *, n_face_orientations> index_array_nodal =
    {};
  if (n_face_orientations == 1)
    index_array_nodal[0] =
      (data.data.front().nodal_at_cell_boundaries == true) ?
        &data.face_to_cell_index_nodal(face_nos[0], 0) :
        &dummy;

  if (n_face_orientations > 1 &&
      (data.data.front().nodal_at_cell_boundaries == true))
    {
      for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
        {
          if (cells[v] == numbers::invalid_unsigned_int)
            continue;

          index_array_nodal[v] =
            &data.face_to_cell_index_nodal(face_nos[v], 0);
        }
    }

  // Apply the face-orientation permutation to a face dof index; only
  // needed in 3d for non-standard orientations on non-refined faces.
  const auto reorientate = [&](const unsigned int v, const unsigned int i) {
    return (dim < 3 ||
            face_orientations[n_face_orientations == 1 ? 0 : v] == 0 ||
            subface_index < GeometryInfo<dim>::max_children_per_cell) ?
             i :
             orientation[v][i];
  };

  // this variable keeps track of whether we are able to directly write
  // the results into the result (function returns true) or not, requiring
  // an additional call to another function
  bool accesses_global_vector = true;

  for (unsigned int comp = 0; comp < n_components; ++comp)
    {
      // For integration, the face evaluation (quadrature points -> face
      // dofs into temp1) happens before the vector access below.
      if (integrate)
        proc.in_face_operation(temp1, comp);

      // we can only use the fast functions if we know the polynomial degree
      // as a template parameter (fe_degree != -1), and it only makes sense
      // to use the functions for at least linear functions for values on
      // the faces and quadratic functions for gradients on the faces, so
      // include the switch here
      if ((do_gradients == false &&
           data.data.front().nodal_at_cell_boundaries == true &&
           fe_degree > 0) ||
          (data.element_type ==
             MatrixFreeFunctions::tensor_symmetric_hermite &&
           fe_degree > 1))
        {
          // case 1: contiguous and interleaved indices
          if (n_face_orientations == 1 &&
              dof_info.index_storage_variants[dof_access_index][cell] ==
                MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
                  interleaved_contiguous)
            {
              AssertDimension(n_face_orientations, 1);
              AssertDimension(
                dof_info.n_vectorization_lanes_filled[dof_access_index][cell],
                VectorizedArrayType::size());
              Number2_ *vector_ptr =
                global_vector_ptr +
                dof_info.dof_indices_contiguous[dof_access_index]
                                               [cell *
                                                VectorizedArrayType::size()] +
                (dof_info
                   .component_dof_indices_offset[active_fe_index]
                                                [first_selected_component] +
                 comp * static_dofs_per_component) *
                  VectorizedArrayType::size();

              if (fe_degree > 1 && do_gradients == true)
                {
                  for (unsigned int i = 0; i < dofs_per_face; ++i)
                    {
                      if (n_face_orientations == 1)
                        {
                          const unsigned int ind1 =
                            index_array_hermite[0][2 * i];
                          const unsigned int ind2 =
                            index_array_hermite[0][2 * i + 1];
                          AssertIndexRange(ind1,
                                           data.dofs_per_component_on_cell);
                          AssertIndexRange(ind2,
                                           data.dofs_per_component_on_cell);
                          const unsigned int i_ = reorientate(0, i);
                          proc.hermite_grad_vectorized(
                            temp1[i_],
                            temp1[i_ + dofs_per_face],
                            vector_ptr + ind1 * VectorizedArrayType::size(),
                            vector_ptr + ind2 * VectorizedArrayType::size(),
                            grad_weight);
                        }
                      else
                        {
                          Assert(false, ExcNotImplemented());
                        }
                    }
                }
              else
                {
                  for (unsigned int i = 0; i < dofs_per_face; ++i)
                    {
                      if (n_face_orientations == 1)
                        {
                          const unsigned int i_  = reorientate(0, i);
                          const unsigned int ind = index_array_nodal[0][i];
                          proc.value_vectorized(
                            temp1[i_],
                            vector_ptr + ind * VectorizedArrayType::size());
                        }
                      else
                        {
                          Assert(false, ExcNotImplemented());
                        }
                    }
                }
            }
          // case 2: contiguous and interleaved indices with fixed stride
          else if (n_face_orientations == 1 &&
                   dof_info.index_storage_variants[dof_access_index][cell] ==
                     MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
                       interleaved_contiguous_strided)
            {
              AssertDimension(n_face_orientations, 1);
              AssertDimension(
                dof_info.n_vectorization_lanes_filled[dof_access_index][cell],
                VectorizedArrayType::size());
              const unsigned int *indices =
                &dof_info.dof_indices_contiguous[dof_access_index]
                                                [cell *
                                                 VectorizedArrayType::size()];

              Number2_ *vector_ptr =
                global_vector_ptr +
                (comp * static_dofs_per_component +
                 dof_info
                   .component_dof_indices_offset[active_fe_index]
                                                [first_selected_component]) *
                  VectorizedArrayType::size();
              if (fe_degree > 1 && do_gradients == true)
                {
                  for (unsigned int i = 0; i < dofs_per_face; ++i)
                    {
                      if (n_face_orientations == 1)
                        {
                          const unsigned int i_ = reorientate(0, i);
                          const unsigned int ind1 =
                            index_array_hermite[0][2 * i] *
                            VectorizedArrayType::size();
                          const unsigned int ind2 =
                            index_array_hermite[0][2 * i + 1] *
                            VectorizedArrayType::size();
                          proc.hermite_grad_vectorized_indexed(
                            temp1[i_],
                            temp1[i_ + dofs_per_face],
                            vector_ptr + ind1,
                            vector_ptr + ind2,
                            grad_weight,
                            indices,
                            indices);
                        }
                      else
                        {
                          Assert(false, ExcNotImplemented());
                        }
                    }
                }
              else
                {
                  for (unsigned int i = 0; i < dofs_per_face; ++i)
                    {
                      if (n_face_orientations == 1)
                        {
                          const unsigned int i_ = reorientate(0, i);
                          const unsigned int ind =
                            index_array_nodal[0][i] *
                            VectorizedArrayType::size();
                          proc.value_vectorized_indexed(temp1[i_],
                                                        vector_ptr + ind,
                                                        indices);
                        }
                      else
                        {
                          Assert(false, ExcNotImplemented());
                        }
                    }
                }
            }
          // case 3: contiguous and interleaved indices with mixed stride
          else if (n_face_orientations == 1 &&
                   dof_info.index_storage_variants[dof_access_index][cell] ==
                     MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
                       interleaved_contiguous_mixed_strides)
            {
              AssertDimension(n_face_orientations, 1);
              const unsigned int *strides =
                &dof_info.dof_indices_interleave_strides
                   [dof_access_index][cell * VectorizedArrayType::size()];
              unsigned int indices[VectorizedArrayType::size()];
              for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
                indices[v] =
                  dof_info.dof_indices_contiguous
                    [dof_access_index]
                    [cell * VectorizedArrayType::size() + v] +
                  (dof_info
                     .component_dof_indices_offset[active_fe_index]
                                                  [first_selected_component] +
                   comp * static_dofs_per_component) *
                    strides[v];
              const unsigned int n_filled_lanes =
                dof_info.n_vectorization_lanes_filled[dof_access_index][cell];

              if (fe_degree > 1 && do_gradients == true)
                {
                  if (n_filled_lanes == VectorizedArrayType::size())
                    for (unsigned int i = 0; i < dofs_per_face; ++i)
                      {
                        if (n_face_orientations == 1)
                          {
                            const unsigned int i_ = reorientate(0, i);
                            unsigned int ind1[VectorizedArrayType::size()];
                            DEAL_II_OPENMP_SIMD_PRAGMA
                            for (unsigned int v = 0;
                                 v < VectorizedArrayType::size();
                                 ++v)
                              ind1[v] =
                                indices[v] +
                                index_array_hermite[0 /*TODO*/ ][2 * i] *
                                  strides[v];
                            unsigned int ind2[VectorizedArrayType::size()];
                            DEAL_II_OPENMP_SIMD_PRAGMA
                            for (unsigned int v = 0;
                                 v < VectorizedArrayType::size();
                                 ++v)
                              ind2[v] =
                                indices[v] +
                                index_array_hermite[0 /*TODO*/ ][2 * i + 1] *
                                  strides[v];
                            proc.hermite_grad_vectorized_indexed(
                              temp1[i_],
                              temp1[i_ + dofs_per_face],
                              global_vector_ptr,
                              global_vector_ptr,
                              grad_weight,
                              ind1,
                              ind2);
                          }
                        else
                          {
                            Assert(false, ExcNotImplemented());
                          }
                      }
                  else
                    {
                      // Partially filled batch: zero the scratch buffer
                      // first (evaluation only), then process lane by lane.
                      if (integrate == false)
                        for (unsigned int i = 0; i < 2 * dofs_per_face; ++i)
                          temp1[i] = VectorizedArrayType();

                      for (unsigned int v = 0; v < n_filled_lanes; ++v)
                        for (unsigned int i = 0; i < dofs_per_face; ++i)
                          {
                            const unsigned int i_ =
                              reorientate(n_face_orientations == 1 ? 0 : v,
                                          i);
                            proc.hermite_grad(
                              temp1[i_][v],
                              temp1[i_ + dofs_per_face][v],
                              global_vector_ptr
                                [indices[v] +
                                 index_array_hermite
                                     [n_face_orientations == 1 ? 0 : v]
                                     [2 * i] *
                                   strides[v]],
                              global_vector_ptr
                                [indices[v] +
                                 index_array_hermite
                                     [n_face_orientations == 1 ? 0 : v]
                                     [2 * i + 1] *
                                   strides[v]],
                              grad_weight[n_face_orientations == 1 ? 0 : v]);
                          }
                    }
                }
              else
                {
                  if (n_filled_lanes == VectorizedArrayType::size())
                    for (unsigned int i = 0; i < dofs_per_face; ++i)
                      {
                        if (n_face_orientations == 1)
                          {
                            unsigned int ind[VectorizedArrayType::size()];
                            DEAL_II_OPENMP_SIMD_PRAGMA
                            for (unsigned int v = 0;
                                 v < VectorizedArrayType::size();
                                 ++v)
                              ind[v] = indices[v] +
                                       index_array_nodal[0][i] * strides[v];
                            const unsigned int i_ = reorientate(0, i);
                            proc.value_vectorized_indexed(temp1[i_],
                                                          global_vector_ptr,
                                                          ind);
                          }
                        else
                          {
                            Assert(false, ExcNotImplemented());
                          }
                      }
                  else
                    {
                      if (integrate == false)
                        for (unsigned int i = 0; i < dofs_per_face; ++i)
                          temp1[i] = VectorizedArrayType();

                      for (unsigned int v = 0; v < n_filled_lanes; ++v)
                        for (unsigned int i = 0; i < dofs_per_face; ++i)
                          proc.value(
                            temp1[reorientate(
                              n_face_orientations == 1 ? 0 : v, i)][v],
                            global_vector_ptr
                              [indices[v] +
                               index_array_nodal
                                   [n_face_orientations == 1 ? 0 : v][i] *
                                 strides[v]]);
                    }
                }
            }
          // case 4: contiguous indices without interleaving
          else if (n_face_orientations > 1 ||
                   dof_info.index_storage_variants[dof_access_index][cell] ==
                     MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
                       contiguous)
            {
              const unsigned int *indices =
                &dof_info.dof_indices_contiguous[dof_access_index]
                                                [cell *
                                                 VectorizedArrayType::size()];
              Number2_ *vector_ptr =
                global_vector_ptr + comp * static_dofs_per_component +
                dof_info
                  .component_dof_indices_offset[active_fe_index]
                                               [first_selected_component];

              const unsigned int n_filled_lanes =
                dof_info.n_vectorization_lanes_filled[dof_access_index][cell];

              const bool vectorization_possible =
                (n_face_orientations == 1) &&
                (n_filled_lanes == VectorizedArrayType::size()) &&
                (sm_ptr != nullptr);

              // Per-lane base pointers, either into the given global
              // vector or into shared-memory views of other ranks.
              std::array<Number2_ *, VectorizedArrayType::size()>
                vector_ptrs = {};

              if (vectorization_possible == false)
                {
                  if (n_face_orientations == 1)
                    {
                      for (unsigned int v = 0; v < n_filled_lanes; ++v)
                        if (sm_ptr == nullptr)
                          {
                            vector_ptrs[v] = vector_ptr + indices[v];
                          }
                        else
                          {
                            const auto &temp =
                              dof_info.dof_indices_contiguous_sm
                                [dof_access_index]
                                [cell * VectorizedArrayType::size() + v];
                            vector_ptrs[v] = const_cast<Number *>(
                              sm_ptr->operator[](temp.first).data() +
                              temp.second + comp * static_dofs_per_component +
                              dof_info.component_dof_indices_offset
                                [active_fe_index][first_selected_component]);
                          }
                    }
                  else if (n_face_orientations == VectorizedArrayType::size())
                    {
                      for (unsigned int v = 0;
                           v < VectorizedArrayType::size();
                           ++v)
                        if (cells[v] != numbers::invalid_unsigned_int)
                          {
                            if (sm_ptr == nullptr)
                              {
                                vector_ptrs[v] =
                                  vector_ptr +
                                  dof_info
                                    .dof_indices_contiguous[dof_access_index]
                                                           [cells[v]];
                              }
                            else
                              {
                                const auto &temp =
                                  dof_info.dof_indices_contiguous_sm
                                    [dof_access_index][cells[v]];
                                vector_ptrs[v] = const_cast<Number *>(
                                  sm_ptr->operator[](temp.first).data() +
                                  temp.second +
                                  comp * static_dofs_per_component +
                                  dof_info.component_dof_indices_offset
                                    [active_fe_index]
                                    [first_selected_component]);
                              }
                          }
                    }
                  else
                    {
                      Assert(false, ExcNotImplemented());
                    }
                }

              if (do_gradients == true &&
                  data.element_type ==
                    MatrixFreeFunctions::tensor_symmetric_hermite)
                {
                  if (vectorization_possible)
                    for (unsigned int i = 0; i < dofs_per_face; ++i)
                      {
                        const unsigned int ind1 =
                          index_array_hermite[0][2 * i];
                        const unsigned int ind2 =
                          index_array_hermite[0][2 * i + 1];
                        const unsigned int i_ = reorientate(0, i);

                        proc.hermite_grad_vectorized_indexed(
                          temp1[i_],
                          temp1[i_ + dofs_per_face],
                          vector_ptr + ind1,
                          vector_ptr + ind2,
                          grad_weight,
                          indices,
                          indices);
                      }
                  else if (n_face_orientations == 1)
                    for (unsigned int i = 0; i < dofs_per_face; ++i)
                      {
                        const unsigned int ind1 =
                          index_array_hermite[0][2 * i];
                        const unsigned int ind2 =
                          index_array_hermite[0][2 * i + 1];
                        const unsigned int i_ = reorientate(0, i);

                        for (unsigned int v = 0; v < n_filled_lanes; ++v)
                          proc.hermite_grad(temp1[i_][v],
                                            temp1[i_ + dofs_per_face][v],
                                            vector_ptrs[v][ind1],
                                            vector_ptrs[v][ind2],
                                            grad_weight[v]);

                        // Zero out unused lanes so the downstream
                        // evaluation kernels see defined data.
                        if (integrate == false)
                          for (unsigned int v = n_filled_lanes;
                               v < VectorizedArrayType::size();
                               ++v)
                            {
                              temp1[i_][v]                 = 0.0;
                              temp1[i_ + dofs_per_face][v] = 0.0;
                            }
                      }
                  else
                    {
                      for (unsigned int v = 0; v < n_filled_lanes; ++v)
                        for (unsigned int i = 0; i < dofs_per_face; ++i)
                          proc.hermite_grad(
                            temp1[reorientate(v, i)][v],
                            temp1[reorientate(v, i) + dofs_per_face][v],
                            vector_ptrs[v][index_array_hermite[v][2 * i]],
                            vector_ptrs[v][index_array_hermite[v][2 * i + 1]],
                            grad_weight[v]);
                    }
                }
              else
                {
                  if (vectorization_possible)
                    for (unsigned int i = 0; i < dofs_per_face; ++i)
                      {
                        const unsigned int ind = index_array_nodal[0][i];
                        const unsigned int i_  = reorientate(0, i);

                        proc.value_vectorized_indexed(temp1[i_],
                                                      vector_ptr + ind,
                                                      indices);
                      }
                  else if (n_face_orientations == 1)
                    for (unsigned int i = 0; i < dofs_per_face; ++i)
                      {
                        const unsigned int ind = index_array_nodal[0][i];
                        const unsigned int i_  = reorientate(0, i);

                        for (unsigned int v = 0; v < n_filled_lanes; ++v)
                          proc.value(temp1[i_][v], vector_ptrs[v][ind]);

                        if (integrate == false)
                          for (unsigned int v = n_filled_lanes;
                               v < VectorizedArrayType::size();
                               ++v)
                            temp1[i_][v] = 0.0;
                      }
                  else
                    for (unsigned int i = 0; i < dofs_per_face; ++i)
                      {
                        for (unsigned int v = 0;
                             v < VectorizedArrayType::size();
                             ++v)
                          if (cells[v] != numbers::invalid_unsigned_int)
                            proc.value(
                              temp1[reorientate(v, i)][v],
                              vector_ptrs[v][index_array_nodal[v][i]]);
                      }
                }
            }
          else
            {
              // case 5: default vector access
              // for the integrate_scatter path (integrate == true), we
              // need to only prepare the data in this function for all
              // components to later call distribute_local_to_global();
              // for the gather_evaluate path (integrate == false), we
              // instead want to leave early because we need to get the
              // vector data from somewhere else
              proc.default_operation(temp1, comp);
              if (integrate)
                accesses_global_vector = false;
              else
                return false;
            }
        }
      else
        {
          // case 5: default vector access
          proc.default_operation(temp1, comp);
          if (integrate)
            accesses_global_vector = false;
          else
            return false;
        }

      // For evaluation, the in-face interpolation (face dofs in temp1 ->
      // quadrature points) happens after the vector access above.
      if (!integrate)
        proc.in_face_operation(temp1, comp);
    }

  // For the evaluation path, the data at quadrature points must be
  // reshuffled according to the face orientation after the fact.
  if (!integrate &&
      (face_orientations[0] > 0 &&
       subface_index < GeometryInfo<dim>::max_children_per_cell))
    {
      AssertDimension(n_face_orientations, 1);
      adjust_for_face_orientation(dim,
                                  n_components,
                                  face_orientations[0],
                                  orientation_map,
                                  false,
                                  do_values,
                                  do_gradients,
                                  data.n_q_points_face,
                                  scratch_data,
                                  values_quad,
                                  gradients_quad);
    }

  return accesses_global_vector;
}
// Selector struct implementing FEFaceEvaluation::gather_evaluate(): read
// the degrees of freedom on a face directly from a global vector and
// evaluate values/gradients at the face quadrature points. The actual
// dispatch over the DoF index-storage variants is delegated to
// fe_face_evaluation_process_and_io() with a read-only Processor.
template <int dim,
          typename Number,
          typename VectorizedArrayType,
          typename Number2 = Number>
struct FEFaceEvaluationImplGatherEvaluateSelector
{
  // Entry point. Returns true if the global vector was accessed by the
  // fast path; false if the caller must fall back to the generic
  // read_dof_values() + evaluate() route (e.g. src_ptr == nullptr for
  // block vectors, or non-tensor-product elements).
  template <int fe_degree, int n_q_points_1d>
  static bool
  run(const unsigned int                          n_components,
      const unsigned int                          n_face_orientations,
      const Number2 *                             src_ptr,
      const std::vector<ArrayView<const Number>> *sm_ptr,
      const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
      const MatrixFreeFunctions::DoFInfo &                       dof_info,
      VectorizedArrayType *                                      values_quad,
      VectorizedArrayType *gradients_quad,
      VectorizedArrayType *scratch_data,
      const bool           evaluate_values,
      const bool           evaluate_gradients,
      const unsigned int   active_fe_index,
      const unsigned int   first_selected_component,
      const std::array<unsigned int, VectorizedArrayType::size()> cells,
      const std::array<unsigned int, VectorizedArrayType::size()> face_nos,
      const unsigned int                                 subface_index,
      const MatrixFreeFunctions::DoFInfo::DoFAccessIndex dof_access_index,
      const std::array<unsigned int, VectorizedArrayType::size()>
                                    face_orientations,
      const Table<2, unsigned int> &orientation_map)
  {
    // No direct pointer into the vector available (e.g. block vector):
    // let the caller use the slow path.
    if (src_ptr == nullptr)
      return false;

    // Non-tensor-product elements cannot use the face shortcut.
    if (data.element_type == MatrixFreeFunctions::tensor_none)
      return false;

    (void)sm_ptr;

    Processor<fe_degree, n_q_points_1d> p(n_components,
                                          false,
                                          src_ptr,
                                          sm_ptr,
                                          data,
                                          dof_info,
                                          values_quad,
                                          gradients_quad,
                                          scratch_data,
                                          evaluate_values,
                                          evaluate_gradients,
                                          active_fe_index,
                                          first_selected_component,
                                          cells,
                                          face_nos,
                                          subface_index,
                                          dof_access_index,
                                          face_orientations,
                                          orientation_map);

    // Element-centric loop processes one face per vectorization lane.
    if (n_face_orientations == VectorizedArrayType::size())
      return fe_face_evaluation_process_and_io<VectorizedArrayType::size()>(
        p);
    else
      return fe_face_evaluation_process_and_io<1>(p);
  }

private:
  // Processor with read semantics: the callbacks load data from the
  // global vector into the scratch buffer (cases correspond to the
  // index-storage variants handled in fe_face_evaluation_process_and_io).
  template <int fe_degree, int n_q_points_1d>
  struct Processor
  {
    // Compile-time information queried by the worker function.
    static const int dim_           = dim;
    static const int fe_degree_     = fe_degree;
    static const int n_q_points_1d_ = n_q_points_1d;
    using VectorizedArrayType_      = VectorizedArrayType;
    using Number_                   = Number;
    using Number2_                  = const Number2;

    Processor(
      const unsigned int                          n_components,
      const bool                                  integrate,
      const Number2 *                             global_vector_ptr,
      const std::vector<ArrayView<const Number>> *sm_ptr,
      const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
      const MatrixFreeFunctions::DoFInfo &                       dof_info,
      VectorizedArrayType *values_quad,
      VectorizedArrayType *gradients_quad,
      VectorizedArrayType *scratch_data,
      const bool           do_values,
      const bool           do_gradients,
      const unsigned int   active_fe_index,
      const unsigned int   first_selected_component,
      const std::array<unsigned int, VectorizedArrayType::size()> cells,
      const std::array<unsigned int, VectorizedArrayType::size()> face_nos,
      const unsigned int                                 subface_index,
      const MatrixFreeFunctions::DoFInfo::DoFAccessIndex dof_access_index,
      const std::array<unsigned int, VectorizedArrayType::size()>
                                    face_orientations,
      const Table<2, unsigned int> &orientation_map)
      : n_components(n_components)
      , integrate(integrate)
      , global_vector_ptr(global_vector_ptr)
      , sm_ptr(sm_ptr)
      , data(data)
      , dof_info(dof_info)
      , values_quad(values_quad)
      , gradients_quad(gradients_quad)
      , scratch_data(scratch_data)
      , do_values(do_values)
      , do_gradients(do_gradients)
      , active_fe_index(active_fe_index)
      , first_selected_component(first_selected_component)
      , cells(cells)
      , face_nos(face_nos)
      , subface_index(subface_index)
      , dof_access_index(dof_access_index)
      , face_orientations(face_orientations)
      , orientation_map(orientation_map)
    {}

    // case 1a) vectorized read of a value/derivative dof pair and
    // formation of the normal derivative via the Hermite weight.
    template <typename T0, typename T1, typename T2>
    void
    hermite_grad_vectorized(T0 &      temp_1,
                            T0 &      temp_2,
                            const T1  src_ptr_1,
                            const T1  src_ptr_2,
                            const T2 &grad_weight)
    {
      do_vectorized_read(src_ptr_1, temp_1);
      do_vectorized_read(src_ptr_2, temp_2);
      temp_2 = grad_weight * (temp_1 - temp_2);
    }

    // case 1b) plain vectorized read of a nodal value.
    template <typename T1, typename T2>
    void
    value_vectorized(T1 &temp, const T2 src_ptr)
    {
      do_vectorized_read(src_ptr, temp);
    }

    // case 2a) gather of a Hermite dof pair through index arrays.
    template <typename T0, typename T1, typename T2, typename T3>
    void
    hermite_grad_vectorized_indexed(T0 &      temp_1,
                                    T0 &      temp_2,
                                    const T1  src_ptr_1,
                                    const T1  src_ptr_2,
                                    const T2 &grad_weight,
                                    const T3 &indices_1,
                                    const T3 &indices_2)
    {
      do_vectorized_gather(src_ptr_1, indices_1, temp_1);
      do_vectorized_gather(src_ptr_2, indices_2, temp_2);
      temp_2 = grad_weight * (temp_1 - temp_2);
    }

    // case 2b) gather of a nodal value through an index array.
    template <typename T0, typename T1, typename T2>
    void
    value_vectorized_indexed(T0 &temp, const T1 src_ptr, const T2 &indices)
    {
      do_vectorized_gather(src_ptr, indices, temp);
    }

    template <typename T0, typename T1, typename T2>
    void
    hermite_grad(T0 &      temp_1,
                 T0 &      temp_2,
                 const T1 &src_ptr_1,
                 const T2 &src_ptr_2,
                 const T2 &grad_weight)
    {
      // case 3a)
      temp_1 = src_ptr_1;
      temp_2 = grad_weight * (temp_1 - src_ptr_2);
    }

    template <typename T1, typename T2>
    void
    value(T1 &temp, const T2 &src_ptr)
    {
      // case 3b)
      temp = src_ptr;
    }

    template <typename T1>
    void
    default_operation(const T1 &, const unsigned int)
    {
      // case 5) nothing to do: the caller falls back to the generic
      // read_dof_values() path.
    }

    // Evaluate the face dof values in temp1 at the quadrature points of
    // the face for one component (values and/or gradients).
    template <typename T1>
    void
    in_face_operation(T1 &temp1, const unsigned int comp)
    {
      const unsigned int dofs_per_face =
        fe_degree > -1 ?
          Utilities::pow(fe_degree + 1, dim - 1) :
          Utilities::pow(data.data.front().fe_degree + 1, dim - 1);
      const unsigned int n_q_points =
        fe_degree > -1 ? Utilities::pow(n_q_points_1d, dim - 1) :
                         data.n_q_points_face;

      // Use the symmetric (even-odd) kernel unless we are on a refined
      // (hanging-node) face or the element does not permit it.
      if (fe_degree > -1 &&
          subface_index >= GeometryInfo<dim>::max_children_per_cell &&
          data.element_type <= MatrixFreeFunctions::tensor_symmetric)
        FEFaceEvaluationImpl<true,
                             dim,
                             fe_degree,
                             n_q_points_1d,
                             VectorizedArrayType>::
          evaluate_in_face( /* n_components */ 1,
                            data,
                            temp1,
                            values_quad + comp * n_q_points,
                            gradients_quad + comp * dim * n_q_points,
                            scratch_data + 2 * dofs_per_face,
                            do_values,
                            do_gradients,
                            subface_index);
      else
        FEFaceEvaluationImpl<false,
                             dim,
                             fe_degree,
                             n_q_points_1d,
                             VectorizedArrayType>::
          evaluate_in_face( /* n_components */ 1,
                            data,
                            temp1,
                            values_quad + comp * n_q_points,
                            gradients_quad + comp * dim * n_q_points,
                            scratch_data + 2 * dofs_per_face,
                            do_values,
                            do_gradients,
                            subface_index);
    }

    const unsigned int                          n_components;
    const bool                                  integrate;
    const Number2 *                             global_vector_ptr;
    const std::vector<ArrayView<const Number>> *sm_ptr;
    const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data;
    const MatrixFreeFunctions::DoFInfo &                       dof_info;
    VectorizedArrayType *                                      values_quad;
    VectorizedArrayType *gradients_quad;
    VectorizedArrayType *scratch_data;
    const bool           do_values;
    const bool           do_gradients;
    const unsigned int   active_fe_index;
    const unsigned int   first_selected_component;
    const std::array<unsigned int, VectorizedArrayType::size()> cells;
    const std::array<unsigned int, VectorizedArrayType::size()> face_nos;
    const unsigned int                                 subface_index;
    const MatrixFreeFunctions::DoFInfo::DoFAccessIndex dof_access_index;
    const std::array<unsigned int, VectorizedArrayType::size()>
                                  face_orientations;
    const Table<2, unsigned int> &orientation_map;
  };
};
// Selector struct implementing FEFaceEvaluation::integrate_scatter():
// integrate the values/gradients tested by the shape functions on a face
// and accumulate the result directly into a global vector. Mirror image
// of FEFaceEvaluationImplGatherEvaluateSelector, with a Processor whose
// callbacks add into the destination instead of reading from the source.
template <int dim,
          typename Number,
          typename VectorizedArrayType,
          typename Number2 = Number>
struct FEFaceEvaluationImplIntegrateScatterSelector
{
  // Entry point. Returns true if the result was written into the global
  // vector by the fast path; false if the caller must still call
  // distribute_local_to_global() on values_array.
  template <int fe_degree, int n_q_points_1d>
  static bool
  run(const unsigned int                           n_components,
      const unsigned int                           n_face_orientations,
      Number2 *                                    dst_ptr,
      const std::vector<ArrayView<const Number2>> *sm_ptr,
      const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
      const MatrixFreeFunctions::DoFInfo &                       dof_info,
      VectorizedArrayType *                                      values_array,
      VectorizedArrayType *                                      values_quad,
      VectorizedArrayType *gradients_quad,
      VectorizedArrayType *scratch_data,
      const bool           integrate_values,
      const bool           integrate_gradients,
      const unsigned int   active_fe_index,
      const unsigned int   first_selected_component,
      const std::array<unsigned int, VectorizedArrayType::size()> cells,
      const std::array<unsigned int, VectorizedArrayType::size()> face_nos,
      const unsigned int                                 subface_index,
      const MatrixFreeFunctions::DoFInfo::DoFAccessIndex dof_access_index,
      const std::array<unsigned int, VectorizedArrayType::size()>
                                    face_orientations,
      const Table<2, unsigned int> &orientation_map)
  {
    (void)sm_ptr;

    if (dst_ptr == nullptr ||
        data.element_type == MatrixFreeFunctions::tensor_none)
      {
        AssertDimension(n_face_orientations, 1);

        // for block vectors simply integrate
        FEFaceEvaluationImplIntegrateSelector<dim, VectorizedArrayType>::
          template run<fe_degree, n_q_points_1d>(n_components,
                                                 data,
                                                 values_array,
                                                 values_quad,
                                                 gradients_quad,
                                                 scratch_data,
                                                 integrate_values,
                                                 integrate_gradients,
                                                 face_nos[0],
                                                 subface_index,
                                                 face_orientations[0],
                                                 orientation_map);

        // default vector access
        return false;
      }

    Processor<fe_degree, n_q_points_1d> p(values_array,
                                          n_components,
                                          true,
                                          dst_ptr,
                                          sm_ptr,
                                          data,
                                          dof_info,
                                          values_quad,
                                          gradients_quad,
                                          scratch_data,
                                          integrate_values,
                                          integrate_gradients,
                                          active_fe_index,
                                          first_selected_component,
                                          cells,
                                          face_nos,
                                          subface_index,
                                          dof_access_index,
                                          face_orientations,
                                          orientation_map);

    // Element-centric loop processes one face per vectorization lane.
    if (n_face_orientations == VectorizedArrayType::size())
      return fe_face_evaluation_process_and_io<VectorizedArrayType::size()>(
        p);
    else
      return fe_face_evaluation_process_and_io<1>(p);
  }

private:
  // Processor with accumulate semantics: the callbacks add the tested
  // face contributions from the scratch buffer into the global vector.
  template <int fe_degree, int n_q_points_1d>
  struct Processor
  {
    // Compile-time information queried by the worker function.
    static const int dim_           = dim;
    static const int fe_degree_     = fe_degree;
    static const int n_q_points_1d_ = n_q_points_1d;
    using VectorizedArrayType_      = VectorizedArrayType;
    using Number_                   = Number;
    using Number2_                  = Number2;

    Processor(
      VectorizedArrayType *                       values_array,
      const unsigned int                          n_components,
      const bool                                  integrate,
      Number2 *                                   global_vector_ptr,
      const std::vector<ArrayView<const Number>> *sm_ptr,
      const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
      const MatrixFreeFunctions::DoFInfo &                       dof_info,
      VectorizedArrayType *values_quad,
      VectorizedArrayType *gradients_quad,
      VectorizedArrayType *scratch_data,
      const bool           do_values,
      const bool           do_gradients,
      const unsigned int   active_fe_index,
      const unsigned int   first_selected_component,
      const std::array<unsigned int, VectorizedArrayType::size()> cells,
      const std::array<unsigned int, VectorizedArrayType::size()> face_nos,
      const unsigned int                                 subface_index,
      const MatrixFreeFunctions::DoFInfo::DoFAccessIndex dof_access_index,
      const std::array<unsigned int, VectorizedArrayType::size()>
                                    face_orientations,
      const Table<2, unsigned int> &orientation_map)
      : values_array(values_array)
      , n_components(n_components)
      , integrate(integrate)
      , global_vector_ptr(global_vector_ptr)
      , sm_ptr(sm_ptr)
      , data(data)
      , dof_info(dof_info)
      , values_quad(values_quad)
      , gradients_quad(gradients_quad)
      , scratch_data(scratch_data)
      , do_values(do_values)
      , do_gradients(do_gradients)
      , active_fe_index(active_fe_index)
      , first_selected_component(first_selected_component)
      , cells(cells)
      , face_nos(face_nos)
      , subface_index(subface_index)
      , dof_access_index(dof_access_index)
      , face_orientations(face_orientations)
      , orientation_map(orientation_map)
    {}

    template <typename T0, typename T1, typename T2, typename T3, typename T4>
    void
    hermite_grad_vectorized(const T0 &temp_1,
                            const T1 &temp_2,
                            T2        dst_ptr_1,
                            T3        dst_ptr_2,
                            const T4 &grad_weight)
    {
      // case 1a): split temp_1/temp_2 back into the value and
      // derivative dof contributions and add into the vector.
      const VectorizedArrayType val  = temp_1 - grad_weight * temp_2;
      const VectorizedArrayType grad = grad_weight * temp_2;
      do_vectorized_add(val, dst_ptr_1);
      do_vectorized_add(grad, dst_ptr_2);
    }

    template <typename T0, typename T1>
    void
    value_vectorized(const T0 &temp, T1 dst_ptr)
    {
      // case 1b)
      do_vectorized_add(temp, dst_ptr);
    }

    template <typename T0, typename T1, typename T2, typename T3>
    void
    hermite_grad_vectorized_indexed(const T0 &temp_1,
                                    const T0 &temp_2,
                                    T1        dst_ptr_1,
                                    T1        dst_ptr_2,
                                    const T2 &grad_weight,
                                    const T3 &indices_1,
                                    const T3 &indices_2)
    {
      // case 2a)
      const VectorizedArrayType val  = temp_1 - grad_weight * temp_2;
      const VectorizedArrayType grad = grad_weight * temp_2;
      do_vectorized_scatter_add(val, indices_1, dst_ptr_1);
      do_vectorized_scatter_add(grad, indices_2, dst_ptr_2);
    }

    template <typename T0, typename T1, typename T2>
    void
    value_vectorized_indexed(const T0 &temp, T1 dst_ptr, const T2 &indices)
    {
      // case 2b)
      do_vectorized_scatter_add(temp, indices, dst_ptr);
    }

    template <typename T0, typename T1, typename T2>
    void
    hermite_grad(const T0 &temp_1,
                 const T0 &temp_2,
                 T1 &      dst_ptr_1,
                 T1 &      dst_ptr_2,
                 const T2 &grad_weight)
    {
      // case 3a)
      const Number val  = temp_1 - grad_weight * temp_2;
      const Number grad = grad_weight * temp_2;
      dst_ptr_1 += val;
      dst_ptr_2 += grad;
    }

    template <typename T0, typename T1>
    void
    value(const T0 &temp, T1 &dst_ptr)
    {
      // case 3b)
      dst_ptr += temp;
    }

    template <typename T0>
    void
    default_operation(const T0 &temp1, const unsigned int comp)
    {
      // case 5: default vector access, must be handled separately, just do
      // the face-normal interpolation
      FEFaceNormalEvaluationImpl<dim, fe_degree, VectorizedArrayType>::
        template interpolate<false, false>(
          /* n_components */ 1,
          data,
          temp1,
          values_array + comp * data.dofs_per_component_on_cell,
          do_gradients,
          face_nos[0]);
    }

    // Integrate the quadrature-point data of one component into the face
    // dof values stored in temp1 (transpose of the evaluation kernel).
    template <typename T0>
    void
    in_face_operation(T0 &temp1, const unsigned int comp)
    {
      const unsigned int dofs_per_face =
        fe_degree > -1 ?
          Utilities::pow(fe_degree + 1, dim - 1) :
          Utilities::pow(data.data.front().fe_degree + 1, dim - 1);
      const unsigned int n_q_points =
        fe_degree > -1 ? Utilities::pow(n_q_points_1d, dim - 1) :
                         data.n_q_points_face;

      // Use the symmetric (even-odd) kernel unless we are on a refined
      // (hanging-node) face or the element does not permit it.
      if (fe_degree > -1 &&
          subface_index >= GeometryInfo<dim>::max_children_per_cell &&
          data.element_type <=
            internal::MatrixFreeFunctions::tensor_symmetric)
        internal::FEFaceEvaluationImpl<true,
                                       dim,
                                       fe_degree,
                                       n_q_points_1d,
                                       VectorizedArrayType>::
          integrate_in_face( /* n_components */ 1,
                             data,
                             temp1,
                             values_quad + comp * n_q_points,
                             gradients_quad + dim * comp * n_q_points,
                             scratch_data + 2 * dofs_per_face,
                             do_values,
                             do_gradients,
                             subface_index);
      else
        internal::FEFaceEvaluationImpl<false,
                                       dim,
                                       fe_degree,
                                       n_q_points_1d,
                                       VectorizedArrayType>::
          integrate_in_face( /* n_components */ 1,
                             data,
                             temp1,
                             values_quad + comp * n_q_points,
                             gradients_quad + dim * comp * n_q_points,
                             scratch_data + 2 * dofs_per_face,
                             do_values,
                             do_gradients,
                             subface_index);
    }

    VectorizedArrayType *                       values_array;
    const unsigned int                          n_components;
    const bool                                  integrate;
    Number2 *                                   global_vector_ptr;
    const std::vector<ArrayView<const Number>> *sm_ptr;
    const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data;
    const MatrixFreeFunctions::DoFInfo &                       dof_info;
    VectorizedArrayType *                                      values_quad;
    VectorizedArrayType *gradients_quad;
    VectorizedArrayType *scratch_data;
    const bool           do_values;
    const bool           do_gradients;
    const unsigned int   active_fe_index;
    const unsigned int   first_selected_component;
    const std::array<unsigned int, VectorizedArrayType::size()> cells;
    const std::array<unsigned int, VectorizedArrayType::size()> face_nos;
    const unsigned int                                 subface_index;
    const MatrixFreeFunctions::DoFInfo::DoFAccessIndex dof_access_index;
    const std::array<unsigned int, VectorizedArrayType::size()>
                                  face_orientations;
    const Table<2, unsigned int> &orientation_map;
  };
};
/**
 * This struct implements the action of the inverse mass matrix operation
 * using an FEEvaluationBaseData argument.
 */
template <int dim, typename Number>
struct CellwiseInverseMassMatrixImplBasic
{
template <int fe_degree, int = 0>
static bool
run(const unsigned int n_components,
const FEEvaluationBaseData<dim,
typename Number::value_type,
false,
Number> &fe_eval,
const Number * in_array,
Number * out_array,
typename std::enable_if<fe_degree != -1>::type * = nullptr)
{
constexpr unsigned int dofs_per_component =
Utilities::pow(fe_degree + 1, dim);
Assert(dim >= 1 || dim <= 3, ExcNotImplemented());
Assert(fe_eval.get_shape_info().element_type <=
MatrixFreeFunctions::tensor_symmetric,
ExcNotImplemented());
internal::EvaluatorTensorProduct<internal::evaluate_evenodd,
dim,
fe_degree + 1,
fe_degree + 1,
Number>
evaluator(
AlignedVector<Number>(),
AlignedVector<Number>(),
fe_eval.get_shape_info().data.front().inverse_shape_values_eo);
for (unsigned int d = 0; d < n_components; ++d)
{
const Number *in = in_array + d * dofs_per_component;
Number * out = out_array + d * dofs_per_component;
// Need to select 'apply' method with hessian slot because values
// assume symmetries that do not exist in the inverse shapes
evaluator.template hessians<0, true, false>(in, out);
if (dim > 1)
evaluator.template hessians<1, true, false>(out, out);
if (dim > 2)
evaluator.template hessians<2, true, false>(out, out);
}
for (unsigned int q = 0; q < dofs_per_component; ++q)
{
const Number inverse_JxW_q = Number(1.) / fe_eval.JxW(q);
for (unsigned int d = 0; d < n_components; ++d)
out_array[q + d * dofs_per_component] *= inverse_JxW_q;
}
for (unsigned int d = 0; d < n_components; ++d)
{
Number *out = out_array + d * dofs_per_component;
if (dim > 2)
evaluator.template hessians<2, false, false>(out, out);
if (dim > 1)
evaluator.template hessians<1, false, false>(out, out);
evaluator.template hessians<0, false, false>(out, out);
}
return false;
}
template <int fe_degree, int = 0>
static bool
run(const unsigned int n_components,
const FEEvaluationBaseData<dim,
typename Number::value_type,
false,
Number> &fe_eval,
const Number * in_array,
Number * out_array,
typename std::enable_if<fe_degree == -1>::type * = nullptr)
{
static_assert(fe_degree == -1, "Only usable for degree -1");
const unsigned int dofs_per_component =
fe_eval.get_shape_info().dofs_per_component_on_cell;
Assert(dim >= 1 || dim <= 3, ExcNotImplemented());
internal::
EvaluatorTensorProduct<internal::evaluate_general, dim, 0, 0, Number>
evaluator(fe_eval.get_shape_info().data.front().inverse_shape_values,
AlignedVector<Number>(),
AlignedVector<Number>(),
fe_eval.get_shape_info().data.front().fe_degree + 1,
fe_eval.get_shape_info().data.front().fe_degree + 1);
for (unsigned int d = 0; d < n_components; ++d)
{
const Number *in = in_array + d * dofs_per_component;
Number * out = out_array + d * dofs_per_component;
// Need to select 'apply' method with hessian slot because values
// assume symmetries that do not exist in the inverse shapes
evaluator.template values<0, true, false>(in, out);
if (dim > 1)
evaluator.template values<1, true, false>(out, out);
if (dim > 2)
evaluator.template values<2, true, false>(out, out);
}
for (unsigned int q = 0; q < dofs_per_component; ++q)
{
const Number inverse_JxW_q = Number(1.) / fe_eval.JxW(q);
for (unsigned int d = 0; d < n_components; ++d)
out_array[q + d * dofs_per_component] *= inverse_JxW_q;
}
for (unsigned int d = 0; d < n_components; ++d)
{
Number *out = out_array + d * dofs_per_component;
if (dim > 2)
evaluator.template values<2, false, false>(out, out);
if (dim > 1)
evaluator.template values<1, false, false>(out, out);
evaluator.template values<0, false, false>(out, out);
}
return false;
}
};
/**
* 这个结构实现了使用FEEvaluationBaseData参数的反质量矩阵操作的动作。
*
*/
template <int dim, typename Number>
struct CellwiseInverseMassMatrixImplFlexible
{
template <int fe_degree, int = 0>
static bool
run(const unsigned int n_desired_components,
const AlignedVector<Number> &inverse_shape,
const AlignedVector<Number> &inverse_coefficients,
const Number * in_array,
Number * out_array,
typename std::enable_if<fe_degree != -1>::type * = nullptr)
{
constexpr unsigned int dofs_per_component =
Utilities::pow(fe_degree + 1, dim);
Assert(inverse_coefficients.size() > 0 &&
inverse_coefficients.size() % dofs_per_component == 0,
ExcMessage(
"Expected diagonal to be a multiple of scalar dof per cells"));
if (inverse_coefficients.size() != dofs_per_component)
AssertDimension(n_desired_components * dofs_per_component,
inverse_coefficients.size());
Assert(dim >= 1 || dim <= 3, ExcNotImplemented());
internal::EvaluatorTensorProduct<internal::evaluate_evenodd,
dim,
fe_degree + 1,
fe_degree + 1,
Number>
evaluator(AlignedVector<Number>(),
AlignedVector<Number>(),
inverse_shape);
const unsigned int shift_coefficient =
inverse_coefficients.size() > dofs_per_component ? dofs_per_component :
0;
const Number *inv_coefficient = inverse_coefficients.data();
for (unsigned int d = 0; d < n_desired_components; ++d)
{
const Number *in = in_array + d * dofs_per_component;
Number * out = out_array + d * dofs_per_component;
// Need to select 'apply' method with hessian slot because values
// assume symmetries that do not exist in the inverse shapes
evaluator.template hessians<0, true, false>(in, out);
if (dim > 1)
evaluator.template hessians<1, true, false>(out, out);
if (dim > 2)
evaluator.template hessians<2, true, false>(out, out);
for (unsigned int q = 0; q < dofs_per_component; ++q)
out[q] *= inv_coefficient[q];
if (dim > 2)
evaluator.template hessians<2, false, false>(out, out);
if (dim > 1)
evaluator.template hessians<1, false, false>(out, out);
evaluator.template hessians<0, false, false>(out, out);
inv_coefficient += shift_coefficient;
}
return false;
}
/**
* 度的版本= -
*
*/
template <int fe_degree, int = 0>
static bool
run(const unsigned int,
const AlignedVector<Number> &,
const AlignedVector<Number> &,
const Number *,
Number *,
typename std::enable_if<fe_degree == -1>::type * = nullptr)
{
static_assert(fe_degree == -1, "Only usable for degree -1");
Assert(false, ExcNotImplemented());
return false;
}
};
  /**
   * This struct implements the action of the inverse mass matrix operation
   * with FEEvaluationBaseData arguments, transforming data given at
   * quadrature points into coefficients of the polynomial basis.
   */
  template <int dim, typename Number>
  struct CellwiseInverseMassMatrixImplTransformFromQPoints
  {
    /**
     * Version for a compile-time known polynomial degree (fe_degree != -1),
     * using the even-odd decomposed inverse shape values.
     *
     * Always returns false (no fallback path is needed).
     */
    template <int fe_degree, int = 0>
    static bool
    run(const unsigned int n_desired_components,
        const FEEvaluationBaseData<dim,
                                   typename Number::value_type,
                                   false,
                                   Number> &fe_eval,
        const Number *                      in_array,
        Number *                            out_array,
        typename std::enable_if<fe_degree != -1>::type * = nullptr)
    {
      const AlignedVector<Number> &inverse_shape =
        fe_eval.get_shape_info().data.front().inverse_shape_values_eo;
      constexpr unsigned int dofs_per_cell = Utilities::pow(fe_degree + 1, dim);
      internal::EvaluatorTensorProduct<internal::evaluate_evenodd,
                                       dim,
                                       fe_degree + 1,
                                       fe_degree + 1,
                                       Number>
        evaluator(AlignedVector<Number>(),
                  AlignedVector<Number>(),
                  inverse_shape);
      for (unsigned int d = 0; d < n_desired_components; ++d)
        {
          const Number *in  = in_array + d * dofs_per_cell;
          Number *      out = out_array + d * dofs_per_cell;
          // Apply the inverse shape values one tensor direction at a time,
          // using the 'hessians' slot (the inverse shapes do not have the
          // symmetries the 'values' slot would assume; see the sibling
          // structs above).
          if (dim == 3)
            {
              evaluator.template hessians<2, false, false>(in, out);
              evaluator.template hessians<1, false, false>(out, out);
              evaluator.template hessians<0, false, false>(out, out);
            }
          if (dim == 2)
            {
              evaluator.template hessians<1, false, false>(in, out);
              evaluator.template hessians<0, false, false>(out, out);
            }
          if (dim == 1)
            evaluator.template hessians<0, false, false>(in, out);
        }
      return false;
    }

    /**
     * Version for a run-time polynomial degree (fe_degree == -1), using the
     * general inverse shape values and scratch storage from fe_eval.
     */
    template <int fe_degree, int = 0>
    static bool
    run(const unsigned int n_desired_components,
        const FEEvaluationBaseData<dim,
                                   typename Number::value_type,
                                   false,
                                   Number> &fe_eval,
        const Number *                      in_array,
        Number *                            out_array,
        typename std::enable_if<fe_degree == -1>::type * = nullptr)
    {
      static_assert(fe_degree == -1, "Only usable for degree -1");
      const AlignedVector<Number> &inverse_shape =
        fe_eval.get_shape_info().data.front().inverse_shape_values;
      const unsigned int dofs_per_component =
        fe_eval.get_shape_info().dofs_per_component_on_cell;
      const unsigned int n_q_points = fe_eval.get_shape_info().n_q_points;
      internal::
        EvaluatorTensorProduct<internal::evaluate_general, dim, 0, 0, Number>
          evaluator(inverse_shape,
                    AlignedVector<Number>(),
                    AlignedVector<Number>(),
                    fe_eval.get_shape_info().data.front().fe_degree + 1,
                    fe_eval.get_shape_info().data.front().n_q_points_1d);
      // Two temporary buffers carved out of the evaluation scratch array;
      // each must hold max(n_q_points, dofs_per_component) entries.
      auto temp_1 = fe_eval.get_scratch_data().begin();
      auto temp_2 = temp_1 + std::max(n_q_points, dofs_per_component);
      for (unsigned int d = 0; d < n_desired_components; ++d)
        {
          // Input is sized by quadrature points, output by basis functions.
          const Number *in  = in_array + d * n_q_points;
          Number *      out = out_array + d * dofs_per_component;
          if (dim == 3)
            {
              evaluator.template values<2, false, false>(in, temp_1);
              evaluator.template values<1, false, false>(temp_1, temp_2);
              evaluator.template values<0, false, false>(temp_2, out);
            }
          if (dim == 2)
            {
              evaluator.template values<1, false, false>(in, temp_1);
              evaluator.template values<0, false, false>(temp_1, out);
            }
          if (dim == 1)
            evaluator.template values<0, false, false>(in, out);
        }
      return false;
    }
  };
} // end of namespace internal
DEAL_II_NAMESPACE_CLOSE
#endif
| 42.635705 | 178 | 0.474229 | [
"shape",
"vector",
"transform"
] |
df7d82cdf3f8bcc2b8ca5e6914607f94f2b93c6e | 6,595 | c | C | src/bin/header_change.c | timburrow/ovj-private | 5b84ff1ac51ef387d5073352f9c604e28150d6d2 | [
"Apache-2.0"
] | 1 | 2019-09-17T21:07:37.000Z | 2019-09-17T21:07:37.000Z | src/bin/header_change.c | timburrow/openvnmrj-source | f5e65eb2db4bded3437701f0fa91abd41928579c | [
"Apache-2.0"
] | 1 | 2019-08-12T20:59:59.000Z | 2019-08-13T08:04:30.000Z | src/bin/header_change.c | timburrow/openvnmrj-source | f5e65eb2db4bded3437701f0fa91abd41928579c | [
"Apache-2.0"
] | null | null | null | /*
* Copyright (C) 2015 University of Oregon
*
* You may distribute under the terms of either the GNU General Public
* License or the Apache License, as specified in the LICENSE file.
*
* For more information, see the LICENSE file.
*/
/***********************************************************/
/**** PROGRAM: header_change.c ****/
/**** For updating the header of FID (ct) ****/
/**** Last update: Feb 13, 2007 ****/
/***********************************************************/
/**[1] INCLUDE FILES **************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <netinet/in.h>
/**[2] GENEREL DECLERATIONS *******************************/
/* File header at the start of each data file.
 * NOTE(review): multi-byte fields appear to be stored big-endian on disk --
 * main() converts them with ntohl()/ntohs() before use; confirm against the
 * VnmrJ data file format description. */
struct {
    /* Used at start of each data file (FIDs, spectra, 2D) */
    int nblocks; /* number of blocks in file */
    int ntraces; /* number of traces per block */
    int np; /* number of elements per trace */
    int ebytes; /* number of bytes per element */
    int tbytes; /* number of bytes per trace */
    int bbytes; /* number of bytes per block */
    short vers_id; /* software version, file_id status bits */
    short status; /* status of whole file */
    int nbheaders; /* number of block headers per block */
} datafilehead;
/* Per-block header preceding each block's data; ctcount is the field this
 * tool rewrites. */
struct {
    /* Each file block contains the following header */
    short scale; /* scaling factor */
    short status; /* status of data in block */
    short index; /* block index */
    short mode; /* mode of data in block */
    int ctcount; /* ct value for FID */
    float lpval; /* f2 (2D-f1) left phase in phasefile */
    float rpval; /* f2 (2D-f1) right phase in phasefile */
    float lvl; /* left drift correction */
    float tlt; /* tilt drift correction */
} datablockhead;
/*** PROGRAM BEGIN *************************************/
int main(int argc, char *argv[])
{
    /*** VARIABLE DECLARATION **************************************/
    FILE *in_file, *out_file;  /* input and output FID files */
    int i, j, no_points;       /* loop counters; data elements per trace */
    short data_short;          /* buffer for one 2-byte data element */
    int data_int;              /* buffer for one 4-byte data element */
    int bit[16], n;            /* decoded file-status bits */
    int file_status;           /* raw status word from the file header */
    int nblks;                 /* number of blocks in the file */
    int ctval;                 /* new CT value to stamp into every block */
    int no_bytes;              /* bytes per data element (2 or 4) */

    /*** CONTROL OF INPUT PARAMETERS ******************************/
    /* Usage: header_change <input fid> <output fid> <new ct> */
    if (argc < 2)
    {
        printf("\nName of input file was not passed!\n");
        exit (3);
    }
    if (argc < 3)
    {
        printf("\nName of output file was not passed!\n");
        exit (3);
    }
    if (argc < 4)
    {
        printf("\nNew CT was not passed!\n");
        exit (3);
    }

    /*** OPENING THE FILES AND READING THE HEADER ****************/
    if ((in_file = fopen(argv[1], "rb")) == NULL)
    {
        printf("\nCan not open input file");
        exit (3);
    }
    rewind(in_file);
    if ((out_file = fopen(argv[2], "wb")) == NULL)
    {
        printf("\nCan not open output file");
        exit (3);
    }
    rewind(out_file);

    /*** WRITE A HEADER OF AN OUTPUT FILE *****************/
    /* The file header is copied through unchanged. */
    if (fread(&datafilehead, sizeof(datafilehead), 1, in_file) != 1)
    {
        fprintf (stderr, "Error in reading input data (datafilehead)\n");
        exit (3);
    }
    if (fwrite(&datafilehead, sizeof(datafilehead), 1, out_file) != 1)
    {
        fprintf (stderr, "Error in writting output data (datafilehead)\n");
        exit (3);
    }

    /*** READ PARAMETERS **********************************/
    /* Header fields are stored in network byte order. */
    no_points = ntohl(datafilehead.np);

    /*** FILE STATUS ********************************/
    file_status = ntohs(datafilehead.status);
    /* Decode the 16 status bits with shifts; the original used
     * floating-point pow() for this, which was needlessly fragile.
     * Only bit 3 (integer vs. floating-point data) is consumed below. */
    for (n = 0; n < 16; n++)
    {
        bit[n] = (file_status >> n) & 1;
    }
    fprintf (stderr, "\n******* FID HEADER UPDATE *************\n");
    fprintf (stderr, "FILE STATUS = %d\n", file_status);
    if (bit[3] == 0)
    {
        fprintf (stderr, "Type of data: INTEGER\n");
    }
    if (bit[3] == 1)
    {
        fprintf (stderr, "Type of data: FLOATING POINT\n");
    }
    fprintf (stderr, "np = %d\n", no_points);

    /*** WRITING THE DATA ********************************/
    nblks = ntohl(datafilehead.nblocks);
    no_bytes = ntohl(datafilehead.ebytes);
    ctval = atoi(argv[3]);
    /* NOTE(review): each block is copied as a single trace of 'no_points'
     * elements; files with ntraces > 1 would be truncated -- confirm input
     * files always carry one trace per block. */
    for (i = 0; i < nblks; ++i)
    {
        if (fread(&datablockhead, sizeof(datablockhead), 1, in_file) != 1)
        {
            fprintf (stderr, "Error in reading input data (datablockhead)\n");
            exit (3);
        }
        /* Patch the CT count (stored big-endian) in every block header. */
        datablockhead.ctcount = htonl(ctval);
        fprintf (stderr, "block[%3d] new CT = %3d\n", i + 1, ctval);
        if (fwrite(&datablockhead, sizeof(datablockhead), 1, out_file) != 1)
        {
            fprintf (stderr, "Error in writting output data (datablockhead)\n");
            exit (3);
        }
        /* Copy the block's data elements through unchanged. */
        for (j = 0; j < no_points; ++j)
        {
            if (no_bytes == 2)
            {
                if (fread(&data_short, sizeof(data_short), 1, in_file) != 1)
                {
                    fprintf (stderr, "Error in reading input data (integer)\n");
                    exit (3);
                }
                if (fwrite(&data_short, sizeof(data_short), 1, out_file) != 1)
                {
                    fprintf (stderr, "Error in writting output data (integer) \n");
                    exit (3);
                }
            }
            else
            {
                if (fread(&data_int, sizeof(data_int), 1, in_file) != 1)
                {
                    fprintf (stderr, "Error in reading input data (float)\n");
                    exit (3);
                }
                if (fwrite(&data_int, sizeof(data_int), 1, out_file) != 1)
                {
                    fprintf (stderr, "Error in writting output data\n");
                    exit (3);
                }
            }
        }
    }

    /*** CLOSING FILES *************************************/
    if (fclose(in_file) != 0)
    {
        printf ("Error closing input file");
        exit (3);
    }
    if (fclose(out_file) != 0)
    {
        printf ("Error closing output file");
        exit (3);
    }
    exit(EXIT_SUCCESS);
}
| 30.114155 | 82 | 0.443366 | [
"3d"
] |
df7f045dd96a754281336817c63eb3b2577a6268 | 5,575 | h | C | source/EVector4.h | narbys/SoftwareRaytracer | a40894a4900558c77f5ae7fd47fe4a203ae64c48 | [
"Unlicense"
] | 1 | 2021-09-29T14:37:33.000Z | 2021-09-29T14:37:33.000Z | source/EVector4.h | narbys/SoftwareRaytracer | a40894a4900558c77f5ae7fd47fe4a203ae64c48 | [
"Unlicense"
] | null | null | null | source/EVector4.h | narbys/SoftwareRaytracer | a40894a4900558c77f5ae7fd47fe4a203ae64c48 | [
"Unlicense"
] | null | null | null | /*=============================================================================*/
// Copyright 2019 Elite Engine 2.0
// Authors: Matthieu Delaere
/*=============================================================================*/
// EVector4.h: Vector4D struct
/*=============================================================================*/
#ifndef ELITE_MATH_VECTOR4
#define ELITE_MATH_VECTOR4
#include <type_traits>

#include "EVector.h"
#include "EPoint.h"
#include "EMathUtilities.h"
namespace Elite
{
//=== VECTOR4 SPECIALIZATION ===
template<typename T>
struct Vector<4, T>
{
//=== Data ===
#pragma warning(disable : 4201)
union
{
T data[4];
struct { T x, y, z, w; };
struct { T r, g, b, a; };
Vector<2, T> xy;
Vector<3, T> xyz;
Vector<2, T> rg;
Vector<3, T> rgb;
};
#pragma warning(default : 4201)
//=== Constructors ===
#pragma region Constructors
Vector<4, T>() = default;
Vector<4, T>(T _x, T _y, T _z, T _w = 0) //W component of Vector is usually 0
: x(_x), y(_y), z(_z), w(_w) {}
Vector<4, T>(const Vector<2, T> v, T _z, T _w = 0)
: x(v.x), y(v.y), z(_z), w(_w) {}
Vector<4, T>(const Vector<3, T> v, T _w = 0)
: x(v.x), y(v.y), z(v.z), w(_w) {}
Vector<4, T>(const Vector<4, T>& v)
: x(v.x), y(v.y), z(v.z), w(v.w) {}
Vector<4, T>(Vector<4, T>&& v) noexcept
:x(std::move(v.x)), y(std::move(v.y)), z(std::move(v.z)), w(std::move(v.w)) {}
explicit Vector<4, T>(const Point<4, T>& p)
: x(p.x), y(p.y), z(p.z), w(p.w) {}
#pragma endregion
//=== Conversion Operator ===
#pragma region ConversionOperator
template<typename U>
operator Vector<4, U>() const //Implicit conversion to different types of Vector4
{
return Vector<4, U>(
static_cast<U>(this->x),
static_cast<U>(this->y),
static_cast<U>(this->z),
static_cast<U>(this->w));
}
#pragma endregion
//=== Arithmetic Operators ===
#pragma region ArithmeticOperators
template<typename U>
inline Vector<4, T> operator+(const Vector<4, U>& v) const
{ return Vector<4, T>(x + static_cast<T>(v.x), y + static_cast<T>(v.y),
z + static_cast<T>(v.z), w + static_cast<T>(v.w)); }
template<typename U>
inline Vector<4, T> operator-(const Vector<4, U>& v) const
{ return Vector<4, T>(x - static_cast<T>(v.x), y - static_cast<T>(v.y),
z - static_cast<T>(v.z), w - static_cast<T>(v.w)); }
inline Vector<4, T> operator*(T scale) const
{ return Vector<4, T>(x * scale, y * scale, z * scale, w * scale); }
inline Vector<4, T> operator/(T scale) const
{
const T revS = static_cast<T>(1.0f / scale);
return Vector<4, T>(x * revS, y * revS, z * revS, w * revS);
}
#pragma endregion
//=== Compound Assignment Operators ===
#pragma region CompoundAssignmentOperators
inline Vector<4, T>& operator=(const Vector<4, T>& v)
{ x = v.x; y = v.y; z = v.z; w = v.w; return *this; }
inline Vector<4, T>& operator+=(const Vector<4, T>& v)
{ x += v.x; y += v.y; z += v.z; w += v.w; return *this; }
inline Vector<4, T>& operator-=(const Vector<4, T>& v)
{ x -= v.x; y -= v.y; z -= v.z; w -= v.w; return *this; }
inline Vector<4, T>& operator*=(T scale)
{ x *= scale; y *= scale; z *= scale; w *= scale; return *this; }
inline Vector<4, T>& operator/=(T scale)
{
const T revS = static_cast<T>(1.0f / scale);
x *= revS; y *= revS; z *= revS; w *= revS; return *this;
}
#pragma endregion
//=== Unary Operators ===
#pragma region UnaryOperators
inline Vector<4, T> operator-() const
{ return Vector<4, T>(-x, -y, -z, -w); }
#pragma endregion
//=== Relational Operators ===
#pragma region RelationalOperators
inline bool operator==(const Vector<4, T>& v) const
{ return AreEqual<T>(x, v.x) && AreEqual<T>(y, v.y) && AreEqual<T>(z, v.z) && AreEqual<T>(w, v.w); }
inline bool operator!=(const Vector<4, T>& v) const
{ return !(*this == v); }
#pragma endregion
//=== Member Access Operators ===
#pragma region MemberAccessOperators
inline T operator[](uint8_t i) const
{
assert((i < 4) && "ERROR: index of Vector4 [] operator is out of bounds!");
return data[i];
}
inline T& operator[](uint8_t i)
{
assert((i < 4) && "ERROR: index of Vector4 [] operator is out of bounds!");
return data[i];
}
#pragma endregion
//=== Static Functions ===
static Vector<4, T> ZeroVector();
};
//--- VECTOR4 FUNCTIONS ---
#pragma region GlobalOperators
template<typename T, typename U>
inline Vector<4, T> operator*(U scale, const Vector<4, T>& v)
{
T s = static_cast<T>(scale);
return Vector<4, T>(v.x * s, v.y * s, v.z * s, v.w * s);
}
#pragma endregion
#pragma region GlobalFunctions
	// Out-of-class definition of the static zero-vector factory.
	template<typename T>
	inline Vector<4, T> Vector<4, T>::ZeroVector()
	{
		const T zero = static_cast<T>(0);
		return Vector<4, T>(zero, zero, zero, zero);
	}
	// 4D dot product (the w component participates).
	template<typename T>
	inline T Dot(const Vector<4, T>& v1, const Vector<4, T>& v2)
	{
		T sum = v1.x * v2.x;
		sum += v1.y * v2.y;
		sum += v1.z * v2.z;
		sum += v1.w * v2.w;
		return sum;
	}
	// Component-wise absolute value; relies on an unqualified abs() overload
	// being visible for T, exactly as the surrounding code does.
	template<typename T>
	inline Vector<4, T> GetAbs(const Vector<4, T>& v)
	{
		return Vector<4, T>(abs(v.x), abs(v.y), abs(v.z), abs(v.w));
	}
	// Component-wise maximum of two vectors.
	template<typename T>
	inline Vector<4, T> Max(const Vector<4, T>& v1, const Vector<4, T>& v2)
	{
		return Vector<4, T>(
			(v2.x > v1.x) ? v2.x : v1.x,
			(v2.y > v1.y) ? v2.y : v1.y,
			(v2.z > v1.z) ? v2.z : v1.z,
			(v2.w > v1.w) ? v2.w : v1.w);
	}
	// Component-wise minimum of two vectors.
	template<typename T>
	inline Vector<4, T> Min(const Vector<4, T>& v1, const Vector<4, T>& v2)
	{
		return Vector<4, T>(
			(v2.x < v1.x) ? v2.x : v1.x,
			(v2.y < v1.y) ? v2.y : v1.y,
			(v2.z < v1.z) ? v2.z : v1.z,
			(v2.w < v1.w) ? v2.w : v1.w);
	}
#pragma endregion
}
#endif | 29.036458 | 102 | 0.565022 | [
"vector"
] |
df89df047bbdf1ac3a09bc18bd906ebb7bcd2a47 | 3,744 | h | C | src/mongo/db/security.h | sayfullah/MongoPi | 8205c77e634a2db210938ae97dbe09fdaaa43736 | [
"Apache-2.0"
] | 65 | 2015-01-22T00:03:44.000Z | 2021-11-12T22:44:01.000Z | src/mongo/db/security.h | sayfullah/MongoPi | 8205c77e634a2db210938ae97dbe09fdaaa43736 | [
"Apache-2.0"
] | 1 | 2016-08-24T12:36:09.000Z | 2016-08-24T13:34:10.000Z | src/mongo/db/security.h | sayfullah/MongoPi | 8205c77e634a2db210938ae97dbe09fdaaa43736 | [
"Apache-2.0"
] | 18 | 2015-02-05T01:56:43.000Z | 2019-10-28T07:56:32.000Z | // security.h
/**
* Copyright (C) 2009 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "mongo/db/authlevel.h"
#include "mongo/db/nonce.h"
#include "mongo/db/security_common.h"
#include "mongo/util/concurrency/spin_lock.h"
// this is used by both mongos and mongod
namespace mongo {
/** An AuthenticationInfo object is present within every mongo::Client object */
class AuthenticationInfo : boost::noncopyable {
bool _isLocalHost;
bool _isLocalHostAndLocalHostIsAuthorizedForAll;
public:
void startRequest(); // need to call at the beginning of each request
void setIsALocalHostConnectionWithSpecialAuthPowers(); // called, if localhost, when conneciton established.
AuthenticationInfo() {
_isLocalHost = false;
_isLocalHostAndLocalHostIsAuthorizedForAll = false;
}
~AuthenticationInfo() {}
bool isLocalHost() const { return _isLocalHost; } // why are you calling this? makes no sense to be externalized
// -- modifiers ----
void logout(const string& dbname ) {
scoped_spinlock lk(_lock);
_dbs.erase(dbname);
}
void authorize(const string& dbname , const string& user ) {
scoped_spinlock lk(_lock);
_dbs[dbname].level = Auth::WRITE;
_dbs[dbname].user = user;
}
void authorizeReadOnly(const string& dbname , const string& user ) {
scoped_spinlock lk(_lock);
_dbs[dbname].level = Auth::READ;
_dbs[dbname].user = user;
}
// -- accessors ---
bool isAuthorized(const string& dbname) const {
return _isAuthorized( dbname, Auth::WRITE );
}
bool isAuthorizedReads(const string& dbname) const {
return _isAuthorized( dbname, Auth::READ );
}
/**
* @param lockType - this is from dbmutex 1 is write, 0 is read
*/
bool isAuthorizedForLock(const string& dbname, int lockType ) const {
return _isAuthorized( dbname , lockType > 0 ? Auth::WRITE : Auth::READ );
}
bool isAuthorizedForLevel( const string& dbname , Auth::Level level ) const {
return _isAuthorized( dbname , level );
}
string getUser( const string& dbname ) const;
void print() const;
private:
void _checkLocalHostSpecialAdmin();
/** takes a lock */
bool _isAuthorized(const string& dbname, Auth::Level level) const;
bool _isAuthorizedSingle_inlock(const string& dbname, Auth::Level level) const;
/** cannot call this locked */
bool _isAuthorizedSpecialChecks( const string& dbname ) const ;
private:
// while most access to _dbs is from our thread (the TLS thread), currentOp() inspects
// it too thus we need this
mutable SpinLock _lock;
// todo: caching should not last forever
typedef map<string,Auth> MA;
MA _dbs; // dbname -> auth
static bool _warned;
};
} // namespace mongo
| 34.036364 | 120 | 0.627938 | [
"object"
] |
d4c28a5a8c820e31a545b9fd75b7e866bff16c96 | 851,668 | c | C | sdk-6.5.16/src/bcm/esw/katana2/oam.c | copslock/broadcom_cpri | 8e2767676e26faae270cf485591902a4c50cf0c5 | [
"Spencer-94"
] | null | null | null | sdk-6.5.16/src/bcm/esw/katana2/oam.c | copslock/broadcom_cpri | 8e2767676e26faae270cf485591902a4c50cf0c5 | [
"Spencer-94"
] | null | null | null | sdk-6.5.16/src/bcm/esw/katana2/oam.c | copslock/broadcom_cpri | 8e2767676e26faae270cf485591902a4c50cf0c5 | [
"Spencer-94"
] | null | null | null | /*
* This license is set out in https://raw.githubusercontent.com/Broadcom-Network-Switching-Software/OpenBCM/master/Legal/LICENSE file.
*
* Copyright 2007-2019 Broadcom Inc. All rights reserved.
*
* File:
* oam.c
*
* Purpose:
* OAM implementation for Triumph3 family of devices.
*/
#include <shared/bsl.h>
#include <sal/core/libc.h>
#include <soc/defs.h>
#include <soc/drv.h>
#include <soc/scache.h>
#include <soc/profile_mem.h>
#include <soc/hash.h>
#include <soc/l3x.h>
#include <soc/katana2.h>
#include <soc/ism_hash.h>
#include <bcm/l3.h>
#include <bcm/oam.h>
#include <bcm/cosq.h>
#include <bcm_int/esw/oam.h>
#include <bcm_int/esw/port.h>
#include <bcm_int/esw/l3.h>
#include <bcm_int/esw/switch.h>
#include <bcm_int/esw/katana2.h>
#include <bcm_int/esw/virtual.h>
#include <bcm_int/esw_dispatch.h>
#if defined(BCM_KATANA2_SUPPORT)
#include <bcm_int/esw/stack.h>
#include <bcm_int/esw/subport.h>
#if defined(INCLUDE_BHH)
#include <bcm_int/esw/bhh.h>
#include <bcm_int/esw/bhh_sdk_pack.h>
#include <soc/uc.h>
#include <soc/shared/oam_pm_shared.h>
#include <soc/shared/oam_pm_pack.h>
#endif /* INCLUDE_BHH */
/*
 * Device OAM control structure.
 * Per-unit pointer array indexed by BCM unit number; a NULL entry means the
 * OAM module has not been initialized on that unit (checked by the
 * _BCM_OAM_IS_INIT macro below).
 */
_bcm_oam_control_t *_kt2_oam_control[SOC_MAX_NUM_DEVICES];
/* * * * * * * * * * * * * * * * * * *
 *          OAM MACROS               *
 */
/*
 * Macro:
 *     _BCM_OAM_IS_INIT (internal)
 * Purpose:
 *     Check that the unit is valid and confirm that the oam functions
 *     are initialized.
 * Parameters:
 *     unit - BCM device number
 * Notes:
 *     Results in return(BCM_E_UNAVAIL) when the device lacks the OAM
 *     feature, or return(BCM_E_INIT) when the per-unit control structure
 *     is still NULL. Only usable inside functions that return a BCM
 *     error code.
 */
#define _BCM_OAM_IS_INIT(unit)                                               \
            do {                                                             \
                if (!soc_feature(unit, soc_feature_oam)) {                   \
                    return (BCM_E_UNAVAIL);                                  \
                }                                                            \
                if (_kt2_oam_control[unit] == NULL) {                        \
                    LOG_ERROR(BSL_LS_BCM_OAM, \
                              (BSL_META_U(unit, \
                                          "OAM Error: Module not initialized\n"))); \
                    return (BCM_E_INIT);                                     \
                }                                                            \
            } while (0)
/*
 * Macro:
 *     _BCM_OAM_LOCK
 * Purpose:
 *     Take the OAM control mutex (waits forever until acquired).
 * Parameters:
 *     control - Pointer to OAM control structure.
 */
#define _BCM_OAM_LOCK(control) \
            sal_mutex_take((control)->oc_lock, sal_mutex_FOREVER)
/*
 * Macro:
 *     _BCM_OAM_UNLOCK
 * Purpose:
 *     Release the OAM control mutex taken with _BCM_OAM_LOCK.
 * Parameters:
 *     control - Pointer to OAM control structure.
 * Notes:
 *     Unlike _BCM_OAM_LOCK, the expansion already ends with a semicolon.
 */
#define _BCM_OAM_UNLOCK(control) \
            sal_mutex_give((control)->oc_lock);
/*
 * Macro:
 *     _BCM_OAM_HASH_DATA_CLEAR
 * Purpose:
 *     Clear hash data memory occupied by one endpoint.
 * Parameters:
 *     _ptr_ - Pointer to endpoint hash data memory.
 */
#define _BCM_OAM_HASH_DATA_CLEAR(_ptr_) \
            sal_memset(_ptr_, 0, sizeof(_bcm_oam_hash_data_t));
/*
 * Macro:
 *     _BCM_OAM_HASH_DATA_HW_IDX_INIT
 * Purpose:
 *     Initialize hardware indices to invalid index for an endpoint hash data.
 * Parameters:
 *     _ptr_ - Pointer to endpoint hash data memory.
 */
#define _BCM_OAM_HASH_DATA_HW_IDX_INIT(_ptr_)                          \
    do {                                                               \
            (_ptr_)->group_index = (_BCM_OAM_INVALID_INDEX);           \
            (_ptr_)->remote_index = (_BCM_OAM_INVALID_INDEX);          \
            (_ptr_)->profile_index = (_BCM_OAM_INVALID_INDEX);         \
            (_ptr_)->pri_map_index = (_BCM_OAM_INVALID_INDEX);         \
            (_ptr_)->lm_counter_index = (_BCM_OAM_INVALID_INDEX);      \
            (_ptr_)->local_tx_index = (_BCM_OAM_INVALID_INDEX);        \
            (_ptr_)->local_rx_index = (_BCM_OAM_INVALID_INDEX);        \
    } while (0)
/*
 * Macro:
 *     _BCM_OAM_ALLOC
 * Purpose:
 *     Generic memory allocation routine.
 * Parameters:
 *     _ptr_   - Pointer to allocated memory.
 *     _ptype_ - Pointer type.
 *     _size_  - Size of heap memory to be allocated.
 *     _descr_ - Information about this memory allocation.
 * Notes:
 *     Allocates only when _ptr_ is NULL (a pre-set pointer is kept and just
 *     zero-filled). On success the memory is zeroed; on failure an error is
 *     logged and _ptr_ stays NULL -- the caller must still check _ptr_.
 */
#define _BCM_OAM_ALLOC(_ptr_,_ptype_,_size_,_descr_)                  \
            do {                                                      \
                if (NULL == (_ptr_)) {                                \
                    (_ptr_) = (_ptype_ *) sal_alloc((_size_), (_descr_)); \
                }                                                     \
                if((_ptr_) != NULL) {                                 \
                    sal_memset((_ptr_), 0, (_size_));                 \
                } else {                                              \
                    LOG_ERROR(BSL_LS_BCM_OAM, \
                              (BSL_META("OAM Error: Allocation failure %s\n"), \
                               (_descr_))); \
                }                                                     \
            } while (0)
/*
 * Macro:
 *     _BCM_OAM_GROUP_INDEX_VALIDATE
 * Purpose:
 *     Validate OAM Group ID value.
 * Parameters:
 *     _group_ - Group ID value.
 * Notes:
 *     Expects a local '_bcm_oam_control_t *oc' in scope; on failure, logs
 *     and returns BCM_E_PARAM from the enclosing function. The expansion
 *     ends with a semicolon ("while (0);").
 */
#define _BCM_OAM_GROUP_INDEX_VALIDATE(_group_)                               \
            do {                                                             \
                if ((_group_) < 0 || (_group_) >= oc->group_count) {         \
                    LOG_ERROR(BSL_LS_BCM_OAM, \
                              (BSL_META("OAM Error: Invalid Group ID = %d.\n"), \
                               _group_)); \
                    return (BCM_E_PARAM); \
                } \
            } while (0);
/*
 * Macro:
 *     _BCM_OAM_EP_INDEX_VALIDATE
 * Purpose:
 *     Validate OAM Endpoint ID value.
 * Parameters:
 *     _ep_ - Endpoint ID value.
 * Notes:
 *     Same usage constraints as _BCM_OAM_GROUP_INDEX_VALIDATE above.
 */
#define _BCM_OAM_EP_INDEX_VALIDATE(_ep_)                                    \
            do {                                                            \
                if ((_ep_) < 0 || (_ep_) >= oc->ep_count) {                 \
                    LOG_ERROR(BSL_LS_BCM_OAM, \
                              (BSL_META("OAM Error: Invalid Endpoint ID" \
                                        " = %d.\n"), _ep_)); \
                    return (BCM_E_PARAM); \
                } \
            } while (0);
/*
 * Macro:
 *     _BCM_OAM_RMEP_INDEX_VALIDATE
 * Purpose:
 *     Validate OAM remote endpoint (RMEP) hardware index value.
 * Parameters:
 *     _ep_ - RMEP index value.
 * Notes:
 *     Same usage constraints as _BCM_OAM_GROUP_INDEX_VALIDATE above.
 */
#define _BCM_OAM_RMEP_INDEX_VALIDATE(_ep_)                                  \
            do {                                                            \
                if ((_ep_) < 0 || (_ep_) >= oc->rmep_count) {               \
                    LOG_ERROR(BSL_LS_BCM_OAM, \
                              (BSL_META("OAM Error: Invalid RMEP Index" \
                                        " = %d.\n"), _ep_)); \
                    return (BCM_E_PARAM); \
                } \
            } while (0);
/*
 * Macro:
 *     _BCM_OAM_KEY_PACK
 * Purpose:
 *     Pack the hash table look up key fields.
 * Parameters:
 *     _dest_ - Hash key buffer.
 *     _src_  - Hash key field to be packed.
 *     _size_ - Hash key field size in bytes.
 * Notes:
 *     Advances _dest_ past the copied field so successive invocations
 *     append fields back to back.
 */
#define _BCM_OAM_KEY_PACK(_dest_,_src_,_size_) \
            do { \
                sal_memcpy((_dest_), (_src_), (_size_)); \
                (_dest_) += (_size_); \
            } while (0)
/*
 * Macro:
 *     _BCM_KT2_OAM_MOD_PORT_TO_GLP
 * Purpose:
 *     Construct hardware GLP value from module ID, port ID and Trunk ID value.
 * Parameters:
 *     _u_    - BCM device number.
 *     _m_    - Module ID.
 *     _p_    - Port ID.
 *     _t_    - Trunk (1 - TRUE/0 - FALSE).
 *     _tgid_ - Trunk ID (-1 when the target is not a trunk).
 *     _glp_  - (OUT) resulting generic logical port value.
 */
#define _BCM_KT2_OAM_MOD_PORT_TO_GLP(_u_, _m_, _p_, _t_, _tgid_, _glp_)      \
    do {                                                                     \
        if ((_tgid_) != -1) {                                                \
            (_glp_) = (((0x1 & (_t_)) << SOC_TRUNK_BIT_POS(_u_))             \
                | ((soc_mem_index_count((_u_), TRUNK_GROUPm) - 1)            \
                & (_tgid_)));                                                \
        } else {                                                             \
            (_glp_) = (((0x1 & (_t_)) << SOC_TRUNK_BIT_POS(_u_))             \
                | ((SOC_MODID_MAX(_u_) & (_m_))                              \
                << (_shr_popcount((unsigned int) SOC_PORT_ADDR_MAX(_u_)))    \
                | (SOC_PORT_ADDR_MAX(_u_) & (_p_))));                        \
        }                                                                    \
        LOG_DEBUG(BSL_LS_BCM_OAM, \
                  (BSL_META("u:%d m:%d p:%d t:%d tgid:%d glp:%x\n"), \
                   _u_, _m_, _p_, _t_, _tgid_, _glp_)); \
    } while (0)
/*
 * Macro:
 *     _BCM_OAM_GLP_XXX
 * Purpose:
 *     Get components of generic logical port value.
 *     As encoded by these getters: bit 15 = trunk flag, bits 14..7 =
 *     module ID, bits 6..0 = port; for trunks bits 7..0 = trunk ID.
 * Parameters:
 *     _glp_ - Generic logical port.
 */
#define _BCM_OAM_GLP_TRUNK_BIT_GET(_glp_)  (0x1 & ((_glp_) >> 15))
#define _BCM_OAM_GLP_TRUNK_ID_GET(_glp_)   (0xFF & (_glp_))
#define _BCM_OAM_GLP_MODULE_ID_GET(_glp_)  (0xFF & ((_glp_) >> 7))
#define _BCM_OAM_GLP_PORT_GET(_glp_)       (0x7F & (_glp_))
/*
 * Macro:
 *     _BCM_OAM_EP_LEVEL_XXX
 * Purpose:
 *     Maintenance domain level count and maximum level value, derived
 *     from _BCM_OAM_EP_LEVEL_BIT_COUNT.
 */
#define _BCM_OAM_EP_LEVEL_COUNT (1 << (_BCM_OAM_EP_LEVEL_BIT_COUNT))
#define _BCM_OAM_EP_LEVEL_MAX   (_BCM_OAM_EP_LEVEL_COUNT - 1)
/* TRUE when the endpoint is any BHH/MPLS type (LSP, VCCV or one of the
 * section variants); 'ep' is a bcm_oam_endpoint_info_t pointer. */
#define BHH_EP_TYPE(ep) ( (ep->type == bcmOAMEndpointTypeBHHMPLS) || \
        (ep->type == bcmOAMEndpointTypeBHHMPLSVccv) || \
        (ep->type == bcmOAMEndpointTypeBhhSection)||\
        (ep->type == bcmOAMEndpointTypeBhhSectionInnervlan)||\
        (ep->type == bcmOAMEndpointTypeBhhSectionOuterVlan)||\
        (ep->type == bcmOAMEndpointTypeBhhSectionOuterPlusInnerVlan)\
        )
/* TRUE when the endpoint is one of the BHH section types only. */
#define BHH_EP_MPLS_SECTION_TYPE(ep) (\
        (ep->type == bcmOAMEndpointTypeBhhSection)||\
        (ep->type == bcmOAMEndpointTypeBhhSectionInnervlan)||\
        (ep->type == bcmOAMEndpointTypeBhhSectionOuterVlan)||\
        (ep->type == bcmOAMEndpointTypeBhhSectionOuterPlusInnerVlan)\
        )
/* TRUE for plain Ethernet (CFM) endpoint types. */
#define ETH_TYPE(type) (type == bcmOAMEndpointTypeEthernet)
/* Checks hash_data pointer's int_flags field */
#define _BCM_OAM_EP_IS_VP_TYPE(h_data_p) (h_data_p->int_flags &\
        _BCM_OAM_ENDPOINT_IS_VP_BASED)
/* TRUE when the endpoint info requests a MIP (intermediate point). */
#define _BCM_OAM_EP_IS_MIP(info) (info->flags & BCM_OAM_ENDPOINT_INTERMEDIATE)
/*
 * Macro:
 *     BCM_WB_XXX
 * Purpose:
 *     OAM module scache version information.
 * Parameters:
 *     (major number, minor number)
 */
#ifdef BCM_WARM_BOOT_SUPPORT
#define BCM_WB_VERSION_1_0      SOC_SCACHE_VERSION(1,0)
#define BCM_WB_VERSION_1_1      SOC_SCACHE_VERSION(1,1)
/* This new version incorporates new MA_INDEX allocation scheme.
 * Recovery of Endpoints allocated in new scheme will be
 * done for this version or higher only */
#define BCM_WB_VERSION_1_2      SOC_SCACHE_VERSION(1,2)
/* This new version saves the SW endpoint ids for OAM.
 * Recovery of Endpoints allocated in new scheme will be
 * done for this version or higher only */
#define BCM_WB_VERSION_1_3      SOC_SCACHE_VERSION(1,3)
#define BCM_WB_VERSION_1_4      SOC_SCACHE_VERSION(1,4)
#define BCM_WB_VERSION_1_5      SOC_SCACHE_VERSION(1,5)
#define BCM_WB_DEFAULT_VERSION  BCM_WB_VERSION_1_5
#endif
/* Size in bytes of the MA_INDEX -> endpoint-id mapping table: one
 * bcm_oam_endpoint_t per ingress and per egress MA_INDEX entry. All
 * macros below expect a local '_bcm_oam_control_t *oc' in scope. */
#define _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_TBL_SIZE \
    (sizeof(bcm_oam_endpoint_t) * (oc->ma_idx_count + oc->egr_ma_idx_count))
/* Index into the mapping table for a down-MEP (ingress MA_INDEX space). */
#define _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_DOWNMEP_IDX(ma_base_index, ma_offset)\
    (ma_base_index + ma_offset)
/* Index for an up-MEP: egress entries follow the ingress block. */
#define _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_UPMEP_IDX(ma_base_index, ma_offset)\
    (oc->ma_idx_count + ma_base_index + ma_offset)
/* Direction-selecting wrapper around the two index macros above. */
#define _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_IDX(ma_base_index, ma_offset, is_upmep)\
    ((is_upmep)?\
     _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_UPMEP_IDX(ma_base_index, ma_offset)\
     :_BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_DOWNMEP_IDX(ma_base_index, ma_offset))
/* Macros for endpoint id locations for different endpoint types. */
/* Total scache space for saved endpoint ids; the LOCATION macros below lay
 * the ids out as Rx down-MEPs, Rx up-MEPs, RMEPs, then Tx MEPs. */
#define _BCM_KT2_OAM_ENDPOINT_ID_SCACHE_SIZE \
    (sizeof(bcm_oam_endpoint_t) * (oc->rmep_count + oc->lmep_count + oc->ma_idx_count + oc->egr_ma_idx_count))
/* Byte offset of a down-MEP Rx endpoint id within the scache region. */
#define _BCM_KT2_OAM_RX_DOWNMEP_SCACHE_LOCATION(mep_index) \
    (sizeof(bcm_oam_endpoint_t) * mep_index)
/* Byte offset of an up-MEP Rx endpoint id (after all down-MEP slots). */
#define _BCM_KT2_OAM_RX_UPMEP_SCACHE_LOCATION(mep_index) \
    (sizeof(bcm_oam_endpoint_t) * (oc->ma_idx_count + mep_index))
/* Byte offset of a remote-MEP endpoint id (after all Rx MEP slots). */
#define _BCM_KT2_OAM_RMEP_SCACHE_LOCATION(rmep_index) \
    (sizeof(bcm_oam_endpoint_t) * (oc->ma_idx_count + oc->egr_ma_idx_count + rmep_index))
/* Byte offset of a Tx (local MEP) endpoint id (after Rx and RMEP slots). */
#define _BCM_KT2_OAM_TX_MEP_SCACHE_LOCATION(mep_index) \
    (sizeof(bcm_oam_endpoint_t) * (oc->ma_idx_count + oc->egr_ma_idx_count + oc->rmep_count + mep_index))
/* Vlans will be programmed in ING/EGR MP_GROUP table depending on MEP_TYPE */
#define _BCM_KT2_ETH_OAM_MP_GROUP_KEY_DOMAIN_SPECIFIC 0
/* Inner & Outer Vlan will be always programmed in ING/EGR MP_GROUP table */
#define _BCM_KT2_ETH_OAM_MP_GROUP_KEY_DOMAIN_INDEPENDANT 1
/* Defined for OAM opcode group 1 and 2 */
#define _BCM_OAM_OTHER_OPCODE_GROUP_1 0x1
#define _BCM_OAM_OTHER_OPCODE_GROUP_2 0x2
/* Define the port used in OLP-XGS communication */
#define _BCM_OAM_OLP_COMMUNICATION_PORT 0x7f
/*
* Device OAM control structure.
*/
_bcm_oam_control_t *_kt2_oam_control[SOC_MAX_NUM_DEVICES];
#if defined (INCLUDE_BHH)
typedef struct _bcm_oam_pm_profile_int_info_s {
int id_status;
bcm_oam_pm_profile_info_t pm_profile;
}_bcm_oam_pm_profile_int_info_t;
typedef struct _bcm_oam_pm_profile_control_s {
_bcm_oam_pm_profile_int_info_t profile_info[_BCM_OAM_MAX_PM_PROFILES];
}_bcm_oam_pm_profile_control_t;
_bcm_oam_pm_profile_control_t *kt2_pm_profile_control[SOC_MAX_NUM_DEVICES];
#define _BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, id) (pmc->profile_info[id].id_status == 1)
#define _BCM_KT2_PM_CTRL_PROFILE_PTR(pmc, id) (&(pmc->profile_info[id].pm_profile))
#define _BCM_KT2_SET_PM_CTRL_PROFILE_IN_USE(pmc, id) (pmc->profile_info[id].id_status = 1)
#define _BCM_KT2_SET_PM_CTRL_PROFILE_NOT_IN_USE(pmc, id) (pmc->profile_info[id].id_status = 0)
#define _BCM_KT2_PM_PROFILE_REPLACE_FLAG_SET(profile) (profile->flags & BCM_OAM_PM_PROFILE_REPLACE)
#define _BCM_KT2_PM_PROFILE_WITH_ID_FLAG_SET(profile) (profile->flags & BCM_OAM_PM_PROFILE_WITH_ID)
#define _BCM_KT2_PM_PROFILE_ID_VALID(profile_id) \
((profile_id >= 0) && (profile_id < _BCM_OAM_MAX_PM_PROFILES))
#define _BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_ENABLED(oc) \
(oc->pm_bhh_lmdm_data_collection_mode != \
_BCM_OAM_PM_COLLECTION_MODE_NONE)
#define _BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_IS_RAW_DATA(oc) \
(oc->pm_bhh_lmdm_data_collection_mode == \
_BCM_OAM_PM_COLLECTION_MODE_RAW_DATA)
#define _BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_IS_PROCESSED(oc) \
(oc->pm_bhh_lmdm_data_collection_mode == \
_BCM_OAM_PM_COLLECTION_MODE_PROCESSED_STATS)
static int kt2_oam_mpls_tp_ach_channel_type = SHR_BHH_ACH_CHANNEL_TYPE;
#else
#define _BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_ENABLED(oc) 0
#define _BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_IS_RAW_DATA(oc) 0
#define _BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_IS_PROCESSED(oc) 0
#endif
/*
 * Cached TPID table entry: mirrors one hardware TPID register slot.
 * An entry is considered free when ref_count is zero; it is recycled
 * (reset to the hardware default) when the last user deletes it.
 */
typedef struct _oam_tpid_s {
    uint32 tpid;       /* TPID value programmed in the hardware register. */
    uint32 ref_count;  /* Number of OAM users referencing this entry. */
} _oam_tpid_t;
#define BCM_MAX_TPID_ENTRIES 4
#define BCM_MAX_INNER_TPID_ENTRIES 1
#define BCM_OAM_TPID_TYPE_OUTER 0
#define BCM_OAM_TPID_TYPE_INNER 1
#define BCM_OAM_TPID_TYPE_SUBPORT 2
#define BCM_OAM_TPID_VALUE_ZERO 0
#define BCM_OAM_DEFAULT_TPID 0x8100
#define BCM_OAM_TPID_9100 0x9100
#define BCM_OAM_TPID_88A8 0x88A8
#define DGLP_LAG_ID_INDICATOR_SHIFT_BITS 15
#define DGLP_MODULE_ID_SHIFT_BITS 7
#define _BCM_OAM_OPCODE_TYPE_CCM 0x1
#define _BCM_OAM_OPCODE_TYPE_LBR 0x2
#define _BCM_OAM_OPCODE_TYPE_LBM 0x3
#define _BCM_OAM_OPCODE_TYPE_LTR 0x4
#define _BCM_OAM_OPCODE_TYPE_LTM 0x5
#define _BCM_OAM_OPCODE_TYPE_NON_CFM_FIRST 0x6
#define _BCM_OAM_OPCODE_TYPE_NON_CFM_LAST 0xFF
#define _BCM_OAM_OPCODE_TYPE_CFM_MASK 0x3E
#define _BCM_OAM_OPCODE_TYPE_NON_CFM_MASK 0xFFFFFFFF
#define _BCM_OAM_DGLP1_PROFILE_PTR 0x1
#define _BCM_OAM_DGLP2_PROFILE_PTR 0x2
#define _BCM_OAM_LOW_MDL_DROP_PACKET 0x2
#define _BCM_OAM_FWD_AS_DATA 0x0
#define _BCM_OAM_DROP_REDIRECT_T0_DGLP1 0x2
#define _BCM_OAM_DROP_REDIRECT_T0_DGLP2 0x3
#define _BCM_OAM_SERVICE_PRI_MAX_OFFSET 0x7
STATIC _oam_tpid_t
(*_oam_outer_tpid_tab[BCM_MAX_NUM_UNITS])[BCM_MAX_TPID_ENTRIES];
STATIC _oam_tpid_t
(*_oam_inner_tpid_tab[BCM_MAX_NUM_UNITS])[BCM_MAX_INNER_TPID_ENTRIES];
STATIC _oam_tpid_t
(*_oam_subport_tpid_tab[BCM_MAX_NUM_UNITS])[BCM_MAX_TPID_ENTRIES];
STATIC sal_mutex_t _kt2_outer_tpid_lock[BCM_MAX_NUM_UNITS];
STATIC sal_mutex_t _kt2_inner_tpid_lock[BCM_MAX_NUM_UNITS];
STATIC sal_mutex_t _kt2_subport_tpid_lock[BCM_MAX_NUM_UNITS];
/* OAM TPID registers and default values */
STATIC int outer_tpid[4] = { OUTER_TPID_0r, OUTER_TPID_1r,
OUTER_TPID_2r, OUTER_TPID_3r };
STATIC int default_outer_tpid[4] = { BCM_OAM_DEFAULT_TPID, BCM_OAM_TPID_9100,
BCM_OAM_TPID_88A8, BCM_OAM_TPID_VALUE_ZERO};
STATIC int subport_tpid[4] = { SUBPORT_TAG_TPID_0r, SUBPORT_TAG_TPID_1r,
SUBPORT_TAG_TPID_2r, SUBPORT_TAG_TPID_3r };
#define KT2_OAM_OUTER_TPID_TAB(unit) _oam_outer_tpid_tab[unit]
#define KT2_OAM_INNER_TPID_TAB(unit) _oam_inner_tpid_tab[unit]
#define KT2_OAM_SUBPORT_TPID_TAB(unit) _oam_subport_tpid_tab[unit]
#define KT2_OAM_OUTER_TPID_ENTRY(unit, index) \
((*_oam_outer_tpid_tab[unit])[index].tpid)
#define KT2_OAM_INNER_TPID_ENTRY(unit, index) \
((*_oam_inner_tpid_tab[unit])[index].tpid)
#define KT2_OAM_SUBPORT_TPID_ENTRY(unit, index) \
((*_oam_subport_tpid_tab[unit])[index].tpid)
#define KT2_OAM_OUTER_TPID_REF_COUNT(unit, index) \
((*_oam_outer_tpid_tab[unit])[index].ref_count)
#define KT2_OAM_INNER_TPID_REF_COUNT(unit, index) \
((*_oam_inner_tpid_tab[unit])[index].ref_count)
#define KT2_OAM_SUBPORT_TPID_REF_COUNT(unit, index) \
((*_oam_subport_tpid_tab[unit])[index].ref_count)
#define KT2_OAM_OUTER_TPID_TAB_INIT_CHECK(unit) \
if (KT2_OAM_OUTER_TPID_TAB(unit) == NULL) { return BCM_E_INIT; }
#define KT2_OAM_INNER_TPID_TAB_INIT_CHECK(unit) \
if (KT2_OAM_INNER_TPID_TAB(unit) == NULL) { return BCM_E_INIT; }
#define KT2_OAM_SUBPORT_TPID_TAB_INIT_CHECK(unit) \
if (KT2_OAM_SUBPORT_TPID_TAB(unit) == NULL) { return BCM_E_INIT; }
#define BCM_KT2_OUTER_TPID_MUTEX(_u_) _kt2_outer_tpid_lock[_u_]
#define BCM_KT2_INNER_TPID_MUTEX(_u_) _kt2_inner_tpid_lock[_u_]
#define BCM_KT2_SUBPORT_TPID_MUTEX(_u_) _kt2_subport_tpid_lock[_u_]
#define BCM_KT2_OUTER_TPID_LOCK(_u_) \
((_kt2_outer_tpid_lock[_u_]) ? \
sal_mutex_take(_kt2_outer_tpid_lock[_u_], sal_mutex_FOREVER) : \
(BCM_E_INTERNAL))
#define BCM_KT2_INNER_TPID_LOCK(_u_) \
((_kt2_inner_tpid_lock[_u_]) ? \
sal_mutex_take(_kt2_inner_tpid_lock[_u_], sal_mutex_FOREVER) : \
(BCM_E_INTERNAL))
#define BCM_KT2_SUBPORT_TPID_LOCK(_u_) \
((_kt2_subport_tpid_lock[_u_]) ? \
sal_mutex_take(_kt2_subport_tpid_lock[_u_], sal_mutex_FOREVER) : \
(BCM_E_INTERNAL))
#define BCM_KT2_OUTER_TPID_UNLOCK(_u_) \
((_kt2_outer_tpid_lock[_u_]) ? \
sal_mutex_give(_kt2_outer_tpid_lock[_u_]) : \
(BCM_E_INTERNAL))
#define BCM_KT2_INNER_TPID_UNLOCK(_u_) \
((_kt2_inner_tpid_lock[_u_]) ? \
sal_mutex_give(_kt2_inner_tpid_lock[_u_]) : \
(BCM_E_INTERNAL))
#define BCM_KT2_SUBPORT_TPID_UNLOCK(_u_) \
((_kt2_subport_tpid_lock[_u_]) ? \
sal_mutex_give(_kt2_subport_tpid_lock[_u_]) : \
(BCM_E_INTERNAL))
/*
* ==========================================================================
* Downmep/IFP 3'b000 8'd1(L2_HDR) 8'd0(NULL)
* Downmep/IFP 3'b001 8'd0(OAM_HDR) 8'd2(CCM/BHH-CCM)
* Downmep/IFP 3'b010 8'd0(OAM_HDR) 8'd3(BFD)
* Downmep/IFP 3'b011 8'd0(OAM_HDR) 8'd4(LM/DM)
* Upmep 3'b000 N/A N/A
* Upmep 3'b001 8'd0(OAM_HDR) 8'd5(CCM)
* Upmep 3'b010 N/A N/A
* Upmep 3'b011 8'd0(OAM_HDR) 8'd6(LM/DM)
*/
#define _BCM_KT2_OLP_HDR_SUBTYPE_CCM 0x2
#define _BCM_KT2_OLP_HDR_SUBTYPE_BFD 0x3
#define _BCM_KT2_OLP_HDR_SUBTYPE_LM_DM 0x4
#define _BCM_KT2_OLP_HDR_SUBTYPE_ETH_OAM_UPMEP_CCM 0x5
#define _BCM_KT2_OLP_HDR_SUBTYPE_ETH_OAM_UPMEP_LM_DM 0x6
#define _BCM_KT2_NO_OF_BITS_COMP_HDR 3
#define _BCM_KT2_UP_MEP_OLP_HDR_TYPE_COMPRESSED_GET(comp_hdr) \
((1 << _BCM_KT2_NO_OF_BITS_COMP_HDR) | comp_hdr)
#define _BCM_KT2_CCM_OLP_HDR_TYPE_COMPRESSED 0x1
#define _BCM_KT2_BFD_OLP_HDR_TYPE_COMPRESSED 0x2
#define _BCM_KT2_LM_DM_OLP_HDR_TYPE_COMPRESSED 0x3
#define _BCM_KT2_ETH_OAM_UP_MEP_CCM_OLP_HDR_TYPE_COMPRESSED \
_BCM_KT2_UP_MEP_OLP_HDR_TYPE_COMPRESSED_GET( \
_BCM_KT2_CCM_OLP_HDR_TYPE_COMPRESSED)
#define _BCM_KT2_ETH_OAM_UP_MEP_LM_DM_OLP_HDR_TYPE_COMPRESSED \
_BCM_KT2_UP_MEP_OLP_HDR_TYPE_COMPRESSED_GET( \
_BCM_KT2_LM_DM_OLP_HDR_TYPE_COMPRESSED)
#define _BCM_KT2_OLP_OAM_HDR 0
#define _BCM_KT2_OLP_L2_HDR 1
/* No. of counter pool that can be programmed per endpoint */
#define _KT2_OAM_COUNTER_SIZE 2
/* * * * * * * * * * * * * * * * * * * * * * * * *
* OAM function prototypes *
*/
int _bcm_kt2_oam_convert_action_to_opcode_entry(
int unit,
bcm_oam_endpoint_action_t *action,
bcm_oam_endpoint_t ep_id,
void *profile,
uint8 *opcode_profile_changed);
STATIC int
_bcm_kt2_oam_ma_idx_num_entries_and_pool_get(int unit,
_bcm_oam_hash_data_t *h_data_p,
int *num_ma_idx_entries,
shr_idxres_list_handle_t *pool);
STATIC int
_bcm_kt2_oam_ma_idx_pool_create(_bcm_oam_control_t *oc);
STATIC int
_bcm_kt2_oam_ma_idx_pool_destroy(_bcm_oam_control_t *oc);
STATIC int
_bcm_kt2_oam_control_get(int unit, _bcm_oam_control_t **oc);
STATIC INLINE bcm_oam_endpoint_t
_bcm_kt2_oam_ma_idx_to_ep_id_mapping_get (_bcm_oam_control_t *oc, int ma_idx);
/* * * * * * * * * * * * * * * * * * * * * * * * *
* OAM global data initialization *
*/
#if defined(INCLUDE_BHH)
#define BHH_COSQ_INVALID 0xFFFF
/*
* Macro:
* _BCM_OAM_BHH_IS_VALID (internal)
* Purpose:
* Check that the BHH feature is available on this unit
* Parameters:
* unit - BCM device number
* Notes:
* Results in return(BCM_E_UNAVAIL),
*/
#define _BCM_OAM_BHH_IS_VALID(unit) \
do { \
if (!soc_feature(unit, soc_feature_bhh)) { \
return (BCM_E_UNAVAIL); \
} \
} while (0)
#define BCM_OAM_BHH_GET_UKERNEL_EP(ep) \
(ep - _BCM_OAM_BHH_KT2_ENDPOINT_OFFSET)
#define BCM_OAM_BHH_GET_SDK_EP(ep) \
(ep + _BCM_OAM_BHH_KT2_ENDPOINT_OFFSET)
#define BCM_OAM_BHH_VALIDATE_EP(_ep_) \
do { \
if (((_ep_) < _BCM_OAM_BHH_KT2_ENDPOINT_OFFSET) || \
((_ep_) >= (_BCM_OAM_BHH_KT2_ENDPOINT_OFFSET \
+ oc->bhh_endpoint_count))) { \
LOG_ERROR(BSL_LS_BCM_OAM, \
(BSL_META_U(unit, \
"OAM Error: Invalid Endpoint ID" \
" = %d.\n"), _ep_)); \
_BCM_OAM_UNLOCK(oc); \
return (BCM_E_PARAM); \
} \
} while (0);
#define BCM_OAM_BHH_ABS(x) (((x) < 0) ? (-(x)) : (x))
STATIC int
_bcm_kt2_oam_bhh_session_hw_delete(int unit, _bcm_oam_hash_data_t *h_data_p);
STATIC int
_bcm_kt2_oam_bhh_msg_send_receive(int unit, uint8 s_subclass,
uint16 s_len, uint32 s_data,
uint8 r_subclass, uint16 *r_len);
STATIC int
_bcm_kt2_oam_bhh_hw_init(int unit);
STATIC void
_bcm_kt2_oam_bhh_callback_thread(void *param);
STATIC int
_bcm_kt2_oam_bhh_event_mask_set(int unit);
STATIC int
bcm_kt2_oam_bhh_endpoint_create(int unit,
bcm_oam_endpoint_info_t *endpoint_info,
_bcm_oam_hash_key_t *hash_key);
STATIC int
_bcm_kt2_oam_bhh_sec_mep_alloc_counter(int unit, shr_idxres_element_t *ctr_index);
STATIC int
_bcm_kt2_oam_pm_msg_send_receive(int unit, uint8 s_class, uint8 s_subclass,
uint16 s_len, uint32 s_data,
uint8 r_subclass, uint16 *r_len);
STATIC void _bcm_kt2_oam_pm_event_msg_handle(_bcm_oam_control_t *oc,
mos_msg_data_t *event_msg);
int _bcm_kt2_oam_pm_init(int unit);
#endif /* INCLUDE_BHH */
STATIC int
bcm_kt2_oam_hw_ccm_tx_ctr_update(int unit,
bcm_oam_endpoint_info_t *ep_info);
/*
* Katana2 device OAM CCM intervals array initialization..
*/
STATIC uint32 _kt2_ccm_intervals[] =
{
BCM_OAM_ENDPOINT_CCM_PERIOD_DISABLED,
BCM_OAM_ENDPOINT_CCM_PERIOD_3MS,
BCM_OAM_ENDPOINT_CCM_PERIOD_10MS,
BCM_OAM_ENDPOINT_CCM_PERIOD_100MS,
BCM_OAM_ENDPOINT_CCM_PERIOD_1S,
BCM_OAM_ENDPOINT_CCM_PERIOD_10S,
BCM_OAM_ENDPOINT_CCM_PERIOD_1M,
BCM_OAM_ENDPOINT_CCM_PERIOD_10M,
_BCM_OAM_ENDPOINT_CCM_PERIOD_UNDEFINED
};
/*
* OAM hardware interrupts to software events mapping array initialization.
* _kt2_oam_interrupts[] =
* {Interrupt status register, Remote MEP index field, MA index field,
* CCM_INTERRUPT_CONTROLr - Interrupt status Field,
* OAM event type}.
*/
STATIC _bcm_oam_interrupt_t _kt2_oam_interrupts[] =
{
/* 1. Port down interrupt. */
{ANY_RMEP_TLV_PORT_DOWN_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_PORT_DOWN_INT_ENABLEf,
bcmOAMEventEndpointPortDown},
/* 2. Port up interrupt. */
{ANY_RMEP_TLV_PORT_UP_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_PORT_UP_INT_ENABLEf,
bcmOAMEventEndpointPortUp},
/* 3. Interface down interrupt. */
{ANY_RMEP_TLV_INTERFACE_DOWN_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_UP_TO_DOWN_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceDown},
/* 4. Interface up interrupt. */
{ANY_RMEP_TLV_INTERFACE_UP_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_DOWN_TO_UP_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceUp},
/* 5. Interface TLV Testing to Up interrupt. */
{ANY_RMEP_TLV_INTERFACE_UP_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_TESTING_TO_UP_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceTestingToUp},
/* 6. Interface TLV Unknown to Up interrupt. */
{ANY_RMEP_TLV_INTERFACE_UP_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_UNKNOWN_TO_UP_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceUnknownToUp},
/* 7. Interface TLV Dormant to Up interrupt. */
{ANY_RMEP_TLV_INTERFACE_UP_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_DORMANT_TO_UP_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceDormantToUp},
/* 8. Interface TLV Not present to Up interrupt. */
{ANY_RMEP_TLV_INTERFACE_UP_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_NOTPRESENT_TO_UP_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceNotPresentToUp},
/* 9. Interface Link Layer Down to Up interrupt. */
{ANY_RMEP_TLV_INTERFACE_UP_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_LLDOWN_TO_UP_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceLLDownToUp},
/* 10. Interface up to testing transition interrupt. */
{ANY_RMEP_TLV_INTERFACE_DOWN_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_UP_TO_TESTING_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceTesting},
/* 11. Interface up to unknown transition interrupt. */
{ANY_RMEP_TLV_INTERFACE_DOWN_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_UP_TO_UNKNOWN_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceUnkonwn},
/* 11. Interface up to dormant transition interrupt. */
{ANY_RMEP_TLV_INTERFACE_DOWN_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_UP_TO_DORMANT_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceDormant},
/* 11. Interface up to not present transition interrupt. */
{ANY_RMEP_TLV_INTERFACE_DOWN_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_UP_TO_NOTPRESENT_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceNotPresent},
/* 11. Interface up to Link Layer Down transition interrupt. */
{ANY_RMEP_TLV_INTERFACE_DOWN_STATUSr, FIRST_RMEP_INDEXf, INVALIDf,
ANY_RMEP_TLV_INTERFACE_UP_TO_LLDOWN_TRANSITION_INT_ENABLEf,
bcmOAMEventEndpointInterfaceLLDown},
/* 12. Low MDL or unexpected MAID interrupt. */
{XCON_CCM_DEFECT_STATUSr, INVALIDf, FIRST_MA_INDEXf,
XCON_CCM_DEFECT_INT_ENABLEf,
bcmOAMEventGroupCCMxcon},
/*
* 13. Remote MEP lookup failed or CCM interval mismatch during Remote MEP
* lookup interrupt.
*/
{ERROR_CCM_DEFECT_STATUSr, INVALIDf, FIRST_MA_INDEXf,
ERROR_CCM_DEFECT_INT_ENABLEf,
bcmOAMEventGroupCCMError},
/*
* 14. Some Remote defect indicator interrupt - aggregated health of remote
* MEPs.
*/
{SOME_RDI_DEFECT_STATUSr, FIRST_RMEP_INDEXf, FIRST_MA_INDEXf,
SOME_RDI_DEFECT_INT_ENABLEf,
bcmOAMEventGroupRemote},
/* 15. Aggregate health of remote MEP state machines interrupt. */
{SOME_RMEP_CCM_DEFECT_STATUSr, FIRST_RMEP_INDEXf, FIRST_MA_INDEXf,
SOME_RMEP_CCM_DEFECT_INT_ENABLEf,
bcmOAMEventGroupCCMTimeout},
/* Invalid Interrupt - Always Last */
{INVALIDr, INVALIDf, 0, bcmOAMEventCount}
};
/*
 * OAM Group faults array initialization.
 */
STATIC _bcm_oam_fault_t _kt2_oam_group_faults[] =
{
{CURRENT_XCON_CCM_DEFECTf, STICKY_XCON_CCM_DEFECTf,
BCM_OAM_GROUP_FAULT_CCM_XCON, 0x08},
{CURRENT_ERROR_CCM_DEFECTf, STICKY_ERROR_CCM_DEFECTf,
BCM_OAM_GROUP_FAULT_CCM_ERROR, 0x04},
{CURRENT_SOME_RMEP_CCM_DEFECTf, STICKY_SOME_RMEP_CCM_DEFECTf,
BCM_OAM_GROUP_FAULT_CCM_TIMEOUT, 0x02},
{CURRENT_SOME_RDI_DEFECTf, STICKY_SOME_RDI_DEFECTf,
BCM_OAM_GROUP_FAULT_REMOTE, 0x01},
{0, 0, 0, 0}
};
/*
 * OAM Endpoint faults array initialization.
 */
STATIC _bcm_oam_fault_t _kt2_oam_endpoint_faults[] =
{
{CURRENT_RMEP_PORT_STATUS_DEFECTf,
STICKY_RMEP_PORT_STATUS_DEFECTf,
BCM_OAM_ENDPOINT_FAULT_PORT_DOWN, 0x08},
{CURRENT_RMEP_INTERFACE_STATUS_DEFECTf,
STICKY_RMEP_INTERFACE_STATUS_DEFECTf,
BCM_OAM_ENDPOINT_FAULT_INTERFACE_DOWN, 0x04},
{CURRENT_RMEP_CCM_DEFECTf,
STICKY_RMEP_CCM_DEFECTf,
BCM_OAM_ENDPOINT_FAULT_CCM_TIMEOUT, 0x20},
{CURRENT_RMEP_LAST_RDIf,
STICKY_RMEP_LAST_RDIf,
BCM_OAM_ENDPOINT_FAULT_REMOTE, 0x10},
{0, 0, 0, 0}
};
/*
 * Interrupt-enable control for one OAM event type: the hardware enable
 * field and the value to program into it. Indexed by bcm_oam_event_type_t
 * in _kt2_oam_intr_en_fields[] below; INVALIDf marks unsupported events.
 */
typedef struct _bcm_kt2_oam_intr_en_fields_s {
    soc_field_t field; /* Interrupt-enable field in CCM interrupt control. */
    uint32 value;      /* Value written to enable the interrupt. */
} _bcm_kt2_oam_intr_en_fields_t;
STATIC _bcm_kt2_oam_intr_en_fields_t _kt2_oam_intr_en_fields[bcmOAMEventCount] =
{
/*
* Note:
* The order of hardware field names in the below initialization
* code must match the event enum order in bcm_oam_event_type_t.
*/
{ ANY_RMEP_TLV_PORT_DOWN_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_PORT_UP_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_UP_TO_DOWN_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_DOWN_TO_UP_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_TESTING_TO_UP_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_UNKNOWN_TO_UP_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_DORMANT_TO_UP_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_NOTPRESENT_TO_UP_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_LLDOWN_TO_UP_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_UP_TO_TESTING_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_UP_TO_UNKNOWN_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_UP_TO_DORMANT_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_UP_TO_NOTPRESENT_TRANSITION_INT_ENABLEf, 1},
{ ANY_RMEP_TLV_INTERFACE_UP_TO_LLDOWN_TRANSITION_INT_ENABLEf, 1},
{ XCON_CCM_DEFECT_INT_ENABLEf, 1},
{ ERROR_CCM_DEFECT_INT_ENABLEf, 1},
{ SOME_RDI_DEFECT_INT_ENABLEf, 1},
{ SOME_RMEP_CCM_DEFECT_INT_ENABLEf, 1},
{ INVALIDf, 0},
{ INVALIDf, 0},
{ INVALIDf, 0},
{ INVALIDf, 0},
{ INVALIDf, 0},
{ INVALIDf, 0},
{ INVALIDf, 0},
{ INVALIDf, 0},
{ INVALIDf, 0},
{ INVALIDf, 0},
{ INVALIDf, 0}
};
/* LM counter related fields - Ingress */
STATIC mep_ctr_info_t mep_ctr_info[] = {
{ LMEP__CTR1_VALIDf, LMEP__CTR1_BASE_PTRf,
LMEP__CTR1_MEP_TYPEf, LMEP__CTR1_MEP_MDLf,
LMEP__CTR1_SERVICE_PRI_MAP_PROFILE_PTRf
},
{ LMEP__CTR2_VALIDf, LMEP__CTR2_BASE_PTRf,
LMEP__CTR2_MEP_TYPEf, LMEP__CTR2_MEP_MDLf,
LMEP__CTR2_SERVICE_PRI_MAP_PROFILE_PTRf
}
};
/* LM counter related fields - Egress */
STATIC mep_ctr_info_t egr_mep_ctr_info[] = {
{ CTR1_VALIDf, CTR1_BASE_PTRf,
CTR1_MEP_TYPEf, CTR1_MEP_MDLf,
CTR1_SERVICE_PRI_MAP_PROFILE_PTRf
},
{ CTR2_VALIDf, CTR2_BASE_PTRf,
CTR2_MEP_TYPEf, CTR2_MEP_MDLf,
CTR2_SERVICE_PRI_MAP_PROFILE_PTRf
}
};
/*
 * Mapping from a field-module OLP header type to the compressed OLP
 * header index and subtype used by this device (see the subtype table
 * in the comment block above kt2_olp_hdr_type_mapping[]).
 */
typedef struct _bcm_kt2_oam_olp_hdr_type_map_s {
    bcm_field_olp_header_type_t field_olp_hdr_type; /* FP OLP header type. */
    uint8 mem_index; /* Compressed OLP header type (table index). */
    uint8 subtype;   /* OLP header subtype carried to/from the OLP port. */
    uint8 reuse;     /* Nonzero: shares a mem_index with an earlier row. */
} _bcm_kt2_oam_olp_hdr_type_map_t;
STATIC _bcm_kt2_oam_olp_hdr_type_map_t kt2_olp_hdr_type_mapping[] = {
/* Down MEP CCM */
{bcmFieldOlpHeaderTypeEthOamCcm, _BCM_KT2_CCM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_CCM, 0},
{bcmFieldOlpHeaderTypeBhhOamCcm, _BCM_KT2_CCM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_CCM, 1},
/* BFD */
{bcmFieldOlpHeaderTypeBfdOam, _BCM_KT2_BFD_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_BFD, 0},
/* Down MEP LM/DM */
{bcmFieldOlpHeaderTypeEthOamLmDm, _BCM_KT2_LM_DM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_LM_DM, 0},
{bcmFieldOlpHeaderTypeEthOamLm, _BCM_KT2_LM_DM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_LM_DM, 1},
{bcmFieldOlpHeaderTypeEthOamDm, _BCM_KT2_LM_DM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_LM_DM, 1},
{bcmFieldOlpHeaderTypeBhhOamLm, _BCM_KT2_LM_DM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_LM_DM, 1},
{bcmFieldOlpHeaderTypeBhhOamDm, _BCM_KT2_LM_DM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_LM_DM, 1},
/*
* UpMEP header types are not programmable by FP, but OAM pipeline lookup
* can assign UpMEP subtypes.
*/
/* UpMEP CCM*/
{bcmFieldOlpHeaderTypeEthOamUpMepCcm, _BCM_KT2_ETH_OAM_UP_MEP_CCM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_ETH_OAM_UPMEP_CCM, 0},
{bcmFieldOlpHeaderTypeEthOamUpMepLm, _BCM_KT2_ETH_OAM_UP_MEP_LM_DM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_ETH_OAM_UPMEP_LM_DM, 0},
{bcmFieldOlpHeaderTypeEthOamUpMepDm, _BCM_KT2_ETH_OAM_UP_MEP_LM_DM_OLP_HDR_TYPE_COMPRESSED,
_BCM_KT2_OLP_HDR_SUBTYPE_ETH_OAM_UPMEP_LM_DM, 1},
};
static uint8 kt2_olp_hdr_type_count = sizeof(kt2_olp_hdr_type_mapping) /
sizeof(kt2_olp_hdr_type_mapping[0]);
/* * * * * * * * * * * * * * * * * * * * * * * * *
* Static local functions *
*/
/*
* Function:
* _bcm_kt2_outer_tpid_init
* Purpose:
* Allocate and initialize memory to cache oam outer tpid entries.
* Initialize lock for cached tpid entries.
* Parameters:
* unit - (IN)SOC unit number.
* Returns:
* BCM_E_XXX
* Notes:
*/
int
_bcm_kt2_outer_tpid_init(int unit) {
    int index;
    int alloc_size;
    uint32 reg32;
    int rv = BCM_E_NONE;

    /* Allocate memory to cache OUTER_TPID entries. */
    alloc_size = sizeof(_oam_tpid_t) * BCM_MAX_TPID_ENTRIES;
    if (KT2_OAM_OUTER_TPID_TAB(unit) == NULL) {
        KT2_OAM_OUTER_TPID_TAB(unit) = sal_alloc(alloc_size,
                                                 "Cached OAM Outer TPIDs");
        if (KT2_OAM_OUTER_TPID_TAB(unit) == NULL) {
            return BCM_E_MEMORY;
        }
    }
    sal_memset(KT2_OAM_OUTER_TPID_TAB(unit), 0, alloc_size);

    /* Cache the outer TPID values currently programmed in hardware. */
    for (index = 0; index < BCM_MAX_TPID_ENTRIES; index++) {
        rv = soc_reg32_get(unit, outer_tpid[index], REG_PORT_ANY, 0, &reg32);
        if (BCM_FAILURE(rv)) {
            sal_free(KT2_OAM_OUTER_TPID_TAB(unit));
            KT2_OAM_OUTER_TPID_TAB(unit) = NULL;
            /* Bug fix: previously fell through after freeing the table
             * and wrote through the NULL pointer below. Propagate the
             * hardware read error instead. */
            return rv;
        }
        KT2_OAM_OUTER_TPID_ENTRY(unit, index) = soc_reg_field_get(unit,
                                            outer_tpid[index], reg32, TPIDf);
    }

    if (NULL == BCM_KT2_OUTER_TPID_MUTEX(unit)) {
        /* Create protection mutex. */
        BCM_KT2_OUTER_TPID_MUTEX(unit) = sal_mutex_create("oamouter_tpid_lock");
        if (NULL == BCM_KT2_OUTER_TPID_MUTEX(unit)) {
            return (BCM_E_MEMORY);
        }
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_inner_tpid_init
* Purpose:
* Allocate and initialize memory to cache oam inner tpid entries.
* Initialize lock for cached tpid entries.
* Parameters:
* unit - (IN)SOC unit number.
* Returns:
* BCM_E_XXX
* Notes:
*/
int
_bcm_kt2_inner_tpid_init(int unit) {
    int alloc_size;
    uint32 reg32;
    int rv = BCM_E_NONE;

    /* Allocate memory to cache INNER_TPID entries. */
    alloc_size = sizeof(_oam_tpid_t) * BCM_MAX_INNER_TPID_ENTRIES;
    if (KT2_OAM_INNER_TPID_TAB(unit) == NULL) {
        KT2_OAM_INNER_TPID_TAB(unit) = sal_alloc(alloc_size,
                                                 "Cached OAM Inner TPIDs");
        if (KT2_OAM_INNER_TPID_TAB(unit) == NULL) {
            return BCM_E_MEMORY;
        }
    }
    sal_memset(KT2_OAM_INNER_TPID_TAB(unit), 0, alloc_size);

    /* Cache the inner TPID value currently programmed in hardware. */
    rv = soc_reg32_get(unit, INNER_TPIDr, REG_PORT_ANY, 0, &reg32);
    if (BCM_FAILURE(rv)) {
        sal_free(KT2_OAM_INNER_TPID_TAB(unit));
        KT2_OAM_INNER_TPID_TAB(unit) = NULL;
        /* Bug fix: previously fell through after freeing the table and
         * wrote through the NULL pointer below. Propagate the error. */
        return rv;
    }
    /* Inner TPID has only 1 entry. So, index is always 0 */
    KT2_OAM_INNER_TPID_ENTRY(unit, 0) = soc_reg_field_get(unit, INNER_TPIDr,
                                                          reg32, TPIDf);
    if (NULL == BCM_KT2_INNER_TPID_MUTEX(unit)) {
        /* Create protection mutex. */
        BCM_KT2_INNER_TPID_MUTEX(unit) = sal_mutex_create("oam inner_tpid_lock");
        if (NULL == BCM_KT2_INNER_TPID_MUTEX(unit)) {
            return (BCM_E_MEMORY);
        }
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_subport_tpid_init
* Purpose:
* Allocate and initialize memory to cache oam subport tpid entries.
* Initialize lock for cached tpid entries.
* Parameters:
* unit - (IN)SOC unit number.
* Returns:
* BCM_E_XXX
* Notes:
*/
int
_bcm_kt2_subport_tpid_init(int unit) {
    int index;
    int alloc_size;
    uint32 reg32;
    int rv = BCM_E_NONE;

    /* Allocate memory to cache SUBPORT_TAG_TPID entries. */
    alloc_size = sizeof(_oam_tpid_t) * BCM_MAX_TPID_ENTRIES;
    if (KT2_OAM_SUBPORT_TPID_TAB(unit) == NULL) {
        KT2_OAM_SUBPORT_TPID_TAB(unit) = sal_alloc(alloc_size,
                                                   "Cached OAM Subport TPIDs");
        if (KT2_OAM_SUBPORT_TPID_TAB(unit) == NULL) {
            return BCM_E_MEMORY;
        }
    }
    sal_memset(KT2_OAM_SUBPORT_TPID_TAB(unit), 0, alloc_size);

    /* Cache the subport TPID values currently programmed in hardware. */
    for (index = 0; index < BCM_MAX_TPID_ENTRIES; index++) {
        rv = soc_reg32_get(unit, subport_tpid[index], REG_PORT_ANY, 0, &reg32);
        if (BCM_FAILURE(rv)) {
            sal_free(KT2_OAM_SUBPORT_TPID_TAB(unit));
            KT2_OAM_SUBPORT_TPID_TAB(unit) = NULL;
            /* Bug fix: previously fell through after freeing the table
             * and wrote through the NULL pointer below. Propagate the
             * hardware read error instead. */
            return rv;
        }
        KT2_OAM_SUBPORT_TPID_ENTRY(unit, index) = soc_reg_field_get(unit,
                                          subport_tpid[index], reg32, TPIDf);
    }

    if (NULL == BCM_KT2_SUBPORT_TPID_MUTEX(unit)) {
        /* Create protection mutex. */
        BCM_KT2_SUBPORT_TPID_MUTEX(unit) = sal_mutex_create("oam subport_tpid_lock");
        if (NULL == BCM_KT2_SUBPORT_TPID_MUTEX(unit)) {
            return (BCM_E_MEMORY);
        }
    }
    return (BCM_E_NONE);
}
/*
* Function :
* _bcm_kt2_oam_tpid_get
*
* Purpose :
* Get tpid value for tpid entry index in the HW.
*
* Parameters :
* unit - (IN) BCM device number.
* tpid_type -(IN) - outer/inner/subport
* index - (IN) Entry index.
* value - (OUT) TPID value
* Return :
* BCM_E_XXX
*/
int
_bcm_kt2_oam_tpid_get(int unit, int tpid_type, int index, int *value)
{
    int rv = BCM_E_NONE;
    uint32 rval = 0;

    if (tpid_type == BCM_OAM_TPID_TYPE_OUTER) {
        /* Bug fix: also reject negative indices, which previously read
         * before the start of the outer_tpid[] register array. This
         * matches the range checks in _bcm_kt2_oam_tpid_entry_get(). */
        if ((index < 0) || (index >= BCM_MAX_TPID_ENTRIES)) {
            return (BCM_E_PARAM);
        }
        rv = soc_reg32_get(unit, outer_tpid[index], REG_PORT_ANY, 0, &rval);
        if (BCM_FAILURE(rv)) {
            return (BCM_E_PARAM);
        }
        *value = soc_reg_field_get(unit, outer_tpid[index], rval, TPIDf);
    } else if (tpid_type == BCM_OAM_TPID_TYPE_INNER) {
        if ((index < 0) || (index >= BCM_MAX_INNER_TPID_ENTRIES)) {
            return (BCM_E_PARAM);
        }
        rv = soc_reg32_get(unit, INNER_TPIDr, REG_PORT_ANY, 0, &rval);
        if (BCM_FAILURE(rv)) {
            return (BCM_E_PARAM);
        }
        *value = soc_reg_field_get(unit, INNER_TPIDr, rval, TPIDf);
    } else if (tpid_type == BCM_OAM_TPID_TYPE_SUBPORT) {
        if ((index < 0) || (index >= BCM_MAX_TPID_ENTRIES)) {
            return (BCM_E_PARAM);
        }
        rv = soc_reg32_get(unit, subport_tpid[index], REG_PORT_ANY, 0, &rval);
        if (BCM_FAILURE(rv)) {
            return (BCM_E_PARAM);
        }
        *value = soc_reg_field_get(unit, subport_tpid[index], rval, TPIDf);
    } else {
        /* Unknown TPID type. */
        return (BCM_E_PARAM);
    }
    return (rv);
}
/*
* Function :
* _bcm_kt2_oam_tpid_set
*
* Purpose :
* Set tpid value for tpid entry index in the HW.
*
* Parameters :
* unit - (IN) BCM device number.
* tpid_type -(IN) - outer/inner/subport
* index - (IN) Entry index.
* value - (IN) Value to be set as tpid
* Return :
* BCM_E_XXX
*/
int
_bcm_kt2_oam_tpid_set(int unit, int tpid_type, int index, int value)
{
    int rv = BCM_E_NONE;
    uint32 rval = 0;

    if (tpid_type == BCM_OAM_TPID_TYPE_OUTER) {
        /* Bug fix: also reject negative indices, which previously read
         * before the start of the outer_tpid[] register array. This
         * matches the range checks in _bcm_kt2_oam_tpid_entry_get(). */
        if ((index < 0) || (index >= BCM_MAX_TPID_ENTRIES)) {
            return (BCM_E_PARAM);
        }
        soc_reg_field_set(unit, outer_tpid[index], &rval, TPIDf, value);
        rv = soc_reg32_set(unit, outer_tpid[index], REG_PORT_ANY, 0, rval);
        if (BCM_FAILURE(rv)) {
            return (BCM_E_PARAM);
        }
    } else if (tpid_type == BCM_OAM_TPID_TYPE_INNER) {
        if ((index < 0) || (index >= BCM_MAX_INNER_TPID_ENTRIES)) {
            return (BCM_E_PARAM);
        }
        soc_reg_field_set(unit, INNER_TPIDr, &rval, TPIDf, value);
        rv = soc_reg32_set(unit, INNER_TPIDr, REG_PORT_ANY, 0, rval);
        if (BCM_FAILURE(rv)) {
            return (BCM_E_PARAM);
        }
    } else if (tpid_type == BCM_OAM_TPID_TYPE_SUBPORT) {
        if ((index < 0) || (index >= BCM_MAX_TPID_ENTRIES)) {
            return (BCM_E_PARAM);
        }
        soc_reg_field_set(unit, subport_tpid[index], &rval, TPIDf, value);
        rv = soc_reg32_set(unit, subport_tpid[index], REG_PORT_ANY, 0, rval);
        if (BCM_FAILURE(rv)) {
            return (BCM_E_PARAM);
        }
    } else {
        /* Unknown TPID type. */
        return (BCM_E_PARAM);
    }
    return (rv);
}
/*
* Function :
* _bcm_kt2_oam_tpid_entry_get
*
* Purpose :
* Get tpid value for tpid entry index .
*
* Parameters :
* unit - (IN) BCM device number.
* tpid - (OUT) TPID value.
* index - (IN) Entry index.
*
* Return :
* BCM_E_XXX
*/
int
_bcm_kt2_oam_tpid_entry_get(int unit, uint16 *tpid, int index, int tpid_type)
{
    /* Return the cached TPID value for an in-use entry; an entry with a
     * zero reference count or an out-of-range index is rejected. */
    switch (tpid_type) {
        case BCM_OAM_TPID_TYPE_OUTER:
            if ((index < 0) || (index >= BCM_MAX_TPID_ENTRIES) ||
                (KT2_OAM_OUTER_TPID_REF_COUNT(unit, index) <= 0)) {
                return (BCM_E_PARAM);
            }
            *tpid = KT2_OAM_OUTER_TPID_ENTRY(unit, index);
            break;
        case BCM_OAM_TPID_TYPE_INNER:
            if ((index < 0) || (index >= BCM_MAX_INNER_TPID_ENTRIES) ||
                (KT2_OAM_INNER_TPID_REF_COUNT(unit, index) <= 0)) {
                return (BCM_E_PARAM);
            }
            *tpid = KT2_OAM_INNER_TPID_ENTRY(unit, index);
            break;
        case BCM_OAM_TPID_TYPE_SUBPORT:
            if ((index < 0) || (index >= BCM_MAX_TPID_ENTRIES) ||
                (KT2_OAM_SUBPORT_TPID_REF_COUNT(unit, index) <= 0)) {
                return (BCM_E_PARAM);
            }
            *tpid = KT2_OAM_SUBPORT_TPID_ENTRY(unit, index);
            break;
        default:
            /* Unknown TPID type. */
            return (BCM_E_PARAM);
    }
    return (BCM_E_NONE);
}
/*
 * Function :
 *     _bcm_kt2_tpid_entry_delete
 *
 * Purpose :
 *     Release one reference to a cached TPID entry; when the last
 *     reference is released, restore the default TPID in hardware.
 *
 * Parameters :
 *     unit      - (IN) BCM device number.
 *     index     - (IN) Entry index.
 *     tpid_type - (IN) Type of the tpid - inner/outer/subport.
 *
 * Return :
 *      BCM_E_XXX
 */
int
_bcm_kt2_tpid_entry_delete(int unit, int index, int tpid_type)
{
    int rv = BCM_E_NONE;
    /* NOTE(review): the *_TPID_LOCK macros evaluate to BCM_E_INTERNAL when
     * the mutex was never created, but that status is discarded here —
     * callers are expected to run only after the *_tpid_init routines. */
    if (tpid_type == BCM_OAM_TPID_TYPE_OUTER) {
        BCM_KT2_OUTER_TPID_LOCK(unit);
        /* Reject out-of-range indices and entries that are not in use. */
        if ((index < 0) || (index >= BCM_MAX_TPID_ENTRIES) ||
            (KT2_OAM_OUTER_TPID_REF_COUNT(unit, index) <= 0)) {
            rv = BCM_E_PARAM;
            BCM_KT2_OUTER_TPID_UNLOCK(unit);
            return rv;
        }
        KT2_OAM_OUTER_TPID_REF_COUNT(unit, index)--;
        /* Last reference released: restore the per-slot default TPID in
         * both the software cache and the hardware register. */
        if (0 == KT2_OAM_OUTER_TPID_REF_COUNT(unit, index)) {
            KT2_OAM_OUTER_TPID_ENTRY(unit, index) = default_outer_tpid[index];
            rv = _bcm_kt2_oam_tpid_set(unit, tpid_type, index,
                                       default_outer_tpid[index]);
        }
        BCM_KT2_OUTER_TPID_UNLOCK(unit);
    } else if (tpid_type == BCM_OAM_TPID_TYPE_INNER) {
        BCM_KT2_INNER_TPID_LOCK(unit);
        /* Reject out-of-range indices and entries that are not in use. */
        if ((index < 0) || (index >= BCM_MAX_INNER_TPID_ENTRIES) ||
            (KT2_OAM_INNER_TPID_REF_COUNT(unit, index) <= 0)) {
            rv = BCM_E_PARAM;
            BCM_KT2_INNER_TPID_UNLOCK(unit);
            return rv;
        }
        KT2_OAM_INNER_TPID_REF_COUNT(unit, index)--;
        /* Last reference released: restore the default TPID (0x8100). */
        if (0 == KT2_OAM_INNER_TPID_REF_COUNT(unit, index)) {
            KT2_OAM_INNER_TPID_ENTRY(unit, index) = BCM_OAM_DEFAULT_TPID;
            rv = _bcm_kt2_oam_tpid_set(unit, tpid_type, index,
                                       BCM_OAM_DEFAULT_TPID);
        }
        BCM_KT2_INNER_TPID_UNLOCK(unit);
    } else if (tpid_type == BCM_OAM_TPID_TYPE_SUBPORT) {
        BCM_KT2_SUBPORT_TPID_LOCK(unit);
        /* Reject out-of-range indices and entries that are not in use. */
        if ((index < 0) || (index >= BCM_MAX_TPID_ENTRIES) ||
            (KT2_OAM_SUBPORT_TPID_REF_COUNT(unit, index) <= 0)) {
            rv = BCM_E_PARAM;
            BCM_KT2_SUBPORT_TPID_UNLOCK(unit);
            return rv;
        }
        KT2_OAM_SUBPORT_TPID_REF_COUNT(unit, index)--;
        /* Last reference released: restore the default TPID (0x8100). */
        if (0 == KT2_OAM_SUBPORT_TPID_REF_COUNT(unit, index)) {
            KT2_OAM_SUBPORT_TPID_ENTRY(unit, index) = BCM_OAM_DEFAULT_TPID;
            rv = _bcm_kt2_oam_tpid_set(unit, tpid_type, index,
                                       BCM_OAM_DEFAULT_TPID);
        }
        BCM_KT2_SUBPORT_TPID_UNLOCK(unit);
    } else {
        /* Unknown TPID type. */
        return (BCM_E_PARAM);
    }
    return (rv);
}
/*
* Function :
* _bcm_kt2_tpid_lkup
*
* Purpose :
* Get tpid entry index for specific tpid value.
*
* Parameters :
* unit - (IN) BCM device number.
* tpid - (IN) TPID value.
* tpid_type - (IN) Type of the tpid - inner/outer/subport
* index - (OUT) Entry index.
*
* Return :
* BCM_E_XXX
*/
int
_bcm_kt2_tpid_lkup(int unit, uint16 tpid, int tpid_type, int *index)
{
    int entry;
    int max_entries = 0;
    uint32 cached;

    /* Size of the cached table for the requested TPID type; an unknown
     * type leaves max_entries at 0 so the scan below finds nothing. */
    if ((tpid_type == BCM_OAM_TPID_TYPE_OUTER) ||
        (tpid_type == BCM_OAM_TPID_TYPE_SUBPORT)) {
        max_entries = BCM_MAX_TPID_ENTRIES;
    } else if (tpid_type == BCM_OAM_TPID_TYPE_INNER) {
        max_entries = BCM_MAX_INNER_TPID_ENTRIES;
    }

    /* Linear scan of the cached table for a matching TPID value. */
    for (entry = 0; entry < max_entries; entry++) {
        if (tpid_type == BCM_OAM_TPID_TYPE_OUTER) {
            cached = KT2_OAM_OUTER_TPID_ENTRY(unit, entry);
        } else if (tpid_type == BCM_OAM_TPID_TYPE_INNER) {
            cached = KT2_OAM_INNER_TPID_ENTRY(unit, entry);
        } else {
            cached = KT2_OAM_SUBPORT_TPID_ENTRY(unit, entry);
        }
        if (cached == tpid) {
            *index = entry;
            return BCM_E_NONE;
        }
    }
    return BCM_E_NOT_FOUND;
}
/*
 * Function:
 *      _bcm_kt2_tpid_entry_add
 * Purpose:
 *      Add a new TPID entry (or reference an existing one).
 * Parameters:
 *      unit      - (IN) SOC unit number.
 *      tpid      - (IN) TPID to be added.
 *      tpid_type - (IN) Type of the tpid - inner/outer/subport
 *      index     - (OUT) Index where the new TPID resides.
 * Returns:
 *      BCM_E_XXX
 * Notes:
 *      If the same TPID already exists, simply increase the
 *      reference count of the cached entry. Otherwise, add the entry
 *      to the cached table and write the new entry to hardware.
 *      Only four distinct TPID values are currently supported for
 *      outer and subport TPID and only one value is supported for
 *      inner tpid.
 *      The per-type lock is held across the lookup and the hardware
 *      write so the cached table and hardware stay consistent.
 */
int
_bcm_kt2_tpid_entry_add(int unit, uint16 tpid, int tpid_type, int *index)
{
    int rv = BCM_E_NONE;    /* Operation return status. */
    int i, free_index;      /* Iterator / first unreferenced slot. */
    free_index = -1;
    if (tpid_type == BCM_OAM_TPID_TYPE_OUTER) {
        BCM_KT2_OUTER_TPID_LOCK(unit);
        /* Search for an existing entry; on a hit just bump the refcount. */
        for (i = 0; i < BCM_MAX_TPID_ENTRIES; i++) {
            if (KT2_OAM_OUTER_TPID_ENTRY(unit, i) == tpid) {
                KT2_OAM_OUTER_TPID_REF_COUNT(unit, i)++;
                *index = i;
                BCM_KT2_OUTER_TPID_UNLOCK(unit);
                return rv;
            }
        }
        /* No match - find the first slot with a zero reference count. */
        for (i = 0; i < BCM_MAX_TPID_ENTRIES; i++) {
            if (KT2_OAM_OUTER_TPID_REF_COUNT(unit, i) == 0) {
                free_index = i;
                break;
            }
        }
        if (free_index < 0) {
            /* All slots referenced - table full. */
            rv = BCM_E_RESOURCE;
            BCM_KT2_OUTER_TPID_UNLOCK(unit);
            return rv;
        }
        /* Insert the new configuration into tpid table as free
           entry is available */
        rv = _bcm_kt2_oam_tpid_set(unit, tpid_type, free_index, tpid);
        if (BCM_FAILURE(rv)) {
            BCM_KT2_OUTER_TPID_UNLOCK(unit);
            return rv;
        }
        /* Hardware write succeeded - update the software cache. */
        KT2_OAM_OUTER_TPID_ENTRY(unit, free_index) = tpid;
        KT2_OAM_OUTER_TPID_REF_COUNT(unit, free_index)++;
        *index = free_index;
        BCM_KT2_OUTER_TPID_UNLOCK(unit);
    } else if (tpid_type == BCM_OAM_TPID_TYPE_INNER) {
        BCM_KT2_INNER_TPID_LOCK(unit);
        /* Search for an existing entry */
        for (i = 0; i < BCM_MAX_INNER_TPID_ENTRIES; i++) {
            if (KT2_OAM_INNER_TPID_ENTRY(unit, i) == tpid) {
                KT2_OAM_INNER_TPID_REF_COUNT(unit, i)++;
                *index = i;
                BCM_KT2_INNER_TPID_UNLOCK(unit);
                return rv;
            }
        }
        /* No match - find the first slot with a zero reference count. */
        for (i = 0; i < BCM_MAX_INNER_TPID_ENTRIES; i++) {
            if (KT2_OAM_INNER_TPID_REF_COUNT(unit, i) == 0) {
                free_index = i;
                break;
            }
        }
        if (free_index < 0) {
            rv = BCM_E_RESOURCE;
            BCM_KT2_INNER_TPID_UNLOCK(unit);
            return rv;
        }
        /* Insert the new configuration into tpid table as free
           entry is available */
        rv = _bcm_kt2_oam_tpid_set(unit, tpid_type, free_index, tpid);
        if (BCM_FAILURE(rv)) {
            BCM_KT2_INNER_TPID_UNLOCK(unit);
            return rv;
        }
        KT2_OAM_INNER_TPID_ENTRY(unit, free_index) = tpid;
        KT2_OAM_INNER_TPID_REF_COUNT(unit, free_index)++;
        *index = free_index;
        BCM_KT2_INNER_TPID_UNLOCK(unit);
    } else if (tpid_type == BCM_OAM_TPID_TYPE_SUBPORT) {
        BCM_KT2_SUBPORT_TPID_LOCK(unit);
        /* Search for an existing entry */
        for (i = 0; i < BCM_MAX_TPID_ENTRIES; i++) {
            if (KT2_OAM_SUBPORT_TPID_ENTRY(unit, i) == tpid) {
                KT2_OAM_SUBPORT_TPID_REF_COUNT(unit, i)++;
                *index = i;
                BCM_KT2_SUBPORT_TPID_UNLOCK(unit);
                return rv;
            }
        }
        /* No match - find the first slot with a zero reference count. */
        for (i = 0; i < BCM_MAX_TPID_ENTRIES; i++) {
            if (KT2_OAM_SUBPORT_TPID_REF_COUNT(unit, i) == 0) {
                free_index = i;
                break;
            }
        }
        if (free_index < 0) {
            rv = BCM_E_RESOURCE;
            BCM_KT2_SUBPORT_TPID_UNLOCK(unit);
            return rv;
        }
        /* Insert the new configuration into tpid table as free
           entry is available. */
        rv = _bcm_kt2_oam_tpid_set(unit, tpid_type, free_index, tpid);
        if (BCM_FAILURE(rv)) {
            BCM_KT2_SUBPORT_TPID_UNLOCK(unit);
            return rv;
        }
        KT2_OAM_SUBPORT_TPID_ENTRY(unit, free_index) = tpid;
        KT2_OAM_SUBPORT_TPID_REF_COUNT(unit, free_index)++;
        *index = free_index;
        BCM_KT2_SUBPORT_TPID_UNLOCK(unit);
    } else {
        /* Unknown TPID type. */
        rv = BCM_E_PARAM;
    }
    return rv;
}
/*
 * Function:
 *     _bcm_kt2_oam_ccm_msecs_to_hw_encode
 * Purpose:
 *     Quantize a CCM interval in milliseconds to its hardware encoding.
 * Parameters:
 *     period - (IN) CCM interval in milliseconds.
 * Returns:
 *     Hardware encoding of the supported interval closest to 'period'
 *     (0 when period is 0).
 */
STATIC int
_bcm_kt2_oam_ccm_msecs_to_hw_encode(int period)
{
    int encode = 0; /* Hardware encoding of the quantized period. */

    /* A period of zero maps directly to encoding zero. */
    if (period == 0) {
        return encode;
    }

    /* Scan the supported interval table until the first entry that
     * exceeds the requested period, or the end-of-table marker. */
    encode = 1;
    while ((_kt2_ccm_intervals[encode]
            != _BCM_OAM_ENDPOINT_CCM_PERIOD_UNDEFINED)
           && (period >= _kt2_ccm_intervals[encode])) {
        encode++;
    }

    if (_BCM_OAM_ENDPOINT_CCM_PERIOD_UNDEFINED
        == _kt2_ccm_intervals[encode]) {
        /* Ran off the table - clamp to the largest supported value. */
        encode--;
    } else if ((period - _kt2_ccm_intervals[encode - 1])
               < (_kt2_ccm_intervals[encode] - period)) {
        /* The lower neighbor is strictly closer to the requested period. */
        encode--;
    }
    return encode;
}
#if defined(BCM_WARM_BOOT_SUPPORT)
/*
 * Function:
 *     _bcm_kt2_oam_ccm_hw_encode_to_msecs
 * Purpose:
 *     Get CCM interval in msecs for a given hardware encoded value.
 *     Inverse of _bcm_kt2_oam_ccm_msecs_to_hw_encode(); compiled in only
 *     for warm boot support (see enclosing BCM_WARM_BOOT_SUPPORT guard).
 * Parameters:
 *     encode - (IN) CCM interval hardware encoding. Used directly as an
 *              index into _kt2_ccm_intervals[]; the caller must pass a
 *              valid encoding - no bounds check is performed here.
 * Returns:
 *     CCM interval in msecs.
 */
STATIC int
_bcm_kt2_oam_ccm_hw_encode_to_msecs(int encode)
{
    return (_kt2_ccm_intervals[encode]);
}
#endif
/*
 * Function:
 *     _bcm_kt2_oam_opcode_profile_entry_set
 * Purpose:
 *     Program the OAM opcode control profile fields.
 * Parameters:
 *     unit     - (IN) BCM device number
 *     h_data_p - (IN) Pointer to endpoint hash data; only used to test
 *                whether the endpoint is a MIP, which selects the
 *                low-MDL / my-station-miss default actions.
 *     mem      - (IN) Opcode profile memory - ingress/egress
 *     flags    - (IN) Bitmap of opcode control settings
 *                (BCM_OAM_OPCODE_* flags).
 *     entry    - (IN/OUT) Pointer to opcode control profile table entry
 *                buffer.
 * Returns:
 *     BCM_E_PARAM if flags contains bits outside _BCM_KT2_OAM_OPCODE_MASK,
 *     BCM_E_NONE otherwise.
 */
STATIC int
_bcm_kt2_oam_opcode_profile_entry_set(int unit, _bcm_oam_hash_data_t *h_data_p,
                                      soc_mem_t mem,
                                      uint32 flags,
                                      void *entry)
{
    uint32 ep_opcode;        /* Endpoint opcode flag bits. */
    uint32 opcode_count = 0; /* Number of bits set. */
    int bp;                  /* bit position. */
    int rv = BCM_E_NONE;     /* Operation return status. */
    /* Validate opcode flag bits. */
    if (flags & ~(_BCM_KT2_OAM_OPCODE_MASK)) {
        return (BCM_E_PARAM);
    }
    /* Get number of valid opcodes supported. */
    opcode_count = _shr_popcount(_BCM_KT2_OAM_OPCODE_MASK);
    /*
     * Iterate over opcode flag bits and set corresponding fields
     * in entry buffer. Each case label is the flag value itself, so a
     * cleared bit yields ep_opcode == 0 and falls into 'default'.
     */
    for (bp = 0; bp < opcode_count; bp++) {
        ep_opcode = (flags & (1 << bp));
        switch (ep_opcode) {
            case BCM_OAM_OPCODE_CCM_COPY_TO_CPU:
                soc_mem_field32_set(unit, mem, entry, CCM_COPYTO_CPUf, 1);
                break;
            case BCM_OAM_OPCODE_CCM_IN_HW:
                soc_mem_field32_set(unit, mem, entry, CCM_PROCESS_IN_HWf, 1);
                break;
            case BCM_OAM_OPCODE_CCM_DROP:
                soc_mem_field32_set(unit, mem, entry, CCM_DROPf, 1);
                break;
            case BCM_OAM_OPCODE_LBM_IN_HW:
                soc_mem_field32_set(unit, mem, entry, LBM_ACTIONf, 1);
                break;
            case BCM_OAM_OPCODE_LBM_UC_COPY_TO_CPU:
                soc_mem_field32_set(unit, mem, entry, LBM_UC_COPYTO_CPUf, 1);
                break;
            case BCM_OAM_OPCODE_LBM_UC_DROP:
                soc_mem_field32_set(unit, mem, entry, LBM_UC_DROPf, 1);
                break;
            case BCM_OAM_OPCODE_LBM_MC_DROP:
                soc_mem_field32_set(unit, mem, entry, LBM_MC_DROPf, 1);
                break;
            case BCM_OAM_OPCODE_LBM_MC_COPY_TO_CPU:
                soc_mem_field32_set(unit, mem, entry, LBM_MC_COPYTO_CPUf, 1);
                break;
            case BCM_OAM_OPCODE_LBR_COPY_TO_CPU:
                soc_mem_field32_set(unit, mem, entry, LBR_COPYTO_CPUf, 1);
                break;
            case BCM_OAM_OPCODE_LBR_DROP:
                soc_mem_field32_set(unit, mem, entry, LBR_DROPf, 1);
                break;
            case BCM_OAM_OPCODE_LTM_COPY_TO_CPU:
                soc_mem_field32_set(unit, mem, entry, LTM_COPYTO_CPUf, 1);
                break;
            case BCM_OAM_OPCODE_LTM_DROP:
                soc_mem_field32_set(unit, mem, entry, LTM_DROPf, 1);
                break;
            case BCM_OAM_OPCODE_LTR_COPY_TO_CPU:
                soc_mem_field32_set(unit, mem, entry, LTR_COPYTO_CPUf, 1);
                break;
            case BCM_OAM_OPCODE_LTR_DROP:
                soc_mem_field32_set(unit, mem, entry, LTR_DROPf, 1);
                break;
            case BCM_OAM_OPCODE_LMEP_PKT_FWD:
                soc_mem_field32_set(unit, mem, entry, FWD_LMEP_PKTf, 1);
                break;
            case BCM_OAM_OPCODE_OTHER_COPY_TO_CPU:
                /* "Other" opcodes cover both opcode groups and every
                 * match sub-case (my-station hit/miss, low MDL, MC). */
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_1_UC_MY_STATION_MISS_COPYTO_CPUf, 1);
                soc_mem_field32_set (unit, mem, entry, 
                          OTHER_OPCODE_GROUP_1_UC_MY_STATION_HIT_COPYTO_CPUf, 1);
                soc_mem_field32_set (unit, mem, entry, 
                          OTHER_OPCODE_GROUP_1_LOW_MDL_COPYTO_CPUf, 1);
                soc_mem_field32_set (unit, mem, entry, 
                          OTHER_OPCODE_GROUP_1_MC_COPYTO_CPUf, 1);
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_2_UC_MY_STATION_MISS_COPYTO_CPUf, 1);
                soc_mem_field32_set (unit, mem, entry, 
                          OTHER_OPCODE_GROUP_2_UC_MY_STATION_HIT_COPYTO_CPUf, 1);
                soc_mem_field32_set (unit, mem, entry, 
                          OTHER_OPCODE_GROUP_2_LOW_MDL_COPYTO_CPUf, 1);
                soc_mem_field32_set (unit, mem, entry, 
                          OTHER_OPCODE_GROUP_2_MC_COPYTO_CPUf, 1);
                break;
            case BCM_OAM_OPCODE_OTHER_DROP:
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_1_UC_MY_STATION_MISS_ACTIONf, 1);
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_1_UC_MY_STATION_HIT_ACTIONf, 1);
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_1_LOW_MDL_ACTIONf, 1);
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_1_MC_ACTIONf, 1);
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_2_UC_MY_STATION_MISS_ACTIONf, 1);
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_2_UC_MY_STATION_HIT_ACTIONf, 1);
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_2_LOW_MDL_ACTIONf, 1);
                soc_mem_field32_set(unit, mem, entry, 
                          OTHER_OPCODE_GROUP_2_MC_ACTIONf, 1);
                break;
            default:
                /* Flag bit not set for this position - nothing to do. */
                break;
        }
    }
    if (_BCM_OAM_EP_IS_MIP(h_data_p)) {
        soc_mem_field32_set(unit, mem, entry, LOW_MDL_CCM_FWD_ACTIONf, 0);
        soc_mem_field32_set(unit, mem, entry, LOW_MDL_LB_LT_DROPf, 0);
        soc_mem_field32_set(unit, mem, entry, LB_LT_UC_MY_STATION_MISS_DROPf, 0);
    } else {
        /* Set the opcode action values of the newly introduced CFM opcode actions
           to values that match with the legacy behavior for these opcodes */
        /* LOW MDL CCM / LB/LT drop and LB/LT my station miss drop */
        soc_mem_field32_set(unit, mem, entry, LOW_MDL_CCM_FWD_ACTIONf, 2);
        soc_mem_field32_set(unit, mem, entry, LOW_MDL_LB_LT_DROPf, 1);
        soc_mem_field32_set(unit, mem, entry, LB_LT_UC_MY_STATION_MISS_DROPf, 1);
    }
    return rv;
}
/*
 * Function:
 *     _bcm_kt2_oam_opcode_profile_entry_init
 * Purpose:
 *     Setup default OAM opcode control profile settings for MEP.
 * Parameters:
 *     unit  - (IN) BCM device number
 *     mem   - (IN) Opcode profile memory - ingress/egress
 *     entry - (IN/OUT) Pointer to opcode control profile table entry buffer.
 * Returns:
 *     BCM_E_XXX
 */
int
_bcm_kt2_oam_opcode_profile_entry_init(int unit, soc_mem_t mem,
                                       void *entry)
{
    uint32 opcode_flags = 0;      /* Default MEP opcode control flags. */
    _bcm_oam_hash_data_t ep_data; /* Scratch endpoint context. */

    /* Endpoint context: a plain Ethernet endpoint (not a MIP). */
    sal_memset(&ep_data, 0, sizeof(_bcm_oam_hash_data_t));
    ep_data.type = bcmOAMEndpointTypeEthernet;

    /* CCM: copy-to-CPU and drop. */
    opcode_flags |= (BCM_OAM_OPCODE_CCM_COPY_TO_CPU |
                     BCM_OAM_OPCODE_CCM_DROP);
    /* Loopback (LBM/LBR): copy-to-CPU and drop, unicast and multicast. */
    opcode_flags |= (BCM_OAM_OPCODE_LBM_UC_COPY_TO_CPU |
                     BCM_OAM_OPCODE_LBM_UC_DROP |
                     BCM_OAM_OPCODE_LBM_MC_COPY_TO_CPU |
                     BCM_OAM_OPCODE_LBM_MC_DROP |
                     BCM_OAM_OPCODE_LBR_COPY_TO_CPU |
                     BCM_OAM_OPCODE_LBR_DROP);
    /* Linktrace (LTM/LTR): copy-to-CPU and drop. */
    opcode_flags |= (BCM_OAM_OPCODE_LTM_COPY_TO_CPU |
                     BCM_OAM_OPCODE_LTM_DROP |
                     BCM_OAM_OPCODE_LTR_COPY_TO_CPU |
                     BCM_OAM_OPCODE_LTR_DROP);
    /* All remaining opcodes: copy-to-CPU and drop. */
    opcode_flags |= (BCM_OAM_OPCODE_OTHER_COPY_TO_CPU |
                     BCM_OAM_OPCODE_OTHER_DROP);

    return _bcm_kt2_oam_opcode_profile_entry_set(unit, &ep_data, mem,
                                                 opcode_flags, entry);
}
/*
 * Function:
 *     _bcm_kt2_oam_mip_opcode_profile_entry_init
 * Purpose:
 *     Setup default OAM opcode control profile settings for MIP.
 * Parameters:
 *     unit  - (IN) BCM device number
 *     mem   - (IN) Opcode profile memory - ingress/egress
 *     entry - (IN/OUT) Pointer to opcode control profile table entry buffer.
 * Returns:
 *     BCM_E_XXX
 */
int
_bcm_kt2_oam_mip_opcode_profile_entry_init(int unit, soc_mem_t mem,
                                           void *entry)
{
    uint32 opcode_flags = 0;      /* Default MIP opcode control flags. */
    _bcm_oam_hash_data_t ep_data; /* Scratch endpoint context. */

    /* Endpoint context: an Ethernet endpoint marked as intermediate
     * (MIP), which selects the MIP defaults in the profile-set helper. */
    sal_memset(&ep_data, 0, sizeof(_bcm_oam_hash_data_t));
    ep_data.flags |= BCM_OAM_ENDPOINT_INTERMEDIATE;
    ep_data.type = bcmOAMEndpointTypeEthernet;

    /* MIPs only react to loopback and linktrace messages. */
    opcode_flags |= (BCM_OAM_OPCODE_LBM_UC_COPY_TO_CPU |
                     BCM_OAM_OPCODE_LBM_UC_DROP |
                     BCM_OAM_OPCODE_LBM_MC_COPY_TO_CPU |
                     BCM_OAM_OPCODE_LBM_MC_DROP |
                     BCM_OAM_OPCODE_LTM_COPY_TO_CPU |
                     BCM_OAM_OPCODE_LTM_DROP);

    return _bcm_kt2_oam_opcode_profile_entry_set(unit, &ep_data, mem,
                                                 opcode_flags, entry);
}
/*
 * Function:
 *     _bcm_kt2_oam_opcode_group_init
 * Purpose:
 *     Init ingress and egress OAM opcode groups.
 *     CFM opcodes (index <= _BCM_OAM_OPCODE_TYPE_LTM) get group-valid = 0;
 *     all non-CFM opcodes get group-valid = 1 in opcode group 0 (group 1
 *     per the hardware's 1-based naming). The same table image is written
 *     to both the ingress and egress opcode group memories.
 * Parameters:
 *     unit - (IN) BCM device number
 * Returns:
 *     BCM_E_MEMORY on buffer allocation failure, else the status of the
 *     table writes (BCM_E_XXX).
 */
int
_bcm_kt2_oam_opcode_group_init(int unit)
{
    int rv = BCM_E_NONE;                                /* Return status.  */
    ingress_oam_opcode_group_entry_t *opcode_grp_buf;   /* DMA buffer.     */
    ingress_oam_opcode_group_entry_t *opcode_grp_entry; /* Entry pointer.  */
    int entry_mem_size; /* Size of one table entry in bytes. */
    int index_max = 0;  /* Highest table index.              */
    int index = 0;      /* Current table index.              */

    entry_mem_size = sizeof(ingress_oam_opcode_group_entry_t);
    /* Allocate buffer to store the DMAed table entries. */
    index_max = soc_mem_index_max(unit, INGRESS_OAM_OPCODE_GROUPm);
    opcode_grp_buf = soc_cm_salloc(unit, entry_mem_size * (index_max + 1),
                                   "Opcode group entry buffer");
    if (NULL == opcode_grp_buf) {
        return (BCM_E_MEMORY);
    }
    /*
     * Initialize the entry buffer. Clear entry_mem_size bytes per entry:
     * the previous sizeof(entry_mem_size) only cleared sizeof(int) bytes
     * per entry, leaving most of the buffer uninitialized.
     */
    sal_memset(opcode_grp_buf, 0, entry_mem_size * (index_max + 1));
    for (index = 0; index <= index_max; index++) {
        opcode_grp_entry = soc_mem_table_idx_to_pointer
                                (unit, INGRESS_OAM_OPCODE_GROUPm,
                                 ingress_oam_opcode_group_entry_t *,
                                 opcode_grp_buf, index);
        if (index <= _BCM_OAM_OPCODE_TYPE_LTM) {
            /* For CFM packets group valid = 0 */
            soc_mem_field32_set(unit, INGRESS_OAM_OPCODE_GROUPm,
                      (uint32 *)opcode_grp_entry, OPCODE_GROUP_VALIDf, 0);
        } else {
            /* For Non-CFM packets group valid = 1 and group is opcode group 1*/
            soc_mem_field32_set(unit, INGRESS_OAM_OPCODE_GROUPm,
                      (uint32 *)opcode_grp_entry, OPCODE_GROUP_VALIDf, 1);
            soc_mem_field32_set(unit, INGRESS_OAM_OPCODE_GROUPm,
                      (uint32 *)opcode_grp_entry, OPCODE_GROUPf, 0);
        }
    }
    /* Write the same image to the ingress and then the egress table. */
    rv = soc_mem_write_range(unit, INGRESS_OAM_OPCODE_GROUPm,
                             MEM_BLOCK_ALL, 0, index_max, opcode_grp_buf);
    if (BCM_SUCCESS(rv)) {
        rv = soc_mem_write_range(unit, EGR_OAM_OPCODE_GROUPm,
                                 MEM_BLOCK_ALL, 0, index_max, opcode_grp_buf);
    }
    /* Buffer is always valid here; free it on both success and failure. */
    soc_cm_sfree(unit, opcode_grp_buf);
    return (rv);
}
/*
 * Function:
 *     _bcm_kt2_oam_get_passive_mdl_from_active_mdl
 * Purpose:
 *     Derive the passive MDL bitmap from the active MDL bitmap of the
 *     endpoints at a given MA index base. For the highest active level
 *     whose endpoint is not a MIP, the passive bitmap covers that level
 *     and every level below it.
 * Parameters:
 *     unit          - (IN)  BCM device number.
 *     active_mdl    - (IN)  Bitmap of active MD levels (bit n = level n).
 *     ma_base_index - (IN)  Base MA index of the endpoint block.
 *     passive_mdl   - (OUT) Resulting passive MDL bitmap (0 if no
 *                     non-MIP endpoint is found).
 *     is_upmep      - (IN)  Non-zero for up-MEP mapping.
 * Returns:
 *     None (errors are only logged).
 */
STATIC void
_bcm_kt2_oam_get_passive_mdl_from_active_mdl(int unit, uint8 active_mdl,
                                             int ma_base_index,
                                             uint8 *passive_mdl,
                                             int is_upmep)
{
    uint32 mdl_value[1] = { 0 };
    int ma_offset = 0;   /* Counts set bits seen so far - the endpoints
                            occupy consecutive MA index slots. */
    int mdl = 0;
    bcm_oam_endpoint_t ep_id_at_mdl = _BCM_OAM_INVALID_INDEX;
    _bcm_oam_control_t *oc = NULL;
    _bcm_oam_hash_data_t *h_data_p = NULL;
    /* coverity[check_return] */
    _bcm_kt2_oam_control_get(unit, &oc);
    if (oc == NULL) {
        /* Not expected */
        return;
    }
    /* Initialize passive_mdl to 0 */
    *passive_mdl = 0;
    mdl_value[0] = active_mdl;
    /* Walk MD levels 0..7; each set bit has an endpoint at the next
     * consecutive MA index slot. */
    for (mdl = 0; mdl <= 7; mdl++) {
        if (SHR_BITGET(mdl_value, mdl)) {
            ep_id_at_mdl = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_get(oc,
                              _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_IDX
                              (ma_base_index, ma_offset, is_upmep));
            if (ep_id_at_mdl != _BCM_OAM_INVALID_INDEX) {
                h_data_p = &(oc->oam_hash_data[ep_id_at_mdl]);
                /* Set the passive bitmap only if it is not a MIP.
                 * (1 << mdl) | ((1 << mdl) - 1) sets bits 0..mdl, i.e.
                 * this level and all levels beneath it; later (higher)
                 * non-MIP levels overwrite earlier ones. */
                if (!_BCM_OAM_EP_IS_MIP(h_data_p)) {
                    *passive_mdl = (1 << mdl) | ((1 << mdl) -1);
                }
            } else {
                /* Log an error. Ideally should not happen. */
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM(unit %d) Error: EP ID mapping get failed (MA_IDX=%d) -"
                           "\n"), unit, ma_base_index+ma_offset));
            }
            ma_offset++;
        }
    }
}
/*
 * Function:
 *     _bcm_kt2_oam_ep_hash_key_construct
 * Purpose:
 *     Construct hash table key for a given endpoint information.
 *     The key is the packed concatenation of group, name, gport, level,
 *     outer vlan, inner vlan and direction (up/down facing).
 * Parameters:
 *     unit    - (IN) BCM device number
 *     oc      - (IN) Pointer to OAM control structure.
 *               NOTE(review): currently unused in this function; kept
 *               for interface consistency with other key constructors.
 *     ep_info - (IN) Pointer to endpoint information structure.
 *               May be NULL, in which case the key is all zeros.
 *     key     - (IN/OUT) Pointer to hash key buffer.
 * Returns:
 *     None
 */
STATIC void
_bcm_kt2_oam_ep_hash_key_construct(int unit,
                                   _bcm_oam_control_t *oc,
                                   bcm_oam_endpoint_info_t *ep_info,
                                   _bcm_oam_hash_key_t *key)
{
    uint8  *loc = *key;       /* Write cursor into the key buffer. */
    uint32 direction = 0;     /* 1 = up-facing MEP, 0 = down-facing. */
    sal_memset(key, 0, sizeof(_bcm_oam_hash_key_t));
    if (NULL != ep_info) {
        if ((ep_info->flags & BCM_OAM_ENDPOINT_UP_FACING)) {
            direction = 1;
        }
        /* Pack the identifying fields in a fixed order; the same order
         * must be used for both insert and lookup. */
        _BCM_OAM_KEY_PACK(loc, &ep_info->group, sizeof(ep_info->group));
        _BCM_OAM_KEY_PACK(loc, &ep_info->name, sizeof(ep_info->name));
        _BCM_OAM_KEY_PACK(loc, &ep_info->gport, sizeof(ep_info->gport));
        _BCM_OAM_KEY_PACK(loc, &ep_info->level, sizeof(ep_info->level));
        _BCM_OAM_KEY_PACK(loc, &ep_info->vlan, sizeof(ep_info->vlan));
        _BCM_OAM_KEY_PACK(loc, &ep_info->inner_vlan,
                          sizeof(ep_info->inner_vlan));
        _BCM_OAM_KEY_PACK(loc, &direction, sizeof(direction));
    }
    /* End address should not exceed size of _bcm_oam_hash_key_t. */
    assert ((int) (loc - *key) <= sizeof(_bcm_oam_hash_key_t));
}
/*
 * Function:
 *     _bcm_oam_egr_lmep_key_construct
 * Purpose:
 *     Construct egress MP group table lookup key for a given endpoint.
 * Parameters:
 *     unit             - (IN) BCM device number
 *     h_data_p         - (IN) Pointer to endpoint hash data memory.
 *     egr_mp_grp_key_p - (IN/OUT) Pointer to entry buffer; caller is
 *                        expected to pass a cleared entry.
 * Returns:
 *     None
 */
STATIC void
_bcm_oam_egr_lmep_key_construct(int unit,
                                const _bcm_oam_hash_data_t *h_data_p,
                                egr_mp_group_entry_t *egr_mp_grp_key_p)
{
    _bcm_oam_control_t *oc = NULL;
    /* coverity[check_return] */
    /* NOTE(review): oc is dereferenced below without a NULL check; this
     * relies on the OAM module being initialized before any endpoint
     * key construction - confirm against callers. */
    _bcm_kt2_oam_control_get(unit, &oc);
    /* Set the valid bit. */
    soc_EGR_MP_GROUPm_field32_set
        (unit, egr_mp_grp_key_p, VALIDf, 1);
    /* Set the search key type (the OAM domain selects the key layout). */
    soc_EGR_MP_GROUPm_field32_set
        (unit, egr_mp_grp_key_p, KEY_TYPEf, h_data_p->oam_domain);
    if (h_data_p->oam_domain != _BCM_OAM_DOMAIN_BHH) {
        /* Set port type status in the search key. */
        if (_BCM_OAM_EP_IS_VP_TYPE(h_data_p)) {
            /* DGLP field contains virtual port. */
            soc_EGR_MP_GROUPm_field32_set
                (unit, egr_mp_grp_key_p, DGLPf, h_data_p->vp);
        } else {
            if (oc->eth_oam_mp_group_vlan_key == 
                    _BCM_KT2_ETH_OAM_MP_GROUP_KEY_DOMAIN_INDEPENDANT) {
                /* In earlier versions of the SDK, both inner & outer vlan was
                 * always programmed in the MP_GROUP irrespective of the MEP
                 * type, this has been changed to program only CVLAN for CVLAN
                 * MEPs and SVLAN for SVLAN MEPs. To avoid breaking warm-upgrade
                 * the old scheme is still supported.
                 */
                soc_EGR_MP_GROUPm_field32_set(unit, egr_mp_grp_key_p,
                                              SVIDf, h_data_p->vlan);
                soc_EGR_MP_GROUPm_field32_set(unit, egr_mp_grp_key_p,
                                              CVIDf, h_data_p->inner_vlan);
            } else {
                if (h_data_p->oam_domain != _BCM_OAM_DOMAIN_CVLAN) {
                    /* SVLAN/C+SVLAN */
                    soc_EGR_MP_GROUPm_field32_set(unit, egr_mp_grp_key_p,
                                                  SVIDf, h_data_p->vlan);
                }
                if (h_data_p->oam_domain != _BCM_OAM_DOMAIN_SVLAN) {
                    /* CVLAN/C+SVLAN */
                    soc_EGR_MP_GROUPm_field32_set(unit, egr_mp_grp_key_p,
                                                  CVIDf, h_data_p->inner_vlan);
                }
            }
            /* DGLP contains generic logical port. */
            soc_EGR_MP_GROUPm_field32_set(unit, egr_mp_grp_key_p,
                                          DGLPf, h_data_p->dglp);
        }
    }
#if defined(INCLUDE_BHH)
    else {
        /* BHH Domain, Key = Next Hop Index
           BHH API have been using L3_EGRESS type in
           bcm_oam_endpoint_info_t.intf field.
           However for supporting LM/DM on Katana2 this should be DVP_EGRESS type,
           only CCM will work with L3_EGRESS type, retaining the old type for
           backward compatibility */
        if (BCM_XGS3_L3_EGRESS_IDX_VALID(unit, h_data_p->egress_if)) {
            soc_EGR_MP_GROUPm_field32_set (unit, egr_mp_grp_key_p, NHIf,
                       h_data_p->egress_if - BCM_XGS3_EGRESS_IDX_MIN(unit));
        } else {
            if (BCM_XGS3_DVP_EGRESS_IDX_VALID(unit, h_data_p->egress_if)) {
                soc_EGR_MP_GROUPm_field32_set (unit, egr_mp_grp_key_p, NHIf,
                       h_data_p->egress_if - BCM_XGS3_DVP_EGRESS_IDX_MIN(unit));
            }
        }
    }
#endif
}
/*
 * Function:
 *     _bcm_kt2_oam_lmep_key_construct
 * Purpose:
 *     Construct LMEP view lookup key for a given endpoint
 *     (ingress L3 table companion of _bcm_oam_egr_lmep_key_construct).
 * Parameters:
 *     unit     - (IN) BCM device number
 *     h_data_p - (IN) Pointer to endpoint hash data memory.
 *     l3_key_p - (IN/OUT) Pointer to entry buffer; caller is expected
 *                to pass a cleared entry.
 * Returns:
 *     None
 */
STATIC void
_bcm_kt2_oam_lmep_key_construct(int unit,
                                const _bcm_oam_hash_data_t *h_data_p,
                                l3_entry_1_entry_t *l3_key_p)
{
    _bcm_oam_control_t *oc = NULL;
    /* coverity[check_return] */
    /* NOTE(review): oc is dereferenced below without a NULL check; this
     * relies on the OAM module being initialized before any endpoint
     * key construction - confirm against callers. */
    _bcm_kt2_oam_control_get(unit, &oc);
    /* Set the search key type. */
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set
        (unit, l3_key_p, KEY_TYPEf, SOC_MEM_KEY_L3_ENTRY_LMEP);
    /* Set the search key sub type (the OAM domain). */
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set
        (unit, l3_key_p, LMEP__OAM_LMEP_KEY_SUBTYPEf, 
                                        h_data_p->oam_domain);
    if (h_data_p->oam_domain != _BCM_OAM_DOMAIN_BHH) {
        /* Set port type status in the search key. */
        if (_BCM_OAM_EP_IS_VP_TYPE(h_data_p)) {
            /* SGLP field contains virtual port. */
            soc_L3_ENTRY_IPV4_UNICASTm_field32_set
                (unit, l3_key_p, LMEP__SGLPf, h_data_p->vp);
        } else {
            if (oc->eth_oam_mp_group_vlan_key == 
                    _BCM_KT2_ETH_OAM_MP_GROUP_KEY_DOMAIN_INDEPENDANT) {
                /* In earlier versions of the SDK, both inner & outer vlan was
                 * always programmed in the MP_GROUP irrespective of the MEP
                 * type, this has been changed to program only CVLAN for CVLAN
                 * MEPs and SVLAN for SVLAN MEPs. To avoid breaking warm-upgrade
                 * the old scheme is still supported.
                 */
                soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, l3_key_p, LMEP__SVIDf,
                                                       h_data_p->vlan);
                soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, l3_key_p, LMEP__CVIDf,
                                                       h_data_p->inner_vlan);
            } else {
                if (h_data_p->oam_domain != _BCM_OAM_DOMAIN_CVLAN) {
                    /* SVLAN/C+SVLAN */
                    soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, l3_key_p,
                                                           LMEP__SVIDf,
                                                           h_data_p->vlan);
                }
                if (h_data_p->oam_domain != _BCM_OAM_DOMAIN_SVLAN) {
                    /* CVLAN/C+SVLAN */
                    soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, l3_key_p,
                                                           LMEP__CVIDf,
                                                           h_data_p->inner_vlan);
                }
            }
            /* SGLP contains generic logical port. */
            soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, l3_key_p, LMEP__SGLPf,
                                                   h_data_p->sglp);
        }
    }
#if defined(INCLUDE_BHH)
    else {
        /* If the label is not global, set port */
        if (bcm_tr_mpls_port_independent_range(unit, h_data_p->label,
                                          BCM_GPORT_INVALID) != BCM_E_NONE) {
            /* SGLP contains generic logical port. */
            soc_L3_ENTRY_IPV4_UNICASTm_field32_set
                (unit, l3_key_p, LMEP__SGLPf, h_data_p->sglp);
        } else {
            /* Global label space - wildcard the source port. */
            soc_L3_ENTRY_IPV4_UNICASTm_field32_set
                (unit, l3_key_p, LMEP__SGLPf, 0xFFFF);
        }
        /* Set incoming MPLS Label */
        soc_L3_ENTRY_IPV4_UNICASTm_field32_set
            (unit, l3_key_p, LMEP__MPLS_LABELf, h_data_p->label);
    }
#endif
}
/*
 * Function:
 *     _bcm_kt2_oam_rmep_key_construct
 * Purpose:
 *     Construct RMEP view lookup key for a given endpoint.
 *     A remote endpoint is identified by its MEP ID plus the pointer
 *     to the MA group it belongs to.
 * Parameters:
 *     unit     - (IN) BCM device number
 *     h_data_p - (IN) Pointer to endpoint hash data memory.
 *     l3_key_p - (IN/OUT) Pointer to entry buffer; caller is expected
 *                to pass a cleared entry.
 * Returns:
 *     None
 */
STATIC void
_bcm_kt2_oam_rmep_key_construct(int unit,
                                const _bcm_oam_hash_data_t *h_data_p,
                                l3_entry_1_entry_t *l3_key_p)
{
    /* Set the search key type. */
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set
        (unit, l3_key_p, KEY_TYPEf, SOC_MEM_KEY_L3_ENTRY_RMEP);
    /* Set endpoint name (the CFM MEP ID). */
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set
        (unit, l3_key_p, RMEP__MEPIDf, h_data_p->name);
    /* Set MA_PTR (index of the endpoint's MA group). */
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set
        (unit, l3_key_p, RMEP__MA_PTRf, h_data_p->group_index);
}
#if defined(KEY_PRINT)
/*
 * Function:
 *     _bcm_oam_hash_key_print
 * Purpose:
 *     Print the contents of a hash key buffer. Debug aid, compiled in
 *     only when KEY_PRINT is defined.
 * Parameters:
 *     hash_key - (IN) Pointer to a single hash key buffer.
 * Returns:
 *     None
 */
STATIC void
_bcm_oam_hash_key_print(_bcm_oam_hash_key_t *hash_key)
{
    int i;
    /* Use BSL_META rather than BSL_META_U: no 'unit' parameter exists
     * here, so the previous BSL_META_U(unit, ...) referenced an
     * undeclared identifier and failed to compile with KEY_PRINT set. */
    LOG_CLI((BSL_META("HASH KEY:")));
    for (i = 0; i < _OAM_HASH_KEY_SIZE; i++) {
        /* Print byte i of the key: (*hash_key)[i]. The previous
         * *(hash_key[i]) treated the argument as an array of keys and
         * read the first byte of key i, walking past the single key
         * passed in. */
        LOG_CLI((BSL_META(":%u"), (*hash_key)[i]));
    }
    LOG_CLI((BSL_META("\n")));
}
#endif
/*
 * Function:
 *     _bcm_kt2_oam_control_get
 * Purpose:
 *     Lookup a OAM control config from a bcm device id.
 * Parameters:
 *     unit - (IN)  BCM unit number.
 *     oc   - (OUT) OAM control structure.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_control_get(int unit, _bcm_oam_control_t **oc)
{
    /* The caller must supply a location to return the pointer in. */
    if (oc == NULL) {
        return (BCM_E_PARAM);
    }

    /* Ensure oam module is initialized. */
    _BCM_OAM_IS_INIT(unit);

    *oc = _kt2_oam_control[unit];

    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_group_endpoint_count_init
 * Purpose:
 *     Retrieves and initializes endpoint count information for this device.
 *     All counts are read from the sizes of the relevant hardware tables.
 * Parameters:
 *     unit - (IN) BCM unit number.
 *     oc   - (IN) Pointer to device OAM control structure.
 * Returns:
 *     BCM_E_PARAM if oc is NULL, else BCM_E_NONE.
 */
STATIC int
_bcm_kt2_oam_group_endpoint_count_init(int unit, _bcm_oam_control_t *oc)
{
    /* Input parameter check. */
    if (NULL == oc) {
        return (BCM_E_PARAM);
    }
    /*
     * Get endpoint hardware table index count values and
     * initialize device OAM control structure members variables.
     */
    oc->rmep_count = soc_mem_index_count(unit, RMEPm);
    oc->lmep_count = soc_mem_index_count(unit, LMEPm);
    oc->ma_idx_count = soc_mem_index_count(unit, MA_INDEXm);
    oc->egr_ma_idx_count = soc_mem_index_count(unit, EGR_MA_INDEXm);
    /* Max number of endpoints supported by the device
     * (remote + local + ingress MA index + egress MA index). */
    oc->ep_count = (oc->rmep_count + oc->lmep_count + oc->ma_idx_count +
                    oc->egr_ma_idx_count);
    LOG_VERBOSE(BSL_LS_BCM_OAM,
                (BSL_META_U(unit,
                            "OAM Info: Total No. endpoint Count = %d.\n"),
                 oc->ep_count));
    /* Max number of MA Groups supported by device. */
    oc->group_count = soc_mem_index_count(unit, MA_STATEm);
    LOG_VERBOSE(BSL_LS_BCM_OAM,
                (BSL_META_U(unit,
                            "OAM Info: Total No. Group Count = %d.\n"),
                 oc->group_count));
    /* Number of loss-measurement counters available. */
    oc->lm_counter_cnt = soc_mem_index_count(unit, OAM_LM_COUNTERS_0m);
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_ccm_rx_timeout_set
 * Purpose:
 *     Enable/disable CCM Timer operations for endpoint state table.
 * Parameters:
 *     unit  - (IN) BCM unit number.
 *     state - (IN) Non-zero to enable, zero to disable.
 * Returns:
 *     BCM_E_XXX
 * Note:
 *     RMEP_MA_STATE_REFRESH_INDEXr - CPU access for debug only.
 */
STATIC int
_bcm_kt2_oam_ccm_rx_timeout_set(int unit, uint8 state)
{
    int rv;          /* Operation return status. */
    uint32 rval = 0; /* Register value. */
    /* Enable timer instructions to RMEP/MA_STATE Table. */
    soc_reg_field_set(unit, OAM_TIMER_CONTROLr, &rval,
                      TIMER_ENABLEf, state ? 1 : 0);
    /* Set Clock granularity to 250us ticks - 1. */
    soc_reg_field_set(unit, OAM_TIMER_CONTROLr, &rval,
                      CLK_GRANf, 1);
    rv = WRITE_OAM_TIMER_CONTROLr(unit, rval);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Timer enable - Failed.\n")));
        return (rv);
    }
    return (rv);
}
/*
 * Function:
 *     _bcm_kt2_oam_ccm_tx_config_set
 * Purpose:
 *     Enable/disable transmission of OAM PDUs on local endpoints.
 * Parameters:
 *     unit  - (IN) BCM unit number.
 *     state - (IN) Non-zero to enable, zero to disable.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_ccm_tx_config_set(int unit, uint8 state)
{
    int rv;          /* Operation return status. */
    uint32 rval = 0; /* Register value. */
    /* Enable OAM LMEP Tx. */
    soc_reg_field_set(unit, OAM_TX_CONTROLr, &rval,
                      TX_ENABLEf, state ? 1 : 0);
    /* Enable CMIC buffer. */
    soc_reg_field_set(unit, OAM_TX_CONTROLr, &rval,
                      CMIC_BUF_ENABLEf, state ? 1 : 0);
    rv = WRITE_OAM_TX_CONTROLr(unit, rval);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Tx config enable - Failed.\n")));
    }
    return (rv);
}
/*
 * Function:
 *     _bcm_kt2_oam_misc_config
 * Purpose:
 *     Miscellaneous OAM configurations:
 *     1. Enable IFP lookup on the CPU port.
 * Parameters:
 *     unit - (IN) BCM unit number.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_misc_config(int unit)
{
    int status; /* Operation return status. */

    /*
     * Enable ingress FP for CPU port so LM/DM packets sent from CPU
     * can be processed.
     */
    status = bcm_esw_port_control_set(unit, CMIC_PORT(unit),
                                      bcmPortControlFilterIngress, 1);
    if (BCM_FAILURE(status)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: bcm_esw_port_control_set"
                               " - Failed.\n")));
    }
    return (status);
}
/*
 * Function:
 *     _bcm_kt2_oam_profile_tables_init
 * Purpose:
 *     Create ingress and egress service priority mapping profile table and
 *     setup a default profile. Create Ingress and egress OAM opcode control
 *     profile table, dglp profile.
 *     Default entries are only installed on cold boot; on warm boot the
 *     profile memories are recovered elsewhere.
 * Parameters:
 *     unit - (IN) BCM unit number.
 *     oc   - (IN) Pointer to OAM control structure.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_profile_tables_init(int unit, _bcm_oam_control_t *oc)
{
    int rv = BCM_E_NONE;/* Operation return status. */
    soc_mem_t mem;      /* Profiled table memory.   */
    int entry_words;    /* Profile table word size. */
    int pri;            /* Priority */
    void *entries[1];   /* Profile entry. */
    uint32 profile_index; /* Profile table index. */
    ing_service_pri_map_entry_t pri_ent[BCM_OAM_INTPRI_MAX]; /* ing profile */
                                                             /* entry */
    egr_service_pri_map_entry_t egr_pri_ent[BCM_OAM_INTPRI_MAX]; /*egr profile*/
                                                                 /* entry */
    bcm_module_t modid;   /* Local module id, used to build the default DGLP. */
    uint32 dglp = 0;      /* Default destination GLP (local CPU port). */
    egr_oam_dglp_profile_entry_t egr_dglp_profile_entry;
    ing_oam_dglp_profile_entry_t ing_dglp_profile_entry;
    oam_opcode_control_profile_entry_t opcode_entry; /* Opcode control
                                                        profile entry. */
    egr_oam_opcode_control_profile_entry_t egr_opcode_entry;

    /* Ingress Service Priority Map profile table initialization. */
    soc_profile_mem_t_init(&oc->ing_service_pri_map);
    entry_words = (sizeof(ing_service_pri_map_entry_t) / sizeof(uint32));
    mem = ING_SERVICE_PRI_MAPm;
    rv = soc_profile_mem_create(unit, &mem, &entry_words, 1,
                                &oc->ing_service_pri_map);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: service map profile - Failed.\n")));
        return (rv);
    }
    if (!SOC_WARM_BOOT(unit)) {
        /*
         * Initialize ingress priority map profile table.
         * All priorities priorities map to priority:'0'.
         */
        for (pri = 0; pri < BCM_OAM_INTPRI_MAX; pri++)
        {
            /* Clear ingress service pri map profile entry. */
            sal_memcpy(&pri_ent[pri], soc_mem_entry_null(unit, mem),
                       soc_mem_entry_words(unit, mem) * sizeof(uint32));
            if (SOC_MEM_FIELD_VALID(unit, mem, OFFSET_VALIDf)) {
                soc_mem_field32_set(unit, mem, &pri_ent[pri], OFFSET_VALIDf, 1);
            }
        }
        entries[0] = &pri_ent;
        rv = soc_profile_mem_add(unit, &oc->ing_service_pri_map,
                                 (void *)entries, BCM_OAM_INTPRI_MAX,
                                 &profile_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: service map init - Failed.\n")));
            return (rv);
        }
    }
    /* Egress Service Priority Map profile table initialization. */
    soc_profile_mem_t_init(&oc->egr_service_pri_map);
    entry_words = (sizeof(egr_service_pri_map_entry_t) / sizeof(uint32));
    mem = EGR_SERVICE_PRI_MAPm;
    rv = soc_profile_mem_create(unit, &mem, &entry_words, 1,
                                &oc->egr_service_pri_map);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Egr service map profile - Failed.\n")));
        return (rv);
    }
    if (!SOC_WARM_BOOT(unit)) {
        /*
         * Initialize Egress priority map profile table.
         * All priorities priorities map to priority:'0'.
         */
        for (pri = 0; pri < BCM_OAM_INTPRI_MAX; pri++)
        {
            /* Clear egress service pri map profile entry. */
            sal_memcpy(&egr_pri_ent[pri], soc_mem_entry_null(unit, mem),
                       soc_mem_entry_words(unit, mem) * sizeof(uint32));
            if (SOC_MEM_FIELD_VALID(unit, mem, OFFSET_VALIDf)) {
                soc_mem_field32_set(unit, mem, 
                                 &egr_pri_ent[pri], OFFSET_VALIDf, 1);
            }
        }
        entries[0] = &egr_pri_ent;
        rv = soc_profile_mem_add(unit, &oc->egr_service_pri_map,
                                 (void *)entries, BCM_OAM_INTPRI_MAX,
                                 &profile_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Egress service map init - Failed.\n")));
            return (rv);
        }
    }
    /* OAM Opcode Control profile table initialization. */
    soc_profile_mem_t_init(&oc->oam_opcode_control_profile);
    entry_words = sizeof(oam_opcode_control_profile_entry_t)
                    / sizeof(uint32);
    mem = OAM_OPCODE_CONTROL_PROFILEm;
    rv = soc_profile_mem_create(unit, &mem, &entry_words, 1,
                                &oc->oam_opcode_control_profile);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: opcode control profile - Failed.\n")));
        return (rv);
    }
    if (!SOC_WARM_BOOT(unit)) {
        /* Create default opcode control profile */
        sal_memset(&opcode_entry, 0, sizeof(oam_opcode_control_profile_entry_t));
        rv = _bcm_kt2_oam_opcode_profile_entry_init(unit, mem, &opcode_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Opcode profile init failed "
                                   " %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
        /* Add entry to profile table. */
        entries[0] = &opcode_entry;
        rv = soc_profile_mem_add(unit, &oc->oam_opcode_control_profile,
                                 (void *)entries, 1, &profile_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Opcode profile table is full - %s.\n"),
                       bcm_errmsg(rv)));
            return rv;
        }
    }
    /* Egress OAM Opcode Control profile table initialization. */
    soc_profile_mem_t_init(&oc->egr_oam_opcode_control_profile);
    entry_words = sizeof(egr_oam_opcode_control_profile_entry_t)
                    / sizeof(uint32);
    mem = EGR_OAM_OPCODE_CONTROL_PROFILEm;
    rv = soc_profile_mem_create(unit, &mem, &entry_words, 1,
                                &oc->egr_oam_opcode_control_profile);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Egress opcode control profile- Failed.\n")));
        return (rv);
    }
    if (!SOC_WARM_BOOT(unit)) {
        /* Create default opcode control profile */
        sal_memset(&egr_opcode_entry, 0,
                   sizeof(egr_oam_opcode_control_profile_entry_t));
        rv = _bcm_kt2_oam_opcode_profile_entry_init(unit, mem, &egr_opcode_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Opcode profile init failed "
                                   " %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
        /* Add entry to profile table. */
        entries[0] = &egr_opcode_entry;
        rv = soc_profile_mem_add(unit, &oc->egr_oam_opcode_control_profile,
                                 (void *)entries, 1, &profile_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Egr Opcode profile table is full - %s.\n"),
                       bcm_errmsg(rv)));
            return rv;
        }
    }
    /* Ingress OAM dglp profile table initialisation */
    soc_profile_mem_t_init(&oc->ing_oam_dglp_profile);
    entry_words = sizeof(ing_oam_dglp_profile_entry_t) / sizeof(uint32);
    mem = ING_OAM_DGLP_PROFILEm;
    rv = soc_profile_mem_create(unit, &mem, &entry_words, 1,
                                &oc->ing_oam_dglp_profile);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: ING OAM DGLP profile create - Failed.\n")));
        return (rv);
    }
    if (!SOC_WARM_BOOT(unit)) {
        /* Add default profile */
        BCM_IF_ERROR_RETURN(bcm_esw_stk_my_modid_get(unit, &modid));
        /* put local CPU as default dglp */
        dglp = CMIC_PORT(unit);
        dglp |= ((modid << DGLP_MODULE_ID_SHIFT_BITS));
        /* Clear the stack entry first: soc_mem_field32_set() only writes
         * the DGLP field; without the memset the remaining fields of the
         * profile entry would carry uninitialized stack garbage into
         * hardware. */
        sal_memset(&ing_dglp_profile_entry, 0,
                   sizeof(ing_oam_dglp_profile_entry_t));
        soc_mem_field32_set(unit, ING_OAM_DGLP_PROFILEm,
                            &ing_dglp_profile_entry, DGLPf, dglp);
        entries[0] = &ing_dglp_profile_entry;
        rv = soc_profile_mem_add(unit, &oc->ing_oam_dglp_profile,
                                 (void *)entries, 1, &profile_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM DGLP profile table is full - %s.\n"),
                       bcm_errmsg(rv)));
            return rv;
        }
    }
    /* Egress OAM dglp profile table initialisation */
    soc_profile_mem_t_init(&oc->egr_oam_dglp_profile);
    entry_words = sizeof(egr_oam_dglp_profile_entry_t) / sizeof(uint32);
    mem = EGR_OAM_DGLP_PROFILEm;
    rv = soc_profile_mem_create(unit, &mem, &entry_words, 1,
                                &oc->egr_oam_dglp_profile);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: EGR OAM DGLP profile create - Failed.\n")));
        return (rv);
    }
    if (!SOC_WARM_BOOT(unit)) {
        /* Reuses the default 'dglp' computed in the ingress cold-boot
         * block above (warm boot skips both blocks together). Clear the
         * entry before setting the field - see note on the ingress
         * profile entry. */
        sal_memset(&egr_dglp_profile_entry, 0,
                   sizeof(egr_oam_dglp_profile_entry_t));
        soc_mem_field32_set(unit, EGR_OAM_DGLP_PROFILEm, 
                            &egr_dglp_profile_entry, DGLPf, dglp);
        entries[0] = &egr_dglp_profile_entry;
        rv = soc_profile_mem_add(unit, &oc->egr_oam_dglp_profile,
                                 (void *)entries, 1, &profile_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM DGLP profile table is full - %s.\n"),
                       bcm_errmsg(rv)));
            return rv;
        }
    }
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_control_free
* Purpose:
* Free OAM control structure resources allocated by this unit.
* Parameters:
* unit - (IN) BCM unit number.
* oc - (IN) Pointer to OAM control structure.
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_control_free(int unit, _bcm_oam_control_t *oc)
{
int status = 0;
_kt2_oam_control[unit] = NULL;
if (NULL == oc) {
/* Module already un-initialized. */
return (BCM_E_NONE);
}
/* Free protection mutex. */
if (NULL != oc->oc_lock) {
sal_mutex_destroy(oc->oc_lock);
}
/* Free hash data storage memory. */
if (NULL != oc->oam_hash_data) {
sal_free(oc->oam_hash_data);
}
/* Destory endpoint hash table. */
if (NULL != oc->ma_mep_htbl) {
status = shr_htb_destroy(&oc->ma_mep_htbl, NULL);
if (BCM_FAILURE(status)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"Freeing ma_mep_htbl failed\n")));
}
}
/* Destory group indices list. */
if (NULL != oc->group_pool) {
shr_idxres_list_destroy(oc->group_pool);
oc->group_pool = NULL;
}
/* Destroy endpoint indices list. */
if (NULL != oc->mep_pool) {
shr_idxres_list_destroy(oc->mep_pool);
oc->mep_pool = NULL;
}
/* Destroy local endpoint indices list. */
if (NULL != oc->lmep_pool) {
shr_idxres_list_destroy(oc->lmep_pool);
oc->lmep_pool = NULL;
}
/* Destroy remote endpoint indices list. */
if (NULL != oc->rmep_pool) {
shr_idxres_list_destroy(oc->rmep_pool);
oc->rmep_pool = NULL;
}
/* Destroy ingress and egress group indices list. */
BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ma_idx_pool_destroy(oc));
if (NULL != oc->egr_ma_idx_pool) {
shr_idxres_list_destroy(oc->egr_ma_idx_pool);
oc->egr_ma_idx_pool = NULL;
}
/* Destroy LM counter indices list. */
if (NULL != oc->ing_lm_ctr_pool[0]) {
shr_aidxres_list_destroy(oc->ing_lm_ctr_pool[0]);
oc->ing_lm_ctr_pool[0] = NULL;
}
if (NULL != oc->ing_lm_ctr_pool[1]) {
shr_aidxres_list_destroy(oc->ing_lm_ctr_pool[1]);
oc->ing_lm_ctr_pool[1] = NULL;
}
if (soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
/* Destroy bhh indices list. */
if (NULL != oc->bhh_pool) {
shr_idxres_list_destroy(oc->bhh_pool);
oc->bhh_pool = NULL;
}
if (NULL != oc->dma_buffer){
soc_cm_sfree(oc->unit, oc->dma_buffer);
oc->dma_buffer = NULL;
}
if (NULL != oc->dmabuf_reply){
soc_cm_sfree(oc->unit, oc->dmabuf_reply);
oc->dmabuf_reply = NULL;
}
if (NULL != oc->ing_lm_sec_mep_ctr_pool) {
shr_aidxres_list_destroy(oc->ing_lm_sec_mep_ctr_pool);
oc->ing_lm_sec_mep_ctr_pool = NULL;
}
#endif
}
if (soc_feature(unit, soc_feature_oam_pm)) {
#if defined (INCLUDE_BHH)
if (_BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_ENABLED(oc)) {
soc_cm_sfree(oc->unit, oc->pm_bhh_dma_buffer);
}
#endif
}
/* Free group memory. */
if (NULL != oc->group_info) {
sal_free(oc->group_info);
}
/* Free RMEP H/w to logical index mapping memory. */
if (NULL != oc->remote_endpoints) {
sal_free(oc->remote_endpoints);
}
/* Destroy ingress serivce priority mapping profile. */
if (NULL != oc->ing_service_pri_map.tables) {
soc_profile_mem_destroy(unit, &oc->ing_service_pri_map);
}
/* Destroy Egress serivce priority mapping profile. */
if (NULL != oc->egr_service_pri_map.tables) {
soc_profile_mem_destroy(unit, &oc->egr_service_pri_map);
}
/* Destroy Ingress OAM opcode control profile. */
if (NULL != oc->oam_opcode_control_profile.tables) {
soc_profile_mem_destroy(unit, &oc->oam_opcode_control_profile);
}
/* Destroy Egress OAM opcode control profile. */
if (NULL != oc->egr_oam_opcode_control_profile.tables) {
soc_profile_mem_destroy(unit, &oc->egr_oam_opcode_control_profile);
}
/* Destroy Ingress DGLP profile */
if (NULL != oc->ing_oam_dglp_profile.tables) {
soc_profile_mem_destroy(unit, &oc->ing_oam_dglp_profile);
}
/* Destroy Egress DGLP profile */
if (NULL != oc->egr_oam_dglp_profile.tables) {
soc_profile_mem_destroy(unit, &oc->egr_oam_dglp_profile);
}
#if defined(INCLUDE_BHH)
if (soc_feature(unit, soc_feature_oam_pm)) {
if (kt2_pm_profile_control[unit]) {
sal_free(kt2_pm_profile_control[unit]);
kt2_pm_profile_control[unit] = NULL;
}
}
#endif /* INCLUDE_BHH */
/* Free OAM control structure memory. */
sal_free(oc);
oc = NULL;
return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_handle_interrupt
 * Purpose:
 *     Process OAM interrupts generated by endpoints.  For every
 *     supported interrupt the status register is read, then cleared,
 *     and every registered event handler whose event mask matches is
 *     invoked with the decoded group/endpoint information.
 * Parameters:
 *     unit  - (IN) BCM unit number.
 *     field - (IN) fault field (unused in the loop below; kept for the
 *             interrupt-handler signature).
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_handle_interrupt(int unit, soc_field_t field)
{
    _bcm_oam_interrupt_t *intr;            /* OAM interrupt. */
    _bcm_oam_control_t   *oc;              /* OAM control structure. */
    uint32               intr_rval;        /* Interrupt register value. */
    uint32               intr_cur_status;  /* Interrupt status. */
    uint32               flags;            /* Interrupt flags. */
    bcm_oam_group_t      grp_index;        /* MA group index. */
    bcm_oam_endpoint_t   remote_ep_index;  /* Remote Endpoint index. */
    int                  intr_multi;       /* Event occurred multiple times. */
    int                  intr_count;       /* No. of times event detected. */
    int                  rv;               /* Operation return status. */
    _bcm_oam_event_handler_t *e_handler;   /* Pointer to event handler. */

    /* Get OAM control structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Hold the OAM lock across the whole scan so handler registration
     * cannot change underneath the callback loop. */
    _BCM_OAM_LOCK(oc);

    /* Loop through the supported interrupts for this device.  The table
     * is terminated by an INVALIDr status register. */
    for (intr = _kt2_oam_interrupts; intr->status_register != INVALIDr;
         intr++) {

        /* Read the latched interrupt status.  On read failure skip this
         * interrupt rather than aborting the whole scan. */
        rv = soc_reg32_get(unit, intr->status_register,
                           REG_PORT_ANY, 0, &intr_rval);
        if (BCM_FAILURE(rv)) {
            continue;
        }

        /* Clear the status register immediately after reading it so a
         * re-assertion between now and the callback is not lost. */
        rv = soc_reg32_set(unit, intr->status_register,
                           REG_PORT_ANY, 0, 0);
        if (BCM_FAILURE(rv)) {
            continue;
        }

        /* Get status of interrupt from hardware. */
        intr_cur_status = soc_reg_field_get(unit, intr->status_register,
                                            intr_rval, VALIDf);
        if (0 == intr_cur_status) {
            /* This interrupt event is not valid, so continue. */
            continue;
        }

        /* Number of handlers registered for this event type; skip the
         * decode work entirely if nobody is listening. */
        intr_count = oc->event_handler_cnt[intr->event_type];

        /* Check if the interrupt is set. */
        if ((1 == intr_cur_status) && (intr_count > 0)) {

            flags = 0;

            /* Get MA group index for this interrupt. */
            if (INVALIDf != intr->group_index_field ) {
                grp_index = soc_reg_field_get(unit, intr->status_register,
                                              intr_rval,
                                              intr->group_index_field);
            } else {
                /* Group not valid for this interrupt. */
                grp_index = BCM_OAM_GROUP_INVALID;
            }

            /* Get H/w index of RMEP for this interrupt. */
            if (INVALIDf != intr->endpoint_index_field ) {
                remote_ep_index = soc_reg_field_get(unit, intr->status_register,
                                                    intr_rval,
                                                    intr->endpoint_index_field);
                /* Get logical index from H/w index for this RMEP. */
                remote_ep_index = oc->remote_endpoints[remote_ep_index];
            } else {
                /* Endpoint not valid for this interrupt. */
                remote_ep_index = BCM_OAM_ENDPOINT_INVALID;
            }

            /* Get interrupt MULTIf status. */
            intr_multi = soc_reg_field_get(unit, intr->status_register,
                                           intr_rval, MULTIf);
            if (1 == intr_multi) {
                /*
                 * Interrupt event asserted more than once.
                 * Set flags status bit to indicate event multiple occurrence.
                 */
                flags |= BCM_OAM_EVENT_FLAGS_MULTIPLE;
            }

            /* Check and call all the handlers registered for this event. */
            for (e_handler = oc->event_handler_list_p; e_handler != NULL;
                 e_handler = e_handler->next_p) {
                /* Check if an event handler is registered for this
                 * event type. */
                if (SHR_BITGET(e_handler->event_types.w, intr->event_type)) {
                    /* Call the event handler with the call back parameters. */
                    e_handler->cb(unit, flags, intr->event_type, grp_index,
                                  remote_ep_index, e_handler->user_data);
                }
            }
        }
    }
    _BCM_OAM_UNLOCK(oc);
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_events_unregister
 * Purpose:
 *     Unregister all OAM events for this unit by freeing every node of
 *     the event handler linked list and resetting the list head.
 * Parameters:
 *     unit - (IN) BCM unit number.
 *     oc   - (IN) Pointer to OAM control structure.
 * Returns:
 *     BCM_E_NONE
 * Notes:
 *     The OAM control lock must be held by the caller.
 */
STATIC int
_bcm_kt2_oam_events_unregister(int unit, _bcm_oam_control_t *oc)
{
    _bcm_oam_event_handler_t *node_p;  /* Node being freed.           */
    _bcm_oam_event_handler_t *next_p;  /* Saved successor of node_p.  */

    /* Walk the list, saving each node's successor before freeing it. */
    for (node_p = oc->event_handler_list_p; NULL != node_p; node_p = next_p) {
        next_p = node_p->next_p;
        sal_free(node_p);
    }

    /* List is empty now. */
    oc->event_handler_list_p = NULL;
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_clear_rmep
 * Purpose:
 *     Delete RMEP entry or install fresh RMEP entry in the hardware,
 *     clearing the faults.
 *
 * Parameters:
 *     unit     - (IN) BCM device number
 *     h_data_p - (IN) Pointer to endpoint info associated with RMEP entry
 *                being cleared.
 *     valid    - (IN) 0 => Delete the H/w entry
 *                     1 => Install fresh entry with faults cleared.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_clear_rmep (int unit, _bcm_oam_hash_data_t *h_data_p, int valid)
{
    rmep_entry_t rmep_entry;           /* RMEP table entry. */
    int          rv = BCM_E_INTERNAL;  /* Operation return status entry */
    uint32       oam_cur_time;         /* Current time in H/w state machine */

    /* Validate the argument BEFORE dereferencing it.  (Bug fix: the
     * original logged h_data_p->remote_index ahead of this NULL check,
     * which would crash on a NULL argument.) */
    if (h_data_p == NULL) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM ERR: Arg h_data_p NULL check failed\n")));
        return BCM_E_INTERNAL;
    }

    LOG_VERBOSE(BSL_LS_BCM_OAM,
                (BSL_META_U(unit,
                            "OAM: EP id %d, valid %d\n"),
                 h_data_p->remote_index, valid));

    /* RMEP table programming starts from an all-zero entry. */
    sal_memset(&rmep_entry, 0, sizeof(rmep_entry_t));

    /* If valid==0 delete the RMEP entry (write the zero entry),
     * else build and install a clean RMEP entry below. */
    if (!(valid)) {
        rv = WRITE_RMEPm(unit, MEM_BLOCK_ALL, h_data_p->remote_index,
                         &rmep_entry);
        if (rv != BCM_E_NONE) {
            /* Typo fix: "failied" -> "failed". */
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM ERR: Deleting RMEP entry failed\n")));
        }
        return (rv);
    }

    /* Set the MA group index. */
    soc_RMEPm_field32_set(unit, &rmep_entry, MAID_INDEXf,
                          h_data_p->group_index);

    /*
     * The following steps are necessary to enable CCM timeout events
     * without having received any CCMs: seed the timestamp with the
     * current hardware time and program the expected CCM period.
     */
    soc_RMEPm_field32_set(unit, &rmep_entry, RMEP_TIMESTAMP_VALIDf, 1);
    BCM_IF_ERROR_RETURN(READ_OAM_CURRENT_TIMEr(unit, &oam_cur_time));
    soc_RMEPm_field32_set(unit, &rmep_entry, RMEP_TIMESTAMPf, oam_cur_time);
    soc_RMEPm_field32_set(unit, &rmep_entry, RMEP_RECEIVED_CCMf,
                          _bcm_kt2_oam_ccm_msecs_to_hw_encode(h_data_p->period));
    /* End of timeout setup */

    soc_RMEPm_field32_set(unit, &rmep_entry, VALIDf, 1);

    rv = WRITE_RMEPm(unit, MEM_BLOCK_ALL, h_data_p->remote_index,
                     &rmep_entry);
    if (rv != BCM_E_NONE) {
        /* Typo fix: "failied" -> "failed". */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM ERR: Clearing RMEP entry failed\n")));
        return (rv);
    }
    return BCM_E_NONE;
}
/*
 * Function:
 *     _bcm_kt2_oam_clear_ma_state
 * Purpose:
 *     Delete an MA_STATE entry, or install a fresh MA_STATE entry in
 *     the hardware with all faults cleared.
 * Parameters:
 *     unit       - (IN) BCM device number
 *     group_info - (IN) Group info associated with the MA_STATE entry
 *                  being cleared.
 *     index      - (IN) H/w index of MA_STATE entry to be modified.
 *     valid      - (IN) 0 => Delete the H/w entry
 *                       1 => Install fresh entry with faults cleared.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_clear_ma_state(int unit, _bcm_oam_group_data_t *group_info,
                            int index, int valid)
{
    ma_state_entry_t ms_entry;  /* MA_STATE table entry buffer. */

    LOG_VERBOSE(BSL_LS_BCM_OAM,
                (BSL_META_U(unit,
                            "OAM *group_info %p, index %d, valid %d\n"),
                 group_info, index, valid));

    /* Guard against a NULL group pointer. */
    if (NULL == group_info) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM ERR: Arg group_info NULL check failed\n")));
        return BCM_E_INTERNAL;
    }

    /* Start from a zeroed entry; all fault fields come up cleared. */
    sal_memset(&ms_entry, 0, sizeof(ms_entry));

    /* Valid bit: 0 deletes the entry, 1 installs a fresh one. */
    soc_MA_STATEm_field32_set(unit, &ms_entry, VALIDf, valid);

    /* A fresh entry also carries the group's lowest alarm priority. */
    if (valid) {
        soc_MA_STATEm_field32_set(unit, &ms_entry, LOWESTALARMPRIf,
                                  group_info->lowest_alarm_priority);
    }

    /* Commit the entry to hardware. */
    SOC_IF_ERROR_RETURN(WRITE_MA_STATEm(unit, MEM_BLOCK_ALL, index,
                                        &ms_entry));
    return BCM_E_NONE;
}
/*
 * Function:
 *     _bcm_kt2_oam_group_recreate
 * Purpose:
 *     Recreate MA_STATE and RMEP entries, due to current H/w limitation.
 *     Currently there is a race condition between H/w and software:
 *     when an RMEP is deleted and it has any faults, the MA_STATE
 *     remote fault counters need to be decremented by software.
 *     However it may so happen that after reading RMEP faults and just
 *     before RMEP deletion, the fault gets cleared and H/w already
 *     decremented the MA_STATE table; if software then decrements
 *     MA_STATE again it ends up in an inconsistent state.
 *     As a solution we always recreate the entire group on RMEP deletion:
 *     delete every RMEP, reset MA_STATE, then reinstall every RMEP.
 * Parameters:
 *     unit  - (IN) BCM device number
 *     index - (IN) Group index; as per current implementation this is
 *             the same as the MA_STATE index.
 * Returns:
 *     BCM_E_XXX
 * Expects:
 *     OAM LOCK held by the caller.
 */
STATIC int
_bcm_kt2_oam_group_recreate(int unit, int index)
{
    _bcm_oam_control_t    *oc;                /* OAM control structure. */
    _bcm_oam_group_data_t *group_info;        /* OAM group info */
    _bcm_oam_ep_list_t    *cur_ep_ptr = NULL; /* Pointer to group's EP list */
                                              /* entry. */
    int                   rv = BCM_E_NONE;    /* Operation return status. */

    /* Get OAM control structure handle. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Get the handle to group info. */
    group_info = &(oc->group_info[index]);

    /* If unused group, just clear the MA_STATE index, including the
     * valid bit.  Note this intentionally does NOT return early:
     * processing falls through to the (empty) endpoint walk below. */
    if (!(group_info->in_use)) {
        LOG_WARN(BSL_LS_BCM_OAM,
                 (BSL_META_U(unit,
                             "OAM, WARN: Recieved group recreate request for "
                             "unused Group Id %d\n"), index));
        rv = _bcm_kt2_oam_clear_ma_state(unit, group_info, index, 0);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: MA_STATE clear failed group id %d - "
                                  "%s.\n"), index, bcm_errmsg(rv)));
            return rv;
        }
    }

    /* 1. Traverse and delete the RMEPs in the group. */
    if (group_info->ep_list != NULL) {
        cur_ep_ptr = *(group_info->ep_list);
    }
    while (cur_ep_ptr) {
        /* Only remote, in-use endpoints have RMEP table entries. */
        if (cur_ep_ptr->ep_data_p->is_remote && cur_ep_ptr->ep_data_p->in_use) {
            rv = _bcm_kt2_oam_clear_rmep(unit, cur_ep_ptr->ep_data_p, 0);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: RMEP delete failed Ep id %d - "
                                      "%s.\n"), cur_ep_ptr->ep_data_p->ep_id, bcm_errmsg(rv)));
                return rv;
            }
        }
        cur_ep_ptr = cur_ep_ptr->next;
    }

    /* 2. Clear the MA_STATE table faults (reinstall as valid). */
    rv = _bcm_kt2_oam_clear_ma_state(unit, group_info, index, 1);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MA_STATE clear failed group id %d - "
                              "%s.\n"), index, bcm_errmsg(rv)));
        return rv;
    }

    /* 3. Recreate the RMEP table entries, clearing the faults. */
    if (group_info->ep_list != NULL) {
        cur_ep_ptr = *(group_info->ep_list);
    }
    while (cur_ep_ptr) {
        if (cur_ep_ptr->ep_data_p->is_remote && cur_ep_ptr->ep_data_p->in_use) {
            rv = _bcm_kt2_oam_clear_rmep(unit, cur_ep_ptr->ep_data_p, 1);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: RMEP clear failed EP id %d - "
                                      "%s.\n"), cur_ep_ptr->ep_data_p->ep_id, bcm_errmsg(rv)));
                return rv;
            }
        }
        cur_ep_ptr = cur_ep_ptr->next;
    }
    return rv;
}
/*
 * Function:
 *     _bcm_kt2_oam_ser_handler
 * Purpose:
 *     Soft Error (parity) Correction routine for the MA_STATE and RMEP
 *     tables.  Recovers the corrupted entry by either zeroing it (when
 *     the index is not in use) or recreating the whole affected group.
 * Parameters:
 *     unit  - (IN) BCM device number
 *     mem   - (IN) Memory: MA_STATEm or RMEPm.
 *     index - (IN) Memory index reported by the SER logic.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_ser_handler(int unit, soc_mem_t mem, int index)
{
    int                  rv = BCM_E_NONE;       /* Operation return status. */
    _bcm_oam_control_t   *oc = NULL;            /* OAM control structure. */
    bcm_oam_endpoint_t   remote_ep_index;       /* Remote endpoint index. */
    _bcm_oam_hash_data_t *ep_data = NULL;       /* Remote endpoint hash data */

    LOG_VERBOSE(BSL_LS_BCM_OAM,
                (BSL_META_U(unit,
                            "OAM SER on mem %s, index %d\n"),
                 SOC_MEM_NAME(unit, mem), index));

    /* Get OAM control structure handle. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    switch(mem){
    case MA_STATEm:
        /* Validate index, then recreate the whole group under lock —
         * see _bcm_kt2_oam_group_recreate for why a full recreate is
         * needed instead of a targeted fix. */
        _BCM_OAM_GROUP_INDEX_VALIDATE(index);
        _BCM_OAM_LOCK(oc);
        rv = _bcm_kt2_oam_group_recreate(unit, index);
        _BCM_OAM_UNLOCK(oc);
        break;
    case RMEPm:
        _BCM_OAM_RMEP_INDEX_VALIDATE(index);
        _BCM_OAM_LOCK(oc);
        /* Get the logical index from H/w RMEP index. */
        remote_ep_index = oc->remote_endpoints[index];
        if(remote_ep_index == BCM_OAM_ENDPOINT_INVALID) {
            /* If endpoint not in use, just clear the RMEP entry in memory. */
            rmep_entry_t rmep_entry;
            sal_memset(&rmep_entry, 0, sizeof(rmep_entry_t));
            LOG_WARN(BSL_LS_BCM_OAM,
                     (BSL_META_U(unit,
                                 "OAM(unit %d), WARN: Recieved Parity Error on"
                                 "index %d & invalid Remote Id\n"), unit, index));
            /* Just clear this entry including valid bit. */
            rv = WRITE_RMEPm(unit, MEM_BLOCK_ALL, index,
                             &rmep_entry);
        } else {
            /* Get handle to hash data. */
            ep_data = &(oc->oam_hash_data[remote_ep_index]);
            if (!(ep_data->in_use)) {
                /* If endpoint not in use, just clear the RMEP entry in
                 * memory. */
                LOG_WARN(BSL_LS_BCM_OAM,
                         (BSL_META_U(unit,
                                     "OAM, WARN: Recieved Parity Error on"
                                     "unused Remote Id %d\n"), remote_ep_index));
                /* Just clear this entry including valid bit. */
                rv = _bcm_kt2_oam_clear_rmep(unit, ep_data, 0);
            } else {
                /* In-use endpoint: get the group id from EP data and
                 * recreate the whole group to stay consistent. */
                rv = _bcm_kt2_oam_group_recreate(unit, ep_data->group_index);
            }
        }
        _BCM_OAM_UNLOCK(oc);
        break;
    default:
        /* SER reported a memory this handler does not own. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM, ERR: Invalid mem in OAM SER correction "
                              "routine %s\n"), SOC_MEM_NAME(unit, mem)));
        return BCM_E_INTERNAL;
    }
    LOG_VERBOSE(BSL_LS_BCM_OAM,
                (BSL_META_U(unit,
                            "OAM SER completed on mem %s, index %d, rv %d\n"),
                 SOC_MEM_NAME(unit, mem), index, rv));
    return rv;
}
/*
 * Function:
 *     _bcm_kt2_oam_group_name_mangle
 * Purpose:
 *     Build the group name for hardware table write.  The hardware
 *     expects the BCM_OAM_GROUP_NAME_LENGTH-byte name in byte-reversed
 *     order, so copy the input name into the output buffer back-to-front.
 * Parameters:
 *     name_p         - (IN)  OAM group name.
 *     mangled_name_p - (OUT) Buffer (BCM_OAM_GROUP_NAME_LENGTH bytes)
 *                      receiving the name in hardware byte order.
 * Returns:
 *     None
 */
STATIC void
_bcm_kt2_oam_group_name_mangle(uint8 *name_p,
                               uint8 *mangled_name_p)
{
    int idx;  /* Output byte position. */

    /* mangled[idx] = name[LEN - 1 - idx] for every byte of the name. */
    for (idx = 0; idx < BCM_OAM_GROUP_NAME_LENGTH; idx++) {
        mangled_name_p[idx] = name_p[BCM_OAM_GROUP_NAME_LENGTH - 1 - idx];
    }
}
/*
 * Function:
 *     _bcm_kt2_oam_read_clear_faults
 * Purpose:
 *     Decode OAM group or endpoint faults from a hardware table entry
 *     into the caller's info structure and, if requested, clear the
 *     persistent (sticky) faults via the CCM read control register.
 * Parameters:
 *     unit    - (IN)  BCM device number
 *     index   - (IN)  Group/endpoint hardware table index
 *     mem     - (IN)  Memory/table value (MA_STATEm or RMEPm)
 *     entry   - (IN)  Pointer to the raw group/endpoint entry
 *     ma_rmep - (OUT) Pointer to a bcm_oam_group_info_t (MA_STATEm) or
 *               bcm_oam_endpoint_info_t (RMEPm); faults fields updated.
 * Returns:
 *     BCM_E_NONE - No errors (including an unrecognized mem, which is
 *                  silently ignored).
 *     BCM_E_XXX  - Otherwise.
 */
STATIC int
_bcm_kt2_oam_read_clear_faults(int unit, int index, soc_mem_t mem,
                               uint32 *entry, void *ma_rmep)
{
    bcm_oam_group_info_t    *group_info_p;  /* Pointer to group info */
                                            /* structure. */
    bcm_oam_endpoint_info_t *ep_info_p;     /* Pointer to endpoint info */
                                            /* structure. */
    _bcm_oam_fault_t        *faults_list;   /* Pointer to faults list. */
    uint32                  *faults;        /* Faults flag bits. */
    uint32                  *p_faults;      /* Persistent faults bits. */
    uint32                  clear_p_faults; /* Clear persistent faults bits. */
    uint32                  rval = 0;       /* Hardware register value. */
    uint32                  clear_mask = 0; /* Mask to clear persistent faults*/

    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_read_clear_faults index=%d "
                          "Table=%d.\n"), index, mem));

    /* Select the fault descriptor table and the destination fields
     * based on which hardware table the entry came from. */
    switch (mem) {
    /* OAM group state table. */
    case MA_STATEm:
        /* Set the pointer to the start of the group faults array. */
        faults_list = _kt2_oam_group_faults;
        /* Typecast to group information structure pointer. */
        group_info_p = (bcm_oam_group_info_t *) ma_rmep;
        /* Get group faults information. */
        faults = &group_info_p->faults;
        p_faults = &group_info_p->persistent_faults;
        clear_p_faults = group_info_p->clear_persistent_faults;
        break;
    /* OAM remote endpoint table. */
    case RMEPm:
        faults_list = _kt2_oam_endpoint_faults;
        ep_info_p = (bcm_oam_endpoint_info_t *) ma_rmep;
        faults = &ep_info_p->faults;
        p_faults = &ep_info_p->persistent_faults;
        clear_p_faults = ep_info_p->clear_persistent_faults;
        break;
    default:
        /* Unknown table: nothing to decode. */
        return (BCM_E_NONE);
    }

    /* Loop on the list of valid faults (terminated by a zero mask). */
    for (;faults_list->mask != 0; ++faults_list) {
        /* Get current faults status. */
        if (0 != soc_mem_field32_get(unit, mem, entry,
                                     faults_list->current_field)) {
            *faults |= faults_list->mask;
        }
        /* Get sticky (persistent) faults status. */
        if (0 != soc_mem_field32_get(unit, mem, entry,
                                     faults_list->sticky_field)) {
            *p_faults |= faults_list->mask;
            /*
             * If user has requested to clear persistent faults,
             * accumulate the bit for the BITS_TO_CLEARf field.
             */
            if (clear_p_faults) {
                clear_mask |= faults_list->clear_sticky_mask;
            }
        }
    }

    /* Check if faults need to be cleared. */
    if (clear_mask && clear_p_faults) {
        LOG_VERBOSE(BSL_LS_BCM_OAM,
                    (BSL_META_U(unit,
                                "OAM: clear_mask %d.\n"),
                     clear_mask));
        soc_reg_field_set(unit, CCM_READ_CONTROLr, &rval, BITS_TO_CLEARf,
                          clear_mask);
        /* Enable clearing of faults. */
        soc_reg_field_set(unit, CCM_READ_CONTROLr, &rval, ENABLE_CLEARf, 1);
        /* MEMORYf selects the target table: 0 = MA_STATE, 1 = RMEP. */
        if (MA_STATEm == mem) {
            soc_reg_field_set(unit, CCM_READ_CONTROLr, &rval, MEMORYf, 0);
        } else {
            soc_reg_field_set(unit, CCM_READ_CONTROLr, &rval, MEMORYf, 1);
        }
        soc_reg_field_set(unit, CCM_READ_CONTROLr, &rval, INDEXf, index);
        /* Update read control register. */
        BCM_IF_ERROR_RETURN(WRITE_CCM_READ_CONTROLr(unit, rval));
    }
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_get_group
 * Purpose:
 *     Get OAM group information: faults (with optional clear), name and
 *     software-RDI flag.  When the caller sets
 *     BCM_OAM_GROUP_GET_FAULTS_ONLY, only the fault fields are filled
 *     and the flag is consumed.
 * Parameters:
 *     unit        - (IN) BCM device number
 *     group_index - (IN) Group hardware table index
 *     group_p     - (IN) Pointer to group array list
 *     group_info  - (IN/OUT) Pointer to group info structure
 * Returns:
 *     BCM_E_NONE - No errors.
 *     BCM_E_XXX  - Otherwise.
 */
STATIC int
_bcm_kt2_oam_get_group(int unit, bcm_oam_group_t group_index,
                       _bcm_oam_group_data_t *group_p,
                       bcm_oam_group_info_t *group_info)
{
    maid_reduction_entry_t maid_reduction_entry; /* MAID reduction entry. */
    ma_state_entry_t       ma_state_entry;       /* MA_STATE table entry. */
    int                    rv;                   /* Operation return status. */

    group_info->id = group_index;

    /* Read the hardware group state entry. */
    BCM_IF_ERROR_RETURN(READ_MA_STATEm(unit, MEM_BLOCK_ANY,
                                       group_index,
                                       &ma_state_entry));

    group_info->lowest_alarm_priority = soc_MA_STATEm_field32_get
                                            (unit, &ma_state_entry,
                                             LOWESTALARMPRIf);

    /* Decode faults into group_info and clear persistent faults if the
     * caller requested it. */
    rv = _bcm_kt2_oam_read_clear_faults(unit, group_index,
                                        MA_STATEm,
                                        (uint32 *) &ma_state_entry,
                                        group_info);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Clean Faults Group ID=%d- Failed.\n"),
                   group_index));
        return (rv);
    }

    /* Faults-only query: skip the name/MAID reads and clear the flag. */
    if (group_info->flags & BCM_OAM_GROUP_GET_FAULTS_ONLY) {
        group_info->flags &= ~BCM_OAM_GROUP_GET_FAULTS_ONLY;
        return BCM_E_NONE;
    }

    /* Name comes from the software copy, not from hardware. */
    sal_memcpy(group_info->name, group_p[group_index].name,
               BCM_OAM_GROUP_NAME_LENGTH);

    /* Report software-driven RDI transmission, if enabled. */
    BCM_IF_ERROR_RETURN(READ_MAID_REDUCTIONm(unit, MEM_BLOCK_ANY,
                                             group_index,
                                             &maid_reduction_entry));
    if (1 == soc_MAID_REDUCTIONm_field32_get(unit, &maid_reduction_entry,
                                             SW_RDIf)) {
        group_info->flags |= BCM_OAM_GROUP_REMOTE_DEFECT_TX;
    }
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_group_ep_list_add
 * Purpose:
 *     Add an endpoint at the head of a group's endpoint linked list.
 * Parameters:
 *     unit     - (IN) BCM device number
 *     group_id - (IN) OAM group ID.
 *     ep_id    - (IN) OAM endpoint ID.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_group_ep_list_add(int unit,
                               bcm_oam_group_t group_id,
                               bcm_oam_endpoint_t ep_id)
{
    _bcm_oam_control_t    *oc;               /* Pointer to OAM control structure. */
    _bcm_oam_group_data_t *group_p;          /* Pointer to group data. */
    _bcm_oam_hash_data_t  *h_data_p;         /* Pointer to endpoint hash data. */
    _bcm_oam_ep_list_t    *ep_list_p = NULL; /* Pointer to endpoint list. */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Addresses of array elements are never NULL, so the original
     * coverity-flagged dead NULL checks on these pointers were removed. */
    group_p = &oc->group_info[group_id];
    h_data_p = &oc->oam_hash_data[ep_id];

    /* Allocate the new list node. */
    _BCM_OAM_ALLOC(ep_list_p, _bcm_oam_ep_list_t, sizeof(_bcm_oam_ep_list_t),
                   "EP list");
    if (NULL == ep_list_p) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint list alloc for EP=%d failed"
                              " %s.\n"), ep_id, bcm_errmsg(BCM_E_MEMORY)));
        return (BCM_E_MEMORY);
    }

    /* Link the node in at the head of the (doubly-linked) list; the
     * empty-list and non-empty-list cases collapse into one path. */
    ep_list_p->prev = NULL;
    ep_list_p->ep_data_p = h_data_p;
    ep_list_p->next = *group_p->ep_list;
    if (NULL != *group_p->ep_list) {
        (*group_p->ep_list)->prev = ep_list_p;
    }
    *group_p->ep_list = ep_list_p;

    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_group_ep_list_add"
                          " (GID=%d) (EP=%d).\n"), group_id, ep_id));
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_group_ep_list_remove
 * Purpose:
 *     Remove an endpoint from a group's endpoint linked list.
 * Parameters:
 *     unit     - (IN) BCM device number
 *     group_id - (IN) OAM group ID.
 *     ep_id    - (IN) OAM endpoint ID.
 * Returns:
 *     BCM_E_NONE      - Endpoint removed, or the list was empty.
 *     BCM_E_NOT_FOUND - Endpoint not present in the group's list.
 *     BCM_E_XXX       - Otherwise.
 * Notes:
 *     The OAM control lock must be held by the caller.
 *     Bug fix: the original traversal tested `cur->next->next` without
 *     first checking `cur->next`, dereferencing NULL when the list had
 *     a single node that did not match ep_id; it now returns
 *     BCM_E_NOT_FOUND instead of crashing.
 */
STATIC int
_bcm_kt2_oam_group_ep_list_remove(int unit,
                                  bcm_oam_group_t group_id,
                                  bcm_oam_endpoint_t ep_id)
{
    _bcm_oam_control_t    *oc;       /* Pointer to OAM control structure. */
    _bcm_oam_group_data_t *group_p;  /* Pointer to group data. */
    _bcm_oam_hash_data_t  *h_data_p; /* Pointer to endpoint hash data. */
    _bcm_oam_ep_list_t    *cur;      /* Current endpoint node pointer. */
    _bcm_oam_ep_list_t    *del_node; /* Pointer to node to be deleted. */

    /* Control lock already taken by calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    group_p = &oc->group_info[group_id];
    cur = *group_p->ep_list;
    if (NULL == cur) {
        /* No endpoints to remove from this group. */
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Info: No endpoints to delete in group"
                              " GID:%d.\n"), group_id));
        return (BCM_E_NONE);
    }

    /* Check if the head node needs to be deleted. */
    if (ep_id == cur->ep_data_p->ep_id) {
        del_node = *group_p->ep_list;
        *group_p->ep_list = del_node->next;
        if (NULL != *group_p->ep_list) {
            (*group_p->ep_list)->prev = NULL;
        }
        sal_free(del_node);
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Info: Head node delete GID=%d - Success\n"),
                   group_id));
        return (BCM_E_NONE);
    }

    /* Traverse the remainder of the list looking at cur->next, so the
     * predecessor pointer is always at hand when unlinking. */
    while (NULL != cur->next) {
        h_data_p = cur->next->ep_data_p;
        if (NULL == h_data_p) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Group=%d endpoints access failed -"
                                  " %s.\n"), group_id, bcm_errmsg(BCM_E_INTERNAL)));
            return (BCM_E_INTERNAL);
        }
        if (ep_id == h_data_p->ep_id) {
            /* Unlink cur->next; fix the back-pointer only when a
             * successor exists (tail deletion has none). */
            del_node = cur->next;
            cur->next = del_node->next;
            if (NULL != del_node->next) {
                del_node->next->prev = cur;
            }
            sal_free(del_node);
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Info: Node delete GID=%d - Success\n"),
                       group_id));
            return (BCM_E_NONE);
        }
        cur = cur->next;
    }
    return (BCM_E_NOT_FOUND);
}
/*
 * Function:
 *     _bcm_oam_kt2_remote_mep_hw_set
 * Purpose:
 *     Configure hardware tables for a remote endpoint: program the RMEP
 *     table entry (timestamp seeded so CCM timeouts fire without ever
 *     receiving a CCM), insert the matching L3 RMEP-view entry, and
 *     record the H/w-to-logical index mapping.
 * Parameters:
 *     unit      - (IN) BCM device number
 *     ep_info_p - (IN) Pointer to remote endpoint information.
 * Returns:
 *     BCM_E_XXX
 * Notes:
 *     The OAM control lock must be held by the caller.
 */
STATIC int
_bcm_oam_kt2_remote_mep_hw_set(int unit,
                               const bcm_oam_endpoint_info_t *ep_info_p)
{
    l3_entry_1_entry_t  l3_entry;         /* Remote view entry. */
    rmep_entry_t        rmep_entry;       /* Remote MEP entry. */
    uint32              oam_cur_time;     /* Current time. */
    _bcm_oam_hash_data_t *h_data_p = NULL; /* Hash data pointer. */
    int                 rv;               /* Operation return status. */
    _bcm_oam_control_t  *oc;              /* Pointer to control structure. */
    const bcm_oam_endpoint_info_t *ep_p;  /* Pointer to endpoint info. */

    if (NULL == ep_info_p) {
        return (BCM_E_INTERNAL);
    }

    /* Initialize local endpoint info pointer. */
    ep_p = ep_info_p;

    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Get endpoint hash data pointer; the endpoint must already have
     * been reserved by the caller. */
    h_data_p = &oc->oam_hash_data[ep_p->id];
    if (0 == h_data_p->in_use) {
        return (BCM_E_INTERNAL);
    }

    /* RMEP table programming. */
    sal_memset(&rmep_entry, 0, sizeof(rmep_entry_t));

    /* Set the MA group index. */
    soc_RMEPm_field32_set(unit, &rmep_entry, MAID_INDEXf,
                          ep_p->group);
    /*
     * The following steps are necessary to enable CCM timeout events
     * without having received any CCMs.
     */
    soc_RMEPm_field32_set(unit, &rmep_entry,
                          RMEP_TIMESTAMP_VALIDf, 1);
    BCM_IF_ERROR_RETURN
        (READ_OAM_CURRENT_TIMEr(unit, &oam_cur_time));
    soc_RMEPm_field32_set(unit, &rmep_entry, RMEP_TIMESTAMPf,
                          oam_cur_time);
    soc_RMEPm_field32_set(unit, &rmep_entry,
                          RMEP_RECEIVED_CCMf,
                          _bcm_kt2_oam_ccm_msecs_to_hw_encode(h_data_p->period));
    /* End of timeout setup */

    soc_RMEPm_field32_set(unit, &rmep_entry, VALIDf, 1);

    /* The RMEP entry is written before the L3 key is inserted so that a
     * matching lookup never points at an unprogrammed RMEP slot. */
    rv = WRITE_RMEPm(unit, MEM_BLOCK_ALL, h_data_p->remote_index,
                     &rmep_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: RMEP table write failed EP=%d %s.\n"),
                   ep_info_p->id, bcm_errmsg(rv)));
        return (rv);
    }

    /* L3 unicast table programming. */
    sal_memset(&l3_entry, 0, sizeof(l3_entry_1_entry_t));

    /* Set the CCM interval. */
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set
        (unit, &l3_entry, RMEP__CCMf,
         _bcm_kt2_oam_ccm_msecs_to_hw_encode(h_data_p->period));

    /* Set the entry hardware index (pointer into RMEP table). */
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry, RMEP__RMEP_PTRf,
                                           h_data_p->remote_index);
    /*
     * Construct endpoint RMEP view key for L3 table entry
     * insert operation.
     */
    _bcm_kt2_oam_rmep_key_construct(unit, h_data_p, &l3_entry);

    /* Mark the entry as valid. */
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry, VALIDf, 1);

    /* Install entry in hardware. */
    rv = soc_mem_insert(unit, L3_ENTRY_IPV4_UNICASTm,
                        MEM_BLOCK_ALL, &l3_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: L3 table insert failed EP=%d %s.\n"),
                   ep_p->id, bcm_errmsg(rv)));
        return (rv);
    }

    /* Add the H/w index to logical index mapping for RMEP. */
    oc->remote_endpoints[h_data_p->remote_index] = ep_p->id;
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_find_egr_lmep
 * Purpose:
 *     Search the EGR_MP_GROUP table and return the matching entry's
 *     hardware index and data.
 * Parameters:
 *     unit             - (IN)  BCM device number
 *     h_data_p         - (IN)  Pointer to endpoint hash data.
 *     entry_idx        - (OUT) Pointer to match entry hardware index.
 *     mp_group_entry_p - (OUT) Pointer to match entry data.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_find_egr_lmep(int unit, const _bcm_oam_hash_data_t *h_data_p,
                           int *entry_idx,
                           egr_mp_group_entry_t *mp_group_entry_p)
{
    egr_mp_group_entry_t key_entry; /* Search key built from hash data. */
    int                  rv;        /* Operation return status. */

    /* All three pointers are mandatory. */
    if ((NULL == h_data_p) || (NULL == entry_idx)
        || (NULL == mp_group_entry_p)) {
        return (BCM_E_INTERNAL);
    }

    /* Build the egress MP group search key for this endpoint. */
    sal_memset(&key_entry, 0, sizeof(key_entry));
    _bcm_oam_egr_lmep_key_construct(unit, h_data_p, &key_entry);

    /* Search under the table lock to keep the lookup consistent. */
    soc_mem_lock(unit, EGR_MP_GROUPm);
    rv = soc_mem_search(unit, EGR_MP_GROUPm, MEM_BLOCK_ANY, entry_idx,
                        &key_entry, mp_group_entry_p, 0);
    soc_mem_unlock(unit, EGR_MP_GROUPm);

    if (BCM_FAILURE(rv)) {
        LOG_VERBOSE(BSL_LS_BCM_OAM,
                    (BSL_META_U(unit,
                                "OAM Error: Egr MP group entry lookup "
                                "failed vlan=%d port =%x %s.\n"),
                     h_data_p->vlan, h_data_p->dglp, bcm_errmsg(rv)));
    }
    return (rv);
}
/*
 * Function:
 *     _bcm_kt2_oam_find_lmep
 * Purpose:
 *     Search the L3 table's LMEP view and return the matching entry's
 *     hardware index and data.
 * Parameters:
 *     unit       - (IN)  BCM device number
 *     h_data_p   - (IN)  Pointer to endpoint hash data.
 *     entry_idx  - (OUT) Pointer to match entry hardware index.
 *     l3_entry_p - (OUT) Pointer to match entry data.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_find_lmep(int unit, const _bcm_oam_hash_data_t *h_data_p,
                       int *entry_idx,
                       l3_entry_1_entry_t *l3_entry_p)
{
    l3_entry_1_entry_t key_entry; /* LMEP-view search key. */
    int                rv;        /* Operation return status. */

    /* All three pointers are mandatory. */
    if ((NULL == h_data_p) || (NULL == entry_idx) || (NULL == l3_entry_p)) {
        return (BCM_E_INTERNAL);
    }

    /* Build the LMEP view key for the L3 table search. */
    sal_memset(&key_entry, 0, sizeof(key_entry));
    _bcm_kt2_oam_lmep_key_construct(unit, h_data_p, &key_entry);

    /* Take L3 module protection mutex to block any updates, perform the
     * search, then release the mutex. */
    L3_LOCK(unit);
    rv = soc_mem_search(unit, L3_ENTRY_IPV4_UNICASTm, MEM_BLOCK_ANY, entry_idx,
                        &key_entry, l3_entry_p, 0);
    if (BCM_FAILURE(rv)) {
        LOG_VERBOSE(BSL_LS_BCM_OAM,
                    (BSL_META_U(unit,
                                "OAM Error: L3 entry lookup vlan=%d port=%x %s.\n"),
                     h_data_p->vlan, h_data_p->sglp, bcm_errmsg(rv)));
    }
    L3_UNLOCK(unit);

    return (rv);
}
/*
 * Function:
 *     _bcm_kt2_oam_find_port_lmep
 * Purpose:
 *     Check whether a port-level MEP is already enabled on the source
 *     port of the given endpoint, and if so return the port's MDL
 *     bitmap along with the SOURCE_TRUNK_MAP entry and index for that
 *     (module, port) pair.
 * Parameters:
 *     unit      - (IN)  BCM device number
 *     h_data_p  - (IN)  Pointer to endpoint hash data (src_pp_port and
 *                 sglp are read).
 *     stm_index - (OUT) SOURCE_TRUNK_MAP table index for the port.
 *     stm_entry - (OUT) SOURCE_TRUNK_MAP table entry contents.
 *     mdl       - (OUT) MDL bitmap from the PORT table.
 * Returns:
 *     BCM_E_NOT_FOUND - No port MEP enabled on this port (MDL bitmap 0);
 *                       stm_index/stm_entry are not filled in this case.
 *     BCM_E_XXX       - Otherwise.
 */
STATIC int
_bcm_kt2_oam_find_port_lmep(int unit, const _bcm_oam_hash_data_t *h_data_p,
            int *stm_index, source_trunk_map_table_entry_t *stm_entry,
            uint8 *mdl)
{
    port_tab_entry_t port_entry;
    int rv = BCM_E_NONE;
    int port_id, mod_id;

    /* Read the per-port table entry for the endpoint's source port. */
    rv = soc_mem_read(unit, PORT_TABm, MEM_BLOCK_ANY,
                      h_data_p->src_pp_port, &port_entry);
    if (BCM_FAILURE(rv)) {
        return rv;
    }

    *mdl = soc_PORT_TABm_field32_get(unit, &port_entry, MDL_BITMAPf);
    if (*mdl == 0) {
        /* No port mep enabled already on this port */
        return BCM_E_NOT_FOUND;
    }

    /* Split the generic logical port into its module and port parts. */
    port_id = _BCM_OAM_GLP_PORT_GET(h_data_p->sglp);
    mod_id = _BCM_OAM_GLP_MODULE_ID_GET(h_data_p->sglp);

    /* Derive index to SOURCE_TRUNK_MAP tbl based on module ID and port */
    BCM_IF_ERROR_RETURN(_bcm_esw_src_mod_port_table_index_get(unit, mod_id,
                                                     port_id, stm_index));
    BCM_IF_ERROR_RETURN(READ_SOURCE_TRUNK_MAP_TABLEm(unit, MEM_BLOCK_ANY,
                                                     *stm_index, stm_entry));
    return BCM_E_NONE;
}
/*
 * Function:
 *     _bcm_kt2_oam_lm_counters_mems_from_pool_get
 * Purpose:
 *     Resolve the ingress and egress LM counter memories that back a
 *     given counter pool.
 * Parameters:
 *     pool_id - (IN)  LM counter pool number (0 or 1).
 *     mems    - (OUT) mems[0] = ingress memory, mems[1] = egress memory.
 * Returns:
 *     BCM_E_NONE     - Pool resolved.
 *     BCM_E_INTERNAL - Invalid pool id (programming error).
 */
STATIC
int _bcm_kt2_oam_lm_counters_mems_from_pool_get(int pool_id, soc_mem_t *mems)
{
    switch (pool_id) {
    case 0:
        mems[0] = OAM_LM_COUNTERS_0m;
        mems[1] = EGR_OAM_LM_COUNTERS_0m;
        break;
    case 1:
        mems[0] = OAM_LM_COUNTERS_1m;
        mems[1] = EGR_OAM_LM_COUNTERS_1m;
        break;
    default:
        /*
         * Only pools 0 and 1 exist.  (Removed unreachable "break" that
         * followed this return.)
         */
        return BCM_E_INTERNAL;
    }
    return BCM_E_NONE;
}
/*
 * Zero out the hardware LM counter block (8 entries per endpoint) in both
 * the ingress and egress counter memories of the given pool, starting at
 * ctr_index.
 */
STATIC int
_bcm_kt2_oam_lm_counters_hw_values_init(int unit, int pool_id, int ctr_index)
{
    oam_lm_counters_0_entry_t zero_entry;
    /* Two memories per pool: [0] = ingress, [1] = egress. */
    soc_mem_t ctr_mems[2] = {INVALIDm, INVALIDm};
    int mem_idx;
    int offset;

    BCM_IF_ERROR_RETURN(
        _bcm_kt2_oam_lm_counters_mems_from_pool_get(pool_id, ctr_mems));

    for (mem_idx = 0; mem_idx < 2; mem_idx++) {
        sal_memset(&zero_entry, 0, sizeof(zero_entry));
        /* 8 entries allocated per endpoint - clear each one. */
        for (offset = 0; offset < 8; offset++) {
            BCM_IF_ERROR_RETURN(soc_mem_write(unit, ctr_mems[mem_idx],
                                              MEM_BLOCK_ALL,
                                              ctr_index + offset,
                                              &zero_entry));
        }
    }
    return BCM_E_NONE;
}
/*
 * Function:
 *     _bcm_kt2_oam_lm_counters_hw_values_reset
 * Purpose:
 *     Reset the 8-entry hardware LM counter block at 'index' in pool
 *     'pool_id'.  Resetting is identical to initialization (all zeros),
 *     so this simply delegates to the init routine.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_lm_counters_hw_values_reset(int unit, int pool_id, int index)
{
    return _bcm_kt2_oam_lm_counters_hw_values_init(unit, pool_id, index);
}
/*
* Function:
* _bcm_kt2_oam_free_counter
* Purpose:
* Free counter from counter pool.
* Parameters:
* unit - (IN) BCM device number
* Returns:
* BCM_E_XXX
*/
/*
 * Function:
 *     _bcm_kt2_oam_free_counter
 * Purpose:
 *     Return the endpoint's LM counter block to its pool and zero the
 *     corresponding hardware counter entries.
 * Parameters:
 *     unit      - (IN) BCM device number
 *     hash_data - (IN/OUT) Endpoint hash data; rx_ctr is invalidated on
 *                 success.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_free_counter(int unit,
                          _bcm_oam_hash_data_t *hash_data)
{
    int                rv = BCM_E_NONE; /* Operation return status.      */
    _bcm_oam_control_t *oc;             /* Pointer to control structure. */
    int                pool_id = 0;

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    if (hash_data->rx_ctr != _BCM_OAM_INVALID_INDEX) {
        /* MS 8 bits of counter Id is the pool id; LS 24 bits the index. */
        pool_id = (hash_data->rx_ctr >> 24);
        rv = shr_aidxres_list_free(oc->ing_lm_ctr_pool[pool_id],
                                   (hash_data->rx_ctr & 0xffffff));
        /*
         * Fix: check the free result before touching hardware.  Previously
         * the HW reset ran unconditionally and its BCM_IF_ERROR_RETURN
         * could mask a pool-free failure.
         */
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: LM counter block "
                                  "free failed (EP=%d) - %s.\n"),
                       hash_data->ep_id, bcm_errmsg(rv)));
            return (rv);
        }
        /* Zero the hardware counter block only after a successful free. */
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_lm_counters_hw_values_reset(unit,
                                            pool_id,
                                            hash_data->rx_ctr & 0xffffff));
        hash_data->rx_ctr = _BCM_OAM_INVALID_INDEX;
    }
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_alloc_counter
* Purpose:
* Allocate counter from counter pool.
* Parameters:
* unit - (IN) BCM device number
* Returns:
* BCM_E_XXX
*/
/*
 * Function:
 *     _bcm_kt2_oam_alloc_counter
 * Purpose:
 *     Allocate a block of 8 consecutive LM counters from the given
 *     ingress counter pool.
 * Parameters:
 *     unit      - (IN)  BCM device number
 *     pool_id   - (IN)  Ingress LM counter pool number.
 *     ctr_index - (OUT) Base index of the allocated 8-counter block.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_alloc_counter(int unit, int pool_id,
                           shr_idxres_element_t *ctr_index)
{
    int rv;                 /* Operation return status.      */
    _bcm_oam_control_t *oc; /* Pointer to control structure. */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Allocate 8 consecutive counters from the pool. */
    rv = shr_aidxres_list_alloc_block(oc->ing_lm_ctr_pool[pool_id],
                                      8, ctr_index);
    if (BCM_FAILURE(rv)) {
        /* Fixed typo in the error message ("alloic" -> "alloc"). */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: LM counter block alloc failed "
                              "- %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    return (BCM_E_NONE);
}
#if defined(INCLUDE_BHH)
/*
* Function:
* _bcm_kt2_oam_bhh_sec_mep_alloc_counter
* Purpose:
* Allocate counter from counter pool.
* Parameters:
* unit - (IN) BCM device number
* Retruns:
* BCM_E_XXX
*/
/*
 * Function:
 *     _bcm_kt2_oam_bhh_sec_mep_alloc_counter
 * Purpose:
 *     Allocate a block of 8 consecutive LM counters from the BHH
 *     secondary-MEP ingress counter pool.
 * Parameters:
 *     unit      - (IN)  BCM device number
 *     ctr_index - (OUT) Base index of the allocated 8-counter block.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_bhh_sec_mep_alloc_counter(int unit, shr_idxres_element_t *ctr_index)
{
    int rv;                 /* Operation return status.      */
    _bcm_oam_control_t *oc; /* Pointer to control structure. */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Allocate 8 consecutive counters from the secondary-MEP pool. */
    rv = shr_aidxres_list_alloc_block(oc->ing_lm_sec_mep_ctr_pool,
                                      8, ctr_index);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: LM counter block alloc failed "
                              "- %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    return (BCM_E_NONE);
}
#endif
/*
* Function:
* _bcm_kt2_oam_counter_set
* Purpose:
* Set Rx counter for MEP
* Parameters:
* unit - (IN) BCM device number
* Retruns:
* BCM_E_XXX
*/
/*
 * Function:
 *     _bcm_kt2_oam_counter_set
 * Purpose:
 *     Pick an LM counter pool based on which CTRx_VALID bits are already
 *     set in the supplied table entry, and allocate a counter block from
 *     that pool.  The caller is responsible for programming the valid /
 *     base-pointer fields into the entry and writing it back.
 * Parameters:
 *     unit      - (IN)  BCM device number
 *     mem       - (IN)  Table the entry belongs to (L3 LMEP view or
 *                       EGR_MP_GROUP); selects the field names to read.
 *     entry     - (IN)  Raw table entry to inspect.
 *     ctr_index - (OUT) Base index of the allocated counter block.
 *     pool_id   - (OUT) Pool the block was allocated from (0 or 1).
 * Returns:
 *     BCM_E_RESOURCE - Both counters already in use on this interface.
 *     BCM_E_XXX      - Allocation or other failure.
 */
STATIC int
_bcm_kt2_oam_counter_set(int unit, soc_mem_t mem,
                         uint32 *entry,
                         shr_idxres_element_t *ctr_index,
                         int *pool_id)
{
    _bcm_oam_control_t *oc; /* Pointer to control structure. */
    soc_field_t ctr1_field;
    soc_field_t ctr2_field;
    int ctr1_valid = 0;
    int ctr2_valid = 0;
    int rv = BCM_E_NONE;/* Operation return status. */
    /* Lock already taken by the calling routine. */
    /* oc is fetched only to verify the OAM module is initialized. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* The L3 LMEP view prefixes its field names with "LMEP__". */
    if (mem == L3_ENTRY_IPV4_UNICASTm) {
        ctr1_field = LMEP__CTR1_VALIDf;
        ctr2_field = LMEP__CTR2_VALIDf;
    } else {
        ctr1_field = CTR1_VALIDf;
        ctr2_field = CTR2_VALIDf;
    }
    ctr1_valid = soc_mem_field32_get(unit, mem,
                                     (uint32 *)entry, ctr1_field);
    ctr2_valid = soc_mem_field32_get(unit, mem,
                                     (uint32 *)entry, ctr2_field);
    /* if both counters are already used, we can't enable LM on this endpoint */
    if ((1 == ctr1_valid) && (1 == ctr2_valid)) {
        rv = BCM_E_RESOURCE;
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: 2 counters are already "
                              "allocated for EP on this service interface"
                              "%s.\n"), bcm_errmsg(rv)));
        return (rv);
    } else if ((0 == ctr1_valid) && (1 == ctr2_valid)) {
        /* Only counter 2 in use -> allocate from pool 0 (counter 1). */
        *pool_id = 0;
        rv = _bcm_kt2_oam_alloc_counter(unit, *pool_id, ctr_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: LM counter block alloc - %s.\n"),
                       bcm_errmsg(rv)));
            return (rv);
        }
        /* NOTE(review): this local update is never read back or written to
           the entry here -- presumably leftover; the caller programs the
           valid bit itself.  TODO confirm. */
        ctr1_valid = 1;
    } else if ((0 == ctr2_valid) && (1 == ctr1_valid)) {
        /* Only counter 1 in use -> allocate from pool 1 (counter 2). */
        *pool_id = 1;
        rv = _bcm_kt2_oam_alloc_counter(unit, *pool_id, ctr_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: LM counter block alloc - %s.\n"),
                       bcm_errmsg(rv)));
            return (rv);
        }
        /* NOTE(review): dead store, see note above. */
        ctr2_valid = 1;
    } else {
        /* Neither counter in use: try pool 0 first, fall back to pool 1. */
        *pool_id = 0;
        rv = _bcm_kt2_oam_alloc_counter(unit, *pool_id, ctr_index);
        if (BCM_FAILURE(rv)) {
            *pool_id = 1;
            rv = _bcm_kt2_oam_alloc_counter(unit, *pool_id, ctr_index);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: LM counter block alloc - %s.\n"),
                           bcm_errmsg(rv)));
                return (rv);
            }
        }
        /* NOTE(review): dead store, see note above. */
        ctr1_valid = 1;
    }
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_service_pri_profile_add
* Purpose:
* Create ING_SERVICE_PRI_MAP or EGR_SERVICE_PRI_MAP profile entry
* Parameters:
*     unit          - (IN)  BCM device number
*     egr           - (IN)  Ingress (0) / egress (1) profile selector
*     endpoint_info - (IN)  Pointer to endpoint information.
*     profile_index - (OUT) Index of the profile entry created
* Returns:
*     BCM_E_XXX
*/
/*
 * Function:
 *     _bcm_kt2_oam_service_pri_profile_add
 * Purpose:
 *     Create an ING_SERVICE_PRI_MAP or EGR_SERVICE_PRI_MAP profile from
 *     the endpoint's per-internal-priority map.
 * Parameters:
 *     unit          - (IN)  BCM device number
 *     egr           - (IN)  Non-zero for the egress profile table.
 *     endpoint_info - (IN)  Endpoint information (pri_map[] is used).
 *     profile_index - (OUT) Profile set index (hardware base index divided
 *                     by BCM_OAM_INTPRI_MAX).
 * Returns:
 *     BCM_E_PARAM - A pri_map offset exceeds the supported maximum.
 *     BCM_E_XXX   - Profile add failure.
 */
int
_bcm_kt2_oam_service_pri_profile_add(int unit, int egr,
                                     bcm_oam_endpoint_info_t *endpoint_info,
                                     uint32 *profile_index)
{
    int rv = BCM_E_NONE;
    int i = 0;
    void *entries[1];
    _bcm_oam_control_t *oc; /* Pointer to control structure. */
    soc_mem_t mem = ING_SERVICE_PRI_MAPm;
    uint32 mem_entries[BCM_OAM_INTPRI_MAX];
    soc_profile_mem_t *profile; /* profile to be used ingress or egress */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Select the table and the matching cached profile manager. */
    if (egr) {
        mem = EGR_SERVICE_PRI_MAPm;
        profile = &oc->egr_service_pri_map;
    } else {
        profile = &oc->ing_service_pri_map;
    }
    /* Build one profile entry per internal priority. */
    for (i = 0; i < BCM_OAM_INTPRI_MAX; i++) {
        if (endpoint_info->pri_map[i] > _BCM_OAM_SERVICE_PRI_MAX_OFFSET) {
            return BCM_E_PARAM;
        }
        mem_entries[i] = endpoint_info->pri_map[i];
        /* Not every device variant has the OFFSET_VALID field. */
        if (SOC_MEM_FIELD_VALID(unit, mem, OFFSET_VALIDf)) {
            soc_mem_field32_set(unit, mem, &mem_entries[i], OFFSET_VALIDf, 1);
        }
    }
    soc_mem_lock(unit, mem);
    entries[0] = &mem_entries;
    rv = soc_profile_mem_add(unit, profile, (void *) &entries,
                             BCM_OAM_INTPRI_MAX, profile_index);
    if (BCM_FAILURE(rv)) {
        soc_mem_unlock(unit, mem);
        return rv;
    }
    /* Convert the raw base index into a profile-set index. */
    *profile_index = ((*profile_index)/BCM_OAM_INTPRI_MAX);
    soc_mem_unlock(unit, mem);
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_lmep_counters_set
* Purpose:
*     Set up LM (loss measurement) Rx/Tx counters for a local MEP.
* Parameters:
*     unit      - (IN) BCM device number
*     ep_info_p - (IN/OUT) Pointer to local endpoint information.
* Returns:
* BCM_E_XXX
*/
/*
 * Function:
 *     _bcm_kt2_oam_lmep_counters_set
 * Purpose:
 *     Allocate and program LM counters for a local MEP in both the ING
 *     (L3 LMEP view) and EGR MP group tables, and attach a service
 *     priority map profile to each.
 * Parameters:
 *     unit      - (IN) BCM device number
 *     ep_info_p - (IN/OUT) Endpoint information; lm_counter_base_id is
 *                 filled in on success.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_lmep_counters_set(int unit,
                               bcm_oam_endpoint_info_t *ep_info_p)
{
    _bcm_oam_hash_data_t *hash_data;   /* Pointer to endpoint hash data.   */
    _bcm_oam_control_t *oc;            /* Pointer to control structure.    */
    l3_entry_1_entry_t l3_entry;       /* L3 entry buffer.                 */
    int l3_index = -1;                 /* L3 entry hardware index.         */
    shr_idxres_element_t ctr_index = 0;
    int pool_id = 0;
    egr_mp_group_entry_t egr_mp_group; /* Egress MP group tbl entry buffer */
    int mp_grp_index = 0;
    int rv = BCM_E_NONE;               /* Operation return status.         */
    uint32 profile_index = 0;
    uint32 direction = 0;              /* Up MEP (1) or Down MEP (0).      */

    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Get the stored endpoint information from hash table. */
    hash_data = &oc->oam_hash_data[ep_info_p->id];
    if (0 == hash_data->in_use) {
        return (BCM_E_INTERNAL);
    }
    if (ep_info_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        direction = 1;
    }
    /*
     * For downMEP Rx counters are in Ing MP group table and for UpMEP
     * it is in egr mp group table.
     */
    rv = _bcm_kt2_oam_find_lmep(unit, hash_data, &l3_index, &l3_entry);
    if (BCM_FAILURE(rv)) {
        /*
         * Fix: propagate the lookup error.  Previously the find's return
         * value was discarded, so this path logged bcm_errmsg(BCM_E_NONE)
         * and returned success.
         */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MP group tbl entry not found-%s.\n"),
                   bcm_errmsg(rv)));
        return rv;
    }
    if (hash_data->tx_ctr == _BCM_OAM_INVALID_INDEX) {
        /* Allocate counters and configure the same in L3 entry */
        rv = _bcm_kt2_oam_counter_set(unit,
                                      L3_ENTRY_IPV4_UNICASTm,
                                      (uint32 *)&l3_entry,
                                      &ctr_index, &pool_id);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3_ENTRY table update failed "
                                  "for EP=%d due to counter allocation failure "
                                  "%s.\n"), ep_info_p->id, bcm_errmsg(rv)));
            return (rv);
        }
        /* MS 8 bits of counter Id is pool id. LS 24 bits are index */
        hash_data->tx_ctr = pool_id << 24 | (ctr_index);
        hash_data->rx_ctr = hash_data->tx_ctr;
    } else {
        pool_id = hash_data->tx_ctr >> 24;
    }
    ep_info_p->lm_counter_base_id = hash_data->tx_ctr;
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry,
                                           mep_ctr_info[pool_id].ctr_valid, 1);
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry,
                                           mep_ctr_info[pool_id].ctr_base_ptr,
                                           (hash_data->tx_ctr & 0xFFFFFF));
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry,
                                           mep_ctr_info[pool_id].ctr_mep_type,
                                           direction);
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry,
                                           mep_ctr_info[pool_id].ctr_mep_mdl,
                                           ep_info_p->level);
    /* Create SERVICE_PRI_MAP profile */
    rv = _bcm_kt2_oam_service_pri_profile_add(unit, 0, ep_info_p,
                                              &profile_index);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: L3_ENTRY table update failed "
                              "for EP=%d due to service pri map profile allocation "
                              "failure %s.\n"),
                   ep_info_p->id, bcm_errmsg(rv)));
        return (rv);
    }
    hash_data->pri_map_index = profile_index;
    soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry,
                                           mep_ctr_info[pool_id].ctr_profile,
                                           profile_index);
    rv = soc_mem_write(unit, L3_ENTRY_IPV4_UNICASTm, MEM_BLOCK_ALL,
                       l3_index, &l3_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: L3_ENTRY table write failed "
                              "for EP=%d %s.\n"),
                   ep_info_p->id, bcm_errmsg(rv)));
        return (rv);
    }
    /*
     * For downMEP Tx counters are in Egr Mp group table and for UpMEP it
     * is in ING MP group table.
     */
    rv = _bcm_kt2_oam_find_egr_lmep(unit, hash_data,
                                    &mp_grp_index, &egr_mp_group);
    if (BCM_FAILURE(rv)) {
        /* Fix: propagate the lookup error (was returning BCM_E_NONE). */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: EGR MP group tbl entry not found-%s.\n"),
                   bcm_errmsg(rv)));
        return rv;
    }
    if (hash_data->tx_ctr == _BCM_OAM_INVALID_INDEX) {
        rv = _bcm_kt2_oam_counter_set(unit, EGR_MP_GROUPm,
                                      (uint32 *)&egr_mp_group, &ctr_index,
                                      &pool_id);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: EGR_MP_GROUP table update "
                                  "failed for EP=%d due to counter allocation "
                                  "failure %s.\n"),
                       ep_info_p->id, bcm_errmsg(rv)));
            return (rv);
        }
        /* MS 8 bits of counter Id is pool id. LS 24 bits are index */
        hash_data->tx_ctr = pool_id << 24 | (ctr_index);
        hash_data->rx_ctr = hash_data->tx_ctr;
    } else {
        pool_id = hash_data->tx_ctr >> 24;
    }
    ep_info_p->lm_counter_base_id = hash_data->tx_ctr;
    if (SAL_BOOT_BCMSIM) {
        /* Added as WAR for Cmodel issue */
        /* Construct endpoint egress MP group Table entry key part */
        _bcm_oam_egr_lmep_key_construct(unit, hash_data, &egr_mp_group);
        soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group, VALIDf, 1);
    }
    soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group,
                                  egr_mep_ctr_info[pool_id].ctr_valid, 1);
    soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group,
                                  egr_mep_ctr_info[pool_id].ctr_base_ptr,
                                  (hash_data->tx_ctr & 0xFFFFFF));
    soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group,
                                  egr_mep_ctr_info[pool_id].ctr_mep_type,
                                  direction);
    soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group,
                                  egr_mep_ctr_info[pool_id].ctr_mep_mdl,
                                  ep_info_p->level);
    /* Create SERVICE_PRI_MAP profile */
    rv = _bcm_kt2_oam_service_pri_profile_add(unit, 1, ep_info_p,
                                              &profile_index);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: EGR_MP_GROUP table update failed "
                              "for EP=%d due to service pri map profile allocation "
                              "failure %s.\n"),
                   ep_info_p->id, bcm_errmsg(rv)));
        return (rv);
    }
    soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group,
                                  egr_mep_ctr_info[pool_id].ctr_profile,
                                  profile_index);
    hash_data->egr_pri_map_index = profile_index;
    rv = soc_mem_write(unit, EGR_MP_GROUPm, MEM_BLOCK_ALL,
                       mp_grp_index, &egr_mp_group);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: EGR_MP_GROUP table update failed "
                              "for EP=%d %s.\n"),
                   ep_info_p->id, bcm_errmsg(rv)));
        return (rv);
    }
    return (rv);
}
/*
* Function:
* bcm_kt2_oam_hw_ccm_tx_ctr_update
* Purpose:
* Update CCM CTR params
* Parameters:
* unit - (IN) Unit number.
* ep_info - (IN) Pointer to endpoint parameters
* Returns:
* BCM_E_XXX
* Notes:
*/
STATIC int
bcm_kt2_oam_hw_ccm_tx_ctr_update(int unit,
                                 bcm_oam_endpoint_info_t *ep_info)
{
    _bcm_oam_hash_data_t *h_data_p;
    _bcm_oam_control_t *oc;
    int rv=BCM_E_NONE;
    lmep_1_entry_t lmep_1_entry; /* LMEP_1 table entry. */
    uint8 num_counter;
    uint32 counter_id;
    uint32 pool;
    lmep_entry_t lmep_entry;     /* LMEP table entry. */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    h_data_p = &oc->oam_hash_data[ep_info->id];
    /* Get the LMEP_1 table entry for the endpoint. */
    rv = READ_LMEP_1m(unit, MEM_BLOCK_ANY, h_data_p->local_tx_index, &lmep_1_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: LMEP_1 table read (index=%d) failed "
                              "- %s.\n"), unit, h_data_p->local_tx_index, bcm_errmsg(rv)));
        return rv;
    }
    /* Get the LMEP table entry for the endpoint. */
    rv = READ_LMEPm(unit, MEM_BLOCK_ANY, h_data_p->local_tx_index, &lmep_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: LMEP table read (index=%d) failed "
                              "- %s.\n"), unit, h_data_p->local_tx_index, bcm_errmsg(rv)));
        return rv;
    }
    /* Reject requests with more counters than the hardware supports. */
    if (_KT2_OAM_COUNTER_SIZE < ep_info->ccm_tx_update_lm_counter_size) {
        return BCM_E_PARAM;
    }
    for(num_counter = 0; num_counter < ep_info->ccm_tx_update_lm_counter_size;
        num_counter++) {
        /*
         * Counter id = base + per-counter offset; the MS 8 bits encode the
         * pool, which selects which of the two LMEP_1 counter slots to use.
         */
        counter_id = ep_info->ccm_tx_update_lm_counter_base_id[num_counter] +
            ep_info->ccm_tx_update_lm_counter_offset[num_counter];
        pool = counter_id >> 24;
        switch(pool){
        case 0:
            /* Set Counter 1 ID and action. */
            /* NOTE(review): the index is masked with 0xFFFF here but with
               0xFFFFFF elsewhere -- presumably COUNTER_x_IDf is a 16-bit
               field; confirm against the LMEP_1 field definition. */
            soc_LMEP_1m_field32_set(unit, &lmep_1_entry, COUNTER_1_IDf,
                                    (counter_id & 0xFFFF));
            soc_LMEP_1m_field32_set(unit, &lmep_1_entry, COUNTER_1_ACTIONf, 1);
            break;
        case 1:
            /* Set Counter 2 ID and action. */
            soc_LMEP_1m_field32_set(unit, &lmep_1_entry, COUNTER_2_IDf,
                                    (counter_id & 0xFFFF));
            soc_LMEP_1m_field32_set(unit, &lmep_1_entry, COUNTER_2_ACTIONf, 1);
            break;
        default:
            return BCM_E_PARAM;
        }
    }
    if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        /* TX counter is located in IP (0) */
        soc_LMEPm_field32_set(unit, &lmep_entry, COUNTER_1_LOCATIONf, 0);
        soc_LMEPm_field32_set(unit, &lmep_entry, COUNTER_2_LOCATIONf, 0);
    } else {
        /* TX counter is located in EP (1) */
        soc_LMEPm_field32_set(unit, &lmep_entry, COUNTER_1_LOCATIONf, 1);
        soc_LMEPm_field32_set(unit, &lmep_entry, COUNTER_2_LOCATIONf, 1);
    }
    rv = WRITE_LMEP_1m(unit, MEM_BLOCK_ALL, h_data_p->local_tx_index,
                       &lmep_1_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: LMEP_1 table write (EP=%d)"
                              " failed - %s.\n"), unit, h_data_p->ep_id, bcm_errmsg(rv)));
        return rv;
    }
    rv = WRITE_LMEPm(unit, MEM_BLOCK_ALL, h_data_p->local_tx_index,
                     &lmep_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: LMEP table write (EP=%d)"
                              " failed - %s.\n"), unit, h_data_p->ep_id, bcm_errmsg(rv)));
        return rv;
    }
    return rv;
}
/*
 * Re-target an existing Down-MEP's CCM transmission at a new destination
 * PP port by rewriting the queue number and port in its LMEP_1 entry.
 */
STATIC int
_bcm_kt2_oam_lmep_down_modify_new_dest_port(int unit, _bcm_oam_hash_data_t *h_data_p)
{
    lmep_1_entry_t lmep_1_entry; /* LMEP_1 table entry. */
    uint32 queue_num;

    /* Fetch the current LMEP_1 entry for this endpoint. */
    BCM_IF_ERROR_RETURN(READ_LMEP_1m(unit, MEM_BLOCK_ANY,
                                     h_data_p->local_tx_index, &lmep_1_entry));

    /* Enqueue Tx packets on the destination port's unicast queue. */
    queue_num = SOC_INFO(unit).port_uc_cosq_base[h_data_p->dst_pp_port] +
                h_data_p->int_pri;
    soc_LMEP_1m_field32_set(unit, &lmep_1_entry, QUEUE_NUMf, queue_num);

    /* Redirect transmission to the new destination PP port. */
    soc_LMEP_1m_field32_set(unit, &lmep_1_entry, PP_PORTf,
                            h_data_p->dst_pp_port);

    SOC_IF_ERROR_RETURN
        (WRITE_LMEP_1m(unit, MEM_BLOCK_ALL, h_data_p->local_tx_index,
                       &lmep_1_entry));
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_oam_kt2_local_tx_mep_hw_set
* Purpose:
* Configure hardware tables for a local CCM Tx enabled endpoint.
* Parameters:
* unit - (IN) BCM device number
* ep_info_p - (IN) Pointer to remote endpoint information.
* Retruns:
* BCM_E_XXX
*/
STATIC int
_bcm_oam_kt2_local_tx_mep_hw_set(int unit,
                                 bcm_oam_endpoint_info_t *ep_info_p)
{
    int rv = BCM_E_NONE;             /* Operation return status. */
    _bcm_oam_hash_data_t *hash_data; /* Pointer to endpoint hash data. */
    lmep_entry_t entry;              /* LMEP table entry. */
    lmep_1_entry_t entry_1;          /* LMEP_1 table entry. */
    int word;                        /* Word index. */
    uint32 reversed_maid[BCM_OAM_GROUP_NAME_LENGTH / 4];
                                     /* Group name in Hw format. */
    _bcm_oam_control_t *oc;          /* Pointer to control structure. */
    const bcm_oam_endpoint_info_t *ep_p; /* Pointer to endpoint info. */
    int subport = 0;
    int oam_replacement_offset = 0;
    int tpid_index = 0;
    _bcm_kt2_subport_info_t subport_info;
    int up_mep = 0;
    int pp_port = 0;
    int vlan_tag_control = 0;
    uint32 tag_type = 0;
    bcm_port_t up_mep_tx_port = 0;
    bcm_gport_t gport = 0;

    if (NULL == ep_info_p) {
        return (BCM_E_INTERNAL);
    }
    /* Initialize local endpoint info pointer. */
    ep_p = ep_info_p;
    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Get the stored endpoint information from hash table. */
    hash_data = &oc->oam_hash_data[ep_p->id];
    if (0 == hash_data->in_use) {
        return(BCM_E_INTERNAL);
    }
    sal_memset(&entry, 0, sizeof(lmep_entry_t));
    sal_memset(&entry_1, 0, sizeof(lmep_1_entry_t));
    /* Set the Group base index. */
    soc_LMEPm_field32_set(unit, &entry, MAID_INDEXf, ep_p->group);
    /* Set source MAC address to be used on transmitted packets. */
    soc_LMEPm_mac_addr_set(unit, &entry, SAf, ep_p->src_mac_address);
    /* Set the Maintenance Domain Level. */
    soc_LMEPm_field32_set(unit, &entry, MDLf, ep_p->level);
    /* Set endpoint name. */
    soc_LMEPm_field32_set(unit, &entry, MEPIDf, ep_p->name);
    /*
     * Set VLAN ID to be used in the transmitted packet.
     * For link level MEPs, this VLAN_ID == 0.
     */
    soc_LMEPm_field32_set(unit, &entry, SVLAN_TAGf, ep_p->vlan);
    /* Set packet priority value to be used in transmitted packet. */
    soc_LMEPm_field32_set(unit, &entry, PRIORITYf, ep_p->pkt_pri);
    /* Set CVLAN Id (inner packet priority packed above the inner VID). */
    soc_LMEPm_field32_set(unit, &entry, CVLAN_TAGf, ((ep_p->inner_pkt_pri << 13) | ep_p->inner_vlan));
    /* Set interval between CCM packet transmissions. */
    soc_LMEPm_field32_set
        (unit, &entry, CCM_INTERVALf,
         _bcm_kt2_oam_ccm_msecs_to_hw_encode(ep_p->ccm_period));
    /* Set Port status TLV in CCM Tx packets. */
    if (ep_p->flags & BCM_OAM_ENDPOINT_PORT_STATE_UPDATE) {
        if (ep_p->port_state > BCM_OAM_PORT_TLV_UP) {
            return BCM_E_PARAM;
        }
        soc_LMEPm_field32_set(unit, &entry, PORT_TLVf,
                              (ep_p->port_state == BCM_OAM_PORT_TLV_UP)
                              ? 1 : 0);
    }
    /* Set the MEP type - up/down. */
    soc_LMEPm_field32_set(unit, &entry, MEP_TYPEf,
                          ((ep_p->flags & BCM_OAM_ENDPOINT_UP_FACING) ? 1 : 0));
    /*
     * Construct group name for hardware.
     * i.e Word-reverse the MAID bytes for hardware.
     */
    for (word = 0; word < (BCM_OAM_GROUP_NAME_LENGTH / 4); ++word) {
        reversed_maid[word]
            = bcm_htonl(((uint32 *) oc->group_info[ep_p->group].name)
                        [((BCM_OAM_GROUP_NAME_LENGTH / 4) - 1) - word]);
    }
    /* Set the group name. */
    soc_LMEPm_field_set(unit, &entry, MAIDf, reversed_maid);
    /* Set OAM replacement offset & default TPID. */
    if (_BCM_OAM_EP_IS_VP_TYPE(hash_data)) {
        oam_replacement_offset = 1;
        if(hash_data->flags & BCM_OAM_ENDPOINT_MATCH_OUTER_AND_INNER_VLAN) {
            oam_replacement_offset = 5;
            vlan_tag_control = 3;
            tag_type = _BCM_OAM_DOMAIN_S_PLUS_CVLAN;
        }else if (hash_data->flags & BCM_OAM_ENDPOINT_MATCH_INNER_VLAN) {
            oam_replacement_offset = 3;
            vlan_tag_control = 1;
            tag_type =_BCM_OAM_DOMAIN_CVLAN;
        } else if (hash_data->vlan > 0) {
            oam_replacement_offset = 3;
            vlan_tag_control = 2;
            tag_type =_BCM_OAM_DOMAIN_SVLAN;
        }
    }
    /*
     * NOTE(review): for VP-type endpoints the switch below can overwrite
     * the values chosen above when oam_domain matches one of the listed
     * cases -- confirm this ordering is intentional.
     */
    switch (hash_data->oam_domain) {
    case _BCM_OAM_DOMAIN_PORT:
        oam_replacement_offset = 1;
        vlan_tag_control = 0;
        break;
    case _BCM_OAM_DOMAIN_CVLAN:
        vlan_tag_control = 1;
        oam_replacement_offset = 3;
        tag_type =_BCM_OAM_DOMAIN_CVLAN;
        break;
    case _BCM_OAM_DOMAIN_SVLAN:
        oam_replacement_offset = 3;
        vlan_tag_control = 2;
        tag_type =_BCM_OAM_DOMAIN_SVLAN;
        break;
    case _BCM_OAM_DOMAIN_S_PLUS_CVLAN:
        oam_replacement_offset = 5;
        vlan_tag_control = 3;
        tag_type = _BCM_OAM_DOMAIN_S_PLUS_CVLAN;
        break;
    default:
        break;
    }
    /* if S or S+C vlan - set outer tpid index */
    if ((tag_type == _BCM_OAM_DOMAIN_SVLAN) ||
        (tag_type == _BCM_OAM_DOMAIN_S_PLUS_CVLAN)) {
        rv = _bcm_kt2_tpid_entry_add(unit,
                                     (ep_p->outer_tpid ? ep_p->outer_tpid :
                                      BCM_OAM_DEFAULT_TPID),
                                     BCM_OAM_TPID_TYPE_OUTER, &tpid_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3 entry config failed in TPID set "
                                  "for EP=%d %s.\n"),
                       ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
        hash_data->outer_tpid_profile_index = tpid_index;
        soc_LMEPm_field32_set(unit, &entry, SVLAN_TPID_INDEXf, tpid_index);
    }
    if ((tag_type == _BCM_OAM_DOMAIN_CVLAN) ||
        /* C or S+C */
        (tag_type == _BCM_OAM_DOMAIN_S_PLUS_CVLAN)) {
        rv = _bcm_kt2_tpid_entry_add(unit,
                                     (ep_p->inner_tpid ? ep_p->inner_tpid :
                                      BCM_OAM_DEFAULT_TPID),
                                     BCM_OAM_TPID_TYPE_INNER, &tpid_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3 entry config failed in TPID set "
                                  "for EP=%d"" %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
        hash_data->inner_tpid_profile_index = tpid_index;
    }
    /* For trunk endpoints use the resolved member gport for port checks. */
    gport = hash_data->gport;
    if (BCM_GPORT_IS_TRUNK(ep_p->gport)) {
        gport = hash_data->resolved_trunk_gport;
    }
    if ((BCM_GPORT_IS_SUBPORT_PORT(gport)) &&
        (_BCM_KT2_GPORT_IS_SUBTAG_SUBPORT_PORT(unit, gport))) {
        /* CoE port */
        rv = _bcm_kt2_tpid_entry_add(unit,
                                     (ep_p->subport_tpid ? ep_p->subport_tpid :
                                      BCM_OAM_DEFAULT_TPID),
                                     BCM_OAM_TPID_TYPE_SUBPORT, &tpid_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3 entry config failed in TPID set "
                                  "for EP=%d %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
        soc_LMEPm_field32_set(unit, &entry,
                              SUBPORT_TAG_TPID_INDEXf, tpid_index);
        hash_data->subport_tpid_profile_index = tpid_index;
    }
    /* Set LMEP_1 table entry. */
    /* If COE or LINKPHY port replacement offset should be increased
       by 2 to accomodate - stream_id/subport tag */
    if (BCM_GPORT_IS_SUBPORT_PORT(gport)) {
        if ((_BCM_KT2_GPORT_IS_SUBTAG_SUBPORT_PORT(unit, gport)) ||
            (_BCM_KT2_GPORT_IS_LINKPHY_SUBPORT_PORT(unit, gport))) {
            oam_replacement_offset += 2;
            subport = 1 << 2;
        }
    }
    soc_LMEP_1m_field32_set(unit, &entry_1, OAM_REPLACEMENT_OFFSETf,
                            oam_replacement_offset);
    /* Set Tag status of the generated CCM packet
       we can set it based on incoming packet tag info
       if SUBPORT - vlan tag control = 1 << 2 | hash_data->oam_domain & 0x3 */
    if (subport) {
        soc_LMEP_1m_field32_set(unit, &entry_1, VLAN_TAG_CONTROLf,
                                (subport | (vlan_tag_control & 0x3)));
    } else {
        soc_LMEP_1m_field32_set(unit, &entry_1, VLAN_TAG_CONTROLf,
                                (vlan_tag_control & 0x3));
    }
    /* Program Tx LM counters for Ethernet endpoints (if any requested). */
    if(ep_info_p->type == bcmOAMEndpointTypeEthernet) {
        if((ep_info_p->ccm_tx_update_lm_counter_size) &&
           (_BCM_OAM_INVALID_INDEX != hash_data->local_tx_index)) {
            BCM_IF_ERROR_RETURN(bcm_kt2_oam_hw_ccm_tx_ctr_update(unit, ep_info_p));
        }
    }
    if (ep_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        up_mep = 1;
    }
    /* For DownMEP, set Queue number to which this packet must be enqueued */
    if (!up_mep) {
        soc_LMEP_1m_field32_set(unit, &entry_1, QUEUE_NUMf,
                                SOC_INFO(unit).port_uc_cosq_base[hash_data->dst_pp_port] +
                                ep_p->int_pri);
        /* Set the destination port on which packet needs to Tx. */
        pp_port = hash_data->dst_pp_port;
    } else {
        soc_LMEP_1m_field32_set(unit, &entry_1, INT_PRIf,
                                ep_p->int_pri);
        /* Set the source on which packet is received */
        pp_port = hash_data->src_pp_port;
    }
    /* Hardware-assisted Up-MEP on a VLAN VP: Tx port comes from tx_gport. */
    if (ep_p->flags2 & BCM_OAM_ENDPOINT_FLAGS2_VLAN_VP_UP_MEP_IN_HW) {
        up_mep_tx_port = _BCM_KT2_SUBPORT_PORT_ID_GET(ep_p->tx_gport);
        hash_data->src_pp_port = up_mep_tx_port;
        pp_port = hash_data->src_pp_port;
    }
    /* Set the PP port */
    soc_LMEP_1m_field32_set(unit, &entry_1, PP_PORTf, pp_port);
    /* Set Outgoing Subport tag to be inserted into CCM packet */
    if ((BCM_GPORT_IS_SUBPORT_PORT(gport)) &&
        (_BCM_KT2_GPORT_IS_SUBTAG_SUBPORT_PORT(unit, gport))) {
        /* Get the subport tag */
        rv = bcm_kt2_subport_pp_port_subport_info_get(unit, pp_port,
                                                      &subport_info);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: failed to get subport tag EP=%d"
                                  " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
        if (subport_info.port_type == _BCM_KT2_SUBPORT_TYPE_SUBTAG) {
            soc_LMEP_1m_field32_set(unit, &entry_1, SUBPORT_TAGf,
                                    subport_info.subtag);
        }
    }
    /* Set Interface status TLV in CCM Tx packets - 3-bits wide. */
    if (ep_p->flags & BCM_OAM_ENDPOINT_INTERFACE_STATE_UPDATE) {
        if ((ep_p->interface_state < BCM_OAM_INTERFACE_TLV_UP)
            || (ep_p->interface_state > BCM_OAM_INTERFACE_TLV_LLDOWN)) {
            return BCM_E_PARAM;
        }
        soc_LMEP_1m_field32_set(unit, &entry_1, INTERFACE_TLVf,
                                ep_p->interface_state);
    }
    /*
     * When this bit is '1', both port and interface TLV values are set in
     * CCM Tx packets - set in LMEP table.
     */
    if ((ep_p->flags & BCM_OAM_ENDPOINT_PORT_STATE_TX)
        || (ep_p->flags & BCM_OAM_ENDPOINT_INTERFACE_STATE_TX)) {
        soc_LMEPm_field32_set(unit, &entry, INSERT_TLVf, 1);
    }
    /* Set CCM packet Destination MAC address value. */
    soc_mem_mac_addr_set(unit, LMEP_1m, &entry_1, DAf,
                         ep_p->dst_mac_address);
    /* Write entry to hardware LMEP table. */
    SOC_IF_ERROR_RETURN
        (WRITE_LMEPm(unit, MEM_BLOCK_ALL, hash_data->local_tx_index,
                     &entry));
    /* Write entry to hardware LMEP_1 table. */
    SOC_IF_ERROR_RETURN
        (WRITE_LMEP_1m(unit, MEM_BLOCK_ALL, hash_data->local_tx_index,
                       &entry_1));
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_stm_table_update
* Purpose:
* Update MA_BASE_PTR in source trunk MAP table
* Parameters:
* unit - (IN) BCM device number
* mod_id - (IN) module id
* port_id - (IN) port id
* h_data_p - (IN) endpoint hash data
* Returns:
* BCM_E_XXX
*/
/*
 * Point the SOURCE_TRUNK_MAP entry for (mod_id, port_id) at the
 * endpoint's MA table block (MA_BASE_PTR).  The table lock is held for
 * the full read-modify-write.
 */
STATIC int
_bcm_kt2_oam_stm_table_update(int unit, bcm_module_t mod_id,
                              bcm_port_t port_id,
                              _bcm_oam_hash_data_t *h_data_p)
{
    source_trunk_map_table_entry_t map_entry;
    int map_index = 0;
    int rv;

    soc_mem_lock(unit, SOURCE_TRUNK_MAP_TABLEm);

    /* Resolve the table index from (module, port). */
    rv = _bcm_esw_src_mod_port_table_index_get(unit, mod_id,
                                               port_id, &map_index);
    if (BCM_SUCCESS(rv)) {
        rv = READ_SOURCE_TRUNK_MAP_TABLEm(unit, MEM_BLOCK_ANY,
                                          map_index, &map_entry);
    }
    if (BCM_SUCCESS(rv)) {
        /* Rewrite only the MA base pointer field. */
        soc_SOURCE_TRUNK_MAP_TABLEm_field32_set(unit, &map_entry, MA_BASE_PTRf,
                                                h_data_p->ma_base_index);
        rv = WRITE_SOURCE_TRUNK_MAP_TABLEm(unit, MEM_BLOCK_ALL, map_index,
                                           &map_entry);
    }

    soc_mem_unlock(unit, SOURCE_TRUNK_MAP_TABLEm);
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_port_mdl_update
* Purpose:
* Update MDL bitmap in port table
* Parameters:
* unit - (IN) BCM device number
* pp_port - (IN) pp port id
* reset - (IN) Reset MDL bitmap or not
* h_data_p - (IN) endpoint hash data
* mdl - (OUT) Maintenance domain level
* Retruns:
* BCM_E_XXX
*/
/*
 * Add or clear this endpoint's maintenance level in the PORT table MDL
 * bitmap of pp_port.  On return, *mdl holds the updated bitmap.
 */
STATIC int
_bcm_kt2_oam_port_mdl_update(int unit, uint32 pp_port, int reset,
                             _bcm_oam_hash_data_t *h_data_p, uint8 *mdl)
{
    port_tab_entry_t ptab_entry;
    uint8 new_bitmap;

    if (NULL == h_data_p) {
        return (BCM_E_INTERNAL);
    }

    /* Fetch the port table entry for this pp_port. */
    BCM_IF_ERROR_RETURN
        (soc_mem_read(unit, PORT_TABm, MEM_BLOCK_ANY, pp_port, &ptab_entry));

    /* Start from the bitmap currently programmed on the port. */
    new_bitmap = soc_PORT_TABm_field32_get(unit, &ptab_entry, MDL_BITMAPf);
    if (reset) {
        /* Clear the MDL bit for this endpoint. */
        new_bitmap &= ~(1 << h_data_p->level);
    } else {
        /* Mark this endpoint's level as active. */
        new_bitmap |= (1 << h_data_p->level);
    }
    *mdl = new_bitmap;

    soc_PORT_TABm_field32_set(unit, &ptab_entry, MDL_BITMAPf, new_bitmap);
    BCM_IF_ERROR_RETURN
        (soc_mem_write(unit, PORT_TABm, MEM_BLOCK_ALL, pp_port, &ptab_entry));
    return BCM_E_NONE;
}
/*
* Function:
* _bcm_kt2_oam_port_mdl_passive_update
* Purpose:
* Update passive MDL bitmap in egress port table
* Parameters:
* unit - (IN) BCM device number
* reset - (IN) Reset MDL bitmap
* h_data_p - (IN) endpoint hash data
* Retruns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_port_mdl_passive_update(int unit, int reset,
                                     _bcm_oam_hash_data_t *h_data_p, uint8 mdl)
{
    egr_port_entry_t egr_port_entry;
    uint8 passive_mdl = 0; /* Maintenance domain level-passive */

    if (NULL == h_data_p) {
        return (BCM_E_INTERNAL);
    }
    /* Passive demux should not be done for MIP */
    if (_BCM_OAM_EP_IS_MIP(h_data_p)) {
        return BCM_E_NONE;
    }
    /* Set MDL passive bitmap in EGR_PORT table */
    BCM_IF_ERROR_RETURN
        (soc_mem_read(unit, EGR_PORTm, MEM_BLOCK_ANY, h_data_p->dst_pp_port,
                      &egr_port_entry));
    if (reset) {
        if (mdl > 0) {
            /* Re-derive the passive bitmap from the remaining active MDLs. */
            _bcm_kt2_oam_get_passive_mdl_from_active_mdl(unit, mdl,
                h_data_p->ma_base_index, &passive_mdl, 0);
        } else {
            /* No active levels left -> clear the passive bitmap entirely. */
            passive_mdl = 0;
        }
        soc_EGR_PORTm_field32_set(unit, &egr_port_entry,
                                  MDL_BITMAP_PASSIVEf, passive_mdl);
    } else {
        /* read and update MDL bitmap */
        mdl = soc_EGR_PORTm_field32_get(unit, &egr_port_entry,
                                        MDL_BITMAP_PASSIVEf);
        mdl |= (1 << h_data_p->level);
        /* Set all the bits till the highest configured MDL, so that there are
           no holes in the bitmap */
        mdl |= ((1 << h_data_p->level) - 1);
        soc_EGR_PORTm_field32_set(unit, &egr_port_entry,
                                  MDL_BITMAP_PASSIVEf, mdl);
    }
    BCM_IF_ERROR_RETURN
        (soc_mem_write(unit, EGR_PORTm, MEM_BLOCK_ALL,
                       h_data_p->dst_pp_port, &egr_port_entry));
    return BCM_E_NONE;
}
/*
* Function:
* _bcm_kt2_oam_trunk_port_mdl_config
* Purpose:
* Update port mdl and stm table for OAM on trunk case
* Parameters:
* unit - (IN) BCM device number
* h_data_p - (IN) Pointer to hash data
* Retruns:
* BCM_E_XXX
*/
/*
 * Function:
 *     _bcm_kt2_oam_trunk_port_mdl_config
 * Purpose:
 *     For an OAM endpoint on a trunk, program the SOURCE_TRUNK_MAP entry
 *     and the PORT table MDL bitmap on every local member port.
 * Parameters:
 *     unit     - (IN) BCM device number
 *     h_data_p - (IN/OUT) Endpoint hash data; active_mdl_bitmap is
 *                updated on success.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_trunk_port_mdl_config(int unit,
                                   _bcm_oam_hash_data_t *h_data_p)
{
    int rv = BCM_E_NONE;             /* Operation return status. */
    _bcm_oam_control_t *oc;          /* Pointer to control structure. */
    int member_count = 0;
    bcm_port_t *member_array = NULL; /* Trunk member port array. */
    int local_member_count = 0;
    int i = 0;
    bcm_port_t port = 0;
    bcm_module_t module_id;          /* Module ID */
    uint8 mdl = 0;

    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    if(h_data_p->trunk_id == BCM_TRUNK_INVALID) {
        return BCM_E_PARAM;
    }
    /* Query the total member count of this trunk. */
    BCM_IF_ERROR_RETURN(bcm_esw_trunk_get(unit, h_data_p->trunk_id,
                                          NULL, 0, NULL, &member_count));
    if (0 == member_count) {
        /* No members have been added to the trunk group yet */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: No local members have been added to "
                              "the trunk group yet - %s.\n"), bcm_errmsg(rv)));
        return BCM_E_PARAM;
    }
    _BCM_OAM_ALLOC(member_array, bcm_port_t,
                   sizeof(bcm_port_t) * member_count, "Trunk info");
    if (NULL == member_array) {
        return (BCM_E_MEMORY);
    }
    /* Get members of the trunk belonging to this module */
    rv = _bcm_esw_trunk_local_members_get(unit, h_data_p->trunk_id,
                                          member_count, member_array,
                                          &local_member_count);
    if (BCM_FAILURE(rv)) {
        /*
         * Fix: propagate the failure.  Previously a failed member fetch
         * fell through silently and the function returned BCM_E_NONE.
         */
        sal_free(member_array);
        return (rv);
    }
    for(i = 0; i < local_member_count; i++) {
        /* Resolve (module, port) for this member. */
        rv = _bcm_kt2_pp_port_to_modport_get(unit, member_array[i],
                                             &module_id, &port);
        if (BCM_FAILURE(rv)) {
            sal_free(member_array);
            return (rv);
        }
        /* Point the member's SOURCE_TRUNK_MAP entry at this MA block. */
        rv = _bcm_kt2_oam_stm_table_update(unit, module_id,
                                           member_array[i], h_data_p);
        if (BCM_FAILURE(rv)) {
            sal_free(member_array);
            return (rv);
        }
        /* Mark this endpoint's level in the member port's MDL bitmap. */
        rv =_bcm_kt2_oam_port_mdl_update(unit, member_array[i], 0,
                                         h_data_p, &mdl);
        if (BCM_FAILURE(rv)) {
            sal_free(member_array);
            return (rv);
        }
    }
    h_data_p->active_mdl_bitmap = mdl;
    sal_free(member_array);
    return BCM_E_NONE;
}
/*
* Function:
* _bcm_port_domain_oam_hw_set
* Purpose:
* Configure port table and stm table entries for port based MEP.
* Parameters:
* unit - (IN) BCM device number
* ep_info_p - (IN) Pointer to remote endpoint information.
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_port_domain_oam_hw_set(int unit,
                            const bcm_oam_endpoint_info_t *ep_info_p)
{
    _bcm_oam_hash_data_t *h_data_p; /* Endpoint hash data pointer. */
    int rv = BCM_E_NONE;/* Operation return status. */
    uint8 mdl = 0; /* Maintenance domain level (active bitmap out-param). */
    _bcm_oam_control_t *oc; /* Pointer to control structure. */
    const bcm_oam_endpoint_info_t *ep_p; /* Pointer to endpoint info. */
    bcm_module_t mod_id = 0;
    bcm_port_t port_id = 0;
    if (NULL == ep_info_p) {
        return (BCM_E_INTERNAL);
    }
    /* Initialize local endpoint info pointer. */
    ep_p = ep_info_p;
    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Get endpoint hash data pointer. */
    h_data_p = &oc->oam_hash_data[ep_p->id];
    /* Set port table and source trunk map table.
     * Only a port-domain MEP (vlan == 0) needs this programming. */
    if (0 == ep_info_p->vlan) {
        if(h_data_p->trunk_id != BCM_TRUNK_INVALID) {
            /* Trunk case: program STM + port MDL on every local member. */
            rv = _bcm_kt2_oam_trunk_port_mdl_config(unit, h_data_p);
        } else {
            /* Single-port case: derive (module, port) from the source GLP. */
            port_id = _BCM_OAM_GLP_PORT_GET(h_data_p->sglp);
            mod_id = _BCM_OAM_GLP_MODULE_ID_GET(h_data_p->sglp);
            BCM_IF_ERROR_RETURN
                (_bcm_kt2_oam_stm_table_update(unit, mod_id,
                                               port_id, h_data_p));
            BCM_IF_ERROR_RETURN
                (_bcm_kt2_oam_port_mdl_update(unit, h_data_p->src_pp_port, 0,
                                              h_data_p, &mdl));
            h_data_p->active_mdl_bitmap = mdl;
        }
        /* NOTE(review): on the trunk branch 'mdl' is still 0 here; the
         * passive-update helper ignores its mdl argument when reset == 0
         * (it re-reads the table), so this appears benign — confirm. */
        BCM_IF_ERROR_RETURN
            (_bcm_kt2_oam_port_mdl_passive_update(unit, 0, h_data_p, mdl));
    }
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_l3_entry_set
* Purpose:
* Configure l3 entry tables entry for a local CCM Rx enabled endpoint.
* Parameters:
* unit - (IN) BCM device number
* ep_info_p - (IN) Pointer to remote endpoint information.
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_l3_entry_set(int unit,
                          bcm_oam_endpoint_info_t *ep_info_p)
{
    _bcm_oam_hash_data_t *h_data_p; /* Endpoint hash data pointer. */
    l3_entry_1_entry_t l3_entry; /* L3 entry buffer. */
    int l3_index = -1; /* L3 entry hardware index. */
    int rv; /* Operation return status. */
    uint8 mdl = 0; /* Maintenance domain level bitmap. */
    _bcm_oam_control_t *oc; /* Pointer to control structure. */
    bcm_oam_endpoint_info_t *ep_p; /* Pointer to endpoint info. */
    soc_field_t mdl_field = 0; /* Active or passive MDL bitmap field. */
    int up_mep = 0; /* Endpoint is an upMep */
    if (NULL == ep_info_p) {
        return (BCM_E_INTERNAL);
    }
    /* Initialize local endpoint info pointer. */
    ep_p = ep_info_p;
    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Get endpoint hash data pointer. */
    h_data_p = &oc->oam_hash_data[ep_p->id];
    /* Up-MEPs demux on the passive bitmap at ingress; down-MEPs on the
     * active bitmap. */
    if (ep_info_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        up_mep = 1 ;
        mdl_field = LMEP__MDL_BITMAP_PASSIVEf;
    } else {
        mdl_field = LMEP__MDL_BITMAP_ACTIVEf;
    }
    /* L3 entry */
    L3_LOCK(unit);
    if (BCM_SUCCESS
        (_bcm_kt2_oam_find_lmep(unit, h_data_p, &l3_index, &l3_entry))) {
        if ((mdl_field == LMEP__MDL_BITMAP_PASSIVEf) &&
            (_BCM_OAM_EP_IS_MIP(h_data_p))) {
            /* Passive demultiplexing should not be done on a MIP */
            L3_UNLOCK(unit);
            return BCM_E_NONE;
        }
        /* There's already an entry for this key; merge into it. */
        mdl = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, &l3_entry, mdl_field);
        /* If active bitmap is 0, so for first active endpoint in the mp_group
         * need to programme ma_base_ptr field during down_mep creation */
        if((mdl == 0) && (up_mep == 0)) {
            soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry, LMEP__MA_BASE_PTRf,
                                                   h_data_p->ma_base_index);
        }
        mdl |= (1 << ep_p->level);
        /* Set all the bits till the highest configured MDL, so that there are
           no holes in the passive bitmap */
        if (up_mep) {
            mdl |= ((1 << ep_p->level) - 1);
        }
        /* set MDL bitmap passive or active depending on up or down mep */
        soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry, mdl_field, mdl);
        rv = soc_mem_write(unit, L3_ENTRY_IPV4_UNICASTm, MEM_BLOCK_ALL,
                           l3_index, &l3_entry);
        if (BCM_FAILURE(rv)) {
            L3_UNLOCK(unit);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3_ENTRY table update failed for "
                                  "EP=%d" " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
    } else {
        /* This is the first entry at this key; build it from scratch. */
        sal_memset(&l3_entry, 0, sizeof(l3_entry_1_entry_t));
        /* Passive demultiplexing should not be done on a MIP */
        if (!((mdl_field == LMEP__MDL_BITMAP_PASSIVEf)
              && (_BCM_OAM_EP_IS_MIP(h_data_p)))) {
            mdl |= (1 << ep_p->level);
            /* Set all the bits till the highest configured MDL, so that there are
               no holes in the passive bitmap */
            if (up_mep) {
                mdl |= ((1 << ep_p->level) - 1);
            }
        }
        /* set MDL bitmap passive or active depending on up or down mep */
        soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry, mdl_field, mdl);
        if (mdl == 0) {
            /* MDL = 0 for case of MIP. Dont make the entry valid by
               programming other fields.
               Fix: release the L3 lock taken above — this early return
               previously leaked L3_LOCK and would deadlock later callers. */
            L3_UNLOCK(unit);
            return BCM_E_NONE;
        }
        soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry,
                                               LMEP__MA_BASE_PTRf,
                                               h_data_p->ma_base_index);
        /* Configure time stamp type 1588(PTP)/NTP */
        soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry,
                                               LMEP__TIMESTAMP_TYPEf,
                   ((ep_p->timestamp_format == bcmOAMTimestampFormatNTP) ? 1 : 0));
        h_data_p->ts_format = ep_p->timestamp_format;
        /* Construct LMEP view key for L3 Table insert operation. */
        _bcm_kt2_oam_lmep_key_construct(unit, h_data_p, &l3_entry);
        soc_L3_ENTRY_IPV4_UNICASTm_field32_set(unit, &l3_entry, VALIDf, 1);
        rv = soc_mem_insert(unit, L3_ENTRY_IPV4_UNICASTm,
                            MEM_BLOCK_ALL, &l3_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3_ENTRY table insert failed for "
                                  "EP=%d" " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            L3_UNLOCK(unit);
            return (rv);
        }
    }
    /* Cache the active bitmap for down-MEPs only; up-MEPs track it in the
     * egress MP group table instead. */
    if (up_mep == 0) {
        h_data_p->active_mdl_bitmap = mdl;
    }
    L3_UNLOCK(unit);
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_egr_mp_group_entry_set
* Purpose:
* Configure Egress MP group tables entry for local CCM Rx enabled endpoint.
* Parameters:
* unit - (IN) BCM device number
* ep_info_p - (IN) Pointer to remote endpoint information.
* ctr_index - (OUT) LM counter index
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_egr_mp_group_entry_set(int unit,
                                    bcm_oam_endpoint_info_t *ep_info_p,
                                    shr_idxres_element_t *ctr_index)
{
    _bcm_oam_hash_data_t *h_data_p; /* Endpoint hash data pointer. */
    int rv; /* Operation return status. */
    uint8 mdl = 0; /* Maintenance domain level bitmap. */
    _bcm_oam_control_t *oc; /* Pointer to control structure. */
    bcm_oam_endpoint_info_t *ep_p; /* Pointer to endpoint info. */
    soc_field_t mdl_field = 0; /* Active or passive MDL bitmap field. */
    egr_mp_group_entry_t egr_mp_group; /* Egress MP group tbl entry buffer */
    int up_mep = 0; /* Endpoint is an upMep */
    int mp_grp_index = 0; /* EGR_MP_GROUP entry hardware index. */
    /* NOTE(review): ctr_index is declared (OUT) in the header comment but
     * is never written by this function — confirm whether it is vestigial. */
    if (NULL == ep_info_p) {
        return (BCM_E_INTERNAL);
    }
    /* Initialize local endpoint info pointer. */
    ep_p = ep_info_p;
    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Get endpoint hash data pointer. */
    h_data_p = &oc->oam_hash_data[ep_p->id];
    /* At egress the roles are mirrored vs the ingress L3 entry: up-MEPs use
     * the active bitmap, down-MEPs the passive one. */
    if (ep_info_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        up_mep = 1;
        mdl_field = MDL_BITMAP_ACTIVEf;
    } else {
        mdl_field = MDL_BITMAP_PASSIVEf;
    }
    /* form the key to search EGR_MP_GRUP table */
    if (BCM_SUCCESS
        (_bcm_kt2_oam_find_egr_lmep(unit, h_data_p,
                                    &mp_grp_index, &egr_mp_group))) {
        /* There's already an entry for this */
        if (SAL_BOOT_BCMSIM) {
            /* Added as WAR for Cmodel issue */
            /* Construct endpoint egress MP group Table entry search operation. */
            _bcm_oam_egr_lmep_key_construct(unit, h_data_p, &egr_mp_group);
            soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group, VALIDf, 1);
        }
        mdl = soc_EGR_MP_GROUPm_field32_get(unit, &egr_mp_group, mdl_field);
        /* If active bitmap is 0, so for first active endpoint in the mp_group
         * need to programme ma_base_ptr field during up_mep creation */
        if((mdl == 0) && up_mep) {
            soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group, MA_BASE_PTRf,
                                          h_data_p->ma_base_index);
        }
        /* Passive demultiplexing should not be done on a MIP */
        if (!((mdl_field == MDL_BITMAP_PASSIVEf)
              && (_BCM_OAM_EP_IS_MIP(h_data_p)))) {
            mdl |= (1 << ep_p->level);
            /* Set all the bits till the highest configured MDL, so that there are
               no holes in the passive bitmap.
               NOTE(review): unlike the first-entry branch below, this path
               does not exclude BHH endpoints from hole-filling — confirm
               whether the asymmetry is intended. */
            if (mdl_field == MDL_BITMAP_PASSIVEf) {
                mdl |= ((1 << ep_p->level) - 1);
            }
        }
        /* set MDL bitmap passive or active depending on up or down mep */
        soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group, mdl_field, mdl);
        soc_mem_lock(unit, EGR_MP_GROUPm);
        rv = soc_mem_write(unit, EGR_MP_GROUPm, MEM_BLOCK_ALL,
                           mp_grp_index, &egr_mp_group);
        if (BCM_FAILURE(rv)) {
            soc_mem_unlock(unit, EGR_MP_GROUPm);
            return (rv);
        }
        soc_mem_unlock(unit, EGR_MP_GROUPm);
    } else {
        /* This is the first entry at this key; build a fresh entry. */
        sal_memset(&egr_mp_group, 0, sizeof(egr_mp_group_entry_t));
        /* Passive demultiplexing should not be done on a MIP */
        if (!((mdl_field == MDL_BITMAP_PASSIVEf)
              && (_BCM_OAM_EP_IS_MIP(h_data_p)))) {
            mdl = (1 << ep_p->level);
            /* Set all the bits till the highest configured MDL, so that there are
               no holes in the passive bitmap */
            if ( mdl_field == MDL_BITMAP_PASSIVEf && !(BHH_EP_TYPE(ep_info_p)) ) {
                mdl |= ((1 << ep_p->level) - 1);
            }
        }
        if (mdl == 0) {
            /* MDL = 0 for case of MIP. Dont make the entry valid by
               programming other fields. (No lock is held here.) */
            return BCM_E_NONE;
        }
        /* set MDL bitmap passive or active depending on up or down mep */
        soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group, mdl_field, mdl);
        soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group, MA_BASE_PTRf,
                                      h_data_p->ma_base_index);
        /* Configure time stamp type 1588(PTP)/NTP */
        soc_EGR_MP_GROUPm_field32_set(unit, &egr_mp_group,
                                      TIMESTAMP_TYPEf,
                  ((ep_p->timestamp_format == bcmOAMTimestampFormatNTP) ? 1 : 0));
        h_data_p->ts_format = ep_p->timestamp_format;
        /* Construct LMEP view key for egress MP group table insert. */
        _bcm_oam_egr_lmep_key_construct(unit, h_data_p, &egr_mp_group);
        soc_mem_lock(unit, EGR_MP_GROUPm);
        rv = soc_mem_insert(unit, EGR_MP_GROUPm, MEM_BLOCK_ALL, &egr_mp_group);
        if (BCM_FAILURE(rv)) {
            soc_mem_unlock(unit, EGR_MP_GROUPm);
            return (rv);
        }
        soc_mem_unlock(unit, EGR_MP_GROUPm);
    }
    /* Cache the active bitmap for up-MEPs only; down-MEPs track it via the
     * ingress L3 entry instead. */
    if (up_mep == 1) {
        h_data_p->active_mdl_bitmap = mdl;
    }
    return (BCM_E_NONE);
}
/*
 * Rebuild the software MA_INDEX -> endpoint-id map for the MA group that
 * h_data_p belongs to. The hardware packs one MA_INDEX slot per configured
 * MDL level (no holes), so adding or deleting a level shifts the offsets of
 * every higher level; this routine recomputes the compacted layout into a
 * temp array and copies it back over the group's region of the map.
 * 'delete' == 0 inserts h_data_p->ep_id at its level; non-zero removes it.
 */
STATIC int
_bcm_kt2_oam_ma_idx_to_ep_id_mapping_op (int unit,
                                         _bcm_oam_hash_data_t *h_data_p,
                                         soc_mem_t mem,
                                         int delete)
{
    uint32 mdl_value[1] = { 0 };       /* Active MDL bitmap (SHR_BIT word). */
    int bit_pos = 0;                   /* Current MDL level being walked.   */
    int current_offset = 0;            /* Offset in the existing layout.    */
    int new_offset = 0;                /* Offset in the rebuilt layout.     */
    int entry_count = 0;               /* Slots reserved for this group.    */
    shr_idxres_list_handle_t pool;     /* NOTE: fetched but unused here.    */
    bcm_oam_endpoint_t *temp_array = NULL; /* Rebuilt map for this group.   */
    _bcm_oam_control_t *oc = NULL;
    int ma_base_index = _BCM_OAM_INVALID_INDEX;
    int is_upmep = 0;
    l3_entry_1_entry_t l3_entry; /* LMEP view table entry. */
    int l3_index = -1; /* L3 table hardware index. */
    egr_mp_group_entry_t egr_mp_group; /* Egress MP group tbl entry buffer */
    int mp_grp_index = -1;
    int stm_index = 0;
    source_trunk_map_table_entry_t stm_entry;
    uint8 port_mdl = 0;
    int rv = BCM_E_NONE;
    if (NULL == h_data_p) {
        return (BCM_E_INTERNAL);
    }
    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    if (mem == MA_INDEXm) {
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ma_idx_num_entries_and_pool_get(unit,
                            h_data_p, &entry_count, &pool));
    } else {
        /* EGR_MA_INDEX always reserves one slot per possible MDL (8). */
        entry_count = 8;
    }
    if (entry_count <= 0) {
        return BCM_E_INTERNAL;
    }
    ma_base_index = h_data_p->ma_base_index;
    is_upmep = (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING)?1:0;
    /* Read the active MDL bitmap from whichever hardware table owns it for
     * this endpoint's domain (port / up-MEP egress / down-MEP ingress). */
    if (h_data_p->oam_domain == _BCM_OAM_DOMAIN_PORT) {
        rv = _bcm_kt2_oam_find_port_lmep(unit, h_data_p, &stm_index, &stm_entry, &port_mdl);
        if (BCM_SUCCESS(rv)) {
            mdl_value[0] = port_mdl;
        } else {
            /* If app is not initialized (say rc), then during detach (delete)
             * we will not find the entry. simply return without error */
            if ((delete) && (rv == BCM_E_NOT_FOUND) && !(oc->init)) {
                return BCM_E_NONE;
            } else {
                return BCM_E_INTERNAL;
            }
        }
    } else if (is_upmep) {
        rv = _bcm_kt2_oam_find_egr_lmep(unit, h_data_p,
                                        &mp_grp_index, &egr_mp_group);
        if (BCM_SUCCESS(rv)) {
            mdl_value[0] = soc_EGR_MP_GROUPm_field32_get(unit, &egr_mp_group,
                                                         MDL_BITMAP_ACTIVEf);
        } else {
            /* If app is not initialized (say rc), then during detach (delete)
             * we will not find the entry. simply return without error */
            if ((delete) && (rv == BCM_E_NOT_FOUND) && !(oc->init)) {
                return BCM_E_NONE;
            } else {
                return BCM_E_INTERNAL;
            }
        }
    } else {
        rv = _bcm_kt2_oam_find_lmep(unit, h_data_p, &l3_index, &l3_entry);
        if (BCM_SUCCESS(rv)) {
            mdl_value[0] = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, &l3_entry,
                                                      LMEP__MDL_BITMAP_ACTIVEf);
        } else {
            /* If app is not initialized (say rc), then during detach (delete)
             * we will not find the entry. simply return without error */
            if ((delete) && (rv == BCM_E_NOT_FOUND) && !(oc->init)) {
                return BCM_E_NONE;
            } else {
                return BCM_E_INTERNAL;
            }
        }
    }
    _BCM_OAM_ALLOC(temp_array, bcm_oam_endpoint_t,
                   sizeof(bcm_oam_endpoint_t) * entry_count, "endpoint id array");
    if (NULL == temp_array) {
        return (BCM_E_MEMORY);
    }
    /* Initialize the entry buffer (relies on _BCM_OAM_INVALID_INDEX being an
     * all-ones byte pattern, i.e. -1). */
    sal_memset(temp_array, _BCM_OAM_INVALID_INDEX, sizeof(bcm_oam_endpoint_t) * entry_count);
    if (delete) {
        /* Copy every level's entry except our own, compacting downwards.
         * NOTE: the bitmap here still includes our level (delete runs before
         * the hardware bitmap is cleared), hence the skip at our bit. */
        for (bit_pos = 0; bit_pos <= 7; bit_pos++) {
            if (SHR_BITGET(mdl_value, bit_pos)) {
                if (bit_pos == h_data_p->level) {
                    current_offset++;
                } else {
                    temp_array[new_offset] =
                        oc->_bcm_oam_ma_idx_to_ep_id_map
                        [_BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_IDX
                        (ma_base_index, current_offset, is_upmep)];
                    new_offset++;
                    current_offset++;
                }
            }
        }
    } else {
        /* Insert our endpoint id at its level's slot, shifting the existing
         * entries for the other levels into their new offsets. */
        for (bit_pos = 0; bit_pos <= 7; bit_pos++) {
            if (SHR_BITGET(mdl_value, bit_pos)) {
                if (bit_pos == h_data_p->level) {
                    temp_array[new_offset] = h_data_p->ep_id;
                    new_offset++;
                } else {
                    temp_array[new_offset] =
                        oc->_bcm_oam_ma_idx_to_ep_id_map[
                        _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_IDX(ma_base_index,
                                                current_offset, is_upmep)];
                    new_offset++;
                    current_offset++;
                }
            }
        }
    }
    /* Commit the rebuilt layout over this group's region of the map. */
    sal_memcpy(&(oc->_bcm_oam_ma_idx_to_ep_id_map[
                 _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_IDX
                 (ma_base_index, 0, is_upmep)]),
               temp_array, sizeof(bcm_oam_endpoint_t) * entry_count);
    sal_free(temp_array);
    return (BCM_E_NONE);
}
/*
 * Record a newly created local Rx endpoint in the software
 * MA_INDEX -> endpoint-id map. No-op for remote or Tx-only endpoints.
 */
STATIC int
_bcm_kt2_oam_ma_idx_to_ep_id_mapping_add (int unit,
                                          _bcm_oam_hash_data_t *h_data_p)
{
    soc_mem_t map_mem;

    /* h_data_p being NULL could already be some other error
     * scenario. Lets not return error for such a scenario.
     */
    if (h_data_p == NULL) {
        return BCM_E_NONE;
    }

    /* Remote endpoints and Tx-only endpoints own no Rx state to map. */
    if ((h_data_p->flags & BCM_OAM_ENDPOINT_REMOTE)
        || (h_data_p->local_rx_enabled != 1)
        || (h_data_p->ma_base_index == _BCM_OAM_INVALID_INDEX)) {
        return BCM_E_NONE;
    }

    /* Up-MEPs live in the egress MA index table, down-MEPs in the
     * ingress one. */
    map_mem = (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) ?
              EGR_MA_INDEXm : MA_INDEXm;

    return _bcm_kt2_oam_ma_idx_to_ep_id_mapping_op(unit, h_data_p,
                                                   map_mem, 0);
}
/*
 * Remove a local Rx endpoint from the software MA_INDEX -> endpoint-id
 * map. No-op for remote or Tx-only endpoints.
 */
STATIC int
_bcm_kt2_oam_ma_idx_to_ep_id_mapping_delete (int unit,
                                             _bcm_oam_hash_data_t *h_data_p)
{
    soc_mem_t map_mem;

    /* h_data_p being NULL could already be some other error
     * scenario. Lets not return error for such a scenario.
     */
    if (h_data_p == NULL) {
        return BCM_E_NONE;
    }

    /* Remote endpoints and Tx-only endpoints own no Rx state to map. */
    if ((h_data_p->flags & BCM_OAM_ENDPOINT_REMOTE)
        || (h_data_p->local_rx_enabled != 1)
        || (h_data_p->ma_base_index == _BCM_OAM_INVALID_INDEX)) {
        return BCM_E_NONE;
    }

    /* Up-MEPs live in the egress MA index table, down-MEPs in the
     * ingress one. */
    map_mem = (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) ?
              EGR_MA_INDEXm : MA_INDEXm;

    return _bcm_kt2_oam_ma_idx_to_ep_id_mapping_op(unit, h_data_p,
                                                   map_mem, 1);
}
/* Return the endpoint id stored at MA_INDEX offset 'ma_idx' in the software
 * map (may be _BCM_OAM_INVALID_INDEX for an unused slot). No bounds
 * checking is performed; the caller must pass a valid index. */
STATIC INLINE bcm_oam_endpoint_t
_bcm_kt2_oam_ma_idx_to_ep_id_mapping_get (_bcm_oam_control_t *oc, int ma_idx)
{
    return oc->_bcm_oam_ma_idx_to_ep_id_map[ma_idx];
}
/* Allocate the software MA_INDEX -> endpoint-id map on the unit's OAM
 * control structure and mark every slot invalid. Returns BCM_E_MEMORY on
 * allocation failure. */
STATIC int _bcm_kt2_oam_ma_idx_to_ep_id_mapping_init (int unit)
{
    _bcm_oam_control_t *oc; /* Pointer to control structure. */
    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* NOTE(review): the same macro is used for both the allocation size and
     * the memset length below, so the units are at least consistent; the
     * memset relies on _BCM_OAM_INVALID_INDEX being -1 (all-ones bytes) —
     * TODO confirm. */
    _BCM_OAM_ALLOC(oc->_bcm_oam_ma_idx_to_ep_id_map, bcm_oam_endpoint_t,
                   _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_TBL_SIZE , "MA_IDX_TO_EP_ID map");
    if (oc->_bcm_oam_ma_idx_to_ep_id_map == NULL) {
        return BCM_E_MEMORY;
    }
    /* Init it to all invalid */
    sal_memset(oc->_bcm_oam_ma_idx_to_ep_id_map, _BCM_OAM_INVALID_INDEX,
               _BCM_KT2_OAM_MA_IDX_TO_EP_ID_MAPPING_TBL_SIZE);
    return BCM_E_NONE;
}
/* Release the software MA_INDEX -> endpoint-id map, if it was allocated. */
STATIC int _bcm_kt2_oam_ma_idx_to_ep_id_mapping_destroy (int unit)
{
    _bcm_oam_control_t *oc = NULL; /* OAM control structure pointer. */

    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    if (NULL != oc->_bcm_oam_ma_idx_to_ep_id_map) {
        sal_free(oc->_bcm_oam_ma_idx_to_ep_id_map);
        oc->_bcm_oam_ma_idx_to_ep_id_map = NULL;
    }
    return BCM_E_NONE;
}
/*
 * Rewrite the (EGR_)MA_INDEX table region of an endpoint's MA group so that
 * one entry exists per configured MDL level with no holes, inserting
 * (delete == 0) or removing (delete != 0) this endpoint's entry.
 * The DMA buffer is sized 2 * entry_count: the first half receives the
 * current hardware entries (read_range), the second half is filled with the
 * compacted new layout (new_offset starts at entry_count) and is then
 * written back (write_range from the pointer at index entry_count).
 */
STATIC int
_bcm_kt2_oam_ma_index_entry_set(int unit,
                                _bcm_oam_hash_data_t *h_data_p,
                                soc_mem_t mem,
                                void *ma_idx_ptr, int delete)
{
    int rv = BCM_E_NONE;
    uint32 mdl_value[1] = { 0 };   /* Active MDL bitmap (SHR_BIT word).    */
    int bit_pos = 0;               /* Current MDL level being walked.      */
    int current_offset = 0;        /* Offset into the old (read) layout.   */
    int new_offset = 0;            /* Offset into the new (write) layout.  */
    int entry_count = 0;           /* Slots reserved for this MA group.    */
    int entry_mem_size = 0;        /* Per-entry size of 'mem'.             */
    ma_index_entry_t *entry_buf;   /* DMA buffer (old + new halves).       */
    ma_index_entry_t *entry;       /* Destination entry pointer.           */
    ma_index_entry_t *entry_current; /* Source entry pointer.              */
    shr_idxres_list_handle_t pool = NULL; /* NOTE: fetched but unused.     */
    if (NULL == h_data_p) {
        return (BCM_E_INTERNAL);
    }
    if (mem == MA_INDEXm) {
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ma_idx_num_entries_and_pool_get(unit,
                            h_data_p, &entry_count, &pool));
        new_offset = entry_count;
        entry_mem_size = sizeof(ma_index_entry_t);
    } else {
        /* EGR_MA_INDEX always reserves one slot per possible MDL (8). */
        entry_mem_size = sizeof(egr_ma_index_entry_t);
        new_offset = entry_count = 8;
    }
    if(entry_count <= 0) {
        return BCM_E_INTERNAL;
    }
    /* Allocate buffer to store the DMAed table entries. */
    entry_buf = soc_cm_salloc(unit, entry_mem_size * entry_count * 2,
                              "MA index table entry buffer");
    if (NULL == entry_buf) {
        return (BCM_E_MEMORY);
    }
    /* Initialize the entry buffer. */
    sal_memset(entry_buf, 0, entry_mem_size * entry_count * 2);
    /* Read the table entries into the buffer. */
    rv = soc_mem_read_range(unit, mem, MEM_BLOCK_ALL, h_data_p->ma_base_index,
                            (h_data_p->ma_base_index + entry_count - 1),
                            entry_buf);
    if (BCM_FAILURE(rv)) {
        if (entry_buf) {
            soc_cm_sfree(unit, entry_buf);
        }
        return rv;
    }
    mdl_value[0] = h_data_p->active_mdl_bitmap;
    if (delete) {
        /* Compact the remaining levels' entries, skipping our own slot. */
        for (bit_pos = 0; bit_pos <= 7; bit_pos++) {
            if (SHR_BITGET(mdl_value, bit_pos)) {
                if (bit_pos == h_data_p->level) {
                    current_offset++;
                } else {
                    entry = soc_mem_table_idx_to_pointer(unit, mem,
                                                         ma_index_entry_t *,
                                                         entry_buf, new_offset);
                    entry_current = soc_mem_table_idx_to_pointer(unit, mem,
                                                         ma_index_entry_t *,
                                                         entry_buf, current_offset);
                    sal_memcpy(entry, entry_current, entry_mem_size);
                    new_offset++;
                    current_offset++;
                }
            }
        }
    } else {
        /* Insert the caller-built entry at our level's slot and shift the
         * other levels' entries into their new offsets. */
        for (bit_pos = 0; bit_pos <= 7; bit_pos++) {
            if (SHR_BITGET(mdl_value, bit_pos)) {
                if (bit_pos == h_data_p->level) {
                    entry = soc_mem_table_idx_to_pointer(unit, mem,
                                                         ma_index_entry_t *,
                                                         entry_buf, new_offset);
                    sal_memcpy(entry, ma_idx_ptr, entry_mem_size);
                    /* Doing Mod entry_count since new_offset starts
                       from entry_count */
                    h_data_p->local_rx_index = h_data_p->ma_base_index +
                                               (new_offset % entry_count);
                    new_offset++;
                } else {
                    entry = soc_mem_table_idx_to_pointer(unit, mem,
                                                         ma_index_entry_t *,
                                                         entry_buf, new_offset);
                    entry_current = soc_mem_table_idx_to_pointer(unit, mem,
                                                         ma_index_entry_t *,
                                                         entry_buf, current_offset);
                    sal_memcpy(entry, entry_current, entry_mem_size);
                    new_offset++;
                    current_offset++;
                }
            }
        }
    }
    /* Move to the entry_count index since from there the newly programmed
     * ma_index table memory is present */
    entry = soc_mem_table_idx_to_pointer(unit, mem, ma_index_entry_t *,
                                         entry_buf, entry_count);
    rv = soc_mem_write_range(unit, mem, MEM_BLOCK_ALL, h_data_p->ma_base_index,
                             (h_data_p->ma_base_index + entry_count - 1),
                             entry);
    if (BCM_FAILURE(rv)) {
        if (entry_buf) {
            soc_cm_sfree(unit, entry_buf);
        }
        return rv;
    }
    if (entry_buf) {
        soc_cm_sfree(unit, entry_buf);
    }
    return (BCM_E_NONE);
}
/*
 * Compute the MA_INDEX offset of a port-domain endpoint: the number of
 * levels configured below its own level in the PORT_TAB active MDL bitmap.
 * Also caches the bitmap in h_data_p->active_mdl_bitmap.
 * Returns BCM_E_INTERNAL if the endpoint's level is not set in the bitmap.
 */
STATIC int
_bcm_kt2_oam_port_table_ma_index_offset_get(int unit,
                                            _bcm_oam_hash_data_t *h_data_p,
                                            int *ma_offset)
{
    int rv = BCM_E_NONE;          /* Operation return status.              */
    uint32 mdl_value[1] = { 0 };  /* Active MDL bitmap (SHR_BIT word).     */
    int bit_pos = 0;              /* Current MDL level being walked.       */
    bcm_port_t port_id;           /* Port ID. */
    int local_member_count = 0;
    port_tab_entry_t port_entry;  /* PORT_TAB entry buffer.                */
    int offset_found = 0;         /* Endpoint level located in bitmap.     */
    if (NULL == h_data_p) {
        return (BCM_E_INTERNAL);
    }
    if(h_data_p->trunk_id != BCM_TRUNK_INVALID) {
        rv = _bcm_kt2_oam_trunk_port_mdl_config(unit, h_data_p);
        /* Fix: this return value was previously ignored; on failure the
         * bitmap read below would be stale or meaningless. */
        if (BCM_FAILURE(rv)) {
            return (rv);
        }
        /* Get a member of the trunk belonging to this module */
        if (BCM_SUCCESS(_bcm_esw_trunk_local_members_get(unit,
                                                         h_data_p->trunk_id, 1,
                                                         &port_id,
                                                         &local_member_count))) {
        } else {
            return (BCM_E_INTERNAL);
        }
        BCM_IF_ERROR_RETURN
            (soc_mem_read(unit, PORT_TABm,
                          MEM_BLOCK_ANY, port_id, &port_entry));
    } else {
        BCM_IF_ERROR_RETURN
            (soc_mem_read(unit, PORT_TABm, MEM_BLOCK_ANY,
                          h_data_p->src_pp_port, &port_entry));
    }
    /* The MA_INDEX offset is the count of configured levels strictly
     * below this endpoint's level. */
    mdl_value[0] = soc_PORT_TABm_field32_get(unit, &port_entry, MDL_BITMAPf);
    for (bit_pos = 0; bit_pos <= 7; bit_pos++) {
        if (SHR_BITGET(mdl_value, bit_pos)) {
            if (bit_pos == h_data_p->level) {
                offset_found = 1;
                break;
            } else {
                *ma_offset += 1;
            }
        }
    }
    h_data_p->active_mdl_bitmap = mdl_value[0];
    /* Added the line below to avoid compiler warning - unused variable */
    mdl_value[0] = *ma_offset;
    if (offset_found == 0) {
        return (BCM_E_INTERNAL);
    }
    return rv;
}
/*
 * Compute the MA_INDEX offset of a VLAN/VP-domain endpoint: the number of
 * levels configured below its own level in the active MDL bitmap of the
 * EGR_MP_GROUP entry (up-MEP) or L3 LMEP entry (down-MEP).
 * Also caches the bitmap in h_data_p->active_mdl_bitmap.
 * Returns BCM_E_INTERNAL if the endpoint's level is not set in the bitmap.
 */
STATIC int
_bcm_kt2_oam_mp_grp_table_ma_index_offset_get(int unit,
                                              _bcm_oam_hash_data_t *h_data_p,
                                              int *ma_offset)
{
    int rv = BCM_E_NONE;          /* Operation return status.              */
    uint32 mdl_value[1] = { 0 };  /* Active MDL bitmap (SHR_BIT word).     */
    int bit_pos = 0;              /* Current MDL level being walked.       */
    soc_mem_t mem = 0;            /* Table owning the bitmap.              */
    l3_entry_1_entry_t l3_entry;  /* L3 entry buffer. */
    int entry_index = 0;
    egr_mp_group_entry_t egr_mp_group; /* Egress MP group tbl entry buffer */
    void *entry;                  /* Points at whichever buffer was used.  */
    soc_field_t mdl_field = 0;
    int offset_found = 0;         /* Endpoint level located in bitmap.     */
    if (NULL == h_data_p) {
        return (BCM_E_INTERNAL);
    }
    if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        mem = EGR_MP_GROUPm;
        mdl_field = MDL_BITMAP_ACTIVEf;
        entry = &egr_mp_group;
        /* Fix: capture the lookup status. Previously rv was never assigned
         * from this call, so a failed lookup logged bcm_errmsg(BCM_E_NONE)
         * and returned success to the caller. */
        rv = _bcm_kt2_oam_find_egr_lmep(unit, h_data_p, &entry_index,
                                        &egr_mp_group);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: EGR MP group tbl entry not found-%s.\n"),
                       bcm_errmsg(rv)));
            return rv;
        }
    } else {
        mem = L3_ENTRY_IPV4_UNICASTm;
        mdl_field = LMEP__MDL_BITMAP_ACTIVEf;
        entry = &l3_entry;
        /* Fix: capture the lookup status (same defect as above). */
        rv = _bcm_kt2_oam_find_lmep(unit, h_data_p, &entry_index, &l3_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: MP group tbl entry not found - %s.\n"),
                       bcm_errmsg(rv)));
            return rv;
        }
    }
    /* The MA_INDEX offset is the count of configured levels strictly
     * below this endpoint's level. */
    mdl_value[0] = soc_mem_field32_get(unit, mem, entry, mdl_field);
    for (bit_pos = 0; bit_pos <= 7; bit_pos++) {
        if (SHR_BITGET(mdl_value, bit_pos)) {
            if (bit_pos == h_data_p->level) {
                offset_found = 1;
                break;
            } else {
                *ma_offset += 1;
            }
        }
    }
    h_data_p->active_mdl_bitmap = mdl_value[0];
    if (offset_found == 0) {
        return (BCM_E_INTERNAL);
    }
    return rv;
}
/*
 * Dispatch the MA_INDEX offset computation to the port-domain or
 * MP-group-table helper based on the endpoint's OAM domain.
 * Always returns BCM_E_NONE on the tolerated BCM_E_NOT_FOUND port-domain
 * case (see comment below); other failures are propagated.
 */
STATIC int
_bcm_kt2_oam_ma_index_offset_get(int unit, _bcm_oam_hash_data_t *h_data_p,
                                 int *ma_offset)
{
    int rv = BCM_E_NONE;
    if (NULL == h_data_p) {
        return (BCM_E_INTERNAL);
    }
    /* setting this to 0 is necessary as in the below functions this variable
     * passed as a pointer is just incremented based upon the level.
     * setting it to zero avoids any junk value that might have come in
     * by the caller function. */
    *ma_offset = 0;
    if (h_data_p->oam_domain == _BCM_OAM_DOMAIN_PORT) {
        rv = _bcm_kt2_oam_port_table_ma_index_offset_get(unit, h_data_p,
                                                         ma_offset);
        /* BCM_E_NOT_FOUND is not an error when switch is re-initializing
           as entry would have been already deleted by port module init */
        if (BCM_FAILURE(rv) && (rv != BCM_E_NOT_FOUND)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Port table read failed - %s.\n"),
                       bcm_errmsg(rv)));
            return rv;
        }
    } else {
        rv = _bcm_kt2_oam_mp_grp_table_ma_index_offset_get(unit, h_data_p,
                                                           ma_offset);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: MP group table read failed - %s.\n"),
                       bcm_errmsg(rv)));
            return rv;
        }
    }
    /* Note: a tolerated BCM_E_NOT_FOUND above is deliberately flattened to
     * success here. */
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_local_rx_mep_hw_set
* Purpose:
* Configure hardware tables for a local CCM Rx enabled endpoint.
* Parameters:
* unit - (IN) BCM device number
* ep_info_p - (IN) Pointer to remote endpoint information.
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_local_rx_mep_hw_set(int unit,
                                 bcm_oam_endpoint_info_t *ep_info_p)
{
    _bcm_oam_hash_data_t *h_data_p; /* Endpoint hash data pointer. */
    ma_index_entry_t ma_idx_entry; /* MA_INDEX table entry buffer. */
    egr_ma_index_entry_t egr_ma_idx_entry; /* EGR_MA_INDEX table entry buffer*/
    oam_opcode_control_profile_entry_t opcode_entry; /* Opcode control
                                                        profile entry. */
    egr_oam_opcode_control_profile_entry_t egr_opcode_entry; /* Egress
                                            Opcode control profile entry*/
    void *entries[1]; /* Pointer to opcode control entry. */
    uint32 profile_index; /* opcode control profile index. */
    int rv; /* Operation return status. */
    _bcm_oam_control_t *oc; /* Pointer to control structure. */
    bcm_oam_endpoint_info_t *ep_p; /* Pointer to endpoint info. */
    soc_mem_t opcode_profile_mem = 0; /* Ingress or egress profile memory. */
    soc_mem_t ma_index_mem = 0; /* MA_INDEX or EGR_MA_INDEX. */
    shr_idxres_element_t egr_ctr_index = 0;
    soc_profile_mem_t *opcode_control_profile; /* profile to be used -
                                                  ingress or egress */
    void *ma_idx_ptr; /* Points at the direction-specific entry buffer. */
    void *opcode_profile_entry; /* Points at the direction-specific entry. */
    if (NULL == ep_info_p) {
        return (BCM_E_INTERNAL);
    }
    /* Initialize local endpoint info pointer. */
    ep_p = ep_info_p;
    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Get endpoint hash data pointer. */
    h_data_p = &oc->oam_hash_data[ep_p->id];
    if ( 0 == h_data_p->in_use) {
        return (BCM_E_INTERNAL);
    }
    /* Select the egress (up-MEP) or ingress (down-MEP) table set; the rest
     * of the routine is direction-agnostic through these pointers. */
    if (ep_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        opcode_profile_mem = EGR_OAM_OPCODE_CONTROL_PROFILEm;
        opcode_control_profile = &oc->egr_oam_opcode_control_profile;
        ma_index_mem = EGR_MA_INDEXm;
        ma_idx_ptr = &egr_ma_idx_entry;
        opcode_profile_entry = &egr_opcode_entry;
        /* Clear the entry data. */
        sal_memset(&egr_opcode_entry, 0,
                   sizeof(egr_oam_opcode_control_profile_entry_t));
        sal_memset(&egr_ma_idx_entry, 0, sizeof(egr_ma_index_entry_t));
    } else {
        opcode_profile_mem = OAM_OPCODE_CONTROL_PROFILEm;
        opcode_control_profile = &oc->oam_opcode_control_profile;
        ma_index_mem = MA_INDEXm;
        ma_idx_ptr = &ma_idx_entry;
        opcode_profile_entry = &opcode_entry;
        /* Clear the entry data. */
        sal_memset(&opcode_entry, 0,
                   sizeof(oam_opcode_control_profile_entry_t));
        sal_memset(&ma_idx_entry, 0, sizeof(ma_index_entry_t));
    }
    /* Construct the opcode control profile table entry. */
    if (ep_p->opcode_flags & _BCM_KT2_OAM_OPCODE_MASK) {
        /* Use application specified opcode control settings. */
        rv = _bcm_kt2_oam_opcode_profile_entry_set(unit, h_data_p,
                                                   opcode_profile_mem,
                                                   ep_p->opcode_flags,
                                                   opcode_profile_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Opcode profile set failed for EP=%d"
                                  " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
    } else {
        /* Use default opcode control profile settings. */
        if (_BCM_OAM_EP_IS_MIP(h_data_p)) {
            rv = _bcm_kt2_oam_mip_opcode_profile_entry_init(unit, opcode_profile_mem,
                                                            opcode_profile_entry);
        } else {
            rv = _bcm_kt2_oam_opcode_profile_entry_init(unit, opcode_profile_mem,
                                                        opcode_profile_entry);
        }
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Opcode profile init failed for EP=%d"
                                  " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
    }
    soc_mem_lock(unit, opcode_profile_mem);
    /* Add entry to profile table. */
    entries[0] = opcode_profile_entry;
    rv = soc_profile_mem_add(unit, opcode_control_profile,
                             (void *)entries, 1, &profile_index);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Opcode profile add failed for EP=%d"
                              " %s.\n"), ep_p->id, bcm_errmsg(rv)));
        /* Release opcode control profile table lock. */
        soc_mem_unlock(unit, opcode_profile_mem);
        return (rv);
    }
    /* Store endpoint OAM opcode profile table index value.
     * NOTE(review): later error returns in this routine do not release this
     * profile reference — confirm whether the caller cleans it up. */
    h_data_p->profile_index = profile_index;
    /* Release opcode control profile table lock. */
    soc_mem_unlock(unit, opcode_profile_mem);
    /*
     * MA_INDEX table programming.
     */
    /* Set group index value. */
    /* In case of BHH this needs to be the session Index */
#if defined(INCLUDE_BHH)
    if ( BHH_EP_TYPE(ep_p) ) {
        soc_mem_field32_set(unit, ma_index_mem, ma_idx_ptr, MA_PTRf,
                            BCM_OAM_BHH_GET_UKERNEL_EP(ep_p->id));
    } else
#endif
    {
        soc_mem_field32_set(unit, ma_index_mem, ma_idx_ptr, MA_PTRf,
                            ep_p->group);
    }
    /* Set OAM opcode control profile table index. */
    soc_mem_field32_set(unit, ma_index_mem, ma_idx_ptr,
                        OAM_OPCODE_CONTROL_PROFILE_PTRf,
                        h_data_p->profile_index);
    soc_mem_field32_set(unit, ma_index_mem, ma_idx_ptr,
                        INT_PRIf,
                        ep_p->int_pri);
    /* If the OAM domain is port, set source trunk map table */
    if (h_data_p->oam_domain == _BCM_OAM_DOMAIN_PORT) {
        rv = _bcm_port_domain_oam_hw_set(unit, ep_info_p);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: port OAM config failed for EP=%d"
                                  " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
    } else {
        /* VLAN/VP domain: program both the ingress L3 LMEP entry and the
         * egress MP group entry, then LM counters if requested. */
        rv = _bcm_kt2_oam_l3_entry_set(unit, ep_info_p);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3 entry config failed for EP=%d"
                                  " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
        rv = _bcm_kt2_oam_egr_mp_group_entry_set(unit, ep_info_p,
                                                 &egr_ctr_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Egress MP group entry config "
                                  "failed for EP=%d %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        }
        if ((ep_p->flags & BCM_OAM_ENDPOINT_LOSS_MEASUREMENT)) {
            rv = _bcm_kt2_oam_lmep_counters_set(unit, ep_info_p);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Counter config failed for EP=%d"
                                      " %s.\n"), ep_p->id, bcm_errmsg(rv)));
                /* Undo the Rx counter block reservation on failure. */
                if (h_data_p->rx_ctr != _BCM_OAM_INVALID_INDEX) {
                    rv = shr_aidxres_list_free(oc->ing_lm_ctr_pool[
                                               (h_data_p->rx_ctr >> 24)],
                                               (h_data_p->rx_ctr & 0xffffff));
                    if (BCM_FAILURE(rv)) {
                        LOG_ERROR(BSL_LS_BCM_OAM,
                                  (BSL_META_U(unit,
                                              "OAM Error: Ing LM counter block "
                                              "free failed (EP=%d) - %s.\n"),
                                   ep_info_p->id, bcm_errmsg(rv)));
                        return (rv);
                    }
                }
                return (rv);
            }
        }
    }
    /* Install the MA_INDEX entry at the offset dictated by the endpoint's
     * level within the group's active MDL bitmap. */
    rv = _bcm_kt2_oam_ma_index_entry_set(unit, h_data_p,
                                         ma_index_mem, ma_idx_ptr, 0);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MA_INDEX table write failed for EP=%d"
                              " %s.\n"), ep_p->id, bcm_errmsg(rv)));
        return (rv);
    }
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_upmep_rx_endpoint_reserve
* Purpose:
* Reserve an hardware index in the group state table to maintain the state
* for a CCM Rx enabled endpoint.
* Parameters:
* unit - (IN) BCM device number
* ep_info_p - (IN) Pointer to remote endpoint information.
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_upmep_rx_endpoint_reserve(int unit,
                                       const bcm_oam_endpoint_info_t *ep_info_p)
{
    _bcm_oam_hash_data_t *h_data_p; /* Endpoint hash data pointer.      */
    int rv;                         /* Operation return status.         */
    egr_mp_group_entry_t egr_mp_grp_entry; /* egress mp group entry buffer. */
    int mp_group_index = -1;        /* MP group entry hardware index.   */
    int count = 0;                  /* Successful Hw indices allocated. */
    uint8 mdl = 0;                  /* Maintenance domain level bitmap. */
    int rx_index[1 << _BCM_OAM_EP_LEVEL_BIT_COUNT] = {0};
                                    /* Endpoint Rx hardware index.      */
    uint16 ma_base_idx;             /* Base pointer to endpoint state   */
                                    /* table [MA_INDEX = (BASE + MDL)]. */
    _bcm_oam_control_t *oc;         /* Pointer to control structure.    */
    const bcm_oam_endpoint_info_t *ep_p; /* Pointer to endpoint information. */

    if (NULL == ep_info_p) {
        return (BCM_E_INTERNAL);
    }

    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Initialize local endpoint information pointer. */
    ep_p = ep_info_p;

    /* Get endpoint hash data pointer. */
    h_data_p = &oc->oam_hash_data[ep_p->id];
    if (0 == h_data_p->in_use) {
        return(BCM_E_INTERNAL);
    }

    /* Initialize egress MP group table entry buffer. */
    sal_memset(&egr_mp_grp_entry, 0, sizeof(egr_mp_group_entry_t));

    /* Find out if a matching entry already installed in hardware table. */
    rv = _bcm_kt2_oam_find_egr_lmep(unit, h_data_p, &mp_group_index,
                                    &egr_mp_grp_entry);
    if (BCM_FAILURE(rv)) {
        /*
         * If NO match found, allocate a new hardware index from EGR_MA_INDEX
         * pool.
         *
         * Endpoint MDL values can be (0-7) i.e 8 MDLs are supported per-MA
         * group endpoints. While allocating the base index, next 7 hardware
         * indices are also reserved.
         *
         * Note: the alloc count is derived from _BCM_OAM_EP_LEVEL_BIT_COUNT
         * so it always matches the rx_index[] array size.
         */
        rv = shr_idxres_list_alloc_set(oc->egr_ma_idx_pool,
                                       (1 << _BCM_OAM_EP_LEVEL_BIT_COUNT),
                                       (shr_idxres_element_t *) rx_index,
                                       (shr_idxres_element_t *) &count);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                      "OAM Error: EGR MA_INDEX index alloc failed EP:%d"
                      " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        } else {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                      "OAM Info: Egr MA_INDEX alloc for EP:%d success"
                      " rx_idx_base:%d alloc-count:%d.\n"), ep_p->id,
                      rx_index[0], count));
            h_data_p->ma_base_index = rx_index[0];
        }
    } else {
        /* Matching entry found, get installed entry MDL value. */
        mdl = soc_EGR_MP_GROUPm_field32_get(unit, &egr_mp_grp_entry,
                                            MDL_BITMAP_ACTIVEf);
        if (mdl == 0) {
            /* This is the first one in the active bitmap.
               Allocate the hw indexes. */
            /*
             * Endpoint MDL values can be (0-7) i.e 8 MDLs are supported per-MA
             * group endpoints. While allocating the base index, next 7 hardware
             * indices are also reserved.
             */
            rv = shr_idxres_list_alloc_set(oc->egr_ma_idx_pool,
                                           (1 << _BCM_OAM_EP_LEVEL_BIT_COUNT),
                                           (shr_idxres_element_t *) rx_index,
                                           (shr_idxres_element_t *) &count);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM(unit %d) Error: EGR MA_INDEX index alloc failed EP:%d"
                          " %s.\n"), unit, ep_p->id, bcm_errmsg(rv)));
                return (rv);
            } else {
                LOG_DEBUG(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM(unit %d) Info: Egr MA_INDEX alloc for EP:%d success"
                          " rx_idx_base:%d alloc-count:%d.\n"), unit, ep_p->id,
                          rx_index[0], count));
                h_data_p->ma_base_index = rx_index[0];
            }
        } else if (0 == (mdl & (1 << ep_p->level))) { /* Find out if MDLs are same. */
            /* Entry exists but this level is free - share its MA base index. */
            ma_base_idx = soc_EGR_MP_GROUPm_field32_get(unit, &egr_mp_grp_entry,
                                                        MA_BASE_PTRf);
            h_data_p->ma_base_index = ma_base_idx;
        } else {
            /* Rx index already taken return error. */
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                      "OAM Error: No free Rx index found for EP:%d"
                      " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (BCM_E_RESOURCE);
        }
    }
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_downmep_rx_endpoint_reserve
* Purpose:
 *     Reserve a hardware index in the group state table to maintain the state
* for a CCM Rx enabled endpoint.
* Parameters:
* unit - (IN) BCM device number
* ep_info_p - (IN) Pointer to remote endpoint information.
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_downmep_rx_endpoint_reserve(int unit,
                                         const bcm_oam_endpoint_info_t *ep_info_p)
{
    _bcm_oam_hash_data_t *h_data_p; /* Endpoint hash data pointer. */
    int rv;                         /* Operation return status. */
    l3_entry_1_entry_t l3_entry;    /* L3 entry buffer. */
    int l3_index = -1;              /* L3 entry hardware index. */
    int count = 0;                  /* Successful Hw indices allocated. */
    uint8 mdl = 0;                  /* Maintenance domain level. */
    int rx_index[1 << _BCM_OAM_EP_LEVEL_BIT_COUNT] = {0};
                                    /* Endpoint Rx hardware index. */
    uint16 ma_base_idx;             /* Base pointer to endpoint state */
                                    /* table [MA_INDEX = (BASE + MDL)]. */
    _bcm_oam_control_t *oc;         /* Pointer to control structure. */
    const bcm_oam_endpoint_info_t *ep_p; /* Pointer to endpoint information. */
    int stm_index = 0;              /* SOURCE_TRUNK_MAP table index (port-domain). */
    source_trunk_map_table_entry_t stm_entry; /* STM entry buffer (port-domain). */
    int ma_idx_entry_count = 0;     /* No. of MA_INDEX entries to reserve. */
    shr_idxres_list_handle_t pool = NULL; /* MA_INDEX allocator pool handle. */

    if (NULL == ep_info_p) {
        return (BCM_E_INTERNAL);
    }

    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Initialize local endpoint information pointer. */
    ep_p = ep_info_p;

    /* Get endpoint hash data pointer. */
    h_data_p = &oc->oam_hash_data[ep_p->id];
    if ( 0 == h_data_p->in_use) {
        return (BCM_E_INTERNAL);
    }

    /* Initialize L3 entry buffer. */
    sal_memset(&l3_entry, 0, sizeof(l3_entry_1_entry_t));
    sal_memset(&stm_entry, 0, sizeof(source_trunk_map_table_entry_t));

    /* Port-domain MEPs are keyed via SOURCE_TRUNK_MAP; other domains are
     * keyed via the L3 (LMEP view) table. */
    if (h_data_p->oam_domain == _BCM_OAM_DOMAIN_PORT) {
        rv = _bcm_kt2_oam_find_port_lmep(unit, h_data_p, &stm_index, &stm_entry, &mdl);
    } else {
        /* Find out if a matching entry already installed in hardware table. */
        rv = _bcm_kt2_oam_find_lmep(unit, h_data_p, &l3_index, &l3_entry);
    }

    if (BCM_FAILURE(rv)) {
        /*
         * If NO match found, allocate a new hardware index from MA_INDEX
         * pool.
         */
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ma_idx_num_entries_and_pool_get(unit, h_data_p,
                                                                         &ma_idx_entry_count, &pool));
        /*
         * Endpoint MDL values can be (0-7) i.e 8 MDLs are supported per-MA
         * group endpoints. While allocating the base index, next 7 hardware
         * indices are also reserved.
         */
        rv = shr_idxres_list_alloc_set(pool, ma_idx_entry_count,
                                       (shr_idxres_element_t *) rx_index,
                                       (shr_idxres_element_t *) &count);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                      "OAM Error: MA_INDEX index alloc failed EP:%d"
                      " %s.\n"), ep_p->id, bcm_errmsg(rv)));
            return (rv);
        } else {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                      "OAM Info: MA_INDEX alloc for EP:%d success."
                      " rx_idx_base:%d alloc-count:%d.\n"), ep_p->id,
                      rx_index[0], count));
        }
        /* First index of the allocated block becomes this MEP's MA base. */
        h_data_p->ma_base_index = rx_index[0];
    } else if (BCM_SUCCESS(rv)) {
        if (h_data_p->oam_domain == _BCM_OAM_DOMAIN_PORT) {
            /* MDL already found.*/
            /* Findout if MDLs are same. */
            if (0 == (mdl & (1 << ep_p->level))) {
                /* Level free on this port entry - share its MA base index. */
                ma_base_idx =
                    soc_SOURCE_TRUNK_MAP_TABLEm_field32_get(unit, &stm_entry,
                                                            MA_BASE_PTRf);
                h_data_p->ma_base_index = ma_base_idx;
            } else {
                /* Rx index already taken return error. */
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM Error: No free Rx index found for EP:%d"
                          " %s.\n"), ep_p->id, bcm_errmsg(rv)));
                return (BCM_E_RESOURCE);
            }
        } else {
            /* Matching entry found, get installed entry MDL value. */
            mdl = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, &l3_entry,
                                                         LMEP__MDL_BITMAP_ACTIVEf);
            if (mdl == 0) {
                /* This is the first one in the active bitmap.
                   Allocate the hw indexes. */
                BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ma_idx_num_entries_and_pool_get(unit, h_data_p,
                                                                                 &ma_idx_entry_count, &pool));
                /*
                 * Endpoint MDL values can be (0-7) i.e 8 MDLs are supported per-MA
                 * group endpoints. While allocating the base index, next 7 hardware
                 * indices are also reserved.
                 */
                rv = shr_idxres_list_alloc_set(pool, ma_idx_entry_count,
                                               (shr_idxres_element_t *) rx_index,
                                               (shr_idxres_element_t *) &count);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                              "OAM(unit %d) Error: MA_INDEX index alloc failed EP:%d"
                              " %s.\n"), unit, ep_p->id, bcm_errmsg(rv)));
                    return (rv);
                } else {
                    LOG_DEBUG(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                              "OAM(unit %d) Info: MA_INDEX alloc for EP:%d success."
                              " rx_idx_base:%d alloc-count:%d.\n"), unit, ep_p->id,
                              rx_index[0], count));
                }
                h_data_p->ma_base_index = rx_index[0];
            } else if (0 == (mdl & (1 << ep_p->level))) { /* Findout if MDLs are same. */
                /* Level free on this L3 entry - share its MA base index. */
                ma_base_idx = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, &l3_entry,
                                                                     LMEP__MA_BASE_PTRf);
                h_data_p->ma_base_index = ma_base_idx;
            } else {
                /* Rx index already taken return error. */
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM Error: No free Rx index found for EP:%d"
                          " %s.\n"), ep_p->id, bcm_errmsg(rv)));
                return (BCM_E_RESOURCE);
            }
        }
    }
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_remote_endpoint_delete
* Purpose:
* Delete a remote endpoint.
* Parameters:
* unit - (IN) BCM device number
* h_data_p - (IN) Pointer to endpoint hash data
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_remote_endpoint_delete(int unit,
                                    _bcm_oam_hash_data_t *h_data_p)
{
    _bcm_oam_control_t *oc;          /* Pointer to OAM control structure. */
    l3_entry_1_entry_t l3_entry;     /* RMEP view table entry. */
    ma_state_entry_t ma_state_entry; /* Group state machine table entry. */
    rmep_entry_t rmep_entry;         /* Remote endpoint table entry. */
    int rv;                          /* Operation return status. */
    uint32 some_rmep_ccm_defect_counter = 0; /* No. of RMEPs in MA with CCM */
                                             /* defects. */
    uint32 some_rdi_defect_counter = 0;      /* No. of RMEPs in MA with RDI */
                                             /* defects. */
    uint32 cur_some_rdi_defect = 0;      /* Any RMEP in MA with RDI defect. */
    uint32 cur_some_rmep_ccm_defect = 0; /* Any RMEP in MA with CCM defect. */
    uint32 cur_rmep_ccm_defect = 0;      /* RMEP lookup failed or CCM */
                                         /* interval mismatch. */
    uint32 cur_rmep_last_rdi = 0;   /* Last CCM RDI Rx from this RMEP. */
    uint32 first, last, validLow, validHigh, freeCount, allocCount;
                                    /* RMEP ID pool state query outputs. */

    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    sal_memset(&rmep_entry, 0, sizeof(rmep_entry_t));

    /* Get remote endpoint entry value from hardware. */
    rv = READ_RMEPm(unit, MEM_BLOCK_ANY, h_data_p->remote_index, &rmep_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                  "OAM Error: RMEP table read failed for EP=%d"
                  "%s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
        return (rv);
    }

    /* Capture this RMEP's current defect contributions so the per-group
     * aggregate counters in MA_STATE can be decremented below. */
    cur_rmep_ccm_defect
        = soc_RMEPm_field32_get(unit, &rmep_entry,
                                CURRENT_RMEP_CCM_DEFECTf);
    cur_rmep_last_rdi
        = soc_RMEPm_field32_get(unit, &rmep_entry,
                                CURRENT_RMEP_LAST_RDIf);

    sal_memset(&ma_state_entry, 0, sizeof(ma_state_entry));
    rv = READ_MA_STATEm(unit, MEM_BLOCK_ANY, h_data_p->group_index,
                        &ma_state_entry);
    if (BCM_FAILURE(rv)) {
        /* NOTE(review): this message reports GID but prints ep_id - looks
         * like it should pass h_data_p->group_index; confirm before use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                  "OAM Error: Group state (GID=%d) table read"
                  " failed - %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
        return (rv);
    }

    /* If this RMEP contributed a CCM or RDI defect, decrement the group's
     * corresponding defect counter; when a counter reaches zero, clear the
     * associated CURRENT_SOME_* summary flag. */
    if ((0 != cur_rmep_ccm_defect) || (0 != cur_rmep_last_rdi)) {
        some_rmep_ccm_defect_counter
            = soc_MA_STATEm_field32_get(unit, &ma_state_entry,
                                        SOME_RMEP_CCM_DEFECT_COUNTERf);
        cur_some_rmep_ccm_defect
            = soc_MA_STATEm_field32_get(unit, &ma_state_entry,
                                        CURRENT_SOME_RMEP_CCM_DEFECTf);
        if ((0 != cur_rmep_ccm_defect)
            && (some_rmep_ccm_defect_counter > 0)) {
            --some_rmep_ccm_defect_counter;
            soc_MA_STATEm_field32_set
                (unit, &ma_state_entry, SOME_RMEP_CCM_DEFECT_COUNTERf,
                 some_rmep_ccm_defect_counter);
            if (0 == some_rmep_ccm_defect_counter) {
                cur_some_rmep_ccm_defect = 0;
                soc_MA_STATEm_field32_set
                    (unit, &ma_state_entry, CURRENT_SOME_RMEP_CCM_DEFECTf,
                     cur_some_rmep_ccm_defect);
            }
        }

        some_rdi_defect_counter
            = soc_MA_STATEm_field32_get(unit, &ma_state_entry,
                                        SOME_RDI_DEFECT_COUNTERf);
        cur_some_rdi_defect
            = soc_MA_STATEm_field32_get(unit, &ma_state_entry,
                                        CURRENT_SOME_RDI_DEFECTf);
        if ((0 != cur_rmep_last_rdi)
            && (some_rdi_defect_counter > 0)) {
            --some_rdi_defect_counter;
            soc_MA_STATEm_field32_set
                (unit, &ma_state_entry, SOME_RDI_DEFECT_COUNTERf,
                 some_rdi_defect_counter);
            if (0 == some_rdi_defect_counter) {
                cur_some_rdi_defect = 0;
                soc_MA_STATEm_field32_set
                    (unit, &ma_state_entry, CURRENT_SOME_RDI_DEFECTf,
                     cur_some_rdi_defect);
            }
        }

        rv = WRITE_MA_STATEm(unit, MEM_BLOCK_ALL, h_data_p->group_index,
                             &ma_state_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                      "OAM Error: Group state (GID=%d) table write"
                      " failed - %s.\n"), h_data_p->group_index,
                      bcm_errmsg(rv)));
            return (rv);
        }
    }

    /* Clear RMEP table entry for this endpoint index. */
    sal_memset(&rmep_entry, 0, sizeof(rmep_entry_t));
    rv = WRITE_RMEPm(unit, MEM_BLOCK_ALL, h_data_p->remote_index,
                     &rmep_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                  "OAM Error: RMEP table write index=%x (EP=%d)"
                  " - %s.\n"), h_data_p->remote_index,
                  h_data_p->ep_id, bcm_errmsg(rv)));
        return (rv);
    }

    sal_memset(&l3_entry, 0, sizeof(l3_entry_1_entry_t));

    /* Construct endpoint RMEP view key for L3 Table entry delete operation. */
    _bcm_kt2_oam_rmep_key_construct(unit, h_data_p, &l3_entry);
    /* Delete failure is only an error after init completes - during
     * warm-boot-style cleanup the entry may legitimately be absent. */
    rv = soc_mem_delete(unit, L3_ENTRY_IPV4_UNICASTm, MEM_BLOCK_ALL, &l3_entry);
    if (BCM_FAILURE(rv) && (oc->init)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                  "OAM Error: RMEP view update (EP=%d)"
                  " - %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
        return (rv);
    }

    /* Return ID back to free RMEP ID pool. */
    BCM_IF_ERROR_RETURN
        (shr_idxres_list_free(oc->rmep_pool, h_data_p->remote_index));

    /* Query the pool to learn how many RMEPs remain allocated device-wide. */
    rv = shr_idxres_list_state(oc->rmep_pool,
                               &first, &last,
                               &validLow, &validHigh,
                               &freeCount, &allocCount);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                  "OAM Error: RMEP pool state get failed "
                  " - %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }

    /* Last RMEP on the device removed: clear the remaining group-wide CCM
     * defect state so stale defects do not linger in MA_STATE. */
    if(allocCount == 0)
    {
        soc_MA_STATEm_field32_set
            (unit, &ma_state_entry, CURRENT_XCON_CCM_DEFECTf,
             0);
        soc_MA_STATEm_field32_set
            (unit, &ma_state_entry, CURRENT_ERROR_CCM_DEFECTf,
             0);
        soc_MA_STATEm_field32_set
            (unit, &ma_state_entry, STICKY_ERROR_CCM_DEFECTf,
             0);
        soc_MA_STATEm_field32_set
            (unit, &ma_state_entry, ERROR_CCM_DEFECT_TIMESTAMPf,
             0);
        soc_MA_STATEm_field32_set
            (unit, &ma_state_entry, ERROR_CCM_DEFECT_RECEIVE_CCMf,
             0);
        rv = WRITE_MA_STATEm(unit, MEM_BLOCK_ALL, h_data_p->group_index,
                             &ma_state_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                      "OAM Error: Group state (GID=%d) table write"
                      " failed - %s.\n"), h_data_p->group_index,
                      bcm_errmsg(rv)));
            return (rv);
        }
    }

    /* Clear the H/w index to logical index Mapping for RMEP */
    oc->remote_endpoints[h_data_p->remote_index] = BCM_OAM_ENDPOINT_INVALID;

    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_clear_counter
* Purpose:
* Remove the counter associated with the endpoint being deleted
* Parameters:
* unit - (IN) BCM device number
* mem - (IN) L3_ENTRY_IPV4_UNICAST/EGR_MP_GROUP
* index - (IN) Index of the table entry to be modified
* h_data_p - (IN) Pointer to endpoint hash data
* entry - (IN) pointer to table entry
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_clear_counter(int unit, soc_mem_t mem,
                           int index,
                           _bcm_oam_hash_data_t *h_data_p,
                           void *entry)
{
    int rv = BCM_E_NONE;       /* Operation return status.                 */
    mep_ctr_info_t *ctr_info;  /* Counter field descriptors for `mem`.     */
    int ctr_type = 0;          /* Counter MEP direction (1 = Up-MEP).      */
    int ctr_valid = 0;         /* Counter valid bit value.                 */
    int ctr_mdl = 0;           /* Counter maintenance domain level.        */
    int up_mep = 0;            /* This endpoint is an Up-MEP.              */
    int ctr_idx;               /* Per-entry counter slot iterator (0, 1).  */

    if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        up_mep = 1;
    }

    /* Ingress (L3) and egress (EGR_MP_GROUP) tables expose the counter
     * fields under different names - select the matching descriptor set. */
    if (mem == L3_ENTRY_IPV4_UNICASTm) {
        ctr_info = mep_ctr_info;
    } else {
        ctr_info = egr_mep_ctr_info;
    }

    /* Each entry carries two counter slots. Invalidate any slot that
     * belongs to this endpoint (same direction and same MDL level).
     * Note: unlike the earlier version, failures from the hardware
     * modify are no longer silently discarded. */
    for (ctr_idx = 0; ctr_idx < 2; ctr_idx++) {
        ctr_valid = soc_mem_field32_get(unit, mem,
                                        (uint32 *)entry,
                                        ctr_info[ctr_idx].ctr_valid);
        if (!ctr_valid) {
            continue;
        }
        ctr_type = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                       ctr_info[ctr_idx].ctr_mep_type);
        ctr_mdl = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                      ctr_info[ctr_idx].ctr_mep_mdl);
        if ((ctr_type == up_mep) && (ctr_mdl == h_data_p->level)) {
            rv = soc_mem_field32_modify(unit, mem, index,
                                        ctr_info[ctr_idx].ctr_valid, 0);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM Error: Counter valid bit clear "
                          "failed (EP=%d) - %s.\n"),
                          h_data_p->ep_id, bcm_errmsg(rv)));
                return (rv);
            }
        }
    }

    /* return rx & tx counters allocated if any */
    rv =_bcm_kt2_oam_free_counter(unit, h_data_p);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                  "OAM Error: LM counter block "
                  "free failed (EP=%d) - %s.\n"),
                  h_data_p->ep_id, bcm_errmsg(rv)));
        return (rv);
    }
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_stm_table_clear
* Purpose:
* Clear MA base ptr and return MA index to the pool
* Parameters:
* unit - (IN) BCM device number
* mod_id - (IN) Module Id
* index - (IN) stm member index
* port_id - (IN) port number
* h_data_p - (IN) Pointer to hash data
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_stm_table_clear(int unit, bcm_module_t mod_id, int index,
                             bcm_port_t port_id,
                             _bcm_oam_hash_data_t *h_data_p)
{
    int rv = BCM_E_NONE;         /* Operation return status.             */
    int stm_index = 0;           /* SOURCE_TRUNK_MAP table index.        */
    source_trunk_map_table_entry_t stm_entry; /* STM entry buffer.       */
    uint16 ma_base_idx;          /* Base pointer to endpoint state       */
                                 /* table [MA_INDEX = (BASE + MDL)].     */
    int idx = 0;                 /* Iterator variable.                   */
    _bcm_oam_control_t *oc;      /* Pointer to control structure.        */
    int count = 0;               /* No. of Rx indices actually freed.    */
    int ma_idx_entry_count = 0;  /* No. of MA_INDEX entries per MEP.     */
    shr_idxres_list_handle_t pool = NULL; /* MA_INDEX allocator pool.    */
    uint32 rx_index[1 << _BCM_OAM_EP_LEVEL_BIT_COUNT] = {0};

    /* Validates that OAM control is initialized for this unit. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    soc_mem_lock(unit, SOURCE_TRUNK_MAP_TABLEm);

    rv = _bcm_esw_src_mod_port_table_index_get(unit,
                                               mod_id, port_id, &stm_index);
    if(BCM_FAILURE(rv)) {
        soc_mem_unlock(unit, SOURCE_TRUNK_MAP_TABLEm);
        return rv;
    }

    rv = READ_SOURCE_TRUNK_MAP_TABLEm(unit, MEM_BLOCK_ANY,
                                      stm_index, &stm_entry);
    if(BCM_FAILURE(rv)) {
        soc_mem_unlock(unit, SOURCE_TRUNK_MAP_TABLEm);
        return rv;
    }

    ma_base_idx = soc_SOURCE_TRUNK_MAP_TABLEm_field32_get(unit,
                                                          &stm_entry, MA_BASE_PTRf);

    /* The Rx index block is shared by all member ports of the endpoint;
     * free it only once - the caller passes index == 0 for the first
     * (or only) member. */
    if (index == 0) {
        /* Return Rx indices to free pool. */
        for (idx = 0; idx < (1 << _BCM_OAM_EP_LEVEL_BIT_COUNT); idx++) {
            rx_index[idx] = ma_base_idx + idx;
        }
        rv = _bcm_kt2_oam_ma_idx_num_entries_and_pool_get(unit, h_data_p,
                                                          &ma_idx_entry_count, &pool);
        if(BCM_FAILURE(rv)) {
            soc_mem_unlock(unit, SOURCE_TRUNK_MAP_TABLEm);
            return rv;
        }
        rv = shr_idxres_list_free_set(pool, ma_idx_entry_count,
                                      (shr_idxres_element_t *) rx_index,
                                      (shr_idxres_element_t *) &count);
        /* Fix: validate against the requested entry count instead of a
         * hard-coded 8, so pools with a different per-MEP entry count
         * are handled correctly. */
        if (BCM_FAILURE(rv) || (ma_idx_entry_count != count)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                      "OAM Error: Rx index list free (EP=%d)"
                      " (count=%d).\n"), h_data_p->ep_id, count));
            soc_mem_unlock(unit, SOURCE_TRUNK_MAP_TABLEm);
            return (rv);
        }
    }

    /* Set MA_BASE_PTR to 0 */
    soc_SOURCE_TRUNK_MAP_TABLEm_field32_set(unit, &stm_entry, MA_BASE_PTRf, 0);
    rv = WRITE_SOURCE_TRUNK_MAP_TABLEm(unit, MEM_BLOCK_ALL, stm_index,
                                       &stm_entry);

    soc_mem_unlock(unit, SOURCE_TRUNK_MAP_TABLEm);
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_trunk_port_mdl_update
* Purpose:
* Modify port, egr port and stm table entry
* Parameters:
* unit - (IN) BCM device number
* h_data_p - (IN) Pointer to hash data
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_trunk_port_mdl_update(int unit,
                                   _bcm_oam_hash_data_t *h_data_p)
{
    int rv = BCM_E_NONE;             /* Operation return status.          */
    _bcm_oam_control_t *oc;          /* Pointer to control structure      */
    int member_count = 0;            /* Total ports in the trunk group.   */
    bcm_port_t *member_array = NULL; /* Trunk member port array.          */
    int local_member_count = 0;      /* Members local to this unit.       */
    int i = 0;                       /* Iterator variable.                */
    bcm_port_t pp_port = 0;          /* Per-member internal (pp) port.    */
    bcm_module_t module_id;          /* Module ID */
    uint8 mdl = 0;                   /* Resulting active MDL bitmap.      */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    if(h_data_p->trunk_id == BCM_TRUNK_INVALID) {
        return BCM_E_PARAM;
    }

    /* Get all the local member ports belonging to this trunk */
    BCM_IF_ERROR_RETURN(bcm_esw_trunk_get(unit, h_data_p->trunk_id,
                                          NULL, 0, NULL, &member_count));
    if (0 == member_count) {
        /* No members have been added to the trunk group yet */
        /* NOTE(review): rv is still BCM_E_NONE here, so the %s below
         * prints the success string - confirm intended. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                  "OAM Error: No local members have been added to "
                  "the trunk group yet - %s.\n"), bcm_errmsg(rv)));
        return BCM_E_PARAM;
    }

    _BCM_OAM_ALLOC(member_array, bcm_port_t,
                   sizeof(bcm_port_t) * member_count, "Trunk info");
    if (NULL == member_array) {
        return (BCM_E_MEMORY);
    }

    /* Get members of the trunk belonging to this module */
    if (BCM_SUCCESS(_bcm_esw_trunk_local_members_get(unit,
                                                     h_data_p->trunk_id,
                                                     member_count, member_array,
                                                     &local_member_count))) {
        if (local_member_count > 0) {
            /* Update the MDL bitmap on every local member port. */
            for(i = 0; i < local_member_count; i++) {
                rv = _bcm_kt2_pp_port_to_modport_get(unit, member_array[i],
                                                     &module_id, &pp_port);
                if (BCM_FAILURE(rv)) {
                    sal_free(member_array);
                    return (rv);
                }
                rv =_bcm_kt2_oam_port_mdl_update(unit, pp_port, 1,
                                                 h_data_p, &mdl);
                if (BCM_FAILURE(rv)) {
                    sal_free(member_array);
                    return (rv);
                }
            }
            /* No MDL levels remain on this endpoint: release the shared
             * MA_INDEX block and clear MA_BASE_PTR on every member. */
            if (mdl == 0) {
                for(i = 0; i < local_member_count; i++) {
                    /* NOTE(review): module_id carries the value from the
                     * last iteration of the loop above, not per member -
                     * confirm all local members share one module. */
                    rv = _bcm_kt2_oam_stm_table_clear(unit, module_id, i,
                                                      member_array[i], h_data_p);
                    if (BCM_FAILURE(rv)) {
                        sal_free(member_array);
                        return (rv);
                    }
                }
            }
            rv = _bcm_kt2_oam_port_mdl_passive_update(unit, 1, h_data_p, mdl);
            if (BCM_FAILURE(rv)) {
                sal_free(member_array);
                return (rv);
            }
        }
    }
    sal_free(member_array);
    return BCM_E_NONE;
}
/*
* Function:
* _bcm_port_domain_mdl_bitmap_update
* Purpose:
* Update Port table MDL bitmap and STM table MA_BASE_PTR.
* Parameters:
* unit - (IN) BCM device number
* h_data_p - (IN) Pointer to endpoint hash data
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_port_domain_mdl_bitmap_update(int unit, _bcm_oam_hash_data_t *h_data_p)
{
    _bcm_oam_control_t *ctrl_p;    /* OAM control structure pointer.      */
    uint8              level_bmp;  /* Active maintenance level bitmap     */
                                   /* remaining after this MEP's update.  */
    bcm_module_t       modid = 0;  /* Module ID from the source GLP.      */
    bcm_port_t         port = 0;   /* Port number from the source GLP.    */

    /* Validates that OAM control is initialized for this unit. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &ctrl_p));

    /* Only port-domain endpoints are handled here. */
    if (_BCM_OAM_DOMAIN_PORT != h_data_p->oam_domain) {
        return (BCM_E_INTERNAL);
    }

    /* Trunk endpoints need every local member port updated. */
    if (h_data_p->trunk_id != BCM_TRUNK_INVALID) {
        return _bcm_kt2_oam_trunk_port_mdl_update(unit, h_data_p);
    }

    /* Single physical port: update the active bitmap, propagate the
     * passive bitmap, and release the MA index block when no levels
     * remain on this port. */
    port = _BCM_OAM_GLP_PORT_GET(h_data_p->sglp);
    modid = _BCM_OAM_GLP_MODULE_ID_GET(h_data_p->sglp);
    BCM_IF_ERROR_RETURN
        (_bcm_kt2_oam_port_mdl_update(unit, h_data_p->src_pp_port,
                                      1, h_data_p, &level_bmp));
    BCM_IF_ERROR_RETURN
        (_bcm_kt2_oam_port_mdl_passive_update(unit, 1, h_data_p, level_bmp));
    if (level_bmp == 0) {
        BCM_IF_ERROR_RETURN
            (_bcm_kt2_oam_stm_table_clear(unit, modid, 0,
                                          port, h_data_p));
    }
    return BCM_E_NONE;
}
/*
* Function:
* _bcm_kt2_oam_l3_entry_destroy
* Purpose:
* Update Ingress MP group table(L3 Entry- LMEP view)entry.
* Parameters:
* unit - (IN) BCM device number
* h_data_p - (IN) Pointer to endpoint hash data
* active_mdl -(IN) Active MDL bitmap value
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_l3_entry_destroy(int unit, _bcm_oam_hash_data_t *h_data_p,
                              uint8 *active_mdl)
{
    _bcm_oam_control_t *oc;      /* Pointer to OAM control structure.   */
    l3_entry_1_entry_t l3_entry; /* LMEP view table entry.              */
    int l3_index = -1;           /* L3 table hardware index.            */
    int rv;                      /* Operation return status.            */
    uint8 mdl;                   /* MDL bitmap for this direction.      */
    uint8 other_mdl = 0;         /* MDL bitmap of opposite direction.   */
    uint32 ma_base_index;        /* Endpoint tbl base index.            */
    uint32 rx_index[1 << _BCM_OAM_EP_LEVEL_BIT_COUNT] = {0};
                                 /* Endpoint Rx hardware index.         */
    uint32 count;                /* No. of Rx indices freed             */
                                 /* successfully.                       */
    int idx;                     /* Iterator variable.                  */
    soc_field_t mdl_field = 0;   /* MDL field owned by this MEP.        */
    soc_field_t other_mdl_field = 0; /* MDL field of other direction.   */
    int upmep = 0;               /* Endpoint is an Up-MEP.              */
    int ma_idx_entry_count = 0;  /* No. of MA_INDEX entries per MEP.    */
    shr_idxres_list_handle_t pool = NULL; /* MA_INDEX allocator pool.   */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Check whether up/down MEP: in the ingress (LMEP) view an Up-MEP
     * owns the PASSIVE bitmap and a Down-MEP the ACTIVE bitmap. */
    if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        mdl_field = LMEP__MDL_BITMAP_PASSIVEf;
        other_mdl_field = LMEP__MDL_BITMAP_ACTIVEf;
        upmep = 1;
    } else {
        mdl_field = LMEP__MDL_BITMAP_ACTIVEf;
        other_mdl_field = LMEP__MDL_BITMAP_PASSIVEf;
    }

    rv = _bcm_kt2_oam_find_lmep(unit, h_data_p, &l3_index, &l3_entry);
    if (BCM_SUCCESS(rv)) {
        /* Endpoint found, get MDL bitmap value. */
        mdl = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, &l3_entry,
                                                     mdl_field);
        /* Passive demultiplexing should not be done on a MIP.
         * Hence do not touch PASSIVE bitmap when destroying
         * also.
         */
        if (!((mdl_field == LMEP__MDL_BITMAP_PASSIVEf) &&
              (_BCM_OAM_EP_IS_MIP(h_data_p)))) {
            /* Clear the MDL bit for this endpoint. */
            mdl &= ~(1 << h_data_p->level);
            if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
                /* Passive MDL bitmap */
                _bcm_kt2_oam_get_passive_mdl_from_active_mdl(unit, *active_mdl,
                        h_data_p->ma_base_index, &mdl, 1);
            } else {
                *active_mdl = mdl;
            }
        }

        /* Take L3 module protection mutex to block any updates. */
        L3_LOCK(unit);
        if (0 != mdl) {
            /* Valid endpoints exist for other MDLs at this index. */
            rv = soc_mem_field32_modify(unit, L3_ENTRY_IPV4_UNICASTm, l3_index,
                                        mdl_field, mdl);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM Error:L3 entry LMEP view update(EP=%d) -"
                          " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                L3_UNLOCK(unit);
                return (rv);
            }
            /* Clear the counter, if any */
            rv = _bcm_kt2_oam_clear_counter(unit, L3_ENTRY_IPV4_UNICASTm,
                                            l3_index, h_data_p,
                                            (void *)&l3_entry);
            if (BCM_FAILURE(rv)) {
                /* coverity[copy_paste_error] */
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM Error: Clear counter failed (EP=%d) -"
                          " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                L3_UNLOCK(unit);
                return (rv);
            }
        } else {
            /* check if other MDL bitmap is also zero (passive bitmap incase
               of downmep and active bitmap in case of upmep), if so delete the
               entry completely */
            other_mdl = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, &l3_entry,
                                                               other_mdl_field);
            if (0 == other_mdl) {
                rv = soc_mem_delete_index(unit, L3_ENTRY_IPV4_UNICASTm,
                                          MEM_BLOCK_ALL, l3_index);
                /* Fix: previously this status was immediately overwritten
                 * by the counter-free call, silently losing a failed
                 * hardware entry delete. */
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                              "OAM Error: L3 entry delete failed (EP=%d) -"
                              " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                    L3_UNLOCK(unit);
                    return (rv);
                }
                rv =_bcm_kt2_oam_free_counter(unit, h_data_p);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                              "OAM Error: LM counter block "
                              "free failed (EP=%d) - %s.\n"),
                              h_data_p->ep_id, bcm_errmsg(rv)));
                    L3_UNLOCK(unit);
                    return (rv);
                }
            } else {
                /* Valid endpoints exist for other MDLs at this index. */
                rv = soc_mem_field32_modify(unit, L3_ENTRY_IPV4_UNICASTm,
                                            l3_index, mdl_field, mdl);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                              "OAM Error: LMEP view update (EP=%d) -"
                              " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                    L3_UNLOCK(unit);
                    return (rv);
                }
                /* Clear the counter, if any */
                rv = _bcm_kt2_oam_clear_counter(unit, L3_ENTRY_IPV4_UNICASTm,
                                                l3_index, h_data_p,
                                                (void *)&l3_entry);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                              "OAM Error: Clear counter failed (EP=%d) -"
                              " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                    L3_UNLOCK(unit);
                    return (rv);
                }
            }
        }
        L3_UNLOCK(unit);

        /* This is the last Rx endpoint in this OAM group. */
        if ((0 == mdl) && (upmep == 0)) {
            ma_base_index = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit,
                                                                   &l3_entry,
                                                                   LMEP__MA_BASE_PTRf);
            /* Return Rx indices to free pool. */
            for (idx = 0; idx < (1 << _BCM_OAM_EP_LEVEL_BIT_COUNT); idx++) {
                rx_index[idx] = ma_base_index + idx;
            }
            BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ma_idx_num_entries_and_pool_get(unit, h_data_p,
                                                                             &ma_idx_entry_count, &pool));
            rv = shr_idxres_list_free_set(pool, ma_idx_entry_count,
                                          (shr_idxres_element_t *) rx_index,
                                          (shr_idxres_element_t *) &count);
            /* Fix: validate against the requested entry count instead of a
             * hard-coded 8, consistent with the variable-size MA_INDEX
             * pool used for the free. */
            if (BCM_FAILURE(rv) || ((uint32)ma_idx_entry_count != count)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM Error: Rx index list free (EP=%d)"
                          " (count=%d).\n"), h_data_p->ep_id, count));
                return (rv);
            }
        }

        /* Delete ING_SERVICE pri map profile entry for this endpoint. */
        if (h_data_p->pri_map_index != _BCM_OAM_INVALID_INDEX) {
            rv = soc_profile_mem_delete(unit, &oc->ing_service_pri_map,
                                        (h_data_p->pri_map_index * BCM_OAM_INTPRI_MAX));
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                          "OAM Error: Profile table update error (idx=%d)"
                          "- %s.\n"), h_data_p->pri_map_index, bcm_errmsg(rv)));
                return (rv);
            }
            h_data_p->pri_map_index = _BCM_OAM_INVALID_INDEX;
        }
    } else if (BCM_FAILURE(rv) && (oc->init)) {
        /* We do not program passive bitmap for MIP. So dont throw error
         * if you don't find an entry, since we might not have created
         * such an entry at all. */
        if (!((mdl_field == LMEP__MDL_BITMAP_PASSIVEf) &&
              (_BCM_OAM_EP_IS_MIP(h_data_p)))) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                      "OAM(unit %d) Error: L3 entry table lookup (EP=%d) -"
                      " %s.\n"), unit, h_data_p->ep_id, bcm_errmsg(rv)));
            return (rv);
        }
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_egr_mp_group_entry_destroy
* Purpose:
* Update Egress MP group table entry.
* Parameters:
* unit - (IN) BCM device number
* h_data_p - (IN) Pointer to endpoint hash data
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_egr_mp_group_entry_destroy(int unit,
_bcm_oam_hash_data_t *h_data_p,
uint8 *active_mdl)
{
_bcm_oam_control_t *oc; /* Pointer to OAM control structure. */
uint8 mdl; /* Maintenance domain level. */
uint8 other_mdl = 0; /* Maintenance domain level. */
int rv; /* Operation return status. */
uint32 ma_base_index; /* Endpoint tbl base index. */
uint32 rx_index[1 << _BCM_OAM_EP_LEVEL_BIT_COUNT] = {0};
/* Endpoint Rx hardware index. */
uint32 count; /* No. of Rx indices freed */
/* successfully. */
int idx; /* Iterator variable. */
soc_field_t mdl_field = 0;
soc_field_t other_mdl_field = 0;
egr_mp_group_entry_t egr_mp_group; /* Egress MP group tbl entry buffer */
int mp_grp_index = 0;
int upmep = 0;
#if defined(INCLUDE_BHH)
_bcm_oam_hash_data_t *hash_data;
bcm_oam_endpoint_t ep_id;
int key1 = 0, key2 = 0;
#endif /* INCLUDE_BHH */
BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
/* Check whether up/down MEP */
if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
mdl_field = MDL_BITMAP_ACTIVEf;
other_mdl_field = MDL_BITMAP_PASSIVEf;
upmep = 1;
} else {
mdl_field = MDL_BITMAP_PASSIVEf;
other_mdl_field = MDL_BITMAP_ACTIVEf;
}
rv = _bcm_kt2_oam_find_egr_lmep(unit, h_data_p,
&mp_grp_index, &egr_mp_group);
if (BCM_SUCCESS(rv)) {
#if defined(INCLUDE_BHH)
if (BHH_EP_TYPE(h_data_p)) {
if (BCM_XGS3_L3_EGRESS_IDX_VALID(unit, h_data_p->egress_if)) {
key1 = h_data_p->egress_if - BCM_XGS3_EGRESS_IDX_MIN(unit);
} else {
if (BCM_XGS3_DVP_EGRESS_IDX_VALID(unit, h_data_p->egress_if)) {
key1 = h_data_p->egress_if - BCM_XGS3_DVP_EGRESS_IDX_MIN(unit);
}
}
for (ep_id = _BCM_OAM_BHH_KT2_ENDPOINT_OFFSET;
ep_id < (_BCM_OAM_BHH_KT2_ENDPOINT_OFFSET + oc->bhh_endpoint_count);
ep_id++) {
hash_data = &(oc->oam_hash_data[ep_id]);
if ((!hash_data->in_use) || (hash_data->ep_id == h_data_p->ep_id)) {
continue;
}
if (BCM_XGS3_L3_EGRESS_IDX_VALID(unit, hash_data->egress_if)) {
key2 = hash_data->egress_if - BCM_XGS3_EGRESS_IDX_MIN(unit);
} else {
if (BCM_XGS3_DVP_EGRESS_IDX_VALID(unit, hash_data->egress_if)) {
key2 = h_data_p->egress_if -
BCM_XGS3_DVP_EGRESS_IDX_MIN(unit);
}
}
if (key1 == key2) {
/* There are other EPs using the same NHI */
return BCM_E_NONE;
}
}
}
#endif /* INCLUDE_BHH */
/* Endpoint found, get MDL bitmap value. */
mdl = soc_EGR_MP_GROUPm_field32_get(unit, &egr_mp_group,
mdl_field);
/* Passive demultiplexing should not be done on a MIP.
* Hence do not touch PASSIVE bitmap when destroying
* also.
*/
if (!((mdl_field == MDL_BITMAP_PASSIVEf) &&
(_BCM_OAM_EP_IS_MIP(h_data_p)))) {
/* Clear the MDL bit for this endpoint. */
mdl &= ~(1 << h_data_p->level);
if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
*active_mdl = mdl;
} else{
/* Passive MDL bitmap */
_bcm_kt2_oam_get_passive_mdl_from_active_mdl(unit, *active_mdl,
h_data_p->ma_base_index, &mdl, 0);
}
}
/* Take L3 module protection mutex to block any updates. */
soc_mem_lock(unit, EGR_MP_GROUPm);
if (0 != mdl) {
/* Valid endpoints exist for other MDLs at this index. */
rv = soc_mem_field32_modify(unit, EGR_MP_GROUPm, mp_grp_index,
mdl_field, mdl);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: EGR MP group table update (EP=%d) -"
" %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
soc_mem_unlock(unit, EGR_MP_GROUPm);
return (rv);
}
/* Clear the counter, if any */
rv = _bcm_kt2_oam_clear_counter(unit, EGR_MP_GROUPm, mp_grp_index,
h_data_p, (void *)&egr_mp_group);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error:EGR MP group table update(EP=%d) -"
" %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
soc_mem_unlock(unit, EGR_MP_GROUPm);
return (rv);
}
} else {
/* check if other MDL bitmap is also zero (passive bitmap in case
of downmep and active bitmap in case of upmep), if so delete the
entry completely */
other_mdl = soc_EGR_MP_GROUPm_field32_get(unit, &egr_mp_group,
other_mdl_field);
if (0 == other_mdl) {
rv = soc_mem_delete_index(unit, EGR_MP_GROUPm, MEM_BLOCK_ALL,
mp_grp_index);
rv =_bcm_kt2_oam_free_counter(unit, h_data_p);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: LM counter block "
"free failed (EP=%d) - %s.\n"),
h_data_p->ep_id, bcm_errmsg(rv)));
soc_mem_unlock(unit, EGR_MP_GROUPm);
return (rv);
}
} else {
rv = soc_mem_field32_modify(unit, EGR_MP_GROUPm, mp_grp_index,
mdl_field, mdl);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: EGR MP group table update "
"(EP=%d) -" " %s.\n"),
h_data_p->ep_id, bcm_errmsg(rv)));
soc_mem_unlock(unit, EGR_MP_GROUPm);
return (rv);
}
/* Clear the counter, if any */
rv = _bcm_kt2_oam_clear_counter(unit, EGR_MP_GROUPm, mp_grp_index,
h_data_p, (void *)&egr_mp_group);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error:EGR MP group tbl update(EP=%d)-"
" %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
soc_mem_unlock(unit, EGR_MP_GROUPm);
return (rv);
}
}
}
soc_mem_unlock(unit, EGR_MP_GROUPm);
/* This is the last Rx endpoint in this OAM group. */
if ((0 == mdl) && (upmep)) {
ma_base_index = soc_EGR_MP_GROUPm_field32_get(unit, &egr_mp_group,
MA_BASE_PTRf);
/* Return Rx indices to free pool. */
for (idx = 0; idx < (1 << _BCM_OAM_EP_LEVEL_BIT_COUNT); idx++) {
rx_index[idx] = ma_base_index + idx;
}
rv = shr_idxres_list_free_set(oc->egr_ma_idx_pool, 8,
(shr_idxres_element_t *) rx_index,
(shr_idxres_element_t *) &count);
if (BCM_FAILURE(rv) || (8 != count)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Egr Rx index list free (EP=%d)"
" (count=%d).\n"), h_data_p->ep_id, count));
return (rv);
}
}
/* Delete EGR_SERVICE pri map profile entry for this endpoint. */
if (h_data_p->egr_pri_map_index != _BCM_OAM_INVALID_INDEX) {
rv = soc_profile_mem_delete(unit, &oc->egr_service_pri_map,
(h_data_p->egr_pri_map_index * BCM_OAM_INTPRI_MAX));
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAMError:Profile table update error (idx=%d)"
"- %s.\n"), h_data_p->egr_pri_map_index, bcm_errmsg(rv)));
return (rv);
}
h_data_p->egr_pri_map_index = _BCM_OAM_INVALID_INDEX;
}
} else if (BCM_FAILURE(rv) && (oc->init)) {
/* We do not program passive bitmap for MIP. So dont throw error
* if you don't find an entry, since we might not have created
* such an entry at all. */
if (!((mdl_field == MDL_BITMAP_PASSIVEf) &&
(_BCM_OAM_EP_IS_MIP(h_data_p)))) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM(unit %d) Error: LMEP table write (EP=%d) -"
" %s.\n"), unit, h_data_p->ep_id, bcm_errmsg(rv)));
return (rv);
}
}
return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_local_endpoint_delete
* Purpose:
* Delete a local endpoint.
* Parameters:
* unit - (IN) BCM device number
* h_data_p - (IN) Pointer to endpoint hash data
* Retruns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_local_endpoint_delete(int unit, _bcm_oam_hash_data_t *h_data_p)
{
    _bcm_oam_control_t *oc;          /* Pointer to OAM control structure. */
    lmep_entry_t lmep_entry;         /* Local endpoint table entry. */
    int rv;                          /* Operation return status. */
    lmep_1_entry_t lmep_1_entry;     /* LMEP_1 table entry. */
    soc_profile_mem_t *opcode_control_profile; /* profile used -
                                                  ingress or egress */
    soc_profile_mem_t *dglp_profile; /* dglp profile used -
                                        ingress or egress */
    ma_index_entry_t ma_idx_entry;   /* MA_INDEX table entry buffer. */
    ma_index_entry_t egr_ma_idx_entry;/* MA_INDEX table entry buffer. */
    void *ma_idx_ptr;                /* Points at whichever (ingress/egress)
                                      * MA_INDEX buffer applies to this MEP. */
    soc_mem_t ma_index_mem = 0;      /* MA_INDEX or EGR_MA_INDEX memory. */
    uint8 active_mdl = 0;            /* Active MDL bitmap, produced by the
                                      * first destroy and consumed by the
                                      * second to derive the passive bitmap. */
    int ma_offset = 0;               /* Endpoint's offset within the MA
                                      * base-index block. */
    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    if (1 == h_data_p->local_tx_enabled) {
        /* Clear endpoint Tx config in hardware: zero both the LMEP and
         * LMEP_1 entries at this endpoint's Tx index. */
        sal_memset(&lmep_entry, 0, sizeof(lmep_entry_t));
        sal_memset(&lmep_1_entry, 0, sizeof(lmep_1_entry_t));
        rv = WRITE_LMEPm(unit, MEM_BLOCK_ALL, h_data_p->local_tx_index,
                         &lmep_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: LMEP table write (EP=%d)"
                                  " failed - %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
            return (rv);
        }
        rv = WRITE_LMEP_1m(unit, MEM_BLOCK_ALL, h_data_p->local_tx_index,
                           &lmep_1_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: LMEP_1 table write (EP=%d)"
                                  " failed - %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
            return (rv);
        }
        /* Return ID back to free LMEP ID pool. */
        BCM_IF_ERROR_RETURN
            (shr_idxres_list_free(oc->lmep_pool, h_data_p->local_tx_index));
    }
    if (1 == h_data_p->local_rx_enabled) {
        /* Check whether up/down MEP: up-facing MEPs use the egress-side
         * profiles and EGR_MA_INDEX; down-facing MEPs use the ingress side. */
        if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
            opcode_control_profile = &oc->egr_oam_opcode_control_profile;
            dglp_profile = &oc->egr_oam_dglp_profile;
            ma_index_mem = EGR_MA_INDEXm;
            sal_memset(&egr_ma_idx_entry, 0, sizeof(egr_ma_index_entry_t));
            ma_idx_ptr = &egr_ma_idx_entry;
        } else {
            opcode_control_profile = &oc->oam_opcode_control_profile;
            dglp_profile = &oc->ing_oam_dglp_profile;
            ma_index_mem = MA_INDEXm;
            sal_memset(&ma_idx_entry, 0, sizeof(ma_index_entry_t));
            ma_idx_ptr = &ma_idx_entry;
        }
        /* Delete OAM opcode profile entry for this endpoint. */
        rv = soc_profile_mem_delete(unit, opcode_control_profile,
                                    h_data_p->profile_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Profile table update error (idx=%d)"
                                  "- %s.\n"), h_data_p->profile_index, bcm_errmsg(rv)));
            return (rv);
        }
        /* Delete DGLP profile entry for this endpoint.  Each of the two
         * DGLP slots is released only if it was actually allocated. */
        if (h_data_p->dglp1_profile_index != _BCM_OAM_INVALID_INDEX) {
            rv = soc_profile_mem_delete(unit, dglp_profile,
                                        h_data_p->dglp1_profile_index);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Profile table update "
                                      "error (idx=%d)" "- %s.\n"),
                           h_data_p->dglp1_profile_index, bcm_errmsg(rv)));
                return (rv);
            }
        }
        if (h_data_p->dglp2_profile_index != _BCM_OAM_INVALID_INDEX) {
            rv = soc_profile_mem_delete(unit, dglp_profile,
                                        h_data_p->dglp2_profile_index);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Profile table update "
                                      "error (idx=%d)" "- %s.\n"),
                           h_data_p->dglp2_profile_index, bcm_errmsg(rv)));
                return (rv);
            }
        }
        /* Get MA_INDEX offset */
        rv = _bcm_kt2_oam_ma_index_offset_get(unit, h_data_p, &ma_offset);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: MA INDEX table offset get failed EP=%d"
                                  " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
            return (rv);
        }
        /* The Rx index is the MA block base plus the per-level offset. */
        h_data_p->local_rx_index = h_data_p->ma_base_index + ma_offset;
        /* Delete MA INDEX entry (last argument 1 == delete). */
        rv = _bcm_kt2_oam_ma_index_entry_set(unit, h_data_p,
                                             ma_index_mem, ma_idx_ptr, 1);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: MA INDEX table delete failed EP=%d"
                                  " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
            return (rv);
        }
        if (h_data_p->oam_domain == _BCM_OAM_DOMAIN_PORT) {
            /* Port-domain MEPs only track an MDL bitmap on the port. */
            rv = _bcm_port_domain_mdl_bitmap_update(unit, h_data_p);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: port OAM MDL update failed EP=%d"
                                      " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                return (rv);
            }
        } else if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
            /* Up MEP: destroy the egress MP group entry first, which also
             * yields the active MDL bitmap consumed by the L3 destroy. */
            rv = _bcm_kt2_oam_egr_mp_group_entry_destroy(unit, h_data_p,
                                                         &active_mdl);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Egr MP Group update (EP=%d) -"
                                      " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                return (rv);
            }
            /* Update Ingress MP group table(L3_ENTRY) */
            rv = _bcm_kt2_oam_l3_entry_destroy(unit, h_data_p, &active_mdl);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: LMEP view update (EP=%d) -"
                                      " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                return (rv);
            }
        } else {
            /* Down MEP: same two destroys, but the ingress (L3_ENTRY) side
             * is torn down first so it supplies the active MDL bitmap. */
            /* Update Ingress MP group table(L3_ENTRY) */
            rv = _bcm_kt2_oam_l3_entry_destroy(unit, h_data_p, &active_mdl);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: LMEP view update (EP=%d) -"
                                      " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                return (rv);
            }
            rv = _bcm_kt2_oam_egr_mp_group_entry_destroy(unit, h_data_p,
                                                         &active_mdl);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Egr MP Group update (EP=%d) -"
                                      " %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                return (rv);
            }
        }
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_port_table_key_update1
* Purpose:
* Update OAM_KEY1 and OAM_KEY2 in port table
* Parameters:
* unit - (IN) BCM device number
* mem - (IN) PORT_TAB/EGR_PORT
* pp_port - (IN) PP port
* h_data_p - (IN/OUT) Pointer to endpoint hash data
* Returns:
* BCM_E_XXX
*/
int
_bcm_kt2_oam_port_table_key_update1(int unit, soc_mem_t mem, int pp_port,
                                    _bcm_oam_hash_data_t *h_data_p)
{
    _bcm_oam_control_t *oc;           /* Pointer to OAM control structure. */
    int rv = BCM_E_NONE;              /* Operation return status. */
    port_tab_entry_t port_entry;      /* Ingress port table entry buffer. */
    egr_port_entry_t egr_port_entry;  /* Egress port table entry buffer. */
    uint32 key_type = 0;              /* OAM domain key type for this MEP. */
    uint32 oam_key1 = 0;              /* Current OAM_KEY1 field value. */
    uint32 oam_key2 = 0;              /* Current OAM_KEY2 field value. */
    void *port_entry_ptr;             /* Points at whichever entry buffer
                                       * matches 'mem'. */
    uint16 *key1_ref_count = NULL;    /* Per-port KEY1 usage ref counts. */
    uint16 *key2_ref_count = NULL;    /* Per-port KEY2 usage ref counts. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Set the search key type from the endpoint's domain: VP beats
     * C-VLAN beats S+C-VLAN beats port (no VLANs) beats S-VLAN. */
    if (_BCM_OAM_EP_IS_VP_TYPE(h_data_p)) {
        key_type = _BCM_OAM_DOMAIN_VP;
    } else if(h_data_p->flags & BCM_OAM_ENDPOINT_MATCH_INNER_VLAN) {
        key_type = _BCM_OAM_DOMAIN_CVLAN;
    } else if(h_data_p->flags &
              BCM_OAM_ENDPOINT_MATCH_OUTER_AND_INNER_VLAN) {
        key_type = _BCM_OAM_DOMAIN_S_PLUS_CVLAN;
    } else if((h_data_p->vlan == 0) && (h_data_p->inner_vlan == 0)) {
        key_type = _BCM_OAM_DOMAIN_PORT;
    } else {
        key_type = _BCM_OAM_DOMAIN_SVLAN;
    }
    /* Pick the entry buffer and ref-count arrays for the requested
     * table (ingress PORT_TAB vs. egress port table). */
    if (mem == PORT_TABm) {
        port_entry_ptr = &port_entry;
        key1_ref_count = &oc->oam_key1_ref_count[0];
        key2_ref_count = &oc->oam_key2_ref_count[0];
    } else {
        port_entry_ptr = &egr_port_entry;
        key1_ref_count = &oc->egr_oam_key1_ref_count[0];
        key2_ref_count = &oc->egr_oam_key2_ref_count[0];
    }
    /*
       We need to set the OAM_KEY1 and OAM_KEY2 fields of the PORT_TABLE and
       EGR_PORT_TABLE based on the key_type of OAM */
    rv = soc_mem_read(unit, mem, MEM_BLOCK_ANY, pp_port, port_entry_ptr);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Port table read - %s.\n"),
                   bcm_errmsg(rv)));
        return (rv);
    }
    oam_key1 = soc_mem_field32_get(unit, mem, port_entry_ptr, OAM_KEY1f);
    oam_key2 = soc_mem_field32_get(unit, mem, port_entry_ptr, OAM_KEY2f);
    /* Only two distinct non-port key types can be resolved per port.  If
     * both keys are in use, the new MEP's type must match one of them. */
    if (key_type != _BCM_OAM_DOMAIN_PORT) {
        if ((oam_key1 > 0) && (oam_key2 > 0)) {
            if ((oam_key1 == key_type) || (oam_key2 == key_type)) {
                /* Matches an existing key; handled below. */
            } else {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Invalid OAM domain to resolve "
                                      "(EP=%d) - %s.\n"),
                           h_data_p->ep_id, bcm_errmsg(rv)));
                return (BCM_E_PARAM);
            }
        }
    }
    /* Install key_type into KEY1/KEY2 while maintaining the invariant
     * (demonstrated by every branch below) that OAM_KEY1 holds the
     * numerically smaller key type and OAM_KEY2 the larger; ref counts
     * follow their key when the two slots are swapped. */
    if (key_type != _BCM_OAM_DOMAIN_PORT) {
        if ((oam_key1 == 0) && (oam_key2 == 0)) {
            /* First key on this port. */
            soc_mem_field32_set(unit, mem, port_entry_ptr, OAM_KEY1f, key_type);
            key1_ref_count[pp_port]++;
        } else if ((oam_key1 != key_type) && (oam_key2 == 0)) {
            if (key_type > oam_key1) {
                soc_mem_field32_set(unit, mem, port_entry_ptr, OAM_KEY2f,
                                    key_type);
                key2_ref_count[pp_port]++;
            } else {
                /* New key is smaller: move the existing key (and its
                 * ref count) to slot 2, install the new key in slot 1. */
                key2_ref_count[pp_port] = key1_ref_count[pp_port];
                soc_mem_field32_set(unit, mem, port_entry_ptr,
                                    OAM_KEY1f, key_type);
                key1_ref_count[pp_port] = 1;
                soc_mem_field32_set(unit, mem, port_entry_ptr,
                                    OAM_KEY2f, oam_key1);
            }
        } else if ((oam_key2 != key_type) && (oam_key1 == 0)) {
            if (key_type < oam_key2) {
                soc_mem_field32_set(unit, mem, port_entry_ptr, OAM_KEY1f,
                                    key_type);
                key1_ref_count[pp_port]++;
            } else {
                /* New key is larger: move the existing key (and its
                 * ref count) to slot 1, install the new key in slot 2. */
                key1_ref_count[pp_port] = key2_ref_count[pp_port];
                soc_mem_field32_set(unit, mem, port_entry_ptr,
                                    OAM_KEY1f, oam_key2);
                soc_mem_field32_set(unit, mem, port_entry_ptr,
                                    OAM_KEY2f, key_type);
                key2_ref_count[pp_port] = 1;
            }
        } else if (oam_key1 == key_type) {
            key1_ref_count[pp_port]++;
        } else if (oam_key2 == key_type) {
            key2_ref_count[pp_port]++ ;
        }
        /* During warm boot only the software ref counts are rebuilt;
         * hardware already holds the correct key fields. */
        if (SOC_WARM_BOOT(unit)) {
            return rv;
        }
        rv = soc_mem_write(unit, mem, MEM_BLOCK_ALL, pp_port, port_entry_ptr);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Port table update failed "
                                  "%s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
    }
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_port_table_key_update
* Purpose:
* Update port table OAM key fields.
* Parameters:
* unit - (IN) BCM device number
* mem - (IN) PORT_TAB/EGR_PORT
* h_data_p - (IN/OUT) Pointer to endpoint hash data
* Returns:
* BCM_E_XXX
*/
int
_bcm_kt2_oam_port_table_key_update(int unit, soc_mem_t mem,
                                   _bcm_oam_hash_data_t *h_data_p)
{
    int rv = BCM_E_NONE;            /* Operation return status. */
    bcm_port_t *trunk_ports = NULL; /* Trunk member port array. */
    bcm_module_t mod_id;            /* Module ID */
    bcm_port_t pp_port = 0;         /* PP port to update. */
    int total_members = 0;          /* Ports configured in the trunk. */
    int local_members = 0;          /* Members local to this module. */
    int idx;                        /* Member iterator. */

    if (BCM_TRUNK_INVALID == h_data_p->trunk_id) {
        /* Non-trunk endpoint: update the single PP port that matches
         * the requested table. */
        pp_port = (mem == PORT_TABm) ? h_data_p->src_pp_port
                                     : h_data_p->dst_pp_port;
        return _bcm_kt2_oam_port_table_key_update1(unit, mem, pp_port,
                                                   h_data_p);
    }

    /* Trunk endpoint: the key update must be applied on every local
     * member port of the trunk. */
    BCM_IF_ERROR_RETURN
        (bcm_esw_trunk_get(unit, h_data_p->trunk_id,
                           NULL, 0, NULL, &total_members));
    if (0 == total_members) {
        /* No members have been added to the trunk group yet */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: No local members have been added to "
                              "the trunk group yet - %s.\n"), bcm_errmsg(rv)));
        return BCM_E_PARAM;
    }

    _BCM_OAM_ALLOC(trunk_ports, bcm_port_t,
                   sizeof(bcm_port_t) * total_members, "Trunk info");
    if (NULL == trunk_ports) {
        return (BCM_E_MEMORY);
    }

    /* Fetch the members local to this module and apply the key update
     * to each one. */
    if (BCM_SUCCESS(_bcm_esw_trunk_local_members_get(unit,
                                                     h_data_p->trunk_id,
                                                     total_members,
                                                     trunk_ports,
                                                     &local_members))) {
        for (idx = 0; idx < local_members; idx++) {
            rv = _bcm_kt2_pp_port_to_modport_get(unit, trunk_ports[idx],
                                                 &mod_id, &pp_port);
            if (BCM_SUCCESS(rv)) {
                rv = _bcm_kt2_oam_port_table_key_update1(unit, mem,
                                                         pp_port, h_data_p);
            }
            if (BCM_FAILURE(rv)) {
                sal_free(trunk_ports);
                return (rv);
            }
        }
    }
    sal_free(trunk_ports);
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_decrement_key_ref_count1
* Purpose:
* Decrement PORT tab key1/key2 usage ref count.
* Parameters:
* unit - (IN) BCM device number
* src_pp_port - (IN) Source PP port
* dst_pp_port - (IN) destination PP port
* h_data_p - (IN) Endpoint hash data.
* Retruns:
* BCM_E_XXX
*/
int
_bcm_kt2_oam_decrement_key_ref_count1(int unit, uint32 src_pp_port,
                                      uint32 dst_pp_port,
                                      _bcm_oam_hash_data_t *h_data_p)
{
    _bcm_oam_control_t *oc;          /* Pointer to OAM control structure. */
    int rv = BCM_E_NONE;             /* Operation return status. */
    port_tab_entry_t port_entry;     /* Ingress port table entry buffer. */
    egr_port_entry_t egr_port_entry; /* Egress port table entry buffer. */
    uint32 key_type = 0;             /* OAM domain key type for this MEP. */
    uint32 oam_key1 = 0;             /* Current OAM_KEY1 field value. */
    uint32 oam_key2 = 0;             /* Current OAM_KEY2 field value. */
    int update_entry = 0;            /* Set when a key field was cleared and
                                      * the entry must be written back. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Set the search key type (same derivation as the key update path). */
    if (_BCM_OAM_EP_IS_VP_TYPE(h_data_p)) {
        key_type = _BCM_OAM_DOMAIN_VP;
    } else if(h_data_p->flags & BCM_OAM_ENDPOINT_MATCH_INNER_VLAN) {
        key_type = _BCM_OAM_DOMAIN_CVLAN;
    } else if(h_data_p->flags &
              BCM_OAM_ENDPOINT_MATCH_OUTER_AND_INNER_VLAN) {
        key_type = _BCM_OAM_DOMAIN_S_PLUS_CVLAN;
    } else if((h_data_p->vlan == 0) && (h_data_p->inner_vlan == 0)) {
        key_type = _BCM_OAM_DOMAIN_PORT;
    } else {
        key_type = _BCM_OAM_DOMAIN_SVLAN;
    }
    /* Read both the ingress (source port) and egress (destination port)
     * table entries up front. */
    rv = soc_mem_read(unit, PORT_TABm, MEM_BLOCK_ANY, src_pp_port, &port_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Port table entry read failed - %s.\n"),
                   bcm_errmsg(rv)));
        return (rv);
    }
    rv = soc_mem_read(unit, EGR_PORTm, MEM_BLOCK_ANY,
                      dst_pp_port, &egr_port_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Port table entry read failed - %s.\n"),
                   bcm_errmsg(rv)));
        return (rv);
    }
    oam_key1 = soc_PORT_TABm_field32_get(unit, &port_entry, OAM_KEY1f);
    oam_key2 = soc_PORT_TABm_field32_get(unit, &port_entry, OAM_KEY2f);
    /* Port-domain MEPs do not consume a KEY1/KEY2 slot; nothing to do. */
    if (key_type != _BCM_OAM_DOMAIN_PORT) {
        /* Ingress side: decrement the matching key's ref count and clear
         * the hardware field once the last user is gone. */
        if (key_type == oam_key1) {
            oc->oam_key1_ref_count[src_pp_port]--;
            if (oc->oam_key1_ref_count[src_pp_port] == 0) {
                soc_PORT_TABm_field32_set(unit, &port_entry, OAM_KEY1f, 0);
                update_entry = 1;
            }
        } else if (key_type == oam_key2) {
            oc->oam_key2_ref_count[src_pp_port]--;
            if (oc->oam_key2_ref_count[src_pp_port] == 0) {
                soc_PORT_TABm_field32_set(unit, &port_entry, OAM_KEY2f, 0);
                update_entry = 1;
            }
        }
        if (update_entry) {
            rv = soc_mem_write(unit, PORT_TABm, MEM_BLOCK_ALL,
                               src_pp_port, &port_entry);
            update_entry = 0;
        }
        /* Egress side: same procedure against the EGR_PORT entry and the
         * egress ref-count arrays. */
        oam_key1 = soc_EGR_PORTm_field32_get(unit, &egr_port_entry, OAM_KEY1f);
        oam_key2 = soc_EGR_PORTm_field32_get(unit, &egr_port_entry, OAM_KEY2f);
        if (key_type == oam_key1) {
            oc->egr_oam_key1_ref_count[dst_pp_port]--;
            if (oc->egr_oam_key1_ref_count[dst_pp_port] == 0) {
                soc_EGR_PORTm_field32_set(unit, &egr_port_entry, OAM_KEY1f, 0);
                update_entry = 1;
            }
        } else if (key_type == oam_key2) {
            oc->egr_oam_key2_ref_count[dst_pp_port]--;
            if (oc->egr_oam_key2_ref_count[dst_pp_port] == 0) {
                soc_EGR_PORTm_field32_set(unit, &egr_port_entry, OAM_KEY2f, 0);
                update_entry = 1;
            }
        }
        if (update_entry) {
            rv = soc_mem_write(unit, EGR_PORTm, MEM_BLOCK_ALL,
                               dst_pp_port, &egr_port_entry);
            update_entry = 0;
        }
    }
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_decrement_key_ref_count
* Purpose:
* Decrement PORT tab key1/key2 usage ref count.
* Parameters:
* unit - (IN) BCM device number
* h_data_p - (IN) Endpoint hash data.
* Retruns:
* BCM_E_XXX
*/
int
_bcm_kt2_oam_decrement_key_ref_count(int unit,
                                     _bcm_oam_hash_data_t *h_data_p)
{
    int rv = BCM_E_NONE;            /* Operation return status. */
    bcm_port_t *trunk_ports = NULL; /* Trunk member port array. */
    bcm_module_t mod_id;            /* Module ID */
    bcm_port_t src_port = 0;        /* Source PP port for each member. */
    int total_members = 0;          /* Ports configured in the trunk. */
    int local_members = 0;          /* Members local to this module. */
    int idx;                        /* Member iterator. */

    if (BCM_TRUNK_INVALID == h_data_p->trunk_id) {
        /* Non-trunk endpoint: a single src/dst port pair to update. */
        return _bcm_kt2_oam_decrement_key_ref_count1(unit,
                                                     h_data_p->src_pp_port,
                                                     h_data_p->dst_pp_port,
                                                     h_data_p);
    }

    /* Trunk endpoint: the ref counts must be decremented for every
     * local member port of the trunk. */
    BCM_IF_ERROR_RETURN
        (bcm_esw_trunk_get(unit, h_data_p->trunk_id,
                           NULL, 0, NULL, &total_members));
    if (0 == total_members) {
        /* No members have been added to the trunk group yet */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: No local members have been added to "
                              "the trunk group yet - %s.\n"), bcm_errmsg(rv)));
        return BCM_E_PARAM;
    }

    _BCM_OAM_ALLOC(trunk_ports, bcm_port_t,
                   sizeof(bcm_port_t) * total_members, "Trunk info");
    if (NULL == trunk_ports) {
        return (BCM_E_MEMORY);
    }

    /* Fetch the members local to this module and decrement the key ref
     * counts against each one. */
    if (BCM_SUCCESS(_bcm_esw_trunk_local_members_get(unit,
                                                     h_data_p->trunk_id,
                                                     total_members,
                                                     trunk_ports,
                                                     &local_members))) {
        for (idx = 0; idx < local_members; idx++) {
            rv = _bcm_kt2_pp_port_to_modport_get(unit, trunk_ports[idx],
                                                 &mod_id, &src_port);
            if (BCM_SUCCESS(rv)) {
                rv = _bcm_kt2_oam_decrement_key_ref_count1(unit, src_port,
                                                           h_data_p->dst_pp_port,
                                                           h_data_p);
            }
            if (BCM_FAILURE(rv)) {
                sal_free(trunk_ports);
                return (rv);
            }
        }
    }
    sal_free(trunk_ports);
    return rv;
}
/*
* Function: _bcm_kt2_oam_tpid_decrement_ref_count
*
* Purpose:
* Decrement TPID reference count.
* Parameters:
* unit - (IN) BCM device number
* h_data_p - (IN) Endpoint hash data.
* Retruns:
* BCM_E_XXX
*/
/*
 * Function: _bcm_kt2_oam_tpid_decrement_ref_count
 *
 * Purpose:
 *     Decrement the TPID profile reference counts (outer/inner/subport)
 *     taken when the endpoint's CCM Tx path was programmed.
 * Parameters:
 *     unit     - (IN) BCM device number
 *     h_data_p - (IN) Endpoint hash data.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_tpid_decrement_ref_count(int unit, _bcm_oam_hash_data_t *h_data_p)
{
    int rv = BCM_E_NONE;   /* Operation return status. */
    uint32 tag_type = 0;   /* Effective OAM tag domain of this endpoint. */

    /* If local CCM tx generation was not enabled, then the tpid entries
     * would not have been set. So do not try to delete them even though the
     * h_data_p->outer/inner/subport_tpid has a non zero value */
    if (!h_data_p->local_tx_enabled) {
        return BCM_E_NONE;
    }

    /* Derive the tag type.  For VP endpoints it comes from the endpoint
     * flags/VLANs; otherwise it is the endpoint's OAM domain. */
    if (_BCM_OAM_EP_IS_VP_TYPE(h_data_p)) {
        if (h_data_p->flags & BCM_OAM_ENDPOINT_MATCH_OUTER_AND_INNER_VLAN) {
            tag_type = _BCM_OAM_DOMAIN_S_PLUS_CVLAN;
        } else if (h_data_p->flags & BCM_OAM_ENDPOINT_MATCH_INNER_VLAN) {
            tag_type = _BCM_OAM_DOMAIN_CVLAN;
        } else if (h_data_p->vlan > 0) {
            tag_type = _BCM_OAM_DOMAIN_SVLAN;
        }
    } else {
        tag_type = h_data_p->oam_domain;
    }

    /* NOTE: the original code wrapped each delete in 'if (BCM_SUCCESS(rv))';
     * those guards were always true (rv is BCM_E_NONE at each point and
     * every failure path returns immediately), so they are removed here
     * without changing behavior. */

    /* Outer TPID: released for S or S+C tagged endpoints. */
    if (((tag_type == _BCM_OAM_DOMAIN_SVLAN) ||
         (tag_type == _BCM_OAM_DOMAIN_S_PLUS_CVLAN)) &&
        (h_data_p->outer_tpid > 0)) {
        rv = _bcm_kt2_tpid_entry_delete(unit,
                                        h_data_p->outer_tpid_profile_index,
                                        BCM_OAM_TPID_TYPE_OUTER);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3 entry config failed in Outer TPID "
                                  "decrement ref count"" %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
    }

    /* Inner TPID: released for C or S+C tagged endpoints. */
    if (((tag_type == _BCM_OAM_DOMAIN_CVLAN) ||
         (tag_type == _BCM_OAM_DOMAIN_S_PLUS_CVLAN)) &&
        (h_data_p->inner_tpid > 0)) {
        rv = _bcm_kt2_tpid_entry_delete(unit,
                                        h_data_p->inner_tpid_profile_index,
                                        BCM_OAM_TPID_TYPE_INNER);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3 entry config failed in Inner TPID "
                                  "decrement ref count"" %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
    }

    /* Subport TPID: released for SUBTAG CoE subport endpoints. */
    if ((BCM_GPORT_IS_SUBPORT_PORT(h_data_p->gport)) &&
        (_BCM_KT2_GPORT_IS_SUBTAG_SUBPORT_PORT(unit, h_data_p->gport)) &&
        (h_data_p->subport_tpid > 0)) {
        rv = _bcm_kt2_tpid_entry_delete(unit,
                                        h_data_p->subport_tpid_profile_index,
                                        BCM_OAM_TPID_TYPE_SUBPORT);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3 entry config failed in Subport TPID "
                                  "decrement ref count"" %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
    }
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_endpoint_destroy
* Purpose:
* Delete an endpoint and free all its allocated resources.
* Parameters:
* unit - (IN) BCM device number
* ep_id - (IN) Endpoint ID value.
* Retruns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_endpoint_destroy(int unit,
                              bcm_oam_endpoint_t ep_id)
{
    _bcm_oam_control_t *oc;          /* Pointer to OAM control structure. */
    _bcm_oam_hash_data_t *h_data_p;  /* Pointer to endpoint data. */
    _bcm_oam_hash_key_t hash_key;    /* Hash key buffer for lookup. */
    bcm_oam_endpoint_info_t ep_info; /* Endpoint information. */
    _bcm_oam_hash_data_t h_data;     /* Pointer to endpoint data. */
    int rv;                          /* Operation return status. */
#if defined(INCLUDE_BHH)
    uint16 reply_len;                /* uKernel reply message length. */
    bcm_oam_endpoint_t bhh_pool_ep_id; /* Endpoint ID in the BHH pool space. */
#endif
    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Get the hash data pointer. */
    h_data_p = &oc->oam_hash_data[ep_id];
    if(0 == h_data_p->in_use){
        return BCM_E_NOT_FOUND;
    }
    /* Remove the ma_idx -> endpoint-id mapping first; it references the
     * indices released below. */
    rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_delete(unit, h_data_p);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Remove from ma_idx to ep id map (EP=%d) -"
                              " %s.\n"), unit, h_data_p->ep_id, bcm_errmsg(rv)));
        return (rv);
    }
    if(bcmOAMEndpointTypeEthernet == h_data_p->type)
    {
        /* Ethernet (CFM) endpoint teardown. */
        if (h_data_p->flags & BCM_OAM_ENDPOINT_REMOTE) {
            BCM_IF_ERROR_RETURN
                (_bcm_kt2_oam_remote_endpoint_delete(unit, h_data_p));
        } else {
            BCM_IF_ERROR_RETURN
                (_bcm_kt2_oam_local_endpoint_delete(unit, h_data_p));
            /* decrement TPID ref count */
            BCM_IF_ERROR_RETURN
                (_bcm_kt2_oam_tpid_decrement_ref_count(unit, h_data_p));
        }
        /* Remove endpoint for group's endpoint list. */
        rv = _bcm_kt2_oam_group_ep_list_remove(unit, h_data_p->group_index,
                                               h_data_p->ep_id);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Remove from group list (EP=%d) -"
                                  " %s.\n"), ep_id, bcm_errmsg(rv)));
            return (rv);
        }
        /* Return ID back to free MEP ID pool.*/
        BCM_IF_ERROR_RETURN(shr_idxres_list_free(oc->mep_pool, ep_id));
        /* Decrement PORT tab key1/key2 usage ref count */
        if (!(h_data_p->flags & BCM_OAM_ENDPOINT_REMOTE)) {
            BCM_IF_ERROR_RETURN
                (_bcm_kt2_oam_decrement_key_ref_count(unit, h_data_p));
        }
        /* Initialize endpoint info structure. */
        bcm_oam_endpoint_info_t_init(&ep_info);
        /* Set up endpoint information for key construction.  These are the
         * same fields used when the endpoint was hashed in at create time. */
        ep_info.group = h_data_p->group_index;
        ep_info.name = h_data_p->name;
        ep_info.gport = h_data_p->gport;
        ep_info.level = h_data_p->level;
        ep_info.vlan = h_data_p->vlan;
        ep_info.inner_vlan = h_data_p->inner_vlan;
        ep_info.flags = h_data_p->flags;
        /* Construct hash key for lookup + delete operation. */
        _bcm_kt2_oam_ep_hash_key_construct(unit, oc, &ep_info, &hash_key);
        /* Remove entry from hash table (last argument 1 == remove). */
        BCM_IF_ERROR_RETURN(shr_htb_find(oc->ma_mep_htbl, hash_key,
                                         (shr_htb_data_t *)&h_data,
                                         1));
        /* Clear the hash data memory previously occupied by this endpoint. */
        _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
    }
    /*
     * BHH specific
     */
    else if (soc_feature(unit, soc_feature_bhh)
             && BHH_EP_TYPE(h_data_p)) {
#if defined(INCLUDE_BHH)
        if (h_data_p->is_remote) {
            /*
             * BHH uses same index for local and remote. So, delete always goes through
             * local endpoint destory
             */
            return (BCM_E_NONE);
        } else {
            bhh_pool_ep_id = BCM_OAM_BHH_GET_UKERNEL_EP(ep_id);
            /* Delete BHH Session in HW. BCM_E_NOT_FOUND is not an error when
               switch is re-initializing as entry would have been already
               deleted by respective module init. */
            rv = _bcm_kt2_oam_bhh_session_hw_delete(unit, h_data_p);
            if (BCM_FAILURE(rv) && rv != BCM_E_NOT_FOUND) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: bhh_session_hw_delete failled "
                                      "(EP=%d) - %s.\n"), ep_id, bcm_errmsg(rv)));
                return (rv);
            }
            if (!(h_data_p->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU)) {
                /* Send BHH Session Delete message to uC. Error response is not an
                   during switch re-init as uKernel is reloaded and its state is
                   cleared before oam init. As such endpoint wont exist during oam init*/
                /* Return value intentionally ignored (see note above). */
                _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                                  MOS_MSG_SUBCLASS_BHH_SESS_DELETE,
                                                  (int)bhh_pool_ep_id, 0,
                                                  MOS_MSG_SUBCLASS_BHH_SESS_DELETE_REPLY,
                                                  &reply_len);
            }
            if (!BHH_EP_MPLS_SECTION_TYPE(h_data_p)) {
                rv = _bcm_kt2_oam_local_endpoint_delete(unit, h_data_p);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error:BHH local_endpoint_delete failled "
                                          "(EP=%d) - %s.\n"), ep_id, bcm_errmsg(rv)));
                    return (rv);
                }
            }
            h_data_p->in_use = 0;
            /* Remove endpoint for group's endpoint list. */
            rv = _bcm_kt2_oam_group_ep_list_remove(unit, h_data_p->group_index,
                                                   h_data_p->ep_id);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Remove from group list (EP=%d) -"
                                      " %s.\n"), ep_id, bcm_errmsg(rv)));
                return (rv);
            }
            /* Return ID back to free MEP ID pool.*/
            BCM_IF_ERROR_RETURN(shr_idxres_list_free(oc->bhh_pool, bhh_pool_ep_id));
            /* Set up endpoint information for key construction.
             * NOTE(review): unlike the Ethernet branch above, this path does
             * not call bcm_oam_endpoint_info_t_init(&ep_info) first, so any
             * ep_info fields not assigned below are uninitialized — confirm
             * the hash key construction reads only the fields set here. */
            ep_info.group = h_data_p->group_index;
            ep_info.name = h_data_p->name;
            ep_info.gport = h_data_p->gport;
            ep_info.level = h_data_p->level;
            ep_info.vlan = h_data_p->vlan;
            ep_info.inner_vlan = h_data_p->inner_vlan;
            ep_info.flags = h_data_p->flags;
            /* Construct hash key for lookup + delete operation. */
            _bcm_kt2_oam_ep_hash_key_construct(unit, oc, &ep_info, &hash_key);
            /* Remove entry from hash table (last argument 1 == remove). */
            BCM_IF_ERROR_RETURN(shr_htb_find(oc->ma_mep_htbl, hash_key,
                                             (shr_htb_data_t *)&h_data,
                                             1));
            /* Clear the hash data memory previously occupied by this endpoint. */
            _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
        }
#else
        return (BCM_E_UNAVAIL);
#endif /* INCLUDE_BHH */
    }
    else {
        /* Not an Ethernet or (supported) BHH endpoint type. */
        return (BCM_E_PARAM);
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_group_endpoints_destroy
* Purpose:
* Delete all endpoints associated with a group and free all
* resources allocated by these endpoints.
* Parameters:
* unit - (IN) BCM device number
* g_info_p - (IN) Pointer to group information
* Retruns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_group_endpoints_destroy(int unit,
                                     _bcm_oam_group_data_t *g_info_p)
{
    bcm_oam_endpoint_t ep_id;   /* Endpoint ID. */
    _bcm_oam_ep_list_t *node;   /* Endpoint list iterator. */
    int rv;                     /* Operation return status. */

    if (NULL == g_info_p) {
        return (BCM_E_INTERNAL);
    }

    /* Walk the group's endpoint list, destroying each endpoint.  The
     * endpoint ID and next pointer are captured before the destroy call,
     * since destroying an endpoint unlinks (and may free) its node. */
    node = *(g_info_p->ep_list);
    if (NULL == node) {
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Info: No endpoints in group.\n")));
        return (BCM_E_NONE);
    }

    do {
        ep_id = node->ep_data_p->ep_id;
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Info: GID=%d EP:%d.\n"),
                   node->ep_data_p->group_index, ep_id));
        node = node->next;
        rv = _bcm_kt2_oam_endpoint_destroy(unit, ep_id);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint destroy (EP=%d) - "
                                  "%s.\n"), ep_id, bcm_errmsg(rv)));
            return (rv);
        }
    } while (NULL != node);

    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_trunk_resolve
* Purpose:
* Resolve a trunk value to SGLP, DGLP value and Tx gport.
* Parameters:
* unit - (IN) BCM device number
* trunk_id - (IN) Pointer to trunk id
* trunk_index - (IN) Index in the trunk array to Tx pkts
* src_glp - (IN/OUT) Pointer to source generic logical port value.
* dst_glp - (IN/OUT) Pointer to destination generic logical port value.
* src_pp_port - (IN/OUT) Pointer to source pp port value.
* dst_pp_port - (IN/OUT) Pointer to destination pp port value.
* tx_gport - (OUT) Gport on which PDUs need to be sent
* Retruns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_trunk_resolve(int unit, bcm_trunk_t trunk_id, int trunk_index,
                           bcm_port_t *src_pp_port, bcm_port_t *dst_pp_port,
                           uint32 *src_glp, uint32 *dst_glp, bcm_gport_t *tx_gport)
{
    bcm_port_t port_id = 0;          /* Member port ID. */
    bcm_port_t port = 0;             /* Modport of the local member. */
    bcm_module_t module_id = 0;      /* Module ID. */
    int local_id;                    /* Hardware ID (unused here). */
    bcm_trunk_member_t *member_array = NULL; /* Trunk member info array. */
    int member_count = 0;            /* Total ports in the trunk. */
    int local_member_count = 0;      /* Members local to this module. */
    bcm_trunk_info_t trunk_info;     /* Trunk configuration info. */
    bcm_trunk_t tid = BCM_TRUNK_INVALID; /* Trunk ID from gport resolve. */
    int rv = BCM_E_NONE;             /* Operation return status. */

    if (BCM_TRUNK_INVALID == trunk_id) {
        /* Has to be a valid Trunk. */
        return (BCM_E_PARAM);
    }
    /* Construct Hw SGLP value (trunk form: trunk bit set, trunk_id). */
    _BCM_KT2_OAM_MOD_PORT_TO_GLP(unit, module_id, port_id, 1,
                                 trunk_id, *src_glp);
    /* Get a member of the trunk belonging to this module and use it as
     * the source PP port. */
    if (BCM_SUCCESS(_bcm_esw_trunk_local_members_get(unit, trunk_id, 1,
                                                     &port_id,
                                                     &local_member_count))) {
        BCM_IF_ERROR_RETURN(_bcm_kt2_pp_port_to_modport_get(unit, port_id,
                                                            &module_id, &port));
        *src_pp_port = port_id;
    }
    /* Get count of ports in this trunk. */
    BCM_IF_ERROR_RETURN(bcm_esw_trunk_get(unit, trunk_id, NULL, 0,
                                          NULL, &member_count));
    if (0 == member_count) {
        /* No members have been added to the trunk group yet */
        return BCM_E_PARAM;
    }
    _BCM_OAM_ALLOC(member_array, bcm_trunk_member_t,
                   sizeof(bcm_trunk_member_t) * member_count, "Trunk info");
    if (NULL == member_array) {
        return (BCM_E_MEMORY);
    }
    /* Get Trunk Info for the Trunk ID. */
    rv = bcm_esw_trunk_get(unit, trunk_id, &trunk_info,
                           member_count, member_array, &member_count);
    if (BCM_FAILURE(rv)) {
        sal_free(member_array);
        return (rv);
    }
    /* Check if the input trunk_index is valid. */
    if (trunk_index >= member_count) {
        sal_free(member_array);
        return BCM_E_PARAM;
    }
    /* Get the Modid and Port value using Trunk Index value. */
    rv = _bcm_esw_gport_resolve(unit, member_array[trunk_index].gport,
                                &module_id, &port_id, &tid, &local_id);
    if (BCM_FAILURE(rv)) {
        sal_free(member_array);
        return (rv);
    }
    /* Capture the Tx gport BEFORE freeing member_array.  The original
     * code read member_array[trunk_index].gport after sal_free() — a
     * use-after-free. */
    *tx_gport = member_array[trunk_index].gport;
    sal_free(member_array);
    /* Construct Hw DGLP value. */
    _BCM_KT2_OAM_MOD_PORT_TO_GLP(unit, module_id, port_id, 0, -1, *dst_glp);
    /* Get HW PP port */
    BCM_IF_ERROR_RETURN(_bcm_kt2_modport_to_pp_port_get(unit, module_id,
                                                        port_id, dst_pp_port));
    /* For trunks the DGLP reported to the caller is the trunk GLP (same
     * as SGLP); the member-port DGLP constructed above is only used to
     * derive dst_pp_port.  NOTE(review): this overwrite is preserved from
     * the original code — confirm it is intentional. */
    *dst_glp = *src_glp;
    return BCM_E_NONE;
}
/*
* Function:
* _bcm_kt2_oam_endpoint_gport_resolve
* Purpose:
* Resolve an endpoint GPORT value to SGLP and DGLP value.
* Parameters:
* unit - (IN) BCM device number
* ep_info_p - (IN/OUT) Pointer to endpoint information.
* src_glp - (IN/OUT) Pointer to source generic logical port value.
* dst_glp - (IN/OUT) Pointer to destination generic logical port value.
* src_pp_port- (IN/OUT) Pointer to source pp port value.
* dst_pp_port- (IN/OUT) Pointer to destination pp port value.
* svp - (IN/OUT) Pointer to VP value
* trunk_id - (IN/OUT) Pointer to trunk id
* tx_gport - (OUT) Gport on which PDUs need to be sent
* Retruns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_endpoint_gport_resolve(int unit, bcm_oam_endpoint_info_t *ep_info_p,
                                    uint32 *src_glp, uint32 *dst_glp,
                                    bcm_port_t *src_pp_port,
                                    bcm_port_t *dst_pp_port, uint32 *svp,
                                    bcm_trunk_t *trunk_id, int *is_vp_valid,
                                    bcm_gport_t *tx_gport)
{
    bcm_module_t module_id; /* Module ID resolved from the gport. */
    bcm_port_t port_id; /* Port ID resolved from the gport. */
    bcm_port_t port; /* Physical port derived from a PP port. */
    int local_id; /* Hardware ID (VP for virtual-port gports). */
    int tx_enabled = 0; /* 1 when CCM Tx is enabled on this endpoint. */
    int local_member_count = 0; /* Number of local trunk members found. */
    int rv = BCM_E_NONE; /* Operation return status. */
    uint8 glp_valid = 0; /* Set once src/dst GLPs have been derived. */
    int is_local = 0; /* 1 when module_id is local to this unit. */
    uint8 is_port_mep = 0; /* 1 for a port-based (VLAN-less) MEP. */
    /* A MEP with both outer and inner VLAN zero is a port-based MEP. */
    is_port_mep = ((ep_info_p->vlan == 0) && (ep_info_p->inner_vlan == 0));
    /* Get Trunk ID or (Modid + Port) value from Gport */
    BCM_IF_ERROR_RETURN
        (_bcm_esw_gport_resolve(unit, ep_info_p->gport, &module_id,
                                &port_id, trunk_id, &local_id));
    /* Set CCM endpoint Tx status only for local endpoints. */
    if (!(ep_info_p->flags & BCM_OAM_ENDPOINT_REMOTE)) {
        tx_enabled
            = (ep_info_p->ccm_period
               != BCM_OAM_ENDPOINT_CCM_PERIOD_DISABLED) ? 1 : 0;
    }
    /*
     * If Gport is Trunk type, _bcm_esw_gport_resolve()
     * sets trunk_id. Using Trunk ID, get Dst Modid and Port value.
     */
    if (BCM_GPORT_IS_TRUNK(ep_info_p->gport)) {
        /*
         * CCM Tx is enabled on a trunk member port.
         * trunk_index value is required to derive the Modid and Port info.
         */
        if (1 == tx_enabled && _BCM_OAM_INVALID_INDEX == ep_info_p->trunk_index) {
            /* Invalid Trunk member index passed. */
            return (BCM_E_PORT);
        }
        /* Resolve the designated trunk member into GLP/PP-port values. */
        BCM_IF_ERROR_RETURN(
               _bcm_kt2_oam_trunk_resolve(unit, *trunk_id, ep_info_p->trunk_index,
                                          src_pp_port, dst_pp_port,
                                          src_glp, dst_glp, tx_gport));
        glp_valid = 1;
    }
    /*
     * Application can resolve the trunk and pass the designated
     * port as Gport value. Check if the Gport belongs to a trunk.
     */
    if ((BCM_TRUNK_INVALID == (*trunk_id))
        && (BCM_GPORT_IS_MODPORT(ep_info_p->gport)
            || BCM_GPORT_IS_LOCAL(ep_info_p->gport))) {
        /* When Gport is ModPort or Port type, _bcm_esw_gport_resolve()
         * returns Modid and Port value. Use these values to make the DGLP
         * value.
         */
        _BCM_KT2_OAM_MOD_PORT_TO_GLP(unit, module_id, port_id, 0, -1,
                                                               *dst_glp);
        /* get destination PP port */
        BCM_IF_ERROR_RETURN
            (_bcm_kt2_modport_to_pp_port_get(unit, module_id, port_id,
                                             dst_pp_port));
        /* Use the Modid, Port value and determine if the port
         * belongs to a Trunk.
         */
        if (!is_port_mep) {
            /* Dont find all other trunk members if it is a port mep */
            rv = bcm_esw_trunk_find(unit, module_id, port_id, trunk_id);
        }
        if (BCM_SUCCESS(rv) && !is_port_mep) {
            /*
             * Port is member of a valid trunk.
             * Now create the SGLP value from Trunk ID.
             */
            /* Get a member of the trunk belonging to this module */
            if (BCM_SUCCESS(_bcm_esw_trunk_local_members_get(unit, *trunk_id, 1,
                                                          &port_id,
                                                          &local_member_count))) {
                BCM_IF_ERROR_RETURN
                    (_bcm_kt2_pp_port_to_modport_get(unit, port_id,
                                                     &module_id, &port));
                /* Get HW PP port */
                *src_pp_port = port_id;
                /* Encode the trunk into the source GLP; Rx side matches on
                 * the trunk, so dst follows src here. */
                _BCM_KT2_OAM_MOD_PORT_TO_GLP(unit, module_id, port, 1,
                                             *trunk_id, *src_glp);
                *dst_glp = *src_glp;
            } else {
                /* No local member on this module; reuse the modport values. */
                *src_pp_port = *dst_pp_port;
            }
        } else {
            /* Port not a member of trunk. DGLP and SGLP are the same. */
            *src_glp = *dst_glp;
            *src_pp_port = *dst_pp_port;
        }
        glp_valid = 1;
        *tx_gport = ep_info_p->gport;
    }
    /* Virtual-port gports (MiM/MPLS/VLAN) carry the VP in local_id. */
    if ((SOC_GPORT_IS_MIM_PORT(ep_info_p->gport)) ||
        (SOC_GPORT_IS_MPLS_PORT(ep_info_p->gport)) ||
        (SOC_GPORT_IS_VLAN_PORT(ep_info_p->gport))) {
        *svp = local_id;
        if(BCM_TRUNK_INVALID == (*trunk_id)){
            rv = _bcm_esw_modid_is_local(unit, module_id, &is_local);
            if(BCM_SUCCESS(rv) && (is_local)) {
                /* Local VP: derive GLP/PP port directly from modport. */
                _BCM_KT2_OAM_MOD_PORT_TO_GLP(unit, module_id, port_id, 0,
                                             -1, *dst_glp);
                /* get destination PP port */
                BCM_IF_ERROR_RETURN(
                       _bcm_kt2_modport_to_pp_port_get(unit,
                                                       module_id, port_id,
                                                       dst_pp_port));
                *src_glp = *dst_glp;
                *src_pp_port = *dst_pp_port;
            }
            *tx_gport = ep_info_p->gport;
        } else {
            /*
             * CCM Tx is enabled on a trunk member port.
             * trunk_index value is required to derive the Modid and Port info.
             */
            if ((1 == tx_enabled) &&
                (_BCM_OAM_INVALID_INDEX == ep_info_p->trunk_index)) {
                /* Invalid Trunk member index passed. */
                return (BCM_E_PORT);
            }
            BCM_IF_ERROR_RETURN(
               _bcm_kt2_oam_trunk_resolve(unit, *trunk_id, ep_info_p->trunk_index,
                                          src_pp_port, dst_pp_port,
                                          src_glp, dst_glp, tx_gport));
        }
        glp_valid = 1;
        *is_vp_valid = 1;
    }
    /* LinkPhy/CoE case */
    if ((BCM_GPORT_IS_SUBPORT_PORT(ep_info_p->gport))) {
        if ((_BCM_KT2_GPORT_IS_SUBTAG_SUBPORT_PORT(unit, ep_info_p->gport)) ||
            (_BCM_KT2_GPORT_IS_LINKPHY_SUBPORT_PORT(unit, ep_info_p->gport))) {
            BCM_IF_ERROR_RETURN
                (_bcm_kt2_modport_to_pp_port_get(unit, module_id, port_id,
                                                 dst_pp_port));
            *src_pp_port = *dst_pp_port;
            _BCM_KT2_OAM_MOD_PORT_TO_GLP(unit, module_id, port_id, 0, -1,
                                         *dst_glp);
            /* Use the Modid, Port value and determine if the port
             * belongs to a Trunk.
             */
            if (!is_port_mep) {
                rv = bcm_esw_trunk_find(unit, module_id, port_id, trunk_id);
            }
            if (BCM_SUCCESS(rv) && !is_port_mep) {
                /*
                 * Port is member of a valid trunk.
                 * Now create the SGLP value from Trunk ID.
                 */
                /* Get a member of the trunk belonging to this module */
                if (BCM_SUCCESS(_bcm_esw_trunk_local_members_get(unit,
                                                          *trunk_id, 1,
                                                          &port_id,
                                                          &local_member_count))) {
                    BCM_IF_ERROR_RETURN
                        (_bcm_kt2_pp_port_to_modport_get(unit, port_id,
                                                         &module_id, &port));
                    /* Get HW PP port */
                    *src_pp_port = port_id;
                    _BCM_KT2_OAM_MOD_PORT_TO_GLP(unit, module_id, port, 1,
                                                 *trunk_id, *src_glp);
                    *dst_glp = *src_glp;
                }
            } else {
                /* Not in a trunk: SGLP mirrors DGLP. */
                *src_glp = *dst_glp;
            }
            glp_valid = 1;
            BCM_GPORT_MODPORT_SET(*tx_gport, module_id, port_id);
        }
    }
    /*
     * At this point, both src_glp and dst_glp should be valid.
     * Gport types other than TRUNK, MODPORT or LOCAL are not valid.
     */
    if (0 == glp_valid) {
        return (BCM_E_PORT);
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_mepid_validate
* Purpose:
* Validate an endpoint MEP id.
* Parameters:
* unit - (IN) BCM device number
* ep_info_p - (IN) Pointer to endpoint information.
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_mepid_validate(int unit,
                            bcm_oam_endpoint_info_t *ep_info_p)
{
    _bcm_oam_control_t *oc;              /* OAM control structure.        */
    _bcm_oam_group_data_t *group_p;      /* Group this endpoint joins.    */
    _bcm_oam_ep_list_t *node;            /* Endpoint list iterator.       */
    _bcm_oam_hash_data_t *ep_data;       /* Hash data of visited EP.      */
    int name_reuse_ok;                   /* 1 if duplicate name allowed.  */
    /* Get OAM control structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Nothing to check against when the group has no endpoints yet. */
    group_p = &oc->group_info[ep_info_p->group];
    if (group_p->in_use != 1) {
        return BCM_E_NONE;
    }
    /*
     * Walk every endpoint of the group and verify that the MEP name
     * (MEPID) being added is unique.  A clash is tolerated only for a
     * REPLACE of the same endpoint, or when local/remote roles differ.
     */
    for (node = *group_p->ep_list; node != NULL; node = node->next) {
        ep_data = node->ep_data_p;
        if (ep_data == NULL) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Hash data is empty\n")));
            return (BCM_E_INTERNAL);
        }
        if (ep_data->name != ep_info_p->name) {
            continue;
        }
        name_reuse_ok =
            (((ep_info_p->flags & BCM_OAM_ENDPOINT_REPLACE) &&
              (node->ep_data_p->ep_id == ep_info_p->id)) ||
             ((ep_data->flags & BCM_OAM_ENDPOINT_REMOTE) !=
              (ep_info_p->flags & BCM_OAM_ENDPOINT_REMOTE)));
        if (!name_reuse_ok) {
            return BCM_E_PARAM;
        }
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_endpoint_params_validate
* Purpose:
* Validate an endpoint parameters.
* Parameters:
* unit - (IN) BCM device number
* oc - (IN) Pointer to OAM control structure.
* hash_key - (IN) Pointer to endpoint hash key value.
* ep_info_p - (IN) Pointer to endpoint information.
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_endpoint_params_validate(int unit,
                                      _bcm_oam_control_t *oc,
                                      _bcm_oam_hash_key_t *hash_key,
                                      bcm_oam_endpoint_info_t *ep_info_p)
{
    int rv; /* Operation return status. */
    _bcm_oam_hash_data_t h_stored_data; /* Existing entry, if any, for key. */
    int remote = 0; /* 1 when validating a remote (RMEP) endpoint. */
    /* Get MEP remote endpoint status. */
    remote = (ep_info_p->flags & BCM_OAM_ENDPOINT_REMOTE) ? 1 : 0;
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: "
                          "_bcm_kt2_oam_endpoint_params_validate.\n")));
    /* Endpoint must be 802.1ag/Ethernet OAM type. */
    if ((bcmOAMEndpointTypeEthernet != ep_info_p->type)
#if defined(INCLUDE_BHH)
        && /* BHH/Y.1731 OAM type */
        (!BHH_EP_TYPE(ep_info_p))
#endif /* INCLUDE_BHH */
        )
    {
        /* Other OAM types are not supported, return error. */
        return BCM_E_UNAVAIL;
    }
    /* Supported MDL level is 0 - 7 (checked for local MEPs only). */
    if ((!remote) &&
        ((ep_info_p->level < 0) || (ep_info_p->level > _BCM_OAM_EP_LEVEL_MAX))) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: EP Level should be in the range(0-%d).\n"),
                   _BCM_OAM_EP_LEVEL_MAX));
        return (BCM_E_PARAM);
    }
    /* Supported MEPID EP Name range is 1 - 8191, Skipping range check for MIP */
    if (((ep_info_p->name < _BCM_OAM_EP_NAME_MIN)
        || (ep_info_p->name > _BCM_OAM_EP_NAME_MAX)) &&
        !(ep_info_p->flags & BCM_OAM_ENDPOINT_INTERMEDIATE)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MEP Name should be in the range(%d-%d).\n"),
                   _BCM_OAM_EP_NAME_MIN, _BCM_OAM_EP_NAME_MAX));
        return (BCM_E_PARAM);
    }
    /*
     * Check and return error if invalid flag bits are set for remote
     * endpoint.
     */
    if ((ep_info_p->flags & BCM_OAM_ENDPOINT_REMOTE)
        && (ep_info_p->flags & _BCM_OAM_REMOTE_EP_INVALID_FLAGS_MASK)) {
        return (BCM_E_PARAM);
    }
    /*
     * Check and return error if MEPID is not unique within the group
     */
    if(_bcm_kt2_oam_mepid_validate(unit, ep_info_p)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MEPID:%x passed is not unique in group %x\n"),
                   ep_info_p->name, ep_info_p->group));
        return (BCM_E_EXISTS);
    }
    /* For replace operation, endpoint ID is required. */
    if ((ep_info_p->flags & BCM_OAM_ENDPOINT_REPLACE)
        && !(ep_info_p->flags & BCM_OAM_ENDPOINT_WITH_ID)) {
        return (BCM_E_PARAM);
    }
    /* Port based MEP is supported only for down MEP */
    if ((0 == ep_info_p->vlan) && (0 == ep_info_p->inner_vlan) &&
        (ep_info_p->flags & BCM_OAM_ENDPOINT_UP_FACING) && (!remote)) {
        return (BCM_E_PARAM);
    }
    /* If it is a Port + CVlan based MEP and the
     * CVlan (inner_vlan) is 0, return E_PARAM
     */
    if ((0 == ep_info_p->inner_vlan) &&
        (ep_info_p->flags & BCM_OAM_ENDPOINT_MATCH_INNER_VLAN)) {
        return (BCM_E_PARAM);
    }
    /* If it is a Port + S + CVlan based MEP and one of SVlan(vlan)
     * or CVlan (inner_vlan) is 0, return E_PARAM
     */
    if (((0 == ep_info_p->inner_vlan) || (0 == ep_info_p->vlan)) &&
        (ep_info_p->flags & BCM_OAM_ENDPOINT_MATCH_OUTER_AND_INNER_VLAN)) {
        return (BCM_E_PARAM);
    }
    /* Validate endpoint index value. */
    if (ep_info_p->flags & BCM_OAM_ENDPOINT_WITH_ID) {
        _BCM_OAM_EP_INDEX_VALIDATE(ep_info_p->id);
    }
    /* MIP's are not created with non-zero ccm period */
    if ((ep_info_p->flags & BCM_OAM_ENDPOINT_INTERMEDIATE) &&
        (ep_info_p->ccm_period != BCM_OAM_ENDPOINT_CCM_PERIOD_DISABLED)) {
        return (BCM_E_PARAM);
    }
    /* Validate endpoint group id. */
    _BCM_OAM_GROUP_INDEX_VALIDATE(ep_info_p->group);
    /* The group must already have been created. */
    rv = shr_idxres_list_elem_state(oc->group_pool, ep_info_p->group);
    if (BCM_E_EXISTS != rv) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Group (GID:%d) does not exist.\n"),
                   ep_info_p->group));
        return (BCM_E_PARAM);
    }
    if(soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
    /*
     * BHH can have multiple LSPs/endpoint on same port/MDL
     * BHH does not have h/w support. So, return here
     */
    if ((bcmOAMEndpointTypeBHHMPLS == ep_info_p->type) ||
        (bcmOAMEndpointTypeBHHMPLSVccv == ep_info_p->type)) {
        return (BCM_E_NONE);
    }
#endif
    }
#if defined(KEY_PRINT)
    _bcm_oam_hash_key_print(hash_key);
#endif
    /*
     * Lookup using hash key value.
     * Last param value '0' specifies keep the match entry.
     * Value '1' would mean remove the entry from the table.
     * Matched Params:
     *  Group Name + Group ID + Endpoint Name + VLAN + MDL + Gport.
     */
    rv = shr_htb_find(oc->ma_mep_htbl, *hash_key,
                      (shr_htb_data_t *)&h_stored_data, 0);
    if (BCM_SUCCESS(rv)
        && !(ep_info_p->flags & BCM_OAM_ENDPOINT_REPLACE)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint ID=%d %s.\n"),
                   ep_info_p->id, bcm_errmsg(BCM_E_EXISTS)));
        /* Endpoint must not be in use expect for Replace operation. */
        return (BCM_E_EXISTS);
    } else {
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Info: Endpoint ID=%d Available. %s.\n"),
                   ep_info_p->id, bcm_errmsg(rv)));
    }
    return (BCM_E_NONE);
}
/* Programming descriptor for a contiguous [start_index, end_index] range of
 * ING_OAM_FLEXIBLE_DOMAIN_CONTROL entries (C-VLAN domain actions). */
typedef struct _bcm_oam_flexible_oam_domain_vlan_ctrl_type1_s {
    uint32 start_index;          /* First table index to program (inclusive). */
    uint32 end_index;            /* Last table index to program (inclusive). */
    soc_field_t tag_source;      /* Tag-source field to set in each entry. */
    soc_field_t data_processing; /* Data-processing enable field to set. */
    soc_field_t oam_processing;  /* OAM-processing enable field to set. */
} _bcm_oam_flexible_oam_domain_c_vlan_ctrl_type1_t;
/* Programming descriptor for a strided pattern of flexible OAM domain
 * entries: starting at start_index, program no_entries consecutive entries,
 * then advance by increment and repeat until the table end. */
typedef struct _bcm_oam_flexible_oam_domain_vlan_ctrl_type2_s {
    uint32 start_index;           /* First table index of the pattern. */
    uint32 no_entries;            /* Consecutive entries per repetition. */
    uint32 increment;             /* Stride between repetitions. */
    soc_field_t tag_source_1;     /* First tag-source field (0 = none). */
    soc_field_t tag_source_2;     /* Second tag-source field (0 = none). */
    soc_field_t data_processing;  /* Data-processing enable field to set. */
    soc_field_t oam_processing;   /* OAM-processing enable field to set. */
} _bcm_oam_flexible_oam_domain_vlan_ctrl_type2_t;
/*
* Function:
* _bcm_kt2_oam_ing_flexible_oam_domain_ctrl_set
* Purpose:
* Set the default values for ingress flexible oam domain control
* Parameters:
* unit - (IN) BCM device number
 * Returns:
* BCM_E_XXX
* Index to the ing flexible domain control table is
* index[5] = pars_inner_pri_tagged;
* index[4] = pars_inner_tagged;
* index[3] = pars_outer_tagged;
* index[2] = vxlt_inner_tagged;
* index[1] = vxlt_outer_tagged;
* index[0] = svp_valid;
*/
STATIC int
_bcm_kt2_oam_ing_flexible_oam_domain_ctrl_set(int unit)
{
    int rv = BCM_E_NONE;       /* Operation return status. */
    ing_oam_flexible_domain_control_entry_t *ent_buf; /* DMA-able buffer. */
    ing_oam_flexible_domain_control_entry_t *ent;     /* Current entry. */
    int max_index = 0;         /* Highest valid table index. */
    int index = 0;             /* Control-table iterator. */
    int end_index = 0;         /* One past last index of current run. */
    int loop_index = 0;        /* Entry index within the current run. */
    int entry_mem_size = 0;    /* Size of one table entry in bytes. */
    int entry_index = 0;       /* Start index of current repetition. */
    uint32 action_set = 1;     /* Value written into each enable field. */
    _bcm_oam_flexible_oam_domain_c_vlan_ctrl_type1_t  c_vlan_ctrl_info[] = {
        /* Set values for C VLAN MEP pre-VXLT actions */
        /* Action for incoming untagged packets */
        {  4,  7, CVLAN_DOMAIN_PRE_VXLT_CVLAN_TAG_SOURCEf,
                  CVLAN_DOMAIN_PRE_VXLT_DATA_PROCESSING_ENABLEf,
                  CVLAN_DOMAIN_PRE_VXLT_OAM_PROCESSING_ENABLEf },
        /* Action for incoming single inner tagged but not
           priority tagged packets */
        { 16, 23, CVLAN_DOMAIN_PRE_VXLT_CVLAN_TAG_SOURCEf,
                  CVLAN_DOMAIN_PRE_VXLT_DATA_PROCESSING_ENABLEf,
                  CVLAN_DOMAIN_PRE_VXLT_OAM_PROCESSING_ENABLEf },
        /* Action for incoming single inner priority tagged packets */
        { 52, 55, CVLAN_DOMAIN_PRE_VXLT_CVLAN_TAG_SOURCEf,
                  CVLAN_DOMAIN_PRE_VXLT_DATA_PROCESSING_ENABLEf,
                  CVLAN_DOMAIN_PRE_VXLT_OAM_PROCESSING_ENABLEf },
        /* Set values for C VLAN MEP post-VXLT actions */
        /* Action for incoming untagged packets */
        {  4,  7, CVLAN_DOMAIN_POST_VXLT_CVLAN_TAG_SOURCEf,
                  CVLAN_DOMAIN_POST_VXLT_DATA_PROCESSING_ENABLEf,
                  CVLAN_DOMAIN_POST_VXLT_OAM_PROCESSING_ENABLEf },
        /* Action for incoming single inner tagged but not
           priority tagged packets */
        { 20, 23, CVLAN_DOMAIN_POST_VXLT_CVLAN_TAG_SOURCEf,
                  CVLAN_DOMAIN_POST_VXLT_DATA_PROCESSING_ENABLEf,
                  CVLAN_DOMAIN_POST_VXLT_OAM_PROCESSING_ENABLEf },
        /* Action for incoming single inner priority tagged packets */
        { 52, 55, CVLAN_DOMAIN_POST_VXLT_CVLAN_TAG_SOURCEf,
                  CVLAN_DOMAIN_POST_VXLT_DATA_PROCESSING_ENABLEf,
                  CVLAN_DOMAIN_POST_VXLT_OAM_PROCESSING_ENABLEf },
        { 0 } /* table terminator */
    };
    _bcm_oam_flexible_oam_domain_vlan_ctrl_type2_t  s_vlan_ctrl_info[] = {
        /* Set values for S VLAN MEP actions */
        /* Action for incoming packet with single outer tagged after
           VXLT */
        { 2, 2, 8, SVLAN_DOMAIN_SVLAN_TAG_SOURCEf, 0,
                   SVLAN_DOMAIN_DATA_PROCESSING_ENABLEf,
                   SVLAN_DOMAIN_OAM_PROCESSING_ENABLEf},
        /* Action for incoming packet with double tag after VXLT */
        { 6, 2, 8, SVLAN_DOMAIN_SVLAN_TAG_SOURCEf, 0,
                   SVLAN_DOMAIN_DATA_PROCESSING_ENABLEf,
                   SVLAN_DOMAIN_OAM_PROCESSING_ENABLEf },
        /* Set values for S+C VLAN MEP actions */
        /* Action for incoming packet with double tag after VXLT */
        { 6, 2, 8, SVLAN_CVLAN_DOMAIN_SVLAN_TAG_SOURCEf,
                   SVLAN_CVLAN_DOMAIN_CVLAN_TAG_SOURCEf,
                   SVLAN_CVLAN_DOMAIN_DATA_PROCESSING_ENABLEf,
                   SVLAN_CVLAN_DOMAIN_OAM_PROCESSING_ENABLEf },
        { 6, 2, 8, 0, 0,
                   SVLAN_DOMAIN_DATA_PROCESSING_ENABLEf,
                   SVLAN_DOMAIN_OAM_PROCESSING_ENABLEf },
        /* NOTE(review): the following two rows pass OAM/DATA fields in the
         * data/oam slots (reversed relative to the rows above); preserved
         * as-is from the original table — confirm against hardware spec. */
        { 6, 2, 8, 0, 0,
                   CVLAN_DOMAIN_POST_VXLT_OAM_PROCESSING_ENABLEf,
                   CVLAN_DOMAIN_POST_VXLT_DATA_PROCESSING_ENABLEf },
        { 6, 2, 8, 0,
                   0,
                   CVLAN_DOMAIN_PRE_VXLT_OAM_PROCESSING_ENABLEf,
                   CVLAN_DOMAIN_PRE_VXLT_DATA_PROCESSING_ENABLEf },
        { 0, 0 } /* table terminator */
    };
    _bcm_oam_flexible_oam_domain_c_vlan_ctrl_type1_t  *ctrl_info;
    _bcm_oam_flexible_oam_domain_vlan_ctrl_type2_t  *s_ctrl_info;

    /* Read the table entries into the buffer. */
    max_index = soc_mem_index_max(unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm);
    entry_mem_size = sizeof(ing_oam_flexible_domain_control_entry_t);
    /* Allocate buffer to store the DMAed table entries. */
    ent_buf = soc_cm_salloc(unit, entry_mem_size * (max_index + 1),
                            "OAM flexible domain control table entry buffer");
    if (NULL == ent_buf) {
        return (BCM_E_MEMORY);
    }
    /*
     * Initialize the entry buffer.
     * Fix: previously sized with sizeof(entry_mem_size) (== sizeof(int)),
     * which zeroed only a fraction of the buffer and left the remainder
     * uninitialized before it was written to hardware.
     */
    sal_memset(ent_buf, 0, entry_mem_size * (max_index + 1));
    /* Set action for C vlan: each row covers [start_index, end_index]. */
    for (index = 0; ;index++) {
        ctrl_info = &c_vlan_ctrl_info[index];
        if(ctrl_info->start_index == 0) {
            /* End of table */
            break;
        }
        for (entry_index = ctrl_info->start_index;
             entry_index <= ctrl_info->end_index; entry_index++) {
            ent = soc_mem_table_idx_to_pointer
                      (unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                       ing_oam_flexible_domain_control_entry_t *,
                       ent_buf, entry_index);
            soc_mem_field_set(unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                        (uint32 *)ent, ctrl_info->tag_source, &action_set);
            soc_mem_field_set(unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                        (uint32 *)ent, ctrl_info->data_processing, &action_set);
            soc_mem_field_set(unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                        (uint32 *)ent, ctrl_info->oam_processing, &action_set);
        }
    }
    /* Set action for S and S+C vlan: strided pattern per descriptor. */
    for (index = 0; ;index++) {
        s_ctrl_info = &s_vlan_ctrl_info[index];
        if(s_ctrl_info->start_index == 0) {
            /* End of table */
            break;
        }
        entry_index = s_ctrl_info->start_index;
        loop_index = s_ctrl_info->start_index;
        end_index = entry_index + s_ctrl_info->no_entries;
        while (entry_index <= max_index) {
            /* Clamp to max_index so a run that straddles the table end
             * cannot write past the allocated buffer. */
            for (; (loop_index < end_index) && (loop_index <= max_index);
                 loop_index++) {
                ent = soc_mem_table_idx_to_pointer(unit,
                          ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                          ing_oam_flexible_domain_control_entry_t *,
                          ent_buf, loop_index);
                if (s_ctrl_info->tag_source_1 != 0) {
                    soc_mem_field_set(unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                      (uint32 *)ent, s_ctrl_info->tag_source_1, &action_set);
                }
                if (s_ctrl_info->tag_source_2 != 0) {
                    soc_mem_field_set(unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                      (uint32 *)ent, s_ctrl_info->tag_source_2, &action_set);
                }
                soc_mem_field_set(unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                    (uint32 *)ent, s_ctrl_info->data_processing, &action_set);
                soc_mem_field_set(unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                    (uint32 *)ent, s_ctrl_info->oam_processing, &action_set);
            }
            entry_index += s_ctrl_info->increment;
            loop_index = entry_index;
            end_index = (entry_index + s_ctrl_info->no_entries);
        }
    }
    /* Flush the fully-programmed buffer to hardware in one DMA write. */
    rv = soc_mem_write_range(unit, ING_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                             MEM_BLOCK_ALL, 0, max_index, ent_buf);
    soc_cm_sfree(unit, ent_buf);
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_egr_flexible_oam_domain_ctrl_set
* Purpose:
* Set the default values for egress flexible oam domain control
* Parameters:
* unit - (IN) BCM device number
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_egr_flexible_oam_domain_ctrl_set(int unit)
{
    int rv = BCM_E_NONE;       /* Operation return status. */
    egr_oam_flexible_domain_control_entry_t *egr_ent_buf; /* DMA buffer. */
    egr_oam_flexible_domain_control_entry_t *ent;         /* Current entry. */
    int max_index = 0;         /* Highest valid table index. */
    int index = 0;             /* Control-table iterator. */
    int end_index = 0;         /* One past last index of current run. */
    int loop_index = 0;        /* Entry index within the current run. */
    int entry_mem_size = 0;    /* Size of one table entry in bytes. */
    int entry_index = 0;       /* Start index of current repetition. */
    uint32 action_set = 1;     /* Value written into each enable field. */
    _bcm_oam_flexible_oam_domain_vlan_ctrl_type2_t  vlan_ctrl_info[] = {
        /* CVLAN MEP before VXLT */
        { 16, 2, 4, 0, 0,
                    CVLAN_DOMAIN_PRE_VXLT_DATA_PROCESSING_ENABLEf,
                    CVLAN_DOMAIN_PRE_VXLT_OAM_PROCESSING_ENABLEf },
        /* CVLAN MEP after VXLT */
        {  4, 2, 8, CVLAN_DOMAIN_POST_VXLT_CVLAN_TAG_SOURCEf, 0,
                    CVLAN_DOMAIN_POST_VXLT_DATA_PROCESSING_ENABLEf,
                    CVLAN_DOMAIN_POST_VXLT_OAM_PROCESSING_ENABLEf },
        /* SVLAN MEP */
        {  8, 8, 16, 0, 0,
                     SVLAN_DOMAIN_DATA_PROCESSING_ENABLEf,
                     SVLAN_DOMAIN_OAM_PROCESSING_ENABLEf },
        /* S+C VLAN MEP */
        { 14, 2, 18, SVLAN_CVLAN_DOMAIN_CVLAN_TAG_SOURCEf, 0,
                     SVLAN_CVLAN_DOMAIN_DATA_PROCESSING_ENABLEf,
                     SVLAN_CVLAN_DOMAIN_OAM_PROCESSING_ENABLEf },
        { 24, 8, 8, 0, 0,
                    SVLAN_CVLAN_DOMAIN_DATA_PROCESSING_ENABLEf,
                    SVLAN_CVLAN_DOMAIN_OAM_PROCESSING_ENABLEf },
        { 24, 8, 8, 0, 0,
                    CVLAN_DOMAIN_PRE_VXLT_DATA_PROCESSING_ENABLEf,
                    CVLAN_DOMAIN_PRE_VXLT_OAM_PROCESSING_ENABLEf },
        { 26, 2, 2, 0, 0,
                    SVLAN_DOMAIN_DATA_PROCESSING_ENABLEf,
                    SVLAN_DOMAIN_OAM_PROCESSING_ENABLEf },
        { 30, 2, 2, 0, 0,
                    SVLAN_DOMAIN_DATA_PROCESSING_ENABLEf,
                    SVLAN_DOMAIN_OAM_PROCESSING_ENABLEf },
        { 30, 2, 2, CVLAN_DOMAIN_POST_VXLT_CVLAN_TAG_SOURCEf, 0,
                    CVLAN_DOMAIN_POST_VXLT_DATA_PROCESSING_ENABLEf,
                    CVLAN_DOMAIN_POST_VXLT_OAM_PROCESSING_ENABLEf },
        { 0, 0 } /* table terminator */
    };
    _bcm_oam_flexible_oam_domain_vlan_ctrl_type2_t  *ctrl_info;

    /* Read the table entries into the buffer. */
    max_index = soc_mem_index_max(unit, EGR_OAM_FLEXIBLE_DOMAIN_CONTROLm);
    entry_mem_size = sizeof(egr_oam_flexible_domain_control_entry_t);
    /* Allocate buffer to store the DMAed table entries. */
    egr_ent_buf = soc_cm_salloc(unit, entry_mem_size * (max_index + 1),
                                "OAM flexible domain control table entry buffer");
    if (NULL == egr_ent_buf) {
        return (BCM_E_MEMORY);
    }
    /*
     * Initialize the entry buffer.
     * Fix: previously sized with sizeof(entry_mem_size) (== sizeof(int)),
     * which zeroed only a fraction of the buffer and left the remainder
     * uninitialized before it was written to hardware.
     */
    sal_memset(egr_ent_buf, 0, entry_mem_size * (max_index + 1));
    /* Set action for C, S and S+C vlan: strided pattern per descriptor. */
    for (index = 0; ;index++) {
        ctrl_info = &vlan_ctrl_info[index];
        if(ctrl_info->start_index == 0) {
            /* End of table */
            break;
        }
        entry_index = ctrl_info->start_index;
        loop_index = ctrl_info->start_index;
        end_index = entry_index + ctrl_info->no_entries;
        while (entry_index <= max_index) {
            /* Clamp to max_index so a run that straddles the table end
             * cannot write past the allocated buffer. */
            for (; (loop_index < end_index) && (loop_index <= max_index);
                 loop_index++) {
                ent = soc_mem_table_idx_to_pointer
                          (unit, EGR_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                           egr_oam_flexible_domain_control_entry_t *,
                           egr_ent_buf, loop_index);
                if (ctrl_info->tag_source_1 != 0) {
                    soc_mem_field_set(unit, EGR_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                        (uint32 *)ent, ctrl_info->tag_source_1, &action_set);
                }
                if (ctrl_info->tag_source_2 != 0) {
                    soc_mem_field_set(unit, EGR_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                        (uint32 *)ent, ctrl_info->tag_source_2, &action_set);
                }
                soc_mem_field_set(unit, EGR_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                    (uint32 *)ent, ctrl_info->data_processing, &action_set);
                soc_mem_field_set(unit, EGR_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                    (uint32 *)ent, ctrl_info->oam_processing, &action_set);
            }
            entry_index += ctrl_info->increment;
            loop_index = entry_index;
            end_index = (entry_index + ctrl_info->no_entries);
        }
    }
    /* Flush the fully-programmed buffer to hardware in one DMA write. */
    rv = soc_mem_write_range(unit, EGR_OAM_FLEXIBLE_DOMAIN_CONTROLm,
                             MEM_BLOCK_ALL, 0, max_index, egr_ent_buf);
    soc_cm_sfree(unit, egr_ent_buf);
    return rv;
}
/*
* Function:
* _bcm_kt2_oam_flexible_oam_domain_ctrl_set
* Purpose:
* Set the default values for flexible oam domain control
* Parameters:
* unit - (IN) BCM device number
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_flexible_oam_domain_ctrl_set(int unit)
{
    /* Program the ingress flexible domain control table first; any
     * failure aborts before the egress side is touched. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ing_flexible_oam_domain_ctrl_set(unit));
    /* Then program the egress flexible domain control table. */
    return _bcm_kt2_oam_egr_flexible_oam_domain_ctrl_set(unit);
}
/* Per-register row of ingress flexible drop-control values; one field
 * value per control written by _bcm_kt2_oam_ing_flexible_drop_ctrl_set. */
typedef struct _bcm_oam_ing_flexible_drop_ctrl_s {
    soc_reg_t drop_ctrl_reg;             /* Target drop-control register. */
    uint32 vfp_drop_ctrl;                /* VFP_DROP_CTRLf value. */
    uint32 olp_drop_ctrl;                /* OLP_ERROR_DROP_CTRLf value. */
    uint32 spst_drop_ctrl;               /* SPST-not-forwarding drop value. */
    uint32 tag_drop_ctrl;                /* DISCARD_TAG_UNTAG_CTRLf value. */
    uint32 cvlan_bfr_vxlt_drop_ctrl;     /* CVLAN pre-VXLT miss drop value. */
    uint32 cvlan_after_vxlt_drop_ctrl;   /* CVLAN post-VXLT miss drop value. */
    uint32 e2e_drop_ctrl;                /* Errored E2E packet drop value. */
    uint32 vlan_drop_ctrl;               /* VLAN_DROP_VECTOR_CTRLf value. */
    uint32 disc_drop_ctrl;               /* DISC_DROP_VECTOR_CTRLf value. */
} _bcm_oam_ing_flexible_drop_ctrl_t;
/* Per-register row of egress flexible drop-control values; one field
 * value per control written by _bcm_kt2_oam_egr_flexible_drop_ctrl_set. */
typedef struct _bcm_oam_egr_flexible_drop_ctrl_s {
    soc_reg_t drop_ctrl_reg;              /* Target 64-bit drop-control reg. */
    uint32 egr_drop_ctrl;                 /* EGR_DROP_VECTOR_CTRLf value. */
    uint32 evxlt_payload_drop_ctrl;       /* EVXLT payload miss drop value. */
    uint32 cvlan_after_vxlt_drop_ctrl;    /* CVLAN post-VXLT miss drop value. */
    uint32 cvlan_before_vxlt_drop_ctrl;   /* CVLAN pre-VXLT miss drop value. */
} _bcm_oam_egr_flexible_drop_ctrl_t;
/*
* Function:
* _bcm_kt2_oam_ing_flexible_drop_ctrl_set
* Purpose:
* Set the default values for flexible ingress drop control
* Parameters:
* unit - (IN) BCM device number
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_ing_flexible_drop_ctrl_set(int unit)
{
    uint32 en_rval = 0; /* Working copy of the register value. */
    int index = 0; /* Row iterator over ing_flex_drop[]. */
    _bcm_oam_ing_flexible_drop_ctrl_t *flex_drop_info; /* Current row. */
    /* One row per OAM interface drop-control register; values are the
     * per-field settings in _bcm_oam_ing_flexible_drop_ctrl_t order. */
    _bcm_oam_ing_flexible_drop_ctrl_t ing_flex_drop[] = {
        { OAM_PORT_INTERFACE_DROP_CONTROLr, 0, 1, 0, 0, 0, 0, 1, 0, 0xe01c1 },
        { OAM_C_INTERFACE_DROP_CONTROLr, 1, 1, 0, 0, 0, 1, 1, 0, 0xe01fd },
        { OAM_S_INTERFACE_DROP_CONTROLr, 1, 1, 0, 1, 1, 1, 1, 5, 0xe01fd },
        { OAM_S_C_INTERFACE_DROP_CONTROLr, 1, 1, 1, 1, 1, 1, 1, 7, 0xe01fd },
        { OAM_SVP_INTERFACE_DROP_CONTROLr, 1, 1, 1, 1, 1, 1, 1, 7, 0xe01fd },
        { 0 }, /* End of table */
    };
    /* Read-modify-write each register with the values from its row. */
    for (index = 0; ;index++) {
        flex_drop_info = &ing_flex_drop[index];
        if(flex_drop_info->drop_ctrl_reg == 0) {
            /* End of table */
            break;
        }
        SOC_IF_ERROR_RETURN(soc_reg32_get(unit, flex_drop_info->drop_ctrl_reg,
                                          REG_PORT_ANY, 0, &en_rval));
        soc_reg_field_set(unit, flex_drop_info->drop_ctrl_reg,
                          &en_rval, VFP_DROP_CTRLf,
                          flex_drop_info->vfp_drop_ctrl);
        soc_reg_field_set(unit, flex_drop_info->drop_ctrl_reg,
                          &en_rval, OLP_ERROR_DROP_CTRLf,
                          flex_drop_info->olp_drop_ctrl);
        soc_reg_field_set(unit, flex_drop_info->drop_ctrl_reg,
                          &en_rval, SPST_NOT_IN_FORWARDING_STATE_DROP_CTRLf,
                          flex_drop_info->spst_drop_ctrl);
        soc_reg_field_set(unit, flex_drop_info->drop_ctrl_reg,
                          &en_rval, DISCARD_TAG_UNTAG_CTRLf,
                          flex_drop_info->tag_drop_ctrl);
        soc_reg_field_set(unit, flex_drop_info->drop_ctrl_reg,
                          &en_rval,
                          CVLAN_INTF_BEFORE_VXLT_VXLT_MISS_DROP_CTRLf,
                          flex_drop_info->cvlan_bfr_vxlt_drop_ctrl);
        soc_reg_field_set(unit, flex_drop_info->drop_ctrl_reg,
                          &en_rval,
                          CVLAN_INTF_AFTER_VXLT_VXLT_MISS_DROP_CTRLf,
                          flex_drop_info->cvlan_after_vxlt_drop_ctrl);
        soc_reg_field_set(unit, flex_drop_info->drop_ctrl_reg,
                          &en_rval, ERRORED_E2E_PKT_DROP_CTRLf,
                          flex_drop_info->e2e_drop_ctrl);
        soc_reg_field_set(unit, flex_drop_info->drop_ctrl_reg,
                          &en_rval, VLAN_DROP_VECTOR_CTRLf,
                          flex_drop_info->vlan_drop_ctrl);
        soc_reg_field_set(unit, flex_drop_info->drop_ctrl_reg,
                          &en_rval, DISC_DROP_VECTOR_CTRLf,
                          flex_drop_info->disc_drop_ctrl);
        SOC_IF_ERROR_RETURN(soc_reg32_set(unit, flex_drop_info->drop_ctrl_reg,
                                          REG_PORT_ANY, 0, en_rval));
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_egr_flexible_drop_ctrl_set
* Purpose:
* Set the default values for flexible egress drop control
* Parameters:
* unit - (IN) BCM device number
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_egr_flexible_drop_ctrl_set(int unit)
{
    uint64 en_rval, set_val; /* Working register value / field value. */
    int index = 0; /* Row iterator over egr_flex_drop[]. */
    _bcm_oam_egr_flexible_drop_ctrl_t *flex_drop_info; /* Current row. */
    /* One row per egress OAM interface drop-control register; values are
     * the per-field settings in _bcm_oam_egr_flexible_drop_ctrl_t order. */
    _bcm_oam_egr_flexible_drop_ctrl_t egr_flex_drop[] = {
        { EGR_OAM_PORT_INTERFACE_DROP_CONTROL_64r, 0x1e0787d, 1, 1, 1 },
        { EGR_OAM_C_INTERFACE_DROP_CONTROL_64r, 0x1e0787c, 1, 1, 0 },
        { EGR_OAM_S_INTERFACE_DROP_CONTROL_64r, 0x1e06848, 1, 0, 0 },
        { EGR_OAM_S_C_INTERFACE_DROP_CONTROL_64r, 0x1e06048, 1, 0, 0 },
        { EGR_OAM_DVP_INTERFACE_DROP_CONTROL_64r, 0x1e06048, 1, 0, 0 },
        { 0 }, /* End of table */
    };
    COMPILER_64_ZERO(en_rval);
    /* Read-modify-write each 64-bit register with its row's values. */
    for (index = 0; ;index++) {
        flex_drop_info = &egr_flex_drop[index];
        if(flex_drop_info->drop_ctrl_reg == 0) {
            /* End of table */
            break;
        }
        SOC_IF_ERROR_RETURN(soc_reg64_get(unit, flex_drop_info->drop_ctrl_reg,
                                          REG_PORT_ANY, 0, &en_rval));
        COMPILER_64_SET(set_val, 0, flex_drop_info->egr_drop_ctrl);
        soc_reg64_field_set(unit, flex_drop_info->drop_ctrl_reg,
                            &en_rval, EGR_DROP_VECTOR_CTRLf,
                            set_val);
        COMPILER_64_SET(set_val, 0, flex_drop_info->evxlt_payload_drop_ctrl);
        soc_reg64_field_set(unit, flex_drop_info->drop_ctrl_reg,
                            &en_rval,
                            EVXLT_APPLIED_TO_PAYLOAD_VXLT_MISS_DROP_CTRLf,
                            set_val);
        COMPILER_64_SET(set_val, 0, flex_drop_info->cvlan_after_vxlt_drop_ctrl);
        soc_reg64_field_set(unit, flex_drop_info->drop_ctrl_reg,
                            &en_rval,
               EVXLT_APPLIED_TO_OUTER_L2_CVLAN_INTF_AFTER_VXLT_VXLT_MISS_DROP_CTRLf,
                            set_val);
        COMPILER_64_SET(set_val, 0, flex_drop_info->cvlan_before_vxlt_drop_ctrl);
        soc_reg64_field_set(unit, flex_drop_info->drop_ctrl_reg,
                            &en_rval,
               EVXLT_APPLIED_TO_OUTER_L2_CVLAN_INTF_BEFORE_VXLT_VXLT_MISS_DROP_CTRLf,
                            set_val);
        SOC_IF_ERROR_RETURN(soc_reg64_set(unit, flex_drop_info->drop_ctrl_reg,
                                          REG_PORT_ANY, 0, en_rval));
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_flexible_drop_ctrl_set
* Purpose:
* Set the default values for flexible ingress and egress drop control
* Parameters:
* unit - (IN) BCM device number
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_flexible_drop_ctrl_set(int unit)
{
    /* Program the ingress flexible drop controls first; any failure
     * aborts before the egress side is touched. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ing_flexible_drop_ctrl_set(unit));
    /* Then program the egress flexible drop controls. */
    return _bcm_kt2_oam_egr_flexible_drop_ctrl_set(unit);
}
/*
* Function:
* _bcm_kt2_oam_s_intf_passive_proc_ctrl_set
* Purpose:
* Set the default values for s-interface passive processing control
* Parameters:
* unit - (IN) BCM device number
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_s_intf_passive_proc_ctrl_set(int unit)
{
    uint32 en_rval = 0; /* Working copy of the register value. */
    /* Set ingress s_intf passive processing control */
    SOC_IF_ERROR_RETURN(
           soc_reg32_get(unit, OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr,
                         REG_PORT_ANY, 0, &en_rval));
    /* Drop packets whose SPST is not in forwarding state. */
    soc_reg_field_set(unit, OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr,
                      &en_rval, SPST_NOT_IN_FORWARDING_STATE_DROP_CTRLf,
                      0x1);
    /* Set ENIFILTER_DROP, INVALID_TPID_DROP and INVALID_VLAN_DROP */
    soc_reg_field_set(unit, OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr,
                      &en_rval, VLAN_DROP_VECTOR_CTRLf,
                      0x7);
    SOC_IF_ERROR_RETURN(
        soc_reg32_set(unit, OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr,
                      REG_PORT_ANY, 0, en_rval));
    en_rval = 0;
    /* Set egress s_intf passive processing control */
    SOC_IF_ERROR_RETURN(
         soc_reg32_get(unit, EGR_OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr,
                       REG_PORT_ANY, 0, &en_rval));
    /* Set NOT_VLAN_MEMBER_DROP, STG_BLOCK_DROP, STG_DISABLE_DROP
       For details - refer to EGR_DROP_VECTOR */
    soc_reg_field_set(unit, EGR_OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr,
                      &en_rval, EGR_DROP_VECTOR_CTRLf,
                      0x1030);
    SOC_IF_ERROR_RETURN(soc_reg32_set(unit,
                        EGR_OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr,
                        REG_PORT_ANY, 0, en_rval));
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_hg_olp_enable
* Purpose:
* Enable OLP handling on HG ports
* Parameters:
* unit - (IN) BCM device number
 * Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_hg_olp_enable(int unit)
{
    bcm_pbmp_t ports;                       /* All ports on the unit. */
    bcm_port_t port;                        /* Port iterator. */
    iarb_ing_physical_port_entry_t entry;   /* Per-port IARB entry. */
    BCM_PBMP_ASSIGN(ports, PBMP_PORT_ALL(unit));
    /*
     * Enable OLP on every Higig port and disable it elsewhere.
     * Fix: the original code modified 'entry' in the non-HG branch without
     * first reading that port's entry (using stale/uninitialized stack
     * data) and wrote HG entries to hardware twice. Every port now gets a
     * single read-modify-write of its own entry.
     */
    PBMP_ITER(ports, port) {
        SOC_IF_ERROR_RETURN(soc_mem_read(unit, IARB_ING_PHYSICAL_PORTm,
                                         MEM_BLOCK_ANY, port, &entry));
        soc_IARB_ING_PHYSICAL_PORTm_field32_set(unit, &entry, OLP_ENABLEf,
                                    IS_HG_PORT(unit, port) ? 1 : 0);
        SOC_IF_ERROR_RETURN(soc_mem_write(unit, IARB_ING_PHYSICAL_PORTm,
                                          MEM_BLOCK_ALL, port, &entry));
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_olp_header_type_mapping_set
* Purpose:
* Set default olp header type mapping
* Parameters:
* unit - (IN) BCM device number
* Retruns:
* BCM_E_XXX
* MEP_TYPE COMPRESSED_HDR_TYPE HDR_TYPE HDR_SUBTYPE
*
* ==========================================================================
* Downmep/IFP 3'b000 8'd1(L2_HDR) 8'd0(NULL)
* Downmep/IFP 3'b001 8'd0(OAM_HDR) 8'd2(CCM/BHH-CCM)
* Downmep/IFP 3'b010 8'd0(OAM_HDR) 8'd3(BFD)
* Downmep/IFP 3'b011 8'd0(OAM_HDR) 8'd4(LM/DM)
* Upmep 3'b000 N/A N/A
* Upmep 3'b001 8'd0(OAM_HDR) 8'd5(CCM)
* Upmep 3'b010 N/A N/A
* Upmep 3'b011 8'd0(OAM_HDR) 8'd6(LM/DM)
*/
STATIC int
_bcm_kt2_oam_olp_header_type_mapping_set(int unit)
{
    int index = 0;                               /* Mapping-table iterator. */
    egr_olp_header_type_mapping_entry_t entry;   /* Entry under construction. */
    /* Program one OAM-header mapping entry per non-reused subtype. */
    for (index = 0; index < kt2_olp_hdr_type_count; index++) {
        if (kt2_olp_hdr_type_mapping[index].reuse) {
            continue;
        }
        /*
         * Fix: start from a zeroed entry.  The original code set only the
         * HDR_TYPE/HDR_SUBTYPE fields and wrote the otherwise-uninitialized
         * stack entry to hardware.
         */
        sal_memset(&entry, 0, sizeof(entry));
        soc_EGR_OLP_HEADER_TYPE_MAPPINGm_field32_set(unit,
                                           &entry,
                                           HDR_TYPEf,
                                           _BCM_KT2_OLP_OAM_HDR);
        soc_EGR_OLP_HEADER_TYPE_MAPPINGm_field32_set(
                                  unit,
                                  &entry,
                                  HDR_SUBTYPEf,
                                  kt2_olp_hdr_type_mapping[index].subtype);
        SOC_IF_ERROR_RETURN(
                 WRITE_EGR_OLP_HEADER_TYPE_MAPPINGm(
                                  unit,
                                  MEM_BLOCK_ALL,
                                  kt2_olp_hdr_type_mapping[index].mem_index,
                                  &entry));
    }
    /* Program L2 HDR at the first entry */
    sal_memset(&entry, 0, sizeof(entry));
    soc_EGR_OLP_HEADER_TYPE_MAPPINGm_field32_set(unit,
                                           &entry,
                                           HDR_TYPEf,
                                           _BCM_KT2_OLP_L2_HDR);
    SOC_IF_ERROR_RETURN(
            WRITE_EGR_OLP_HEADER_TYPE_MAPPINGm(unit,
                                               MEM_BLOCK_ALL,
                                               0,
                                               &entry));
    return BCM_E_NONE;
}
/*
* Function: _bcm_kt2_oam_olp_fp_hw_index_get
*
* Purpose:
* Get OLP_HDR_TYPE_COMPRESSED corresponding to subtype
* Parameters:
* unit - (IN) BCM device number
* Returns:
* BCM_E_XXX
*/
/*
 * Function: _bcm_kt2_oam_olp_fp_hw_index_get
 *
 * Purpose:
 *     Map a field OLP header type to its OLP_HDR_TYPE_COMPRESSED index in
 *     EGR_OLP_HEADER_TYPE_MAPPING.
 * Parameters:
 *     unit         - (IN)  BCM device number
 *     olp_hdr_type - (IN)  Field OLP header type to look up
 *     hwindex      - (OUT) Matching hardware memory index
 * Returns:
 *     BCM_E_PARAM     - Up-MEP types (not programmable via IFP on KT2)
 *     BCM_E_NOT_FOUND - No mapping entry for this header type
 *     BCM_E_NONE      - Success
 */
int
_bcm_kt2_oam_olp_fp_hw_index_get(int unit,
                                 bcm_field_olp_header_type_t olp_hdr_type,
                                 int *hwindex)
{
    int map_idx;
    /* KT2 does not allow IFP to program UpMEP header type for
     * ADD_OLP_HEADER action, as the OLP_HDR_TYPE_COMPRESSED in
     * FP_POLICY_TABLE is not wide enough to index Up MEP subtypes
     * in EGR_OLP_HEADER_TYPE_MAPPING
     */
    if ((bcmFieldOlpHeaderTypeEthOamUpMepCcm == olp_hdr_type) ||
        (bcmFieldOlpHeaderTypeEthOamUpMepLm == olp_hdr_type) ||
        (bcmFieldOlpHeaderTypeEthOamUpMepDm == olp_hdr_type)) {
        return BCM_E_PARAM;
    }
    /* Linear scan of the static mapping table for the requested type. */
    for (map_idx = 0; map_idx < kt2_olp_hdr_type_count; map_idx++) {
        if (kt2_olp_hdr_type_mapping[map_idx].field_olp_hdr_type
                == olp_hdr_type) {
            *hwindex = kt2_olp_hdr_type_mapping[map_idx].mem_index;
            return BCM_E_NONE;
        }
    }
    return BCM_E_NOT_FOUND;
}
/*
 * Function: _bcm_kt2_oam_olp_hw_index_olp_type_get
 *
 * Purpose:
 *     Get the field OLP header type corresponding to an
 *     OLP_HDR_TYPE_COMPRESSED hardware index.
 * Parameters:
 *     unit         - (IN)  BCM device number
 *     hwindex      - (IN)  OLP_HDR_TYPE_COMPRESSED memory index
 *     olp_hdr_type - (OUT) Matching field OLP header type
 * Returns:
 *     BCM_E_NONE  - matching table entry found
 *     BCM_E_PARAM - no mapping entry uses 'hwindex'
 */
int
_bcm_kt2_oam_olp_hw_index_olp_type_get(int unit,
                                       int hwindex,
                                       bcm_field_olp_header_type_t *olp_hdr_type)
{
    int index;
    for (index = 0; index < kt2_olp_hdr_type_count; index++) {
        if (kt2_olp_hdr_type_mapping[index].mem_index == hwindex) {
            /* Use the matching row ('index'), not 'hwindex': the previous
             * code indexed the array with 'hwindex', which reads the wrong
             * row whenever mem_index differs from the array position. */
            *olp_hdr_type =
                kt2_olp_hdr_type_mapping[index].field_olp_hdr_type;
            return BCM_E_NONE;
        }
    }
    return BCM_E_PARAM;
}
/*
 * Function:
 *      _bcm_kt2_oam_drop_ctrl_set
 * Purpose:
 *      Set OAM drop control so that OAM packets carrying an unknown
 *      opcode/version are not dropped at the IFP stage.
 * Parameters:
 *      unit - (IN) BCM device number
 * Returns:
 *      BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_drop_ctrl_set(int unit)
{
    uint32 regval = 0;

    /* Clear the drop bit and commit the whole register. */
    soc_reg_field_set(unit, OAM_DROP_CONTROLr, &regval,
                      IFP_OAM_UNKNOWN_OPCODE_VERSION_DROPf, 0);
    SOC_IF_ERROR_RETURN(WRITE_OAM_DROP_CONTROLr(unit, regval));
    return (BCM_E_NONE);
}
/*
 * Function:
 *      _bcm_kt2_oam_lm_cng_cpu_ctrl_set_default
 * Purpose:
 *      Program default OAM LM CNG and LM CPU data controls.
 * Parameters:
 *      unit - (IN) BCM device number
 * Returns:
 *      BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_lm_cng_cpu_ctrl_set_default(int unit)
{
    uint32 cng_val = 0;
    uint32 egr_cng_val = 0;
    uint32 cpu_val = 0;

    /*
     * Program OAM LM CNG control so that:
     *   1. All colored packets are counted,
     *   2. LM_COLOR_MODE qualifies pre-FP CNG for UpMEP Tx counters in IP,
     *   3. LM_COLOR_MODE qualifies post-FP CNG for DownMEP Rx counters in IP.
     */
    soc_reg_field_set(unit, OAM_LM_CNG_CONTROLr, &cng_val,
                      LM_COLOR_MODEf, 1);
    soc_reg_field_set(unit, OAM_LM_CNG_CONTROLr, &cng_val,
                      UPMEP_TX_CNG_SOURCEf, 1);
    soc_reg_field_set(unit, OAM_LM_CNG_CONTROLr, &cng_val,
                      DOWNMEP_RX_CNG_SOURCEf, 0);
    SOC_IF_ERROR_RETURN(WRITE_OAM_LM_CNG_CONTROLr(unit, cng_val));

    /* Include all colored packets for LM accounting on the egress side. */
    soc_reg_field_set(unit, EGR_OAM_LM_CNG_CONTROLr, &egr_cng_val,
                      LM_COLOR_MODEf, 1);
    SOC_IF_ERROR_RETURN(WRITE_EGR_OAM_LM_CNG_CONTROLr(unit, egr_cng_val));

    /* Configure OAM_LM_CPU_DATA_CONTROL so that CPU generated data
     * packets are NOT counted for loss measurement. */
    soc_reg_field_set(unit, OAM_LM_CPU_DATA_CONTROLr, &cpu_val,
                      OAM_LCPU_TX_CNT_DISABLEf, 1);
    soc_reg_field_set(unit, OAM_LM_CPU_DATA_CONTROLr, &cpu_val,
                      OAM_LCPU_RX_CNT_DISABLEf, 1);
    soc_reg_field_set(unit, OAM_LM_CPU_DATA_CONTROLr, &cpu_val,
                      OAM_SRCPORT0_TX_CNT_DISABLEf, 1);
    soc_reg_field_set(unit, OAM_LM_CPU_DATA_CONTROLr, &cpu_val,
                      OAM_SRCPORT0_RX_CNT_DISABLEf, 1);
    SOC_IF_ERROR_RETURN(WRITE_OAM_LM_CPU_DATA_CONTROLr(unit, cpu_val));
    return (BCM_E_NONE);
}
/*
 * Function:
 *      _bcm_kt2_oam_lm_cng_cpu_ctrl_set
 * Purpose:
 *      Set OAM LM cng and cpu data control per endpoint configuration.
 * Parameters:
 *      unit - (IN) BCM device number
 *      ep   - (IN) Endpoint info; ep->lm_flags selects color mode, CNG
 *             source and CPU-packet counting; ep->flags selects Up/Down MEP.
 *             If ep->lm_flags is zero the registers are left untouched.
 * Returns:
 *      BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_lm_cng_cpu_ctrl_set (int unit, bcm_oam_endpoint_info_t *ep)
{
    uint32 rval = 0;        /* OAM_LM_CNG_CONTROL, later reused for CPU ctrl */
    uint32 egr_rval = 0;    /* EGR_OAM_LM_CNG_CONTROL working value */
    int up_mep = 0;         /* 1 when endpoint is up-facing */
    int color_mode = 0;     /* 0: green only, 1: green + yellow */
    int cng_source = 0;     /* 0: pre-FP CNG, 1: post-FP CNG */
    int count_pkt = 1;      /* 1 == disable counting of CPU packets */
    if (ep->lm_flags) {
        /* read LM CNG register */
        SOC_IF_ERROR_RETURN(READ_OAM_LM_CNG_CONTROLr(unit, &rval));
        SOC_IF_ERROR_RETURN(READ_EGR_OAM_LM_CNG_CONTROLr(unit, &egr_rval));
        if (ep->lm_flags & BCM_OAM_LOSS_COUNT_GREEN_AND_YELLOW) {
            color_mode = 1;
        }
        /* Color mode is applied to both ingress and egress pipelines. */
        soc_reg_field_set(unit, OAM_LM_CNG_CONTROLr, &rval,
                          LM_COLOR_MODEf, color_mode);
        soc_reg_field_set(unit, EGR_OAM_LM_CNG_CONTROLr, &egr_rval,
                          LM_COLOR_MODEf, color_mode);
        if (ep->flags & BCM_OAM_ENDPOINT_UP_FACING) {
            up_mep = 1;
        }
        if (ep->lm_flags & BCM_OAM_LOSS_COUNT_POST_TRAFFIC_CONDITIONING) {
            cng_source = 1;
        }
        /* The CNG source field differs by MEP direction:
         * UpMEP uses the Tx side, DownMEP uses the Rx side. */
        if (up_mep) {
            soc_reg_field_set(unit, OAM_LM_CNG_CONTROLr, &rval,
                              UPMEP_TX_CNG_SOURCEf, cng_source);
        } else {
            soc_reg_field_set(unit, OAM_LM_CNG_CONTROLr, &rval,
                              DOWNMEP_RX_CNG_SOURCEf, cng_source);
        }
        /* write back the registrs */
        SOC_IF_ERROR_RETURN(WRITE_OAM_LM_CNG_CONTROLr(unit, rval));
        SOC_IF_ERROR_RETURN(WRITE_EGR_OAM_LM_CNG_CONTROLr(unit, egr_rval));
        /* Read LM CPU data control */
        SOC_IF_ERROR_RETURN(READ_OAM_LM_CPU_DATA_CONTROLr(unit, &rval));
        /* count_pkt == 0 enables counting (the HW fields are "disable" bits). */
        if (ep->lm_flags & BCM_OAM_LOSS_COUNT_CPU_RX_PKT) {
            count_pkt = 0;
        }
        soc_reg_field_set(unit, OAM_LM_CPU_DATA_CONTROLr, &rval,
                          OAM_LCPU_RX_CNT_DISABLEf, count_pkt);
        soc_reg_field_set(unit, OAM_LM_CPU_DATA_CONTROLr, &rval,
                          OAM_SRCPORT0_RX_CNT_DISABLEf, count_pkt);
        /* Recompute for the Tx direction (count_pkt may still be 0 from
         * the Rx decision above, hence the explicit else branch). */
        if (ep->lm_flags & BCM_OAM_LOSS_COUNT_CPU_TX_PKT) {
            count_pkt = 0;
        } else {
            count_pkt = 1;
        }
        soc_reg_field_set(unit, OAM_LM_CPU_DATA_CONTROLr, &rval,
                          OAM_LCPU_TX_CNT_DISABLEf, count_pkt);
        soc_reg_field_set(unit, OAM_LM_CPU_DATA_CONTROLr, &rval,
                          OAM_SRCPORT0_TX_CNT_DISABLEf, count_pkt);
        SOC_IF_ERROR_RETURN(WRITE_OAM_LM_CPU_DATA_CONTROLr(unit, rval));
    }
    return (BCM_E_NONE);
}
/*
 * Function:
 *      _bcm_kt2_oam_olp_magic_port_set
 * Purpose:
 *      Set Magic port used in OLP-XGS communication.
 * Parameters:
 *      unit - (IN) BCM device number
 * Returns:
 *      BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_olp_magic_port_set(int unit)
{
    uint64 reg64, fval;
    int my_modid;

    COMPILER_64_ZERO(reg64);
    /* Resolve the local module id, then program it together with the
     * reserved OLP communication port number into IARB_OLP_CONFIG_1. */
    BCM_IF_ERROR_RETURN(bcm_esw_stk_my_modid_get(unit, &my_modid));
    SOC_IF_ERROR_RETURN(READ_IARB_OLP_CONFIG_1r(unit, &reg64));
    COMPILER_64_SET(fval, 0, my_modid);
    soc_reg64_field_set(unit, IARB_OLP_CONFIG_1r, &reg64, MY_MODIDf, fval);
    COMPILER_64_SET(fval, 0, _BCM_OAM_OLP_COMMUNICATION_PORT);
    soc_reg64_field_set(unit, IARB_OLP_CONFIG_1r, &reg64, MY_PORT_NUMf, fval);
    SOC_IF_ERROR_RETURN(WRITE_IARB_OLP_CONFIG_1r(unit, reg64));
    return (BCM_E_NONE);
}
/*
 * Function:
 *      _bcm_kt2_oam_macsa_zero_check_disable
 * Purpose:
 *      Disable the MACSA == 0 sanity check on both the ingress and the
 *      egress OAM pipelines.
 * Parameters:
 *      unit - (IN) BCM device number
 * Returns:
 *      BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_macsa_zero_check_disable(int unit)
{
    uint32 ctrl = 0;

    /* Ingress pipeline. */
    soc_reg_field_set(unit, OAM_CONTROLr, &ctrl,
                      MACSA_ZERO_CHECK_ENABLEf, 0);
    SOC_IF_ERROR_RETURN(WRITE_OAM_CONTROLr(unit, ctrl));

    /* Egress pipeline: rebuild the value from scratch since the field
     * layout belongs to a different register. */
    ctrl = 0;
    soc_reg_field_set(unit, EGR_OAM_CONTROLr, &ctrl,
                      MACSA_ZERO_CHECK_ENABLEf, 0);
    SOC_IF_ERROR_RETURN(WRITE_EGR_OAM_CONTROLr(unit, ctrl));
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_endpoint_cleanup
 * Purpose:
 *     Free all the counters and the indexes allocated on endpoint create
 *     failure.  This is best-effort teardown: errors from individual
 *     steps are deliberately ignored so as much state as possible is
 *     released.
 * Parameters:
 *     unit     - (IN) BCM device number
 *     upmep    - (IN) Non-zero for an up-facing MEP; selects the order in
 *                which the egress/ingress MP group entries are destroyed
 *     hash_key - (IN) Key under which the entry is stored in ma_mep_htbl
 *     hash_data- (IN) Pointer to endpoint hash data being torn down
 * Returns:
 *     None (void)
 */
void
_bcm_kt2_oam_endpoint_cleanup(int unit, int upmep,
                              _bcm_oam_hash_key_t hash_key,
                              _bcm_oam_hash_data_t *hash_data)
{
    _bcm_oam_control_t *oc;        /* Pointer to control structure. */
    int rv = BCM_E_NONE;
    _bcm_oam_hash_data_t h_data_stored;  /* Stored hash data. */
    uint8 active_mdl = 0;
    rv = _bcm_kt2_oam_control_get(unit, &oc);
    if (BCM_FAILURE(rv)) {
        /* Without the control structure nothing can be released. */
        return;
    }
    /* Destroy the egress and ingress MP group entries; order depends on
     * the MEP direction. */
    if (upmep) {
        /* Delete l3_entry, if already added */
        rv = _bcm_kt2_oam_egr_mp_group_entry_destroy(unit, hash_data,
                                                     &active_mdl);
        /* Update Ingress MP group table(L3_ENTRY) */
        rv = _bcm_kt2_oam_l3_entry_destroy(unit, hash_data, &active_mdl);
    } else {
        /* Update Ingress MP group table(L3_ENTRY) */
        rv = _bcm_kt2_oam_l3_entry_destroy(unit, hash_data, &active_mdl);
        /* Delete l3_entry, if already added */
        rv = _bcm_kt2_oam_egr_mp_group_entry_destroy(unit, hash_data,
                                                     &active_mdl);
    }
    /* return rx & tx counters allocated if any */
    rv =_bcm_kt2_oam_free_counter(unit, hash_data);
    /* Clear opcode profile entry, if any */
    if (hash_data->profile_index != _BCM_OAM_INVALID_INDEX) {
        if (upmep) {
            rv = soc_profile_mem_delete(unit,
                                        &oc->egr_oam_opcode_control_profile,
                                        hash_data->profile_index);
        } else {
            rv = soc_profile_mem_delete(unit, &oc->oam_opcode_control_profile,
                                        hash_data->profile_index);
        }
    }
    /* Clear dglp profile, if any */
    if (hash_data->dglp1_profile_index != _BCM_OAM_INVALID_INDEX) {
        if (upmep) {
            rv = soc_profile_mem_delete(unit, &oc->egr_oam_dglp_profile,
                                        hash_data->dglp1_profile_index);
        } else {
            rv = soc_profile_mem_delete(unit, &oc->ing_oam_dglp_profile,
                                        hash_data->dglp1_profile_index);
        }
    }
    if (hash_data->dglp2_profile_index != _BCM_OAM_INVALID_INDEX) {
        if (upmep) {
            rv = soc_profile_mem_delete(unit, &oc->egr_oam_dglp_profile,
                                        hash_data->dglp2_profile_index);
        } else {
            rv = soc_profile_mem_delete(unit, &oc->ing_oam_dglp_profile,
                                        hash_data->dglp2_profile_index);
        }
    }
    /* Clear Service pri map profile, if configured */
    if (hash_data->pri_map_index != _BCM_OAM_INVALID_INDEX) {
        rv = soc_profile_mem_delete(unit, &oc->ing_service_pri_map,
                                    hash_data->pri_map_index);
    }
    if (hash_data->egr_pri_map_index != _BCM_OAM_INVALID_INDEX) {
        rv = soc_profile_mem_delete(unit, &oc->egr_service_pri_map,
                                    hash_data->egr_pri_map_index);
    }
    /* Decrement TPID ref count, if already incremented */
    if (hash_data->outer_tpid_profile_index != _BCM_OAM_INVALID_INDEX) {
        rv = _bcm_kt2_tpid_entry_delete(unit,
                                        hash_data->outer_tpid_profile_index,
                                        BCM_OAM_TPID_TYPE_OUTER);
    }
    if (hash_data->subport_tpid_profile_index != _BCM_OAM_INVALID_INDEX) {
        rv = _bcm_kt2_tpid_entry_delete(unit,
                                        hash_data->subport_tpid_profile_index,
                                        BCM_OAM_TPID_TYPE_SUBPORT);
    }
    if (hash_data->inner_tpid_profile_index != _BCM_OAM_INVALID_INDEX) {
        rv = _bcm_kt2_tpid_entry_delete(unit,
                                        hash_data->inner_tpid_profile_index,
                                        BCM_OAM_TPID_TYPE_INNER);
    }
    /* It might already have been deleted as well.
     * So just log it as debug, in case it fails */
    rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_delete(unit, hash_data);
    if (BCM_FAILURE(rv)) {
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Remove from ma_idx to ep id map (EP=%d) -"
                              " %s.\n"), unit, hash_data->ep_id, bcm_errmsg(rv)));
    }
#if defined(INCLUDE_BHH)
    if((hash_data->type == bcmOAMEndpointTypeBHHMPLS) ||
       (hash_data->type == bcmOAMEndpointTypeBHHMPLSVccv)) {
        /* BHH endpoints live in a dedicated uKernel-managed pool. */
        shr_idxres_list_free(oc->bhh_pool,
                             BCM_OAM_BHH_GET_UKERNEL_EP(hash_data->ep_id));
    } else
#endif
    {
        if (1 == hash_data->is_remote) {
            /* If remote endpoint, return index to remp pool */
            shr_idxres_list_free(oc->rmep_pool, hash_data->remote_index);
        } else {
            /* If local endpoint, return index to lmep pool */
            shr_idxres_list_free(oc->lmep_pool, hash_data->local_tx_index);
        }
        /* return index to mep pool */
        shr_idxres_list_free(oc->mep_pool, hash_data->ep_id);
    }
    /* Return entry to hash data entry to free pool. */
    shr_htb_find(oc->ma_mep_htbl, hash_key,
                 (shr_htb_data_t *)&h_data_stored, 1);
    /* Clear contents of hash data element. */
    _BCM_OAM_HASH_DATA_CLEAR(hash_data);
}
#ifdef BCM_WARM_BOOT_SUPPORT_SW_DUMP
/*
 * Function:
 *     _bcm_kt2_oam_group_info_dump
 * Purpose:
 *     Print the configuration of one OAM group to the CLI
 *     (warm-boot SW-dump diagnostics).
 * Parameters:
 *     unit        - (IN) Device unit number
 *     group_index - (IN) Group id to dump
 *     group_p     - (IN) Group data array base pointer
 * Returns:
 *     None
 */
void
_bcm_kt2_oam_group_info_dump(int unit, bcm_oam_group_t group_index,
                             _bcm_oam_group_data_t *group_p)
{
    bcm_oam_group_info_t info;
    int rv;

    /* Fetch group configuration into a freshly initialized struct. */
    bcm_oam_group_info_t_init(&info);
    rv = _bcm_kt2_oam_get_group(unit, group_index, group_p, &info);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: _bcm_kt2_oam_group_info_dump"
                              " (GID=%d) - %s.\n"),
                   unit, group_index, bcm_errmsg(rv)));
        return;
    }
    LOG_CLI((BSL_META_U(unit,"------------------------------------------\r\n")));
    LOG_CLI((BSL_META_U(unit,"Group ID = %d details\r\n"), group_index));
    LOG_CLI((BSL_META_U(unit,"Group Name = %s\r\n"), info.name));
    LOG_CLI((BSL_META_U(unit,"Flags = %d\r\n"), info.flags));
    LOG_CLI((BSL_META_U(unit,"Faults = %d\r\n"), info.faults));
    LOG_CLI((BSL_META_U(unit,"Persistent_faults = %d\r\n"),
             info.persistent_faults));
    LOG_CLI((BSL_META_U(unit,"clear_persistent_faults = %d\r\n"),
             info.clear_persistent_faults));
    LOG_CLI((BSL_META_U(unit,"lowest_alarm_priority = %d\r\n"),
             info.lowest_alarm_priority));
    LOG_CLI((BSL_META_U(unit,"------------------------------------------\r\n")));
}
/*
 * Function:
 *     _bcm_kt2_oam_hash_data_dump
 * Purpose:
 *     Dump every field of an endpoint hash-data record to the CLI
 *     (warm-boot SW-dump diagnostics).  Output format is one
 *     "label = value" line per field.
 * Parameters:
 *     unit     - (IN) Device unit number
 *     h_data_p - (IN) Endpoint hash data to print
 * Returns:
 *     None
 */
void
_bcm_kt2_oam_hash_data_dump(int unit, _bcm_oam_hash_data_t *h_data_p)
{
    LOG_CLI((BSL_META_U(unit,"------------------------------------------\r\n")));
    LOG_CLI((BSL_META_U(unit,"EP ID = %d\r\n"),h_data_p->ep_id));
    LOG_CLI((BSL_META_U(unit,"Type = %d\r\n"),h_data_p->type));
    LOG_CLI((BSL_META_U(unit,"In use = %d\r\n"),h_data_p->in_use));
    LOG_CLI((BSL_META_U(unit,"Remote = %d\r\n"),h_data_p->is_remote));
    LOG_CLI((BSL_META_U(unit,"Name = %d\r\n"),h_data_p->name));
    LOG_CLI((BSL_META_U(unit,"Level = %d\r\n"),h_data_p->level));
    LOG_CLI((BSL_META_U(unit,"Outer Vlan = %d\r\n"),h_data_p->vlan));
    LOG_CLI((BSL_META_U(unit,"Inner Vlan = %d\r\n"),h_data_p->inner_vlan));
    LOG_CLI((BSL_META_U(unit,"Gport = %d\r\n"),h_data_p->gport));
    LOG_CLI((BSL_META_U(unit,"SGLP = %d\r\n"),h_data_p->sglp));
    LOG_CLI((BSL_META_U(unit,"DGLP = %d\r\n"),h_data_p->dglp));
    LOG_CLI((BSL_META_U(unit,"Tx ena = %d\r\n"),h_data_p->local_tx_enabled));
    LOG_CLI((BSL_META_U(unit,"Rx ena = %d\r\n"),h_data_p->local_rx_enabled));
    LOG_CLI((BSL_META_U(unit,"Grp idx = %d\r\n"),h_data_p->group_index));
    LOG_CLI((BSL_META_U(unit,"Remote idx = %d\r\n"),h_data_p->remote_index));
    LOG_CLI((BSL_META_U(unit,"LM ctr idx = %d\r\n"),h_data_p->lm_counter_index));
    LOG_CLI((BSL_META_U(unit,"Pri map idx = %d\r\n"),h_data_p->pri_map_index));
    LOG_CLI((BSL_META_U(unit,"Profile Idx = %d\r\n"),h_data_p->profile_index));
    LOG_CLI((BSL_META_U(unit,"Local Tx index = %d\r\n"),h_data_p->local_tx_index));
    LOG_CLI((BSL_META_U(unit,"Local Rx index = %d\r\n"),h_data_p->local_rx_index));
    LOG_CLI((BSL_META_U(unit,"VP = %d\r\n"),h_data_p->vp));
    LOG_CLI((BSL_META_U(unit,"Flags = 0x%x\r\n"),h_data_p->flags));
    LOG_CLI((BSL_META_U(unit,"Flags2 = 0x%x\r\n"),h_data_p->flags2));
    LOG_CLI((BSL_META_U(unit,"Vfp_entry = 0x%x\r\n"),h_data_p->vfp_entry));
    LOG_CLI((BSL_META_U(unit,"Fp_entry_tx = 0x%x\r\n"),h_data_p->fp_entry_tx));
    LOG_CLI((BSL_META_U(unit,"Fp_entry_rx = 0x%x\r\n"),h_data_p->fp_entry_rx));
    LOG_CLI((BSL_META_U(unit,"Period = %d\r\n"),h_data_p->period));
    LOG_CLI((BSL_META_U(unit,"Opcode flags = 0x%x\r\n"),h_data_p->opcode_flags));
    LOG_CLI((BSL_META_U(unit,"TS format = %d\r\n"),h_data_p->ts_format));
    LOG_CLI((BSL_META_U(unit,"Label = 0x%x\r\n"),h_data_p->label));
    LOG_CLI((BSL_META_U(unit,"Vlan Pri = %d\r\n"),h_data_p->vlan_pri));
    LOG_CLI((BSL_META_U(unit,"Egress if = 0x%x\r\n"),h_data_p->egress_if));
    LOG_CLI((BSL_META_U(unit,"Int pri = %d\r\n"),h_data_p->int_pri));
    LOG_CLI((BSL_META_U(unit,"Trunk index = %d\r\n"),h_data_p->trunk_index));
    LOG_CLI((BSL_META_U(unit,"Domain = %d\r\n"),h_data_p->oam_domain));
    LOG_CLI((BSL_META_U(unit,"Egr pri map idx = %d\r\n"),h_data_p->egr_pri_map_index));
    LOG_CLI((BSL_META_U(unit,"Rx ctr = %d\r\n"),h_data_p->rx_ctr));
    LOG_CLI((BSL_META_U(unit,"Tx ctr = %d\r\n"),h_data_p->tx_ctr));
    LOG_CLI((BSL_META_U(unit,"src pp port = %d\r\n"),h_data_p->src_pp_port));
    LOG_CLI((BSL_META_U(unit,"dst pp port = %d\r\n"),h_data_p->dst_pp_port));
    LOG_CLI((BSL_META_U(unit,"DGLP1 = %d\r\n"),h_data_p->dglp1));
    LOG_CLI((BSL_META_U(unit,"DGLP2 = %d\r\n"),h_data_p->dglp2));
    LOG_CLI((BSL_META_U(unit,"DGLP1 Profile idx = %d\r\n"),h_data_p->dglp1_profile_index));
    LOG_CLI((BSL_META_U(unit,"DGLP2 Profile idx = %d\r\n"),h_data_p->dglp2_profile_index));
    LOG_CLI((BSL_META_U(unit,"Outer TPID = 0x%04X\r\n"),h_data_p->outer_tpid));
    LOG_CLI((BSL_META_U(unit,"Inner TPID = 0x%04X\r\n"),h_data_p->inner_tpid));
    LOG_CLI((BSL_META_U(unit,"SubPort TPID = %d\r\n"),h_data_p->subport_tpid));
    LOG_CLI((BSL_META_U(unit,"Outer TPID profile idx = %d\r\n"),h_data_p->outer_tpid_profile_index));
    LOG_CLI((BSL_META_U(unit,"Inner TPID profile idx = %d\r\n"),h_data_p->inner_tpid_profile_index));
    LOG_CLI((BSL_META_U(unit,"SubPort TPID profile idx = %d\r\n"),h_data_p->subport_tpid_profile_index));
    LOG_CLI((BSL_META_U(unit,"MA Base idx = %d\r\n"),h_data_p->ma_base_index));
    LOG_CLI((BSL_META_U(unit,"Trunk ID = %d\r\n"),h_data_p->trunk_id));
    LOG_CLI((BSL_META_U(unit,"Active MDL BITMAP = 0x%x\r\n"),h_data_p->active_mdl_bitmap));
    LOG_CLI((BSL_META_U(unit,"Internal flags = 0x%x\r\n"),h_data_p->int_flags));
#if defined(INCLUDE_BHH)
    /* BHH/MPLS-TP specific state, only compiled in with BHH support. */
    LOG_CLI((BSL_META_U(unit,"Cpu qid = %d\r\n"),h_data_p->cpu_qid));
    LOG_CLI((BSL_META_U(unit,"BHH Endpoint Index = %d\r\n"),h_data_p->bhh_endpoint_index));
    LOG_CLI((BSL_META_U(unit,"BHH DM ENTRY= %d\r\n"),h_data_p->bhh_dm_entry_rx));
    LOG_CLI((BSL_META_U(unit,"BHH LM ENTRY= %d\r\n"),h_data_p->bhh_lm_entry_rx));
    LOG_CLI((BSL_META_U(unit,"BHH ENTRY PKT RX= %d\r\n"),h_data_p->bhh_entry_pkt_rx));
    LOG_CLI((BSL_META_U(unit,"BHH ENTRY PKT TX= %d\r\n"),h_data_p->bhh_entry_pkt_tx));
    LOG_CLI((BSL_META_U(unit,"Src Mac = %02X:%02X:%02X:%02X:%02X:%02X\r\n"),
             h_data_p->src_mac_address[0], h_data_p->src_mac_address[1],
             h_data_p->src_mac_address[2], h_data_p->src_mac_address[3],
             h_data_p->src_mac_address[4], h_data_p->src_mac_address[5]));
    LOG_CLI((BSL_META_U(unit,"Dst Mac = %02X:%02X:%02X:%02X:%02X:%02X\r\n"),
             h_data_p->dst_mac_address[0], h_data_p->dst_mac_address[1],
             h_data_p->dst_mac_address[2], h_data_p->dst_mac_address[3],
             h_data_p->dst_mac_address[4], h_data_p->dst_mac_address[5]));
    LOG_CLI((BSL_META_U(unit,"Inner vlan pri = %d\r\n"),h_data_p->inner_vlan_pri));
    LOG_CLI((BSL_META_U(unit,"egr label= %d\r\n"),h_data_p->egr_label));
    LOG_CLI((BSL_META_U(unit,"egr label exp= %d\r\n"),h_data_p->egr_label_exp));
    LOG_CLI((BSL_META_U(unit,"egr label ttl= %d\r\n"),h_data_p->egr_label_ttl));
#endif
    LOG_CLI((BSL_META_U(unit,"Vccv Type = %d\r\n"),h_data_p->vccv_type));
    LOG_CLI((BSL_META_U(unit,"VPN = %d\r\n"),h_data_p->vpn));
    LOG_CLI((BSL_META_U(unit,"PM profile attached = %d\r\n"),h_data_p->pm_profile_attached));
    LOG_CLI((BSL_META_U(unit,"CTR Field id = %d\r\n"),h_data_p->ctr_field_id));
    LOG_CLI((BSL_META_U(unit,"EGR CTR Field id = %d\r\n"),h_data_p->egr_ctr_field_id));
    LOG_CLI((BSL_META_U(unit,"Resolved trunk gport based on trunk "
                             "index 0x%x\r\n"),
             h_data_p->resolved_trunk_gport));
    LOG_CLI((BSL_META_U(unit,"------------------------------------------\r\n")));
}
/*
 * Function:
 *     _bcm_oam_sw_dump
 * Purpose:
 *     Displays oam information maintained by software.
 * Parameters:
 *     unit - Device unit number
 * Returns:
 *     None
 */
void
_bcm_kt2_oam_sw_dump(int unit)
{
    _bcm_oam_control_t *oc = NULL;
    _bcm_oam_group_data_t *grp_array = NULL;
    _bcm_oam_ep_list_t *ep_node = NULL;
    bcm_oam_group_t gid;
    int rv;

    LOG_CLI((BSL_META_U(unit,"OAM\n")));
    /* Get OAM control structure. */
    rv = _bcm_kt2_oam_control_get(unit, &oc);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Get oam control variable\n"),
                   unit));
        return;
    }
    _BCM_OAM_LOCK(oc);
    grp_array = oc->group_info;
    /* Walk every in-use group and dump the group followed by each of
     * its endpoints. */
    for (gid = 0; gid < oc->group_count; ++gid) {
        if (grp_array[gid].in_use != 1) {
            continue;
        }
        _bcm_kt2_oam_group_info_dump(unit, gid, grp_array);
        for (ep_node = *(grp_array[gid].ep_list);
             ep_node != NULL;
             ep_node = ep_node->next) {
            _bcm_kt2_oam_hash_data_dump(unit, ep_node->ep_data_p);
        }
    }
    _BCM_OAM_UNLOCK(oc);
}
/*
 * Print every valid MA-index to endpoint-id mapping to the CLI
 * (warm-boot SW-dump diagnostics).
 */
void _bcm_kt2_oam_ma_idx_to_ep_id_mapping_print(int unit)
{
    _bcm_oam_control_t *oc = NULL; /* Pointer to control structure. */
    int total;
    int idx;
    int rv = BCM_E_NONE;

    rv = _bcm_kt2_oam_control_get(unit, &oc);
    if ((BCM_FAILURE(rv)) || (oc == NULL)) {
        return;
    }
    _BCM_OAM_LOCK(oc);
    if (oc->_bcm_oam_ma_idx_to_ep_id_map != NULL) {
        /* Map covers both ingress and egress MA index spaces. */
        total = oc->ma_idx_count + oc->egr_ma_idx_count;
        for (idx = 0; idx < total; idx++) {
            if (oc->_bcm_oam_ma_idx_to_ep_id_map[idx] == _BCM_OAM_INVALID_INDEX) {
                continue;
            }
            LOG_CLI((BSL_META_U(unit,"MA_IDX = %d EP_ID = %d\r\n"),
                     idx, oc->_bcm_oam_ma_idx_to_ep_id_map[idx]));
        }
    }
    _BCM_OAM_UNLOCK(oc);
}
#endif
#if defined(BCM_WARM_BOOT_SUPPORT)
#ifdef _KATANA2_DEBUG
/*
 * Function:
 *     _bcm_kt2_oam_tx_counter_recover
 * Purpose:
 *     Recover the Tx LM counter for a MEP from a hardware MP group entry
 *     during warm boot (header comment previously said "Rx" by mistake).
 * Parameters:
 *     unit     - (IN) BCM device number
 *     mem      - (IN) L3_ENTRY_IPV4_UNICASTm or EGR_MP_GROUPm
 *     entry    - (IN) HW entry buffer already read from 'mem'
 *     h_data_p - (IN/OUT) Endpoint hash data being recovered
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_tx_counter_recover(int unit, soc_mem_t mem,
                                uint32 *entry,
                                _bcm_oam_hash_data_t *h_data_p)
{
    _bcm_oam_control_t *oc;    /* Pointer to control structure. */
    int rv = BCM_E_NONE;       /* Operation return status. */
    int up_mep = 0;
    mep_ctr_info_t *ctr_info;
    shr_aidxres_list_handle_t *ctr_pool;
    int *map;
    int ctr_type = 0;
    int ctr_valid = 0;
    int ctr_mdl = 0;
    int i;

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    if (mem == L3_ENTRY_IPV4_UNICASTm) {
        up_mep = 1;
        ctr_info = mep_ctr_info;
        map = &h_data_p->pri_map_index;
    } else if (mem == EGR_MP_GROUPm) {
        ctr_info = egr_mep_ctr_info;
        map = &h_data_p->egr_pri_map_index;
    } else {
        return BCM_E_INTERNAL;
    }
    ctr_pool = oc->ing_lm_ctr_pool;

    /* Each MP group entry carries two LM counter descriptors; the two
     * recovery passes were duplicated code, folded into one loop. */
    for (i = 0; i < 2; i++) {
        ctr_valid = soc_mem_field32_get(unit, mem,
                                        (uint32 *)entry, ctr_info[i].ctr_valid);
        if (!ctr_valid) {
            continue;
        }
        ctr_type = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                       ctr_info[i].ctr_mep_type);
        ctr_mdl = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                      ctr_info[i].ctr_mep_mdl);
        /* Only pick up the descriptor that matches this MEP's direction
         * and maintenance domain level. */
        if ((ctr_type == up_mep) && (ctr_mdl == h_data_p->level)) {
            h_data_p->tx_ctr = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                                   ctr_info[i].ctr_base_ptr);
            h_data_p->flags |= BCM_OAM_ENDPOINT_LOSS_MEASUREMENT;
            *map = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                       ctr_info[i].ctr_profile);
            /* Allocate 8 consecutive counters from the pool */
            rv = shr_aidxres_list_reserve_block(ctr_pool[i],
                                                h_data_p->tx_ctr, 8);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: LM counter block alloc failed "
                                      "(EP=%d) - %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                return (rv);
            }
        }
    }
    return (rv);
}
#endif /* _KATANA2_DEBUG */
/*
 * Function:
 *     _bcm_kt2_oam_rx_counter_recover
 * Purpose:
 *     Recover the Rx LM counter for a MEP from a hardware MP group entry
 *     during warm boot, including the service priority map profile
 *     reference count.
 * Parameters:
 *     unit     - (IN) BCM device number
 *     mem      - (IN) EGR_MP_GROUPm (up-MEP) or the ingress MP group table
 *     entry    - (IN) HW entry buffer already read from 'mem'
 *     h_data_p - (IN/OUT) Endpoint hash data being recovered
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_rx_counter_recover(int unit, soc_mem_t mem,
                                uint32 *entry,
                                _bcm_oam_hash_data_t *h_data_p)
{
    _bcm_oam_control_t *oc;    /* Pointer to control structure. */
    int rv = BCM_E_NONE;       /* Operation return status. */
    int up_mep = 0;
    mep_ctr_info_t *ctr_info;
    shr_aidxres_list_handle_t *ctr_pool;
    int *map;
    int ctr_valid = 0;
    int ctr_mdl = 0;
    int ctr_type = 0;
    int i;
    soc_profile_mem_t *pri_map_profile;

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    if (mem == EGR_MP_GROUPm) {
        up_mep = 1;
        ctr_info = egr_mep_ctr_info;
        map = &h_data_p->egr_pri_map_index;
        pri_map_profile = &oc->egr_service_pri_map;
    } else {
        ctr_info = mep_ctr_info;
        map = &h_data_p->pri_map_index;
        pri_map_profile = &oc->ing_service_pri_map;
    }
    ctr_pool = oc->ing_lm_ctr_pool;

    /* Each MP group entry carries two LM counter descriptors; the two
     * recovery passes were duplicated code, folded into one loop. */
    for (i = 0; i < 2; i++) {
        ctr_valid = soc_mem_field32_get(unit, mem,
                                        (uint32 *)entry, ctr_info[i].ctr_valid);
        if (!ctr_valid) {
            continue;
        }
        ctr_type = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                       ctr_info[i].ctr_mep_type);
        ctr_mdl = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                      ctr_info[i].ctr_mep_mdl);
        /* Only pick up the descriptor that matches this MEP's direction
         * and maintenance domain level. */
        if ((ctr_type == up_mep) && (ctr_mdl == h_data_p->level)) {
            h_data_p->rx_ctr = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                                   ctr_info[i].ctr_base_ptr);
            h_data_p->flags |= BCM_OAM_ENDPOINT_LOSS_MEASUREMENT;
            *map = soc_mem_field32_get(unit, mem, (uint32 *)entry,
                                       ctr_info[i].ctr_profile);
            /* Re-take the profile table reference dropped by warm boot. */
            rv = soc_profile_mem_reference(unit, pri_map_profile,
                         ((*map) * BCM_OAM_INTPRI_MAX), BCM_OAM_INTPRI_MAX);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: LM counter profile ref count "
                                      "increment failed (EP=%d) - %s.\n"),
                           h_data_p->ep_id, bcm_errmsg(rv)));
                return (rv);
            }
            /* Allocate 8 consecutive counters from the pool */
            rv = shr_aidxres_list_reserve_block(ctr_pool[i],
                                                h_data_p->rx_ctr, 8);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: LM counter block alloc failed "
                                      "(EP=%d) - %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
                return (rv);
            }
        }
    }
    return (rv);
}
/*
 * Function:
 *     _bcm_kt2_oam_endpoint_alloc
 * Purpose:
 *     Allocate an endpoint memory element.
 * Parameters:
 *     ep_pp - (IN/OUT) Pointer to endpoint address pointer.
 * Returns:
 *     BCM_E_NONE   - allocation succeeded, *ep_pp set
 *     BCM_E_MEMORY - allocation failed, *ep_pp untouched
 */
STATIC int
_bcm_kt2_oam_endpoint_alloc(bcm_oam_endpoint_info_t **ep_pp)
{
    bcm_oam_endpoint_info_t *new_ep = NULL;

    _BCM_OAM_ALLOC(new_ep, bcm_oam_endpoint_info_t,
                   sizeof(bcm_oam_endpoint_info_t), "Endpoint info");
    if (new_ep == NULL) {
        return (BCM_E_MEMORY);
    }
    *ep_pp = new_ep;
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_sync
* Purpose:
* Store OAM configuration to level two storage cache memory.
* Parameters:
* unit - (IN) Device unit number
* Returns:
* BCM_E_XXX
*/
int
_bcm_kt2_oam_sync(int unit)
{
int rv; /* Operation return status. */
_bcm_oam_control_t *oc;
int alloc_size = 0;
int stable_size;
soc_scache_handle_t scache_handle;
uint8 *oam_scache;
int grp_idx;
_bcm_oam_group_data_t *group_p; /* Pointer to group list. */
int group_count = 0;
_bcm_oam_ep_list_t *ep_list_p = NULL;
_bcm_oam_hash_data_t ep_data;
int ma_offset;
int idx = 0;
_bcm_oam_hash_data_t *h_data_p = NULL; /* Endpoint hash data pointer. */
/* Get OAM module storage size. */
SOC_IF_ERROR_RETURN(soc_stable_size_get(unit, &stable_size));
/* If level 2 store is not configured return from here. */
if (SOC_WARM_BOOT_SCACHE_IS_LIMITED(unit) || (stable_size == 0)) {
return BCM_E_NONE;
}
/* Get handle to control structure. */
BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
_BCM_OAM_LOCK(oc);
/* Initialize to group array pointer. */
group_p = oc->group_info;
for (grp_idx = 0; grp_idx < oc->group_count; grp_idx++) {
/* Check if the group is in use. */
if (BCM_E_EXISTS
== shr_idxres_list_elem_state(oc->group_pool, grp_idx)) {
group_count++;
}
}
alloc_size += BCM_OAM_GROUP_NAME_LENGTH * (oc->group_count);
/* To store OAM group count. */
alloc_size += sizeof(int);
/* To store FP GID. */
alloc_size += 3 * sizeof(bcm_field_group_t);
SOC_SCACHE_HANDLE_SET(scache_handle, unit, BCM_MODULE_OAM, 0);
/* To store EP ids */
alloc_size += (_BCM_KT2_OAM_ENDPOINT_ID_SCACHE_SIZE);
/* Allocate memory to store oc->eth_oam_mp_group_vlan_key */
alloc_size += sizeof(uint8);
/* Check if memory has already been allocated */
rv = _bcm_esw_scache_ptr_get(unit,
scache_handle,
0,
alloc_size,
&oam_scache,
BCM_WB_DEFAULT_VERSION,
NULL
);
if (!SOC_WARM_BOOT(unit) && (BCM_E_NOT_FOUND == rv)) {
rv = _bcm_esw_scache_ptr_get(unit,
scache_handle,
1,
alloc_size,
&oam_scache,
BCM_WB_DEFAULT_VERSION,
NULL
);
if (BCM_FAILURE(rv)
|| (NULL == oam_scache)) {
goto cleanup;
}
} else if (BCM_FAILURE(rv)) {
goto cleanup;
}
/* Store the FP groups */
sal_memcpy(oam_scache, &oc->vfp_group, sizeof(bcm_field_group_t));
oam_scache += sizeof(bcm_field_group_t);
sal_memcpy(oam_scache, &oc->fp_vp_group, sizeof(bcm_field_group_t));
oam_scache += sizeof(bcm_field_group_t);
sal_memcpy(oam_scache, &oc->fp_glp_group, sizeof(bcm_field_group_t));
oam_scache += sizeof(bcm_field_group_t);
sal_memcpy(oam_scache, &group_count, sizeof(int));
oam_scache += sizeof(int);
for (grp_idx = 0; grp_idx < oc->group_count; ++grp_idx)
{
/* Check if the group is in use. */
if (BCM_E_EXISTS
== shr_idxres_list_elem_state(oc->group_pool, grp_idx)) {
sal_memcpy(oam_scache, group_p[grp_idx].name,
BCM_OAM_GROUP_NAME_LENGTH);
oam_scache += BCM_OAM_GROUP_NAME_LENGTH;
}
}
/* Store logical endpoint indexes
Following is scache allocation for different types of EPs
-----------------------------------------------------------------------
| Rx DownMEP indexes | Rx UpMEP indexes | RMEP indexes | Tx MEP indexes |
-----------------------------------------------------------------------
*/
sal_memset(oam_scache, 0, _BCM_KT2_OAM_ENDPOINT_ID_SCACHE_SIZE);
for (grp_idx = 0; grp_idx < oc->group_count; ++grp_idx)
{
if (group_p[grp_idx].in_use == 1) {
ep_list_p = *(group_p[grp_idx].ep_list);
while (ep_list_p != NULL) {
sal_memcpy(&ep_data, ep_list_p->ep_data_p, sizeof(_bcm_oam_hash_data_t));
if (ep_data.type == bcmOAMEndpointTypeEthernet) {
if (ep_data.is_remote) {
sal_memcpy(oam_scache +
(_BCM_KT2_OAM_RMEP_SCACHE_LOCATION(ep_data.remote_index)),
&(ep_data.ep_id), sizeof (bcm_oam_endpoint_t));
} else {
if (ep_data.local_tx_enabled) {
sal_memcpy(oam_scache +
(_BCM_KT2_OAM_TX_MEP_SCACHE_LOCATION(ep_data.local_tx_index)),
&(ep_data.ep_id), sizeof (bcm_oam_endpoint_t));
}
if (ep_data.local_rx_enabled) {
if (ep_data.flags & BCM_OAM_ENDPOINT_UP_FACING) {
sal_memcpy(oam_scache +
(_BCM_KT2_OAM_RX_UPMEP_SCACHE_LOCATION(ep_data.local_rx_index)),
&(ep_data.ep_id), sizeof (bcm_oam_endpoint_t));
} else {
_bcm_kt2_oam_ma_index_offset_get(unit, &ep_data, &ma_offset);
ep_data.local_rx_index = ep_data.ma_base_index + ma_offset;
sal_memcpy(oam_scache +
(_BCM_KT2_OAM_RX_DOWNMEP_SCACHE_LOCATION(ep_data.local_rx_index)),
&(ep_data.ep_id), sizeof (bcm_oam_endpoint_t));
}
}
}
}
ep_list_p = ep_list_p->next;
}
}
}
oam_scache += _BCM_KT2_OAM_ENDPOINT_ID_SCACHE_SIZE;
sal_memcpy(oam_scache, &(oc->eth_oam_mp_group_vlan_key), sizeof(uint8));
oam_scache += sizeof(uint8);
for (idx = 0; idx < oc->ep_count; idx++) {
h_data_p = &oc->oam_hash_data[idx];
if (ETH_TYPE(h_data_p->type) && (h_data_p->in_use)) {
sal_memcpy(oam_scache, &(h_data_p->flags), sizeof(uint32));
}
oam_scache += sizeof(uint32);
}
cleanup:
_BCM_OAM_UNLOCK(oc);
return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_wb_group_recover
* Purpose:
* Recover OAM group configuration.
* Parameters:
* unit - (IN) BCM device number
* stable_size - (IN) OAM module Level2 storage size.
* oam_scache - (IN) Pointer to scache address pointer.
* Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_wb_group_recover(int unit, int stable_size, uint8 **oam_scache)
{
    int index;                          /* Hw table index. */
    _bcm_oam_group_data_t *group_p;     /* Group info pointer. */
    maid_reduction_entry_t maid_entry;  /* Group entry info. */
    ma_state_entry_t ma_state_ent;      /* Group state info. */
    int maid_reduction_valid = 0;       /* Group valid. */
    int ma_state_valid = 0;             /* Group state valid. */
    _bcm_oam_control_t *oc;             /* Pointer to Control structure. */
    int rv;                             /* Operation return status. */
    /* Control lock taken by calling routine. */
    /* Get OAM control structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /*
     * Walk every possible group index; a group is recovered only when the
     * hardware marks it valid in BOTH MAID_REDUCTION and MA_STATE.
     */
    for (index = 0; index < oc->group_count; index++) {
        rv = READ_MAID_REDUCTIONm(unit, MEM_BLOCK_ANY, index,
                                  &maid_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: (GID=%d) MAID_REDUCTION table read"
                                  " failed - %s.\n"), index, bcm_errmsg(rv)));
            goto cleanup;
        }
        rv = READ_MA_STATEm(unit, MEM_BLOCK_ANY, index, &ma_state_ent);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: (GID=%d) MA_STATE table read"
                                  " failed - %s.\n"), index, bcm_errmsg(rv)));
            goto cleanup;
        }
        maid_reduction_valid
            = soc_MAID_REDUCTIONm_field32_get(unit, &maid_entry, VALIDf);
        ma_state_valid
            = soc_MA_STATEm_field32_get(unit, &ma_state_ent, VALIDf);
        if (maid_reduction_valid || ma_state_valid) {
            /* Entry must be valid in both the tables, else return error. */
            if (!maid_reduction_valid || !ma_state_valid) {
                rv = BCM_E_INTERNAL;
                goto cleanup;
            }
            /* Get the group memory pointer. */
            group_p = &oc->group_info[index];
            /* Seeing this group already in-use means it was recovered
             * twice - internal inconsistency. */
            if (1 == group_p->in_use) {
                rv = BCM_E_INTERNAL;
                goto cleanup;
            }
            if (SOC_WARM_BOOT_SCACHE_IS_LIMITED(unit) || (stable_size == 0)) {
                /* No Level-2 scache image available:
                 * set group name as zeros. */
                sal_memset(group_p->name, 0, BCM_OAM_GROUP_NAME_LENGTH);
            } else {
                /* Get the group name from stored info and advance the
                 * caller's scache cursor past it. */
                sal_memcpy(group_p->name, *oam_scache,
                           BCM_OAM_GROUP_NAME_LENGTH);
                *oam_scache = (*oam_scache + BCM_OAM_GROUP_NAME_LENGTH);
            }
            /* Reserve the group index. */
            rv = shr_idxres_list_reserve(oc->group_pool, index, index);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: (GID=%d) Index reserve "
                                      " failed - %s.\n"), index, bcm_errmsg(rv)));
                rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
                goto cleanup;
            }
            /* Create the linked list to maintain group endpoint information.
             * Only the head pointer is allocated here; endpoint nodes are
             * appended later as individual endpoints are recovered. */
            _BCM_OAM_ALLOC((group_p->ep_list), _bcm_oam_ep_list_t *,
                           sizeof(_bcm_oam_ep_list_t *), "EP list head");
            if (NULL == group_p->ep_list) {
                rv = BCM_E_MEMORY;
                goto cleanup;
            }
            /* Initialize head node.*/
            *group_p->ep_list = NULL;
        }
    }
    return (BCM_E_NONE);
cleanup:
    /* Undo the group-index reservation made in the failing iteration,
     * if it got that far. */
    if (BCM_E_EXISTS
        == shr_idxres_list_elem_state(oc->group_pool, index)) {
        shr_idxres_list_free(oc->group_pool, index);
    }
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_rmep_recover
* Purpose:
* Recover OAM remote endpoint configuration.
* Parameters:
* unit - (IN) BCM device number
* index - (IN) Remote MEP hardware index.
* l3_entry - (IN) RMEP view table entry pointer.
* Returns:
* BCM_E_XXX
*/
STATIC int
_bcm_kt2_oam_rmep_recover(int unit,
                          int index,
                          l3_entry_1_entry_t *l3_entry,
                          uint8 **scache,
                          uint16 recovered_ver)
{
    rmep_entry_t rmep_entry;            /* Remote table entry. */
    _bcm_oam_hash_data_t *h_data_p;     /* Endpoint hash data pointer. */
    _bcm_oam_control_t *oc;             /* Pointer to control structure. */
    int rv;                             /* Operation return status. */
    bcm_oam_endpoint_info_t *ep_info = NULL; /* Endpoint information. */
    _bcm_oam_hash_key_t hash_key;       /* Hash key buffer for lookup. */
    int ep_id;                          /* Endpoint ID. */
    int rmep_index;                     /* Index of RMEP table */
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_rmep_recover.\n")));
    /*
     * Get OAM control structure.
     * Note: Lock taken by calling routine.
     */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* The RMEP table pointer stored in the L3 entry (RMEP view) locates
     * both the hardware RMEP entry and this endpoint's slot in scache. */
    rmep_index = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, l3_entry, RMEP__RMEP_PTRf);
    if (recovered_ver >= BCM_WB_VERSION_1_3) {
        /* Recover EP index from scache */
        sal_memcpy(&ep_id, *scache + (_BCM_KT2_OAM_RMEP_SCACHE_LOCATION(rmep_index)),
                   sizeof(bcm_oam_endpoint_t));
        rv = shr_idxres_list_reserve(oc->mep_pool, ep_id, ep_id);
        if (BCM_FAILURE(rv)) {
            rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: (EP_ID=%d) Index reserve "
                                  " failed - %s.\n"), ep_id, bcm_errmsg(rv)));
            return (rv);
        }
    } else {
        /* Pre-1.3 images did not store endpoint IDs:
         * allocate the next available endpoint index. */
        rv = shr_idxres_list_alloc(oc->mep_pool,
                                   (shr_idxres_element_t *)&ep_id);
        if (BCM_FAILURE(rv)) {
            /* NOTE(review): ep_id is not assigned when the alloc call
             * fails, so the %d logged below may be indeterminate -
             * confirm shr_idxres_list_alloc's contract on failure. */
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint alloc (EP=%d) - %s.\n"),
                       ep_id, bcm_errmsg(rv)));
            return (rv);
        }
    }
    h_data_p = &oc->oam_hash_data[ep_id];
    /* NOTE(review): returning here leaves ep_id reserved in mep_pool -
     * verify whether this duplicate-recovery case should go through the
     * cleanup path instead. */
    if (1 == h_data_p->in_use) {
        return(BCM_E_INTERNAL);
    }
    /*
     * Clear the hash data element contents before
     * storing values.
     */
    _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
    /* All hardware resource indices start out "unassigned". */
    h_data_p->tx_ctr = _BCM_OAM_INVALID_INDEX;
    h_data_p->rx_ctr = _BCM_OAM_INVALID_INDEX;
    h_data_p->pri_map_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->profile_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->dglp1_profile_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->dglp2_profile_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->egr_pri_map_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->outer_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->subport_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->inner_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->trunk_id = BCM_TRUNK_INVALID;
    h_data_p->gport = 0;
    /* Get RMEP table index from LMEP view entry. */
    h_data_p->remote_index
        = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, l3_entry, RMEP__RMEP_PTRf);
    /* Get RMEP table entry contents. */
    rv = READ_RMEPm(unit, MEM_BLOCK_ANY, h_data_p->remote_index, &rmep_entry);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: RMEP (index=%d) read failed - %s.\n"),
                   h_data_p->remote_index, bcm_errmsg(rv)));
        goto cleanup;
    }
    /* Has to be a valid RMEP, return error otherwise. */
    if (!soc_RMEPm_field32_get(unit, &rmep_entry, VALIDf)) {
        rv = BCM_E_INTERNAL;
        goto cleanup;
    }
    /* Populate the software view of this remote endpoint from hardware. */
    h_data_p->ep_id = ep_id;
    h_data_p->is_remote = 1;
    h_data_p->flags |= BCM_OAM_ENDPOINT_REMOTE;
    h_data_p->group_index
        = soc_RMEPm_field32_get(unit, &rmep_entry, MAID_INDEXf);
    /* Convert the hardware CCM-period encoding back to milliseconds. */
    h_data_p->period
        = _bcm_kt2_oam_ccm_hw_encode_to_msecs
            ((int) soc_RMEPm_field32_get(unit, &rmep_entry,
                                         RMEP_RECEIVED_CCMf));
    h_data_p->name
        = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, l3_entry, RMEP__MEPIDf);
    /* Remote endpoints have no local Tx/Rx or counter resources. */
    h_data_p->local_tx_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->local_rx_index = _BCM_OAM_INVALID_INDEX;
    h_data_p->rx_ctr = _BCM_OAM_INVALID_INDEX;
    h_data_p->tx_ctr = _BCM_OAM_INVALID_INDEX;
    /* Re-reserve the hardware RMEP table index in the software pool. */
    rv = shr_idxres_list_reserve(oc->rmep_pool,
                                 h_data_p->remote_index,
                                 h_data_p->remote_index);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: (RMEP=%d) Index reserve failed - %s.\n"),
                   h_data_p->remote_index, bcm_errmsg(rv)));
        rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
        goto cleanup;
    }
    h_data_p->in_use = 1;
    rv = _bcm_kt2_oam_group_ep_list_add(unit, h_data_p->group_index, ep_id);
    if (BCM_FAILURE(rv)) {
        goto cleanup;
    }
    rv = _bcm_kt2_oam_endpoint_alloc(&ep_info);
    if (BCM_FAILURE(rv)) {
        _bcm_kt2_oam_group_ep_list_remove(unit, h_data_p->group_index, ep_id);
        goto cleanup;
    }
    bcm_oam_endpoint_info_t_init(ep_info);
    /* Set up endpoint information for key construction. */
    ep_info->group = h_data_p->group_index;
    ep_info->name = h_data_p->name;
    ep_info->gport = h_data_p->gport;
    ep_info->id= h_data_p->ep_id;
    /* Calculate hash key for hash table insert operation. */
    _bcm_kt2_oam_ep_hash_key_construct(unit, oc, ep_info, &hash_key);
    /* For scache versions >= 1.5, Ethernet-type endpoints are
     * presumably re-inserted elsewhere from saved flags, so only
     * non-Ethernet (or pre-1.5) endpoints are inserted here -
     * TODO(review): confirm against the sync-side logic. */
    if ((recovered_ver < BCM_WB_VERSION_1_5)
        || (!ETH_TYPE(h_data_p->type))) {
        rv = shr_htb_insert(oc->ma_mep_htbl, hash_key, h_data_p);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Hash table insert failed "
                                  "EP=%d %s.\n"), h_data_p->ep_id, bcm_errmsg(rv)));
            _bcm_kt2_oam_group_ep_list_remove(unit, h_data_p->group_index, ep_id);
            goto cleanup;
        } else {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Info: Hash Tbl (EP=%d) inserted"
                                  " - %s.\n"), ep_id, bcm_errmsg(rv)));
        }
    }
    /* Add the H/w index to logical index mapping for RMEP */
    oc->remote_endpoints[h_data_p->remote_index] = ep_info->id;
    sal_free(ep_info);
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_rmep_recover - done.\n")));
    return (rv);
cleanup:
    /* Best-effort teardown of everything acquired above. */
    if (NULL != ep_info) {
        sal_free(ep_info);
    }
    /* Release the endpoint ID if in use. */
    if (BCM_E_EXISTS
        == shr_idxres_list_elem_state(oc->mep_pool, ep_id)) {
        shr_idxres_list_free(oc->mep_pool, ep_id);
    }
    /* Release the remote index if in use.  in_use is only set after the
     * rmep_pool reservation succeeded, so this cannot double-free. */
    if ((1 == h_data_p->in_use)
        && (BCM_E_EXISTS
            == shr_idxres_list_elem_state(oc->rmep_pool,
                                          h_data_p->remote_index))) {
        shr_idxres_list_free(oc->rmep_pool, h_data_p->remote_index);
    }
    _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_rmep_recover - error_done.\n")));
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_port_lmep_rx_config_recover
* Purpose:
* Recover OAM local endpoint Rx configuration for port based DownMEPs.
* Parameters:
* unit - (IN) BCM device number
* index - (IN) Port entry hardware index.
* port_entry - (IN) port table entry pointer.
* Returns:
* BCM_E_XXX
*/
/*
 * Rebuild software endpoint state for every port-based DownMEP level
 * enabled in this PORT_TAB entry.  Endpoint IDs come from scache for
 * warm-boot images >= 1.3, otherwise they are freshly allocated.
 */
STATIC int
_bcm_kt2_oam_port_lmep_rx_config_recover(int unit,
                                         int index,
                                         port_tab_entry_t *port_entry,
                                         uint8 **scache,
                                         uint16 recovered_ver)
{
    _bcm_oam_hash_data_t *h_data_p = NULL; /* Endpoint hash data pointer. */
    ma_index_entry_t ma_idx_entry;      /* MA_INDEX table entry. */
    _bcm_oam_control_t *oc;             /* Pointer to control structure. */
    uint8 mdl_bitmap;                   /* Endpoint domain level bitmap. */
    uint8 mdl;                          /* Maintenance domain level. */
    _bcm_oam_hash_key_t hash_key;       /* Hash key buffer for lookup. */
    int rv, tmp_rv;                     /* Operation return status. */
    _bcm_gport_dest_t gport_dest;       /* Gport specification. */
    bcm_gport_t gport;                  /* Gport value. */
    int ep_id = -1;                     /* Endpoint ID.  Starts invalid so
                                         * the cleanup path never frees a
                                         * stray mep_pool element when we
                                         * fail before an ID is assigned. */
    bcm_module_t my_modid;
    int stm_index = 0;
    source_trunk_map_table_entry_t stm_entry;
    bcm_oam_endpoint_info_t *ep_info = NULL; /* Endpoint information. */
    int ma_offset = 0;                  /* Next slot in this port's MA block. */
    int ma_idx_entry_count = 0;
    shr_idxres_list_handle_t pool = NULL;
    int ma_idx;                         /* MA_TABLE index */
    int ma_base_idx = 0;
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_port_lmep_rx_config_recover .\n")));
    /*
     * Get OAM control structure.
     * Note: Lock taken by calling routine.
     */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* recover port based DownMEPs - one endpoint per level bit set. */
    mdl_bitmap = soc_PORT_TABm_field32_get(unit, port_entry, MDL_BITMAPf);
    for (mdl = 0; mdl < _BCM_OAM_EP_LEVEL_COUNT; mdl++) {
        if (mdl_bitmap & (1 << mdl)) {
            if (recovered_ver >= BCM_WB_VERSION_1_3) {
                /* Locate this port's MA_INDEX block via the source trunk
                 * map, then pull the saved endpoint ID out of scache.
                 * Nothing is reserved yet, so plain returns are safe. */
                BCM_IF_ERROR_RETURN(bcm_esw_stk_my_modid_get(unit, &my_modid));
                BCM_IF_ERROR_RETURN(_bcm_esw_src_mod_port_table_index_get(unit,
                    my_modid, index, &stm_index));
                SOC_IF_ERROR_RETURN(READ_SOURCE_TRUNK_MAP_TABLEm(unit,
                    MEM_BLOCK_ANY, stm_index, &stm_entry));
                ma_base_idx =
                    soc_SOURCE_TRUNK_MAP_TABLEm_field32_get(unit, &stm_entry,
                                                            MA_BASE_PTRf);
                ma_idx = ma_base_idx + ma_offset;
                sal_memcpy(&ep_id, *(scache) + (_BCM_KT2_OAM_RX_DOWNMEP_SCACHE_LOCATION(ma_idx)),
                           sizeof(bcm_oam_endpoint_t));
                rv = shr_idxres_list_reserve(oc->mep_pool, ep_id, ep_id);
                if (BCM_FAILURE(rv)) {
                    rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: (EP_ID=%d) Index reserve "
                                          " failed - %s.\n"), ep_id, bcm_errmsg(rv)));
                    return (rv);
                }
            } else {
                /* Pre-1.3 image: allocate the next available endpoint index. */
                rv = shr_idxres_list_alloc(oc->mep_pool,
                                           (shr_idxres_element_t *)&ep_id);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Endpoint alloc (EP=%d) - %s.\n"),
                               ep_id, bcm_errmsg(rv)));
                    goto cleanup;
                }
            }
            h_data_p = &oc->oam_hash_data[ep_id];
            if (1 == h_data_p->in_use) {
                rv = BCM_E_INTERNAL;
                goto cleanup;
            }
            /*
             * Clear the hash data element contents before
             * storing values.
             */
            _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
            _BCM_OAM_HASH_DATA_HW_IDX_INIT(h_data_p);
            /* All hardware resource indices start out "unassigned". */
            h_data_p->tx_ctr = _BCM_OAM_INVALID_INDEX;
            h_data_p->rx_ctr = _BCM_OAM_INVALID_INDEX;
            h_data_p->pri_map_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->dglp1_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->dglp2_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->egr_pri_map_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->outer_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->subport_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->inner_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->ep_id = ep_id;
            /* Resolve this port's MA_INDEX block base.  Failures route
             * through 'cleanup' so the endpoint ID reserved above is
             * not leaked (the old *_IF_ERROR_RETURN macros leaked it). */
            rv = bcm_esw_stk_my_modid_get(unit, &my_modid);
            if (BCM_FAILURE(rv)) {
                goto cleanup;
            }
            rv = _bcm_esw_src_mod_port_table_index_get(unit,
                     my_modid, index, &stm_index);
            if (BCM_FAILURE(rv)) {
                goto cleanup;
            }
            rv = READ_SOURCE_TRUNK_MAP_TABLEm(unit,
                     MEM_BLOCK_ANY, stm_index, &stm_entry);
            if (BCM_FAILURE(rv)) {
                goto cleanup;
            }
            h_data_p->ma_base_index =
                soc_SOURCE_TRUNK_MAP_TABLEm_field32_get(unit, &stm_entry,
                                                        MA_BASE_PTRf);
            h_data_p->local_rx_index = h_data_p->ma_base_index + ma_offset;
            ma_offset++;
            rv = READ_MA_INDEXm(unit, MEM_BLOCK_ANY,
                                h_data_p->local_rx_index, &ma_idx_entry);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: MA_INDEX(index:%d) "
                                      "read failed -" " %s.\n"),
                           h_data_p->local_rx_index, bcm_errmsg(rv)));
                goto cleanup;
            }
            h_data_p->in_use = 1;
            h_data_p->is_remote = 0;
            h_data_p->local_rx_enabled = 1;
            h_data_p->flags |= BCM_OAM_ENDPOINT_CCM_RX;
            h_data_p->group_index
                = soc_MA_INDEXm_field32_get(unit, &ma_idx_entry, MA_PTRf);
            h_data_p->profile_index
                = soc_MA_INDEXm_field32_get
                    (unit, &ma_idx_entry, OAM_OPCODE_CONTROL_PROFILE_PTRf);
            /* Re-take a reference on the opcode-control profile entry so
             * warm-boot refcounts match the recovered endpoints. */
            rv = soc_profile_mem_reference(unit, &oc->oam_opcode_control_profile,
                                           h_data_p->profile_index, 1);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Profile entry recover failed "
                                      "(EP=%d) - %s.\n"), ep_id, bcm_errmsg(rv)));
                goto cleanup;
            }
            /* Port-based MEPs carry no MEPID; 0xffff marks "no name". */
            h_data_p->name = 0xffff;
            h_data_p->level = mdl;
            h_data_p->rx_ctr = _BCM_OAM_INVALID_INDEX;
            h_data_p->tx_ctr = _BCM_OAM_INVALID_INDEX;
            h_data_p->oam_domain = 0;
            /* Generic logical port type, construct gport from GLP. */
            _bcm_gport_dest_t_init(&gport_dest);
            gport_dest.gport_type = _SHR_GPORT_TYPE_MODPORT;
            gport_dest.modid = my_modid;
            gport_dest.port = index;
            h_data_p->sglp = index;
            rv = _bcm_esw_gport_construct(unit, &gport_dest, &gport);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Gport construct failed -"
                                      " %s.\n"), bcm_errmsg(rv)));
                goto cleanup;
            }
            h_data_p->gport = gport;
            rv = _bcm_kt2_oam_group_ep_list_add(unit, h_data_p->group_index, ep_id);
            if (BCM_FAILURE(rv)) {
                goto cleanup;
            }
            rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_add(unit, h_data_p);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM(unit %d) Error: Add mapping to ma_idx_to_ep_id list (EP=%d) -"
                                      " %s.\n"), unit, ep_id, bcm_errmsg(rv)));
                _bcm_kt2_oam_group_ep_list_remove(unit,
                                                  h_data_p->group_index,
                                                  ep_id);
                goto cleanup;
            }
            rv = _bcm_kt2_oam_endpoint_alloc(&ep_info);
            if (BCM_FAILURE(rv)) {
                _bcm_kt2_oam_group_ep_list_remove(unit,
                                                  h_data_p->group_index,
                                                  ep_id);
                goto cleanup;
            }
            bcm_oam_endpoint_info_t_init(ep_info);
            /* Set up endpoint information for key construction. */
            ep_info->group = h_data_p->group_index;
            ep_info->name = h_data_p->name;
            ep_info->gport = h_data_p->gport;
            ep_info->level = h_data_p->level;
            ep_info->vlan = 0;
            ep_info->inner_vlan = 0;
            if ((recovered_ver < BCM_WB_VERSION_1_5)
                || (!ETH_TYPE(h_data_p->type))) {
                /*
                 * Calculate hash key for hash table insert
                 * operation.
                 */
                _bcm_kt2_oam_ep_hash_key_construct(unit, oc, ep_info, &hash_key);
                rv = shr_htb_insert(oc->ma_mep_htbl, hash_key, h_data_p);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Hash table insert"
                                          " (EP=%d) failed - %s.\n"),
                               h_data_p->ep_id, bcm_errmsg(rv)));
                    _bcm_kt2_oam_group_ep_list_remove(unit,
                                                      h_data_p->group_index,
                                                      ep_id);
                    goto cleanup;
                } else {
                    LOG_DEBUG(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Info: Hash Tbl (EP=%d)"
                                          " inserted - %s.\n"), ep_id,
                               bcm_errmsg(rv)));
                }
            }
            sal_free(ep_info);
            ep_info = NULL;
        }
    } /* end of for loop */
    if (mdl_bitmap) {
        /* Reserve the whole MA_INDEX block consumed by this port's MEPs. */
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ma_idx_num_entries_and_pool_get(unit, h_data_p,
                                                                         &ma_idx_entry_count, &pool));
        rv = shr_idxres_list_reserve(pool,
                                     h_data_p->ma_base_index,
                                     h_data_p->ma_base_index + ma_idx_entry_count - 1);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: (LMEP=%d) MA Index reserve "
                                  "failed" " - %s.\n"), h_data_p->ma_base_index,
                       bcm_errmsg(rv)));
            rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
            goto cleanup;
        }
    }
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_rx_config_recover"
                          " - done.\n")));
    return (BCM_E_NONE);
cleanup:
    /* Best-effort teardown; nothing here may clobber 'rv'.
     * h_data_p is NULL when we failed before the first endpoint was
     * recovered - guard every dereference. */
    if (NULL != h_data_p) {
        /* Lets not check return value of delete during cleanup */
        tmp_rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_delete(unit, h_data_p);
        if (BCM_FAILURE(tmp_rv)) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM(unit %d) Error: Remove from ma_idx to ep id map (EP=%d) "
                                  "during cleanup - %s.\n"), unit, h_data_p->ep_id,
                       bcm_errmsg(tmp_rv)));
        }
    }
    if (NULL != ep_info) {
        sal_free(ep_info);
    }
    /* ep_id is -1 unless an ID was actually reserved/allocated. */
    if (BCM_E_EXISTS
        == shr_idxres_list_elem_state(oc->mep_pool, ep_id)) {
        shr_idxres_list_free(oc->mep_pool, ep_id);
    }
    /* 'pool' is only resolved in the post-loop reservation step. */
    if ((NULL != h_data_p) && (NULL != pool)
        && (BCM_E_EXISTS
            == shr_idxres_list_elem_state(pool,
                                          h_data_p->ma_base_index))) {
        shr_idxres_list_free(pool, h_data_p->ma_base_index);
    }
    if (NULL != h_data_p) {
        _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
    }
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_rx_config_recover"
                          " - error_done.\n")));
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_lmep_upmep_rx_config_recover
* Purpose:
* Recover OAM local endpoint Rx configuration for UpMEPS.
* Parameters:
* unit - (IN) BCM device number
 * index - (IN) EGR_MP_GROUP table hardware index.
* egr_mp_grp_entry - (IN) Egress MP group table entry pointer.
* Returns:
* BCM_E_XXX
*/
/*
 * Rebuild software endpoint state for every UpMEP level enabled in this
 * EGR_MP_GROUP entry.  Endpoint IDs come from scache for warm-boot
 * images >= 1.3, otherwise they are freshly allocated.
 */
STATIC int
_bcm_kt2_oam_lmep_upmep_rx_config_recover(int unit,
                                          int index,
                                          egr_mp_group_entry_t *egr_mp_grp_entry,
                                          uint8 **scache,
                                          uint16 recovered_ver)
{
    _bcm_oam_hash_data_t *h_data_p = NULL; /* Endpoint hash data pointer. */
    ma_index_entry_t ma_idx_entry;      /* MA_INDEX table entry. */
    _bcm_oam_control_t *oc;             /* Pointer to control structure. */
    uint8 mdl_bitmap;                   /* Endpoint domain level bitmap. */
    uint8 mdl;                          /* Maintenance domain level. */
    int rv, tmp_rv;                     /* Operation return status. */
    _bcm_gport_dest_t gport_dest;       /* Gport specification. */
    bcm_gport_t gport;                  /* Gport value. */
    uint16 glp;                         /* Generic logical port value. */
    int ep_id = -1;                     /* Endpoint ID.  Starts invalid so
                                         * the cleanup path never frees a
                                         * stray mep_pool element when we
                                         * fail before an ID is assigned. */
    _bcm_oam_hash_key_t hash_key;       /* Hash key buffer for lookup. */
    bcm_oam_endpoint_info_t *ep_info = NULL; /* Endpoint information. */
    int lmep_type = 0;
    int timestamp_type = 0;
    int ma_offset = 0;                  /* Next slot in this group's block. */
    int ma_base_idx = 0;
    int ma_idx = 0;
    l3_entry_1_entry_t l3_entry;
    int l3_index = -1;
    uint8 mdl_mip = 0;
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_upmep_rx_config_recover .\n")));
    /*
     * Get OAM control structure.
     * Note: Lock taken by calling routine.
     */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* recover UpMEPs - one endpoint per active level bit. */
    mdl_bitmap
        = soc_EGR_MP_GROUPm_field32_get(unit, egr_mp_grp_entry, MDL_BITMAP_ACTIVEf);
    for (mdl = 0; mdl < _BCM_OAM_EP_LEVEL_COUNT; mdl++) {
        if (mdl_bitmap & (1 << mdl)) {
            if (recovered_ver >= BCM_WB_VERSION_1_3) {
                /* Recover the saved endpoint ID from its scache slot. */
                ma_base_idx = soc_EGR_MP_GROUPm_field32_get(unit, egr_mp_grp_entry,
                                                            MA_BASE_PTRf);
                ma_idx = ma_base_idx + ma_offset;
                sal_memcpy(&ep_id, *(scache) + (_BCM_KT2_OAM_RX_UPMEP_SCACHE_LOCATION(ma_idx)),
                           sizeof(bcm_oam_endpoint_t));
                rv = shr_idxres_list_reserve(oc->mep_pool, ep_id, ep_id);
                if (BCM_FAILURE(rv)) {
                    rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: (EP_ID=%d) Index reserve "
                                          " failed - %s.\n"), ep_id, bcm_errmsg(rv)));
                    return (rv);
                }
            } else {
                /* Pre-1.3 image: allocate the next available endpoint index. */
                rv = shr_idxres_list_alloc(oc->mep_pool,
                                           (shr_idxres_element_t *)&ep_id);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Endpoint alloc (EP=%d) - %s.\n"),
                               ep_id, bcm_errmsg(rv)));
                    goto cleanup;
                }
            }
            h_data_p = &oc->oam_hash_data[ep_id];
            if (1 == h_data_p->in_use) {
                rv = BCM_E_INTERNAL;
                goto cleanup;
            }
            /*
             * Clear the hash data element contents before
             * storing values.
             */
            _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
            _BCM_OAM_HASH_DATA_HW_IDX_INIT(h_data_p);
            /* All hardware resource indices start out "unassigned". */
            h_data_p->tx_ctr = _BCM_OAM_INVALID_INDEX;
            h_data_p->rx_ctr = _BCM_OAM_INVALID_INDEX;
            h_data_p->pri_map_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->dglp1_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->dglp2_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->egr_pri_map_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->outer_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->subport_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->inner_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->trunk_id = BCM_TRUNK_INVALID;
            h_data_p->ep_id = ep_id;
            h_data_p->ma_base_index =
                (soc_EGR_MP_GROUPm_field32_get(unit, egr_mp_grp_entry,
                                               MA_BASE_PTRf));
            h_data_p->local_rx_index = h_data_p->ma_base_index + ma_offset;
            ma_offset++;
            rv = READ_EGR_MA_INDEXm(unit, MEM_BLOCK_ANY,
                                    h_data_p->local_rx_index, &ma_idx_entry);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: EGR_MA_INDEX(index:%d) "
                                      "read failed -" " %s.\n"),
                           h_data_p->local_rx_index, bcm_errmsg(rv)));
                goto cleanup;
            }
            h_data_p->flags |= BCM_OAM_ENDPOINT_UP_FACING;
            h_data_p->in_use = 1;
            h_data_p->is_remote = 0;
            h_data_p->local_rx_enabled = 1;
            h_data_p->flags |= BCM_OAM_ENDPOINT_CCM_RX;
            h_data_p->group_index
                = soc_EGR_MA_INDEXm_field32_get(unit, &ma_idx_entry, MA_PTRf);
            h_data_p->profile_index
                = soc_EGR_MA_INDEXm_field32_get
                    (unit, &ma_idx_entry, OAM_OPCODE_CONTROL_PROFILE_PTRf);
            /* Re-take a reference on the egress opcode-control profile so
             * warm-boot refcounts match the recovered endpoints. */
            rv = soc_profile_mem_reference(unit, &oc->egr_oam_opcode_control_profile,
                                           h_data_p->profile_index, 1);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Profile entry recover failed "
                                      "(EP=%d) - %s.\n"), ep_id, bcm_errmsg(rv)));
                goto cleanup;
            }
            h_data_p->dglp1_profile_index =
                soc_MA_INDEXm_field32_get
                    (unit, &ma_idx_entry, DGLP1_PROFILE_PTRf);
            rv = soc_profile_mem_reference(unit, &oc->egr_oam_dglp_profile,
                                           h_data_p->dglp1_profile_index, 1);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: DGLP profile ref count "
                                      "increment failed (EP=%d) - %s.\n"),
                           h_data_p->ep_id, bcm_errmsg(rv)));
                goto cleanup;
            }
            /* Fix: recover DGLP2 from its own field; the previous code
             * re-read DGLP1_PROFILE_PTRf here (copy-paste error), so the
             * DGLP2 profile index and its refcount were wrong after
             * warm boot. */
            h_data_p->dglp2_profile_index =
                soc_MA_INDEXm_field32_get
                    (unit, &ma_idx_entry, DGLP2_PROFILE_PTRf);
            rv = soc_profile_mem_reference(unit, &oc->egr_oam_dglp_profile,
                                           h_data_p->dglp2_profile_index, 1);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: DGLP profile ref count "
                                      "increment failed (EP=%d) - %s.\n"),
                           h_data_p->ep_id, bcm_errmsg(rv)));
                goto cleanup;
            }
#if defined(INCLUDE_BHH)
            h_data_p->cpu_qid
                = soc_EGR_MA_INDEXm_field32_get(unit, &ma_idx_entry, INT_PRIf);
#endif
            /* UpMEP Rx entries carry no MEPID; 0xffff marks "no name". */
            h_data_p->name = 0xffff;
            h_data_p->level = mdl;
            h_data_p->rx_ctr = _BCM_OAM_INVALID_INDEX;
            h_data_p->tx_ctr = _BCM_OAM_INVALID_INDEX;
            /* recover counters */
            rv = _bcm_kt2_oam_rx_counter_recover(unit, EGR_MP_GROUPm,
                                                 (uint32 *)egr_mp_grp_entry, h_data_p);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: LM RX counter recover failed "
                                      "(EP=%d) - %s.\n"), ep_id, bcm_errmsg(rv)));
                goto cleanup;
            }
            /* rx and tx counters are located at same index, but on diff mem */
            if (h_data_p->rx_ctr != _BCM_OAM_INVALID_INDEX) {
                h_data_p->tx_ctr = h_data_p->rx_ctr;
            }
            /* time stamp */
            timestamp_type = soc_EGR_MP_GROUPm_field32_get(unit,
                                 egr_mp_grp_entry, TIMESTAMP_TYPEf);
            if (timestamp_type) {
                h_data_p->ts_format = timestamp_type;
            }
            /* KEY_TYPE encodes the OAM domain (C/S/S+C VLAN or VP). */
            h_data_p->oam_domain = soc_EGR_MP_GROUPm_field32_get(unit,
                                       egr_mp_grp_entry, KEY_TYPEf);
            switch (h_data_p->oam_domain) {
            case _BCM_OAM_DOMAIN_CVLAN:
                h_data_p->flags |= BCM_OAM_ENDPOINT_MATCH_INNER_VLAN;
                h_data_p->inner_vlan = soc_EGR_MP_GROUPm_field32_get(unit,
                                           egr_mp_grp_entry, CVIDf);
                break;
            case _BCM_OAM_DOMAIN_SVLAN:
                h_data_p->vlan = soc_EGR_MP_GROUPm_field32_get(unit,
                                     egr_mp_grp_entry, SVIDf);
                break;
            case _BCM_OAM_DOMAIN_S_PLUS_CVLAN:
                h_data_p->vlan = soc_EGR_MP_GROUPm_field32_get(unit,
                                     egr_mp_grp_entry, SVIDf);
                h_data_p->inner_vlan = soc_EGR_MP_GROUPm_field32_get(unit,
                                           egr_mp_grp_entry, CVIDf);
                h_data_p->flags |= BCM_OAM_ENDPOINT_MATCH_OUTER_AND_INNER_VLAN;
                break;
            case _BCM_OAM_DOMAIN_VP:
                h_data_p->vp = soc_EGR_MP_GROUPm_field32_get(unit,
                                   egr_mp_grp_entry, DVPf);
                h_data_p->int_flags |= _BCM_OAM_ENDPOINT_IS_VP_BASED;
                break;
            default:
                break;
            }
            glp = soc_EGR_MP_GROUPm_field32_get(unit, egr_mp_grp_entry, DGLPf);
            lmep_type = soc_EGR_MP_GROUPm_field32_get(unit, egr_mp_grp_entry,
                                                      KEY_TYPEf);
            if (lmep_type == _BCM_OAM_DOMAIN_VP) {
#if defined(INCLUDE_L3)
                /* NOTE(review): the VP-type checks below use 'glp' (DGLPf)
                 * while the gport is built from h_data_p->vp (DVPf) -
                 * confirm the two fields alias for VP keys. */
                if (_bcm_vp_used_get(unit, glp, _bcmVpTypeMim)) {
                    BCM_GPORT_MIM_PORT_ID_SET(h_data_p->gport, h_data_p->vp);
                } else if (_bcm_vp_used_get(unit, glp, _bcmVpTypeMpls)) {
                    BCM_GPORT_MPLS_PORT_ID_SET(h_data_p->gport, h_data_p->vp);
                } else {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Invalid Virtual Port (SVP=%d)"
                                          " - %s.\n"), h_data_p->vp,
                               bcm_errmsg(BCM_E_INTERNAL)));
                    /* Route through cleanup so the endpoint ID reserved
                     * above is not leaked (old code returned directly). */
                    rv = BCM_E_INTERNAL;
                    goto cleanup;
                }
#endif
            } else {
                /*
                 * Generic logical port type, construct gport from GLP.
                 */
                h_data_p->dglp = glp;
                _bcm_gport_dest_t_init(&gport_dest);
                if (_BCM_OAM_GLP_TRUNK_BIT_GET(glp)) {
                    gport_dest.tgid = _BCM_OAM_GLP_TRUNK_ID_GET(glp);
                    gport_dest.gport_type = _SHR_GPORT_TYPE_TRUNK;
                    h_data_p->trunk_id = _BCM_OAM_GLP_TRUNK_ID_GET(glp);
                } else {
                    gport_dest.gport_type = _SHR_GPORT_TYPE_MODPORT;
                    gport_dest.modid = _BCM_OAM_GLP_MODULE_ID_GET(glp);
                    gport_dest.port = _BCM_OAM_GLP_PORT_GET(glp);
                }
                rv = _bcm_esw_gport_construct(unit, &gport_dest, &gport);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Gport construct failed -"
                                          " %s.\n"), bcm_errmsg(rv)));
                    goto cleanup;
                }
                h_data_p->gport = gport;
            }
            h_data_p->dglp = glp;
            h_data_p->sglp = glp;
            /* Look up the matching LMEP view to learn whether this level
             * is a MIP (passive MDL bitmap). */
            sal_memset(&l3_entry, 0, sizeof(l3_entry_1_entry_t));
            L3_LOCK(unit);
            if (BCM_SUCCESS
                    (_bcm_kt2_oam_find_lmep(unit, h_data_p,
                                            &l3_index, &l3_entry))) {
                mdl_mip = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, &l3_entry,
                              LMEP__MDL_BITMAP_PASSIVEf);
            }
            L3_UNLOCK(unit);
            /* NOTE(review): the negated test below marks every level whose
             * bit is ABSENT from the passive bitmap as a MIP, including the
             * case where the LMEP lookup failed (mdl_mip == 0) - confirm
             * the intended polarity against the endpoint-create path. */
            if (!(mdl_mip & (1 << h_data_p->level))) {
                /* Set the MIP flag */
                h_data_p->flags |= BCM_OAM_ENDPOINT_INTERMEDIATE;
            }
            rv = _bcm_kt2_oam_group_ep_list_add(unit, h_data_p->group_index, ep_id);
            if (BCM_FAILURE(rv)) {
                goto cleanup;
            }
            rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_add(unit, h_data_p);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM(unit %d) Error: Add mapping from ma_idx_to_ep_id list (EP=%d) -"
                                      " %s.\n"), unit, ep_id, bcm_errmsg(rv)));
                _bcm_kt2_oam_group_ep_list_remove(unit,
                                                  h_data_p->group_index,
                                                  ep_id);
                goto cleanup;
            }
            rv = _bcm_kt2_oam_endpoint_alloc(&ep_info);
            if (BCM_FAILURE(rv)) {
                _bcm_kt2_oam_group_ep_list_remove(unit,
                                                  h_data_p->group_index,
                                                  ep_id);
                goto cleanup;
            }
            bcm_oam_endpoint_info_t_init(ep_info);
            /* Set up endpoint information for key construction. */
            ep_info->group = h_data_p->group_index;
            ep_info->name = h_data_p->name;
            ep_info->gport = h_data_p->gport;
            ep_info->level = h_data_p->level;
            ep_info->vlan = h_data_p->vlan;
            ep_info->inner_vlan = h_data_p->inner_vlan;
            ep_info->flags = h_data_p->flags;
            if ((recovered_ver < BCM_WB_VERSION_1_5)
                || (!ETH_TYPE(h_data_p->type))) {
                /*
                 * Calculate hash key for hash table insert
                 * operation.
                 */
                _bcm_kt2_oam_ep_hash_key_construct(unit, oc, ep_info, &hash_key);
                rv = shr_htb_insert(oc->ma_mep_htbl, hash_key, h_data_p);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Hash table insert"
                                          " (EP=%d) failed - %s.\n"),
                               h_data_p->ep_id, bcm_errmsg(rv)));
                    _bcm_kt2_oam_group_ep_list_remove(unit,
                                                      h_data_p->group_index,
                                                      ep_id);
                    goto cleanup;
                } else {
                    LOG_DEBUG(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Info: Hash Tbl (EP=%d)"
                                          " inserted - %s.\n"), ep_id,
                               bcm_errmsg(rv)));
                }
            }
            sal_free(ep_info);
            ep_info = NULL;
        }
    } /* end of for loop */
    if (mdl_bitmap) {
        /* Reserve the full 8-level EGR_MA_INDEX block used by this group. */
        rv = shr_idxres_list_reserve(oc->egr_ma_idx_pool,
                                     h_data_p->ma_base_index,
                                     h_data_p->ma_base_index + 7);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: (LMEP=%d) EGR MA Index reserve "
                                  "failed" " - %s.\n"), h_data_p->ma_base_index,
                       bcm_errmsg(rv)));
            rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
            goto cleanup;
        }
    }
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_rx_config_recover"
                          " - done.\n")));
    return (BCM_E_NONE);
cleanup:
    /* Best-effort teardown; nothing here may clobber 'rv'.
     * h_data_p is NULL when we failed before the first endpoint was
     * recovered - guard every dereference. */
    if (NULL != h_data_p) {
        /* Lets not check return value of delete during cleanup */
        tmp_rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_delete(unit, h_data_p);
        if (BCM_FAILURE(tmp_rv)) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM(unit %d) Error: Remove from ma_idx to ep id map (EP=%d) "
                                  "during cleanup - %s.\n"), unit, h_data_p->ep_id,
                       bcm_errmsg(tmp_rv)));
        }
    }
    if (NULL != ep_info) {
        sal_free(ep_info);
    }
    /* ep_id is -1 unless an ID was actually reserved/allocated. */
    if (BCM_E_EXISTS
        == shr_idxres_list_elem_state(oc->mep_pool, ep_id)) {
        shr_idxres_list_free(oc->mep_pool, ep_id);
    }
    /* return rx & tx counters allocated if any.  Use tmp_rv so the
     * original failure code in 'rv' is returned to the caller (the old
     * code overwrote 'rv' here and could return BCM_E_NONE from an
     * error path). */
    if (NULL != h_data_p) {
        tmp_rv = _bcm_kt2_oam_free_counter(unit, h_data_p);
        if (BCM_FAILURE(tmp_rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: LM counter block "
                                  "free failed (EP=%d) - %s.\n"),
                       ep_id, bcm_errmsg(tmp_rv)));
        }
    }
    if (NULL != h_data_p
        && (BCM_E_EXISTS
            == shr_idxres_list_elem_state(oc->egr_ma_idx_pool,
                                          h_data_p->ma_base_index))) {
        shr_idxres_list_free(oc->egr_ma_idx_pool, h_data_p->ma_base_index);
    }
    if (NULL != h_data_p) {
        _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
    }
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_rx_config_recover"
                          " - error_done.\n")));
    return (rv);
}
/*
 * Function:
 *     _bcm_kt2_oam_lmep_downmep_rx_config_recover
 * Purpose:
 *     Recover OAM local endpoint Rx configuration for DownMEPS.
 * Parameters:
 *     unit          - (IN) BCM device number
 *     index         - (IN) LMEP view L3 table entry hardware index.
 *     l3_entry      - (IN) LMEP view table entry pointer.
 *     scache        - (IN) Pointer to scache buffer pointer holding the
 *                          endpoint IDs synced before warm boot.
 *     recovered_ver - (IN) Warm boot version of the recovered scache image.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_lmep_downmep_rx_config_recover(int unit,
                                     int index,
                                     l3_entry_1_entry_t *l3_entry,
                                     uint8 **scache,
                                     uint16 recovered_ver)
{
    _bcm_oam_hash_data_t *h_data_p = NULL; /* Endpoint hash data pointer. */
    ma_index_entry_t ma_idx_entry; /* MA_INDEX table entry. */
    _bcm_oam_control_t *oc; /* Pointer to control structure. */
    uint8 mdl_bitmap; /* Endpoint domain level bitmap. */
    uint8 mdl; /* Maintenance domain level. */
    int rv, tmp_rv; /* Operation return status. */
    _bcm_gport_dest_t gport_dest; /* Gport specification. */
    bcm_gport_t gport; /* Gport value. */
    uint16 glp; /* Generic logical port value. */
    int ep_id; /* Endpoint ID. */
    _bcm_oam_hash_key_t hash_key; /* Hash key buffer for lookup. */
    bcm_oam_endpoint_info_t *ep_info = NULL; /* Endpoint information. */
    int lmep_type = 0; /* LMEP key subtype (OAM domain). */
    int timestamp_type = 0; /* DM timestamp format from HW. */
    int ma_offset = 0; /* Offset of this MDL in MA_INDEX block. */
    int ma_idx_entry_count = 0; /* MA_INDEX entries used by this MEP. */
    shr_idxres_list_handle_t pool = NULL; /* MA_INDEX allocation pool. */
    int ma_base_idx = 0; /* Base pointer into MA_INDEX table. */
    int ma_idx = 0; /* Absolute MA_INDEX table index. */
    egr_mp_group_entry_t egr_mp_group; /* EGR_MP_GROUP entry buffer. */
    int mp_grp_index = 0; /* EGR_MP_GROUP table index. */
    uint8 mdl_mip = 0; /* Passive MDL bitmap (MIP detection). */
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: "
                          "_bcm_kt2_oam_lmep_downmep_rx_config_recover .\n")));
    /*
     * Get OAM control structure.
     * Note: Lock taken by calling routine.
     */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* recover DownMEPs */
    mdl_bitmap = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit,
                            l3_entry, LMEP__MDL_BITMAP_ACTIVEf);
    ma_base_idx = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, l3_entry, LMEP__MA_BASE_PTRf);
    /* One software endpoint is recovered per maintenance domain level set
     * in the active MDL bitmap of this L3 entry. */
    for (mdl = 0; mdl < _BCM_OAM_EP_LEVEL_COUNT; mdl++) {
        if (mdl_bitmap & (1 << mdl)) {
            if (recovered_ver >= BCM_WB_VERSION_1_3) {
                /* Endpoint IDs were synced to scache - reserve the exact
                 * ID that was in use before warm boot. */
                ma_idx = ma_base_idx + ma_offset;
                sal_memcpy(&ep_id, *(scache) + (_BCM_KT2_OAM_RX_DOWNMEP_SCACHE_LOCATION(ma_idx)),
                           sizeof(bcm_oam_endpoint_t));
                rv = shr_idxres_list_reserve(oc->mep_pool, ep_id, ep_id);
                if (BCM_FAILURE(rv)) {
                    rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: (EP_ID=%d) Index reserve "
                                          " failed - %s.\n"), ep_id, bcm_errmsg(rv)));
                    return (rv);
                }
            } else {
                /* Pre-1.3 image: IDs were not synced - allocate the next
                 * available endpoint index. */
                rv = shr_idxres_list_alloc(oc->mep_pool,
                                           (shr_idxres_element_t *)&ep_id);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Endpoint alloc (EP=%d) - %s.\n"),
                               ep_id, bcm_errmsg(rv)));
                    goto cleanup;
                }
            }
            h_data_p = &oc->oam_hash_data[ep_id];
            if (1 == h_data_p->in_use) {
                /* ID collision means the recovery state is corrupt. */
                return(BCM_E_INTERNAL);
            }
            /*
             * Clear the hash data element contents before
             * storing values.
             */
            _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
            _BCM_OAM_HASH_DATA_HW_IDX_INIT(h_data_p);
            h_data_p->tx_ctr = _BCM_OAM_INVALID_INDEX;
            h_data_p->rx_ctr = _BCM_OAM_INVALID_INDEX;
            h_data_p->pri_map_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->egr_pri_map_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->outer_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->subport_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->inner_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
            h_data_p->trunk_id = BCM_TRUNK_INVALID;
            h_data_p->ep_id = ep_id;
            h_data_p->ma_base_index =
                soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, l3_entry,
                                                       LMEP__MA_BASE_PTRf);
            /* MA_INDEX entries of one MEP are laid out per-level after the
             * base pointer; ma_offset tracks the next level's slot. */
            h_data_p->local_rx_index = h_data_p->ma_base_index + ma_offset;
            ma_offset++;
            rv = READ_MA_INDEXm
                    (unit, MEM_BLOCK_ANY, h_data_p->local_rx_index,
                     &ma_idx_entry);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: MA_INDEX (index:%d) read failed -"
                                      " %s.\n"), h_data_p->local_rx_index,
                           bcm_errmsg(rv)));
                goto cleanup;
            }
            h_data_p->in_use = 1;
            h_data_p->is_remote = 0;
            h_data_p->local_rx_enabled = 1;
            h_data_p->flags |= BCM_OAM_ENDPOINT_CCM_RX;
            h_data_p->group_index
                = soc_MA_INDEXm_field32_get(unit, &ma_idx_entry, MA_PTRf);
            h_data_p->int_pri = soc_MA_INDEXm_field32_get(unit, &ma_idx_entry, INT_PRIf);
            /* Re-reference the opcode control profile so its refcount
             * matches the recovered endpoint. */
            h_data_p->profile_index
                = soc_MA_INDEXm_field32_get
                    (unit, &ma_idx_entry, OAM_OPCODE_CONTROL_PROFILE_PTRf);
            rv = soc_profile_mem_reference(unit, &oc->oam_opcode_control_profile,
                                           h_data_p->profile_index, 1);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Profile entry recover failed "
                                      "(EP=%d) - %s.\n"), ep_id, bcm_errmsg(rv)));
                goto cleanup;
            }
            h_data_p->dglp1_profile_index =
                soc_MA_INDEXm_field32_get
                    (unit, &ma_idx_entry, DGLP1_PROFILE_PTRf);
            rv = soc_profile_mem_reference(unit, &oc->ing_oam_dglp_profile,
                                           h_data_p->dglp1_profile_index, 1);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: DGLP profile ref count "
                                      "increment failed (EP=%d) - %s.\n"),
                           h_data_p->ep_id, bcm_errmsg(rv)));
                goto cleanup;
            }
            /* Fix: DGLP2 pointer must come from DGLP2_PROFILE_PTRf; the
             * earlier code re-read DGLP1_PROFILE_PTRf (copy/paste). */
            h_data_p->dglp2_profile_index =
                soc_MA_INDEXm_field32_get
                    (unit, &ma_idx_entry, DGLP2_PROFILE_PTRf);
            rv = soc_profile_mem_reference(unit, &oc->ing_oam_dglp_profile,
                                           h_data_p->dglp2_profile_index, 1);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: DGLP profile ref count "
                                      "increment failed (EP=%d) - %s.\n"),
                           h_data_p->ep_id, bcm_errmsg(rv)));
                goto cleanup;
            }
            /* The MEPID name is not recoverable from the Rx tables; the
             * Tx recovery pass fills in the real name where one exists. */
            h_data_p->name = 0xffff;
            h_data_p->level = mdl;
            h_data_p->oam_domain = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit,
                                                 l3_entry,
                                                 LMEP__OAM_LMEP_KEY_SUBTYPEf);
            /* Recover the key fields that classified this MEP. */
            switch (h_data_p->oam_domain) {
                case _BCM_OAM_DOMAIN_CVLAN:
                    h_data_p->flags |= BCM_OAM_ENDPOINT_MATCH_INNER_VLAN;
                    h_data_p->inner_vlan =
                        soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit,
                                                      l3_entry, LMEP__CVIDf);
                    break;
                case _BCM_OAM_DOMAIN_SVLAN:
                    h_data_p->vlan = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit,
                                                      l3_entry, LMEP__SVIDf);
                    break;
                case _BCM_OAM_DOMAIN_S_PLUS_CVLAN:
                    h_data_p->vlan = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit,
                                                      l3_entry, LMEP__SVIDf);
                    h_data_p->inner_vlan =
                        soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit,
                                                      l3_entry, LMEP__CVIDf);
                    h_data_p->flags |=
                                 BCM_OAM_ENDPOINT_MATCH_OUTER_AND_INNER_VLAN;
                    break;
                case _BCM_OAM_DOMAIN_VP:
                    h_data_p->vp = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit,
                                                      l3_entry, LMEP__SVPf);
                    h_data_p->int_flags |= _BCM_OAM_ENDPOINT_IS_VP_BASED;
                    break;
                default :
                    break;
            }
            rv = _bcm_kt2_oam_rx_counter_recover(unit,
                     L3_ENTRY_IPV4_UNICASTm, (uint32 *)l3_entry, h_data_p);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: LM counter recover failed "
                                      "(EP=%d) - %s.\n"), ep_id, bcm_errmsg(rv)));
                goto cleanup;
            }
            /* rx and tx counters are located at same index, but on diff mem */
            if (h_data_p->rx_ctr != _BCM_OAM_INVALID_INDEX) {
                h_data_p->tx_ctr = h_data_p->rx_ctr;
            }
            /* time stamp */
            timestamp_type = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit,
                                       l3_entry, LMEP__TIMESTAMP_TYPEf);
            if (timestamp_type) {
                h_data_p->ts_format = timestamp_type;
            }
            glp = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, l3_entry,
                                                         LMEP__SGLPf);
            lmep_type = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, l3_entry,
                                                  LMEP__OAM_LMEP_KEY_SUBTYPEf);
            if (lmep_type == _BCM_OAM_DOMAIN_VP) {
#if defined(INCLUDE_L3)
                /* Virtual-port MEP: rebuild the gport from the VP type. */
                if (_bcm_vp_used_get(unit, glp, _bcmVpTypeMim)) {
                    BCM_GPORT_MIM_PORT_ID_SET(h_data_p->gport, h_data_p->vp);
                } else if (_bcm_vp_used_get(unit, glp, _bcmVpTypeMpls)) {
                    BCM_GPORT_MPLS_PORT_ID_SET(h_data_p->gport, h_data_p->vp);
                } else {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Invalid Virtual Port (SVP=%d)"
                                          " - %s.\n"), h_data_p->vp,
                               bcm_errmsg(BCM_E_INTERNAL)));
                    return (BCM_E_INTERNAL);
                }
#endif
            } else {
                /*
                 * Generic logical port type, construct gport from GLP.
                 */
                _bcm_gport_dest_t_init(&gport_dest);
                if (_BCM_OAM_GLP_TRUNK_BIT_GET(glp)) {
                    gport_dest.tgid = _BCM_OAM_GLP_TRUNK_ID_GET(glp);
                    gport_dest.gport_type = _SHR_GPORT_TYPE_TRUNK;
                    h_data_p->trunk_id = _BCM_OAM_GLP_TRUNK_ID_GET(glp);
                } else {
                    gport_dest.gport_type = _SHR_GPORT_TYPE_MODPORT;
                    gport_dest.modid = _BCM_OAM_GLP_MODULE_ID_GET(glp);
                    gport_dest.port = _BCM_OAM_GLP_PORT_GET(glp);
                }
                rv = _bcm_esw_gport_construct(unit, &gport_dest, &gport);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Gport construct failed -"
                                          " %s.\n"), bcm_errmsg(rv)));
                    goto cleanup;
                }
                h_data_p->gport = gport;
            }
            h_data_p->dglp = glp;
            h_data_p->sglp = glp;
            /* Consult the egress MP group entry (if any) for the passive
             * MDL bitmap used to detect MIP endpoints. */
            sal_memset(&egr_mp_group, 0, sizeof(egr_mp_group_entry_t));
            if (BCM_SUCCESS
                    (_bcm_kt2_oam_find_egr_lmep(unit, h_data_p,
                                                &mp_grp_index,
                                                &egr_mp_group))) {
                mdl_mip = soc_EGR_MP_GROUPm_field32_get(unit, &egr_mp_group,
                                                        MDL_BITMAP_PASSIVEf);
            }
            if (!(mdl_mip & (1 << h_data_p->level))) {
                /* Set the MIP flag */
                h_data_p->flags |= BCM_OAM_ENDPOINT_INTERMEDIATE;
            }
            rv = _bcm_kt2_oam_group_ep_list_add(unit, h_data_p->group_index, ep_id);
            if (BCM_FAILURE(rv)) {
                goto cleanup;
            }
            rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_add(unit, h_data_p);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM(unit %d) Error: Add mapping from ma_idx_to_ep_id list (EP=%d) -"
                                      " %s.\n"), unit, ep_id, bcm_errmsg(rv)));
                _bcm_kt2_oam_group_ep_list_remove(unit,
                                                  h_data_p->group_index,
                                                  ep_id);
                goto cleanup;
            }
            rv = _bcm_kt2_oam_endpoint_alloc(&ep_info);
            if (BCM_FAILURE(rv)) {
                _bcm_kt2_oam_group_ep_list_remove(unit,
                                                  h_data_p->group_index,
                                                  ep_id);
                goto cleanup;
            }
            bcm_oam_endpoint_info_t_init(ep_info);
            /* Set up endpoint information for key construction. */
            ep_info->group = h_data_p->group_index;
            ep_info->name = h_data_p->name;
            ep_info->gport = h_data_p->gport;
            ep_info->level = h_data_p->level;
            ep_info->vlan = h_data_p->vlan;
            ep_info->inner_vlan = h_data_p->inner_vlan;
            /* From version 1.5 on, ethernet endpoints are re-inserted by
             * a later recovery stage; only insert here otherwise. */
            if ((recovered_ver < BCM_WB_VERSION_1_5)
                || (!ETH_TYPE(h_data_p->type))) {
                /*
                 * Calculate hash key for hash table insert
                 * operation.
                 */
                _bcm_kt2_oam_ep_hash_key_construct(unit, oc, ep_info, &hash_key);
                rv = shr_htb_insert(oc->ma_mep_htbl, hash_key, h_data_p);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Hash table insert"
                                          " (EP=%d) failed - %s.\n"),
                               h_data_p->ep_id, bcm_errmsg(rv)));
                    _bcm_kt2_oam_group_ep_list_remove(unit,
                                                      h_data_p->group_index,
                                                      ep_id);
                    goto cleanup;
                } else {
                    LOG_DEBUG(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Info: Hash Tbl (EP=%d)"
                                          " inserted - %s.\n"), ep_id,
                               bcm_errmsg(rv)));
                }
            }
            sal_free(ep_info);
            ep_info = NULL;
        }
    } /* end of for loop */
    /* Reserve the MA_INDEX block used by this L3 entry so subsequent
     * endpoint creates do not collide with the recovered block. */
    if (mdl_bitmap) {
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_ma_idx_num_entries_and_pool_get(unit,
                                    h_data_p, &ma_idx_entry_count, &pool));
        rv = shr_idxres_list_reserve(pool,
                                     h_data_p->ma_base_index,
                                     h_data_p->ma_base_index + ma_idx_entry_count -1);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: (RMEP=%d) Index reserve failed"
                                  " - %s.\n"), h_data_p->remote_index,
                       bcm_errmsg(rv)));
            rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
            goto cleanup;
        }
    }
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_rx_config_recover"
                          " - done.\n")));
    return (BCM_E_NONE);
cleanup:
    /* Best-effort rollback; 'rv' keeps the original failure status. */
    if (NULL != h_data_p) {
        /* Guarded: the first failure can jump here before h_data_p is set. */
        tmp_rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_delete(unit, h_data_p);
        if (BCM_FAILURE(tmp_rv)) {
            /* Report the delete failure (tmp_rv), not the original rv. */
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM(unit %d) Error: Remove from ma_idx to ep id map (EP=%d) "
                                  "during cleanup - %s.\n"), unit, h_data_p->ep_id,
                       bcm_errmsg(tmp_rv)));
        }
    }
    if (NULL != ep_info) {
        sal_free(ep_info);
    }
    if (BCM_E_EXISTS
        == shr_idxres_list_elem_state(oc->mep_pool, ep_id)) {
        shr_idxres_list_free(oc->mep_pool, ep_id);
    }
    /* return rx & tx counters allocated if any */
    if (NULL != h_data_p) {
        rv = _bcm_kt2_oam_free_counter(unit, h_data_p);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: LM counter block "
                                  "free failed (EP=%d) - %s.\n"),
                       ep_id, bcm_errmsg(rv)));
        }
    }
    if (NULL != h_data_p && (BCM_E_EXISTS
        == shr_idxres_list_elem_state(pool,
                                      h_data_p->ma_base_index))) {
        shr_idxres_list_free(pool, h_data_p->ma_base_index);
    }
    if (NULL != h_data_p) {
        _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
    }
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_rx_config_recover"
                          " - error_done.\n")));
    return (rv);
}
/*
 * Function:
 *     _bcm_kt2_oam_lmep_tx_config_recover
 * Purpose:
 *     Recover OAM local endpoint Tx configuration.
 * Parameters:
 *     unit          - (IN) BCM device number
 *     scache        - (IN) Pointer to scache buffer pointer holding the
 *                          endpoint IDs synced before warm boot.
 *     recovered_ver - (IN) Warm boot version of the recovered scache image.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_lmep_tx_config_recover(int unit,
                                    uint8 **scache,
                                    uint16 recovered_ver)
{
    _bcm_gport_dest_t gport_dest; /* Gport specification. */
    bcm_gport_t gport; /* Gport value. */
    int index; /* Hardware index. */
    lmep_entry_t lmep_entry; /* LMEP table entry buffer. */
    lmep_1_entry_t lmep_1_entry; /* LMEP_1 table entry buffer. */
    maid_reduction_entry_t maid_red_ent; /* MAID_REDUCTION table entry buf. */
    _bcm_oam_hash_key_t hash_key; /* Hash key buffer for lookup. */
    _bcm_oam_group_data_t *g_info_p; /* Group information pointer. */
    _bcm_oam_control_t *oc; /* Pointer to control structure. */
    bcm_module_t modid; /* Module ID. */
    bcm_port_t port_id; /* Port ID. */
    bcm_trunk_t trunk_id; /* Trunk ID. */
    uint32 grp_idx; /* Group index. */
    uint16 glp; /* Generic logical port. */
    uint16 vlan; /* VLAN ID. */
    uint16 inner_vlan; /* Inner VLAN ID. */
    uint8 level; /* Maintenance domain level. */
    _bcm_oam_ep_list_t *cur; /* Current head node pointer. */
    int ep_id = -1; /* Endpoint ID. */
    uint8 match_found = 0; /* Matching endpoint found. */
    int rv; /* Operation return status. */
    bcm_oam_endpoint_info_t *ep_info = NULL; /* Endpoint information. */
    _bcm_oam_hash_data_t *h_data_p = NULL; /* Endpoint hash data pointer. */
    _bcm_oam_hash_data_t sh_data; /* Endpoint hash data buffer. */
    uint32 ccm_period = 0; /* CCM Tx period, in msecs. */
    egr_port_entry_t egr_port_entry; /* EGR_PORT table entry buffer. */
    uint8 mdl_mip = 0; /* Passive MDL bitmap (MIP detection). */
    int trunk_index = -1; /* Tx member port index for trunks. */
    int tpid_index = 0; /* TPID profile table index. */
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_tx_config_recover.\n")));
    /*
     * Get OAM control structure.
     * Note: Lock taken by calling routine.
     */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    rv = _bcm_kt2_oam_endpoint_alloc(&ep_info);
    if (BCM_FAILURE(rv)) {
        return (rv);
    }
    /*
     * At this point, Remote MEP and Local MEP Rx config has been
     * recovered. Now, recover the Tx config for Local MEPs.
     */
    for (index = 0; index < oc->lmep_count; index++) {
        /* Get the LMEP table entry. */
        rv = READ_LMEPm(unit, MEM_BLOCK_ANY, index, &lmep_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: LMEP table read (index=%d) failed "
                                  "- %s.\n"), index, bcm_errmsg(rv)));
            goto cleanup;
        }
        /* Get the LMEP_1 table entry. */
        rv = READ_LMEP_1m(unit, MEM_BLOCK_ANY, index, &lmep_1_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: LMEP_1 table read (index=%d) failed "
                                  "- %s.\n"), index, bcm_errmsg(rv)));
            goto cleanup;
        }
        /* Entries with CCM transmission disabled are unused - skip them. */
        ccm_period = _bcm_kt2_oam_ccm_hw_encode_to_msecs
                        ((int) soc_LMEPm_field32_get(unit, &lmep_entry,
                                                     CCM_INTERVALf));
        if (ccm_period == BCM_OAM_ENDPOINT_CCM_PERIOD_DISABLED) {
            continue;
        }
        grp_idx = soc_LMEPm_field32_get(unit, &lmep_entry, MAID_INDEXf);
        rv = READ_MAID_REDUCTIONm(unit, MEM_BLOCK_ANY, grp_idx, &maid_red_ent);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: MAID_REDU read (GID=%d) failed "
                                  "- %s.\n"), grp_idx, bcm_errmsg(rv)));
            goto cleanup;
        }
        if (soc_MAID_REDUCTIONm_field32_get(unit, &maid_red_ent, VALIDf)) {
            /* Get pointer to group memory. */
            g_info_p = &oc->group_info[grp_idx];
            vlan = soc_LMEPm_field32_get(unit, &lmep_entry, VLAN_IDf);
            inner_vlan = soc_LMEPm_field32_get(unit, &lmep_entry, CVLAN_TAGf);
            /* Only extract the VLAN ID portion of the tag. */
            inner_vlan = inner_vlan & 0xfff;
            /* lmep_1_entry was already read for this index above - the
             * redundant second READ_LMEP_1m has been removed. */
            glp = soc_LMEP_1m_field32_get(unit, &lmep_1_entry, PP_PORTf);
            rv = _bcm_kt2_pp_port_to_modport_get(unit, glp,
                                                 &modid, &port_id);
            if (BCM_FAILURE(rv)) {
                goto cleanup;
            }
            level = soc_LMEPm_field32_get(unit, &lmep_entry, MDLf);
            trunk_id = BCM_TRUNK_INVALID;
            if (vlan != 0 || inner_vlan != 0) {
                rv = bcm_esw_trunk_find(unit, modid, port_id, &trunk_id);
                if (BCM_FAILURE(rv)
                    && (BCM_E_NOT_FOUND != rv)) {
                    goto cleanup;
                }
            }
            /* Rebuild the endpoint's gport from trunk membership or GLP. */
            _bcm_gport_dest_t_init(&gport_dest);
            if (BCM_TRUNK_INVALID != trunk_id) {
                gport_dest.tgid = trunk_id;
                gport_dest.gport_type = _SHR_GPORT_TYPE_TRUNK;
                rv = _bcm_esw_oam_lmep_tx_trunk_config_recover(unit,
                                                               trunk_id,
                                                               port_id,
                                                               &trunk_index);
                if(BCM_FAILURE(rv)) {
                    goto cleanup;
                }
            } else {
                gport_dest.gport_type = _SHR_GPORT_TYPE_MODPORT;
                gport_dest.modid = _BCM_OAM_GLP_MODULE_ID_GET(glp);
                gport_dest.port = _BCM_OAM_GLP_PORT_GET(glp);
            }
            rv = _bcm_esw_gport_construct(unit, &gport_dest, &gport);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Gport construct failed"
                                      " - %s.\n"), bcm_errmsg(rv)));
                goto cleanup;
            }
            /* Try to pair this Tx entry with an Rx endpoint already
             * recovered into this group (same vlans/gport/level). */
            /* Get the endpoint list head pointer. */
            cur = *(g_info_p->ep_list);
            if (NULL != cur) {
                while (NULL != cur) {
                    h_data_p = cur->ep_data_p;
                    if (NULL == h_data_p) {
                        LOG_ERROR(BSL_LS_BCM_OAM,
                                  (BSL_META_U(unit,
                                              "OAM Error: Group (GID=%d) NULL"
                                              " endpoint list.\n"), grp_idx));
                        rv = BCM_E_INTERNAL;
                        goto cleanup;
                    }
                    if (vlan == h_data_p->vlan &&
                        inner_vlan == h_data_p->inner_vlan
                        && gport == h_data_p->gport
                        && level == h_data_p->level
                        && 1 == h_data_p->local_rx_enabled) {
                        match_found = 1;
                        break;
                    }
                    cur = cur->next;
                }
            }
            if (1 == match_found) {
                bcm_oam_endpoint_info_t_init(ep_info);
                /* Set up endpoint information for key construction. */
                ep_info->group = h_data_p->group_index;
                ep_info->name = h_data_p->name;
                ep_info->gport = h_data_p->gport;
                ep_info->level = h_data_p->level;
                ep_info->vlan = h_data_p->vlan;
                ep_info->inner_vlan = h_data_p->inner_vlan;
                ep_info->flags = h_data_p->flags;
                if ((recovered_ver < BCM_WB_VERSION_1_5)
                    || (!ETH_TYPE(h_data_p->type))) {
                    /*
                     * Calculate hash key for hash table insert
                     * operation.
                     */
                    _bcm_kt2_oam_ep_hash_key_construct(unit, oc, ep_info,
                                                       &hash_key);
                    /*
                     * Delete insert done by Local Rx endpoint recovery code.
                     * Endpoint name has been recovered and will result
                     * in a different hash index.
                     */
                    rv = shr_htb_find(oc->ma_mep_htbl, hash_key,
                                      (shr_htb_data_t *)&sh_data, 1);
                    if (BCM_E_NOT_FOUND == rv) {
                        goto cleanup;
                    }
                }
            } else {
                /* No Rx endpoint matched - create a Tx-only endpoint. */
                if (recovered_ver >= BCM_WB_VERSION_1_3) {
                    /* Endpoint ID was synced - reserve the exact ID. */
                    sal_memcpy(&ep_id,
                               *(scache) + _BCM_KT2_OAM_TX_MEP_SCACHE_LOCATION(index),
                               sizeof(bcm_oam_endpoint_t));
                    rv = shr_idxres_list_reserve(oc->mep_pool, ep_id, ep_id);
                    if (BCM_FAILURE(rv)) {
                        rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
                        LOG_ERROR(BSL_LS_BCM_OAM,
                                  (BSL_META_U(unit,
                                              "OAM Error: (EP_ID=%d) Index reserve "
                                              " failed - %s.\n"), ep_id, bcm_errmsg(rv)));
                        goto cleanup;
                    }
                } else {
                    /* Allocate the next available endpoint index. */
                    rv = shr_idxres_list_alloc(oc->mep_pool,
                                               (shr_idxres_element_t *)&ep_id);
                    if (BCM_FAILURE(rv)) {
                        LOG_ERROR(BSL_LS_BCM_OAM,
                                  (BSL_META_U(unit,
                                              "OAM Error: Endpoint alloc (EP=%d)"
                                              " - %s.\n"), ep_id, bcm_errmsg(rv)));
                        goto cleanup;
                    }
                }
                h_data_p = &oc->oam_hash_data[ep_id];
                if (1 == h_data_p->in_use) {
                    rv = BCM_E_INTERNAL;
                    goto cleanup;
                }
                _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
                _BCM_OAM_HASH_DATA_HW_IDX_INIT(h_data_p);
                rv = _bcm_kt2_oam_group_ep_list_add(unit, grp_idx, ep_id);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Adding (EP=%d)"
                                          " to (GID=%d) failed - %s.\n"),
                               ep_id, grp_idx, bcm_errmsg(rv)));
                    goto cleanup;
                }
                h_data_p->ep_id = ep_id;
                h_data_p->group_index = grp_idx;
                h_data_p->local_rx_enabled = 0;
                h_data_p->vlan = vlan;
                h_data_p->inner_vlan = inner_vlan;
                h_data_p->gport = gport;
                h_data_p->level = level;
                if ((h_data_p->vlan == 0) && (h_data_p->inner_vlan == 0)) {
                    h_data_p->oam_domain = _BCM_OAM_DOMAIN_PORT;
                }
                h_data_p->tx_ctr = _BCM_OAM_INVALID_INDEX;
                h_data_p->rx_ctr = _BCM_OAM_INVALID_INDEX;
                h_data_p->pri_map_index = _BCM_OAM_INVALID_INDEX;
                h_data_p->profile_index = _BCM_OAM_INVALID_INDEX;
                h_data_p->dglp1_profile_index = _BCM_OAM_INVALID_INDEX;
                h_data_p->dglp2_profile_index = _BCM_OAM_INVALID_INDEX;
                h_data_p->egr_pri_map_index = _BCM_OAM_INVALID_INDEX;
                h_data_p->outer_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
                h_data_p->subport_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
                h_data_p->inner_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
                h_data_p->trunk_id = BCM_TRUNK_INVALID;
            }
            /* Common Tx state for both matched and newly created MEPs. */
            h_data_p->src_pp_port = glp;
            h_data_p->dst_pp_port = glp;
            h_data_p->is_remote = 0;
            h_data_p->in_use = 1;
            h_data_p->local_tx_enabled = 1;
            h_data_p->local_tx_index = index;
            if((-1) != trunk_index) {
                h_data_p->trunk_index = trunk_index;
            }
            h_data_p->name
                = soc_LMEPm_field32_get(unit, &lmep_entry, MEPIDf);
            h_data_p->period
                = _bcm_kt2_oam_ccm_hw_encode_to_msecs
                    ((int) soc_LMEPm_field32_get(unit, &lmep_entry,
                                                 CCM_INTERVALf));
            if (BCM_PBMP_MEMBER(SOC_INFO(unit).general_pp_port_pbm, glp)) {
                h_data_p->flags2 |= BCM_OAM_ENDPOINT_FLAGS2_VLAN_VP_UP_MEP_IN_HW;
            }
            if (h_data_p->oam_domain != _BCM_OAM_DOMAIN_PORT) {
                /* VLAN/VP domain: bump the key reference counts in the
                 * ingress and egress port tables. */
                rv = _bcm_kt2_oam_port_table_key_update(unit, PORT_TABm, h_data_p);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Unable to increment key ref count "
                                          " - %s.\n"), bcm_errmsg(rv)));
                    goto cleanup;
                }
                rv = _bcm_kt2_oam_port_table_key_update(unit, EGR_PORTm, h_data_p);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Unable to increment key ref count "
                                          " - %s.\n"), bcm_errmsg(rv)));
                    goto cleanup;
                }
            } else {
                /* Port domain: detect MIP from the EGR_PORT passive MDL
                 * bitmap. */
                rv = soc_mem_read(unit, EGR_PORTm, MEM_BLOCK_ANY,
                                  h_data_p->dst_pp_port, &egr_port_entry);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: EGR_PORT Table read failed"
                                          " - %s.\n"), bcm_errmsg(rv)));
                    goto cleanup;
                }
                mdl_mip = soc_EGR_PORTm_field32_get(unit, &egr_port_entry,
                                                    MDL_BITMAP_PASSIVEf);
                if (!(mdl_mip & (1 << h_data_p->level))) {
                    /* Set the MIP flag */
                    h_data_p->flags |= BCM_OAM_ENDPOINT_INTERMEDIATE;
                }
            }
            /* For ethernet CCM TX endpoints, fetch TPIDs from HW.
             */
            if (h_data_p->vlan != 0) {
                tpid_index = soc_LMEPm_field32_get(unit, &lmep_entry,
                                                   SVLAN_TPID_INDEXf);
                rv = _bcm_kt2_oam_tpid_get(unit, BCM_OAM_TPID_TYPE_OUTER,
                                           tpid_index, &(h_data_p->outer_tpid));
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM(unit %d) Error: Outer TPID fetch failed"
                                          " EP=%d %s.\n"), unit, ep_id,
                               bcm_errmsg(rv)));
                    goto cleanup;
                }
                h_data_p->outer_tpid_profile_index = tpid_index;
                KT2_OAM_OUTER_TPID_REF_COUNT(unit, tpid_index)++;
            }
            if (h_data_p->inner_vlan != 0) {
                /* Inner TPID always uses profile index 0. */
                rv = _bcm_kt2_oam_tpid_get(unit, BCM_OAM_TPID_TYPE_INNER,
                                           0, &(h_data_p->inner_tpid));
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM(unit %d) Error: Inner TPID fetch failed"
                                          " EP=%d %s.\n"), unit, ep_id,
                               bcm_errmsg(rv)));
                    goto cleanup;
                }
                h_data_p->inner_tpid_profile_index = 0;
                KT2_OAM_INNER_TPID_REF_COUNT(unit, 0)++;
            }
            if ((BCM_GPORT_IS_SUBPORT_PORT(h_data_p->gport)) &&
                (_BCM_KT2_GPORT_IS_SUBTAG_SUBPORT_PORT(unit, h_data_p->gport))) {
                tpid_index = soc_LMEPm_field32_get(unit, &lmep_entry,
                                                   SUBPORT_TAG_TPID_INDEXf);
                rv = _bcm_kt2_oam_tpid_get(unit, BCM_OAM_TPID_TYPE_SUBPORT,
                                           tpid_index, &(h_data_p->subport_tpid));
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM(unit %d) Error: Subport TPID fetch failed"
                                          " EP=%d %s.\n"), unit, ep_id,
                               bcm_errmsg(rv)));
                    goto cleanup;
                }
                h_data_p->subport_tpid_profile_index = tpid_index;
                KT2_OAM_SUBPORT_TPID_REF_COUNT(unit, tpid_index)++;
            }
            /* Mark this LMEP table index as in-use. */
            rv = shr_idxres_list_reserve(oc->lmep_pool,
                                         h_data_p->local_tx_index,
                                         h_data_p->local_tx_index);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Tx index=%d reserve failed"
                                      " - %s.\n"), index, bcm_errmsg(rv)));
                _bcm_kt2_oam_group_ep_list_remove(unit,
                                                  h_data_p->group_index,
                                                  h_data_p->ep_id);
                rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
                goto cleanup;
            }
            /* (Re-)insert the endpoint with its recovered name. */
            bcm_oam_endpoint_info_t_init(ep_info);
            /* Set up endpoint information for key construction. */
            ep_info->group = h_data_p->group_index;
            ep_info->name = h_data_p->name;
            ep_info->gport = h_data_p->gport;
            ep_info->level = h_data_p->level;
            ep_info->vlan = h_data_p->vlan;
            ep_info->inner_vlan = h_data_p->inner_vlan;
            ep_info->flags = h_data_p->flags;
            if ((recovered_ver < BCM_WB_VERSION_1_5)
                || (!ETH_TYPE(h_data_p->type))) {
                /*
                 * Calculate hash key for hash table insert
                 * operation.
                 */
                _bcm_kt2_oam_ep_hash_key_construct(unit, oc, ep_info, &hash_key);
                rv = shr_htb_insert(oc->ma_mep_htbl, hash_key, h_data_p);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Hash table insert"
                                          " (EP=%d) failed - %s.\n"),
                               h_data_p->ep_id, bcm_errmsg(rv)));
                    _bcm_kt2_oam_group_ep_list_remove(unit,
                                                      h_data_p->group_index,
                                                      h_data_p->ep_id);
                    goto cleanup;
                } else {
                    LOG_DEBUG(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Info: Hash Tbl (EP=%d)"
                                          " inserted - %s.\n"), h_data_p->ep_id,
                               bcm_errmsg(rv)));
                }
            }
        }
        match_found = 0;
        h_data_p = NULL;
    }
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_tx_config_recover"
                          " - done.\n")));
    sal_free(ep_info);
    return (rv);
cleanup:
    /* Best-effort rollback of the endpoint being processed when the
     * failure occurred; 'rv' keeps the original failure status. */
    if (NULL != ep_info) {
        sal_free(ep_info);
    }
    if (0 == match_found && NULL != h_data_p) {
        /* Return endpoint index to MEP pool. */
        if (BCM_E_EXISTS
            == shr_idxres_list_elem_state(oc->mep_pool, ep_id)) {
            shr_idxres_list_free(oc->mep_pool, ep_id);
        }
        _BCM_OAM_HASH_DATA_CLEAR(h_data_p);
    }
    if (BCM_E_EXISTS
        == shr_idxres_list_elem_state(oc->lmep_pool, index)) {
        shr_idxres_list_free(oc->lmep_pool, index);
    }
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: _bcm_kt2_oam_lmep_tx_config_recover"
                          " - error_done.\n")));
    return (rv);
}
/*
 * Function:
 *     _bcm_kt2_oam_wb_endpoints_recover
 * Purpose:
 *     Rebuild software state for all OAM endpoints after warm boot.
 *     Walks the L3 host table (RMEPs and DownMEPs), the EGR_MP_GROUP
 *     table (UpMEPs) and the port table (port-based MEPs), then recovers
 *     the local endpoint Tx configuration.
 * Parameters:
 *     unit          - Device unit number.
 *     stable_size   - OAM module Level2 memory size.
 *     oam_scache    - Pointer to secondary storage buffer pointer.
 *     recovered_ver - Warm boot version of the recovered scache image.
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_wb_endpoints_recover(int unit,
                                  int stable_size,
                                  uint8 **oam_scache,
                                  uint16 recovered_ver)
{
    _bcm_oam_control_t *oc;                /* Pointer to control structure. */
    l3_entry_1_entry_t l3_entry;           /* L3 table entry.               */
    egr_mp_group_entry_t egr_mp_grp_entry; /* Egr MP group tbl entry buffer */
    port_tab_entry_t port_entry;           /* Port table entry buffer       */
    uint32 tbl_size;                       /* Entries in the current table. */
    uint32 key_type;                       /* L3 entry key type.            */
    int ent;                               /* Hardware table iterator.      */
    int rv;                                /* Operation return status.      */
    /*
     * Get OAM control structure.
     * Note: Lock taken by calling routine.
     */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Pass 1: RMEPs and DownMEPs reside in the L3 host table. */
    tbl_size = soc_mem_index_count(unit, L3_ENTRY_IPV4_UNICASTm);
    for (ent = 0; ent < tbl_size; ent++) {
        rv = READ_L3_ENTRY_IPV4_UNICASTm(unit, MEM_BLOCK_ANY, ent, &l3_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: L3_ENTRY (index=%d) read"
                                  " failed - %s.\n"), ent, bcm_errmsg(rv)));
            return (rv);
        }
        if (!soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, &l3_entry, VALIDf)) {
            continue;
        }
        key_type = soc_L3_ENTRY_IPV4_UNICASTm_field32_get(unit, &l3_entry,
                                                          KEY_TYPEf);
        if (SOC_MEM_KEY_L3_ENTRY_RMEP == key_type) {
            rv = _bcm_kt2_oam_rmep_recover(unit, ent,
                                           &l3_entry, oam_scache, recovered_ver);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Remote endpoint"
                                      " (index=%d) reconstruct failed - %s.\n"),
                           ent, bcm_errmsg(rv)));
                return (rv);
            }
        } else if (SOC_MEM_KEY_L3_ENTRY_LMEP == key_type) {
            /* Recover all downMEP's */
            rv = _bcm_kt2_oam_lmep_downmep_rx_config_recover(unit,
                                                             ent,
                                                             &l3_entry,
                                                             oam_scache,
                                                             recovered_ver);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Local endpoint DownMEP"
                                      " (index=%d) reconstruct failed - %s.\n"),
                           ent, bcm_errmsg(rv)));
                return (rv);
            }
        }
        /* Any other key type is not an OAM entry - skip it. */
    }
    /* Pass 2: UpMEPs reside in the EGR_MP_GROUP table. */
    tbl_size = soc_mem_index_count(unit, EGR_MP_GROUPm);
    for (ent = 0; ent < tbl_size; ent++) {
        rv = READ_EGR_MP_GROUPm(unit, MEM_BLOCK_ANY, ent, &egr_mp_grp_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: EGR_MP_GROUP ENTRY (index=%d) read"
                                  " failed - %s.\n"), ent, bcm_errmsg(rv)));
            return (rv);
        }
        if (!soc_EGR_MP_GROUPm_field32_get(unit, &egr_mp_grp_entry, VALIDf)) {
            continue;
        }
        rv = _bcm_kt2_oam_lmep_upmep_rx_config_recover(unit, ent,
                                                       &egr_mp_grp_entry, oam_scache, recovered_ver);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Local endpoint UpMEP"
                                  " (index=%d) reconstruct failed - %s.\n"),
                       ent, bcm_errmsg(rv)));
            return (rv);
        }
    }
    /* Pass 3: port-based MEPs are flagged in the port table. */
    tbl_size = soc_mem_index_count(unit, PORT_TABm);
    for (ent = 0; ent < tbl_size; ent++) {
        rv = READ_PORT_TABm(unit, MEM_BLOCK_ANY, ent, &port_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Port table ENTRY (index=%d) read"
                                  " failed - %s.\n"), ent, bcm_errmsg(rv)));
            return (rv);
        }
        if (!soc_PORT_TABm_field32_get(unit, &port_entry, OAM_ENABLEf)) {
            continue;
        }
        rv = _bcm_kt2_oam_port_lmep_rx_config_recover(unit, ent,
                                                      &port_entry,
                                                      oam_scache, recovered_ver);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Port based local endpoint "
                                  " (index=%d) reconstruct failed - %s.\n"),
                       ent, bcm_errmsg(rv)));
            return (rv);
        }
    }
    /* Final pass: rebuild the Tx configuration for local MEPs. */
    rv = _bcm_kt2_oam_lmep_tx_config_recover(unit, oam_scache, recovered_ver);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint Tx config recovery"
                              " failed - %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    return (BCM_E_NONE);
}
/*
 * Function:
 *     bcm_kt2_oam_scache_alloc
 * Purpose:
 *     Allocate scache memory for OAM module warm boot state.
 * Parameters:
 *     unit - (IN) BCM device number
 * Returns:
 *     BCM_E_UNIT    - Invalid BCM unit number.
 *     BCM_E_UNAVAIL - OAM not support on this device.
 *     BCM_E_MEMORY  - Allocation failure
 *     CM_E_XXX      - Error code from bcm_XX_oam_init()
 *     BCM_E_NONE    - Success
 */
STATIC int
bcm_kt2_oam_scache_alloc(int unit) {
    _bcm_oam_control_t *oc;        /* OAM module control structure.   */
    soc_scache_handle_t handle;    /* Scache handle for OAM module.   */
    uint8 *scache_ptr;             /* Returned scache buffer pointer. */
    int req_bytes = 0;             /* Total scache bytes to allocate. */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Sum up every item synced into scache for this module. */
    req_bytes = BCM_OAM_GROUP_NAME_LENGTH * (oc->group_count) /* Group names */
                /* Number of oam groups */
                + sizeof(int)
                /* VFP group, IFP VP group, IFP GLP group */
                + 3 * sizeof(bcm_field_group_t)
                /* Storage for logical endpoint Ids */
                + (_BCM_KT2_OAM_ENDPOINT_ID_SCACHE_SIZE)
                /* Storage for oc->eth_oam_mp_group_vlan_key */
                + sizeof(uint8)
                /* FLAGS stored for all ethernet OAM endpoints */
                + (sizeof(uint32) * oc->ep_count);

    SOC_SCACHE_HANDLE_SET(handle, unit, BCM_MODULE_OAM, 0);
    BCM_IF_ERROR_RETURN(_bcm_esw_scache_ptr_get(unit, handle, 1,
                  req_bytes, &scache_ptr, BCM_WB_DEFAULT_VERSION, NULL));
    return BCM_E_NONE;
}
/* Given an endpoint's hash data, rebuild the endpoint_info fields needed
 * for hash_key generation and insert the entry into the MA/MEP hash table.
 *
 * oc->lock is already assumed to be taken by caller */
int _bcm_kt2_oam_hash_tbl_insert_hash_data_entry(int unit, _bcm_oam_hash_data_t *h_data_p)
{
    _bcm_oam_control_t *oc = NULL;     /* Pointer to OAM control structure. */
    _bcm_oam_hash_key_t hash_key;      /* Hash key buffer for lookup.       */
    bcm_oam_endpoint_info_t ep_info;   /* Key-construction scratch info.    */
    int rv = BCM_E_NONE;

    if (NULL == h_data_p) {
        return BCM_E_INTERNAL;
    }

    /* Get OAM control structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Populate only the fields that participate in key construction. */
    bcm_oam_endpoint_info_t_init(&ep_info);
    ep_info.group      = h_data_p->group_index;
    ep_info.name       = h_data_p->name;
    ep_info.gport      = h_data_p->gport;
    ep_info.level      = h_data_p->level;
    ep_info.vlan       = h_data_p->vlan;
    ep_info.inner_vlan = h_data_p->inner_vlan;
    ep_info.flags      = h_data_p->flags;

    /* Construct hash key for lookup */
    _bcm_kt2_oam_ep_hash_key_construct(unit, oc, &ep_info, &hash_key);

    /* Insert entry from hash table. */
    rv = shr_htb_insert(oc->ma_mep_htbl, hash_key, h_data_p);
    if (BCM_SUCCESS(rv)) {
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Info: Hash Tbl (EP=%d) inserted"
                              " - %s.\n"), unit, h_data_p->ep_id, bcm_errmsg(rv)));
    } else {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Hash table insert failed "
                              "EP=%d %s.\n"), unit, h_data_p->ep_id, bcm_errmsg(rv)));
    }
    return rv;
}
/*
 * Function:
 *     _bcm_kt2_oam_reinit
 * Purpose:
 *     Reconstruct OAM module software state from scache after a warm boot.
 *     Recovers FP groups, OAM groups and endpoints, then reallocates scache
 *     space when upgrading from an older warm-boot version whose layout was
 *     smaller than the current one.
 * Parameters:
 *     unit - (IN) BCM device number
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_reinit(int unit)
{
    uint32 group_count = 0;                /* Stored OAM group count.          */
    int stable_size = 0;                   /* Secondary storage size.          */
    uint8 *oam_scache;                     /* Pointer to scache memory.        */
    soc_scache_handle_t scache_handle;     /* Scache memory handler.           */
    _bcm_oam_control_t *oc;                /* Pointer to OAM control structure.*/
    int rv;                                /* Operation return status.         */
    uint16 recovered_ver = 0;              /* Warm-boot version found in scache.*/
    int realloc_size = 0;                  /* Extra scache bytes needed when
                                              upgrading across WB versions.    */
    int idx = 0;
    _bcm_oam_hash_data_t *h_data_p = NULL; /* Endpoint hash data pointer.      */
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: OAM warm boot recovery.....\n")));
    SOC_IF_ERROR_RETURN(soc_stable_size_get(unit, &stable_size));
    /* Get OAM control structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_LOCK(oc);
    if (!SOC_WARM_BOOT_SCACHE_IS_LIMITED(unit) && (stable_size > 0)) {
        SOC_SCACHE_HANDLE_SET(scache_handle, unit, BCM_MODULE_OAM, 0);
        rv = _bcm_esw_scache_ptr_get(unit, scache_handle, 0, 0,
                                     &oam_scache, BCM_WB_DEFAULT_VERSION,
                                     &recovered_ver);
        if (BCM_E_NOT_FOUND == rv) {
            /* Upgrading from SDK release that does not have warmboot state;
             * allocate fresh scache and finish without recovery. */
            bcm_kt2_oam_scache_alloc(unit);
            _BCM_OAM_UNLOCK(oc);
            return BCM_E_NONE;
        } else if (BCM_FAILURE(rv)) {
            goto cleanup;
        } else {
            if (recovered_ver < BCM_WB_VERSION_1_2 && MA_INDEX_ALLOC_SCHEME_SEPARATE_POOL) {
                /* Older SDK state cannot describe the separate-pool index
                 * allocation; revert to the combined-pool scheme and rebuild
                 * the MA index pools accordingly. */
                LOG_WARN(BSL_LS_BCM_OAM, (BSL_META_U(unit, "OAM Warn:"
                         " Warm upgrade from a older version which does not support "
                         "flexible endpoint hw index allocation. Reverting to older "
                         "scheme.\n")));
                oc->ma_idx_alloc_scheme = _BCM_OAM_MA_IDX_ALLOC_COMBINED_POOL;
                /* Destroy the previous allocation and re-allocate based on this scheme */
                rv = _bcm_kt2_oam_ma_idx_pool_destroy(oc);
                if (BCM_FAILURE(rv)) {
                    goto cleanup;
                }
                rv = _bcm_kt2_oam_ma_idx_pool_create(oc);
                if (BCM_FAILURE(rv)) {
                    goto cleanup;
                }
            }
            /* Recover the FP groups.  NOTE: oam_scache is advanced past each
             * recovered field; order must match the layout written at sync. */
            sal_memcpy(&oc->vfp_group, oam_scache, sizeof(bcm_field_group_t));
            oam_scache += sizeof(bcm_field_group_t);
            sal_memcpy(&oc->fp_vp_group, oam_scache, sizeof(bcm_field_group_t));
            oam_scache += sizeof(bcm_field_group_t);
            sal_memcpy(&oc->fp_glp_group, oam_scache, sizeof(bcm_field_group_t));
            oam_scache += sizeof(bcm_field_group_t);
            /* Recover the OAM groups */
            sal_memcpy(&group_count, oam_scache, sizeof(int));
            oam_scache += sizeof(int);
        }
    } else {
        /* No persistent storage available; nothing to recover. */
        rv = BCM_E_NONE;
        goto cleanup;
    }
    rv = _bcm_kt2_oam_wb_group_recover(unit, stable_size, &oam_scache);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Group recovery failed - %s.\n"),
                   bcm_errmsg(rv)));
        goto cleanup;
    }
    rv = _bcm_kt2_oam_wb_endpoints_recover(unit, stable_size, &oam_scache, recovered_ver);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint recovery failed - %s.\n"),
                   bcm_errmsg(rv)));
        goto cleanup;
    }
    /* Skip over the software endpoint-id region added in version 1_3. */
    if (recovered_ver >= BCM_WB_VERSION_1_3) {
        oam_scache += (_BCM_KT2_OAM_ENDPOINT_ID_SCACHE_SIZE);
    }
    if (recovered_ver >= BCM_WB_VERSION_1_4) {
        sal_memcpy(&oc->eth_oam_mp_group_vlan_key, oam_scache, sizeof(uint8));
        oam_scache += sizeof(uint8);
    } else {
        /* Older state did not store the key mode; use the legacy default. */
        oc->eth_oam_mp_group_vlan_key =
            _BCM_KT2_ETH_OAM_MP_GROUP_KEY_DOMAIN_INDEPENDANT;
    }
    if (recovered_ver >= BCM_WB_VERSION_1_5) {
        /* Version 1_5 stores one uint32 flags word per endpoint slot; the
         * scache pointer advances for every slot, used or not. */
        for (idx = 0; idx < oc->ep_count; idx++) {
            h_data_p = &oc->oam_hash_data[idx];
            if (ETH_TYPE(h_data_p->type) && (h_data_p->in_use)) {
                sal_memcpy(&(h_data_p->flags), oam_scache, sizeof(uint32));
                rv = _bcm_kt2_oam_hash_tbl_insert_hash_data_entry(unit, h_data_p);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM(unit %d) Error: Endpoint %d hash tbl insert failed - %s.\n"),
                               unit, h_data_p->ep_id, bcm_errmsg(rv)));
                    goto cleanup;
                }
            }
            oam_scache += sizeof(uint32);
        }
    }
    /* In BCM_WB_VERSION_1_1 onwards we allocate memory for max OAM groups and
       not just the groups created, as such while upgrading from
       BCM_WB_VERSION_1_0 to BCM_WB_VERSION_1_1 or greater we must add the
       differential.
     */
    if (!SOC_WARM_BOOT_SCACHE_IS_LIMITED(unit) && (stable_size > 0)) {
        if (recovered_ver < BCM_WB_VERSION_1_1) {
            realloc_size += (BCM_OAM_GROUP_NAME_LENGTH * (oc->group_count - group_count));
        }
        /* BCM_WB_VERSION_1_3 onwards addition memory is used to store
         * software endpoint ids */
        if (recovered_ver < BCM_WB_VERSION_1_3) {
            realloc_size += (_BCM_KT2_OAM_ENDPOINT_ID_SCACHE_SIZE);
        }
        if (recovered_ver < BCM_WB_VERSION_1_4) {
            realloc_size += sizeof(uint8);
        }
        if (recovered_ver < BCM_WB_VERSION_1_5) {
            realloc_size += (sizeof(uint32) * oc->ep_count);
        }
        /* Added the check to avoid any like-to-like recovery where realloc is not required */
        if (realloc_size) {
            SOC_SCACHE_HANDLE_SET(scache_handle, unit, BCM_MODULE_OAM, 0);
            rv = soc_scache_realloc(unit, scache_handle, realloc_size);
            if (SOC_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM(unit %d) Error: scache alloc failed"
                                      " - %s.\n"),
                           unit, bcm_errmsg(rv)));
                goto cleanup;
            }
        }
    }
cleanup:
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
#if defined(INCLUDE_BHH)
/*
 * Function:
 *     _bcm_kt2_oam_bhh_cos_map_recover
 * Purpose:
 *     Reconstruct CoS Map config done for BHH by scanning the RX CoS queue
 *     mapping table for the three reason-code combinations programmed at
 *     cold boot (ACH error, invalid error, BHH loopback).
 * Parameters:
 *     unit - (IN) BCM device number
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_bhh_cos_map_recover(int unit)
{
    int rv = BCM_E_NONE;
    _bcm_oam_control_t *oc;                /* OAM control structure pointer.   */
    int index;                             /* CoS map table iterator.          */
    int cosq_map_size;                     /* Number of CoS map entries.       */
    bcm_rx_reasons_t reasons, reasons_mask;
    bcm_rx_reasons_t ach_error_reasons, invalid_error_reasons;
    bcm_rx_reasons_t bhh_lb_reasons;
    uint8 int_prio, int_prio_mask;
    uint32 packet_type, packet_type_mask;
    bcm_cos_queue_t cosq;
    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_LOCK(oc);
    /* Nothing to recover when the BHH uKernel application never started. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_NONE;
    }
    rv = bcm_esw_rx_cosq_mapping_size_get(unit, &cosq_map_size);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "BHH Error:hw init. cosq maps size %s.\n"),
                   bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    /* Build the three reason-code signatures programmed by BHH hw init. */
    BCM_RX_REASON_CLEAR_ALL(ach_error_reasons);
    BCM_RX_REASON_SET(ach_error_reasons, bcmRxReasonBHHOAM);
    BCM_RX_REASON_SET(ach_error_reasons, bcmRxReasonOAMCCMSlowpath);
    BCM_RX_REASON_CLEAR_ALL(invalid_error_reasons);
    BCM_RX_REASON_SET(invalid_error_reasons, bcmRxReasonBHHOAM);
    BCM_RX_REASON_SET(invalid_error_reasons, bcmRxReasonOAMLMDM);
    BCM_RX_REASON_CLEAR_ALL(bhh_lb_reasons);
    BCM_RX_REASON_SET(bhh_lb_reasons, bcmRxReasonBHHOAM);
    BCM_RX_REASON_SET(bhh_lb_reasons, bcmRxReasonOAMSlowpath);
    /* -1 marks "not yet found" for each of the three entries. */
    oc->cpu_cosq_ach_error_index = -1;
    oc->cpu_cosq_invalid_error_index = -1;
    oc->bhh_lb_index = -1;
    for (index = 0; index < cosq_map_size; index++) {
        rv = bcm_esw_rx_cosq_mapping_get(unit, index,
                                         &reasons, &reasons_mask,
                                         &int_prio, &int_prio_mask,
                                         &packet_type, &packet_type_mask,
                                         &cosq);
        /* Lookup failures (e.g. empty entries) are skipped, not fatal. */
        if (rv == BCM_E_NONE) {
            /* An entry matches only when both value and mask equal the
             * expected signature exactly. */
            if (BCM_RX_REASON_EQ(reasons, ach_error_reasons) &&
                BCM_RX_REASON_EQ(reasons_mask, ach_error_reasons)) {
                oc->cpu_cosq_ach_error_index = index;
                oc->cpu_cosq = cosq;
            } else if (BCM_RX_REASON_EQ(reasons, invalid_error_reasons) &&
                       BCM_RX_REASON_EQ(reasons_mask, invalid_error_reasons)) {
                oc->cpu_cosq_invalid_error_index = index;
            } else if (BCM_RX_REASON_EQ(reasons, bhh_lb_reasons) &&
                       BCM_RX_REASON_EQ(reasons_mask, bhh_lb_reasons)) {
                oc->bhh_lb_index = index;
            }
        }
        /* Stop early once all three entries have been located. */
        if (-1 != oc->cpu_cosq_ach_error_index &&
            -1 != oc->cpu_cosq_invalid_error_index &&
            -1 != oc->bhh_lb_index) {
            break;
        }
    }
    if (index >= cosq_map_size) {
        LOG_VERBOSE(BSL_LS_BCM_OAM,
                    (BSL_META_U(unit,
                                "BHH Error: Could not recover cos map entry \n")));
    }
    _BCM_OAM_UNLOCK(oc);
    return rv;
}
#endif /* INCLUDE_BHH */
#endif /* BCM_WARM_BOOT_SUPPORT */
/*
 * Function:
 *     _bcm_kt2_oam_ma_idx_num_entries_and_pool_get
 * Purpose:
 *     Return the number of MA_INDEX entries an endpoint consumes and the
 *     index pool it allocates from, based on the active allocation scheme.
 * Parameters:
 *     unit               - (IN)  BCM device number
 *     h_data_p           - (IN)  Endpoint hash data (must be in use)
 *     num_ma_idx_entries - (OUT) Number of MA_INDEX entries for the endpoint
 *     pool               - (OUT) Index pool handle to allocate from
 * Returns:
 *     BCM_E_PARAM    - NULL or unused endpoint
 *     BCM_E_INTERNAL - Unknown scheme or unsupported endpoint type
 *     BCM_E_NONE     - Success (outputs are valid)
 */
STATIC int
_bcm_kt2_oam_ma_idx_num_entries_and_pool_get(int unit,
                                             _bcm_oam_hash_data_t *h_data_p,
                                             int *num_ma_idx_entries,
                                             shr_idxres_list_handle_t *pool)
{
    _bcm_oam_control_t *oc = NULL;
    /* Get OAM control structure handle. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    if ((h_data_p == NULL) || (h_data_p->in_use == 0)) {
        return BCM_E_PARAM;
    }
    switch (oc->ma_idx_alloc_scheme) {
        case _BCM_OAM_MA_IDX_ALLOC_COMBINED_POOL:
            /* Combined scheme: every endpoint takes an 8-entry slice of the
             * single shared pool. */
            *num_ma_idx_entries = 8;
            *pool = oc->ma_idx_pool;
            return BCM_E_NONE;
        case _BCM_OAM_MA_IDX_ALLOC_SEPARATE_POOL:
            if (h_data_p->type == bcmOAMEndpointTypeEthernet) {
                *num_ma_idx_entries = 8;
                *pool = oc->ma_idx_pool;
                return BCM_E_NONE;
            } else if (BHH_EP_TYPE(h_data_p)) {
                *num_ma_idx_entries = 1;
                *pool = oc->mpls_oam_ma_idx_pool;
                return BCM_E_NONE;
            }
            /* Fix: an endpoint type that matches neither branch previously
             * returned BCM_E_NONE with both output parameters left
             * uninitialized; fall through to the error return instead. */
            break;
        default:
            /* Should not hit */
            break;
    }
    return BCM_E_INTERNAL;
}
/*
 * Function:
 *     _bcm_kt2_oam_ma_idx_pool_create
 * Purpose:
 *     Create the MA_INDEX allocation pool(s) according to the configured
 *     allocation scheme (one combined pool, or separate Ethernet/MPLS pools).
 * Parameters:
 *     oc - (IN) OAM control structure
 * Returns:
 *     BCM_E_XXX
 */
STATIC int
_bcm_kt2_oam_ma_idx_pool_create(_bcm_oam_control_t *oc)
{
    int rv = BCM_E_NONE;

    if (MA_INDEX_ALLOC_SCHEME_COMBINED_POOL) {
        /* One pool spanning the whole MA_INDEX range. */
        rv = shr_idxres_list_create(&oc->ma_idx_pool, 0,
                                    oc->ma_idx_count - 1,
                                    0, oc->ma_idx_count - 1, "ma_idx pool");
    } else if (MA_INDEX_ALLOC_SCHEME_SEPARATE_POOL) {
        /* Ethernet OAM pool occupies the low part of the range. */
        if (_BCM_ETH_OAM_NUM_SESSIONS != 0) {
            rv = shr_idxres_list_create(&oc->ma_idx_pool, 0,
                                        _BCM_ETH_OAM_NUM_SESSIONS - 1,
                                        0, _BCM_ETH_OAM_NUM_SESSIONS - 1,
                                        "ma_idx pool");
        }
        /* MPLS OAM pool follows immediately after the Ethernet range. */
        if (BCM_SUCCESS(rv) && (_BCM_MPLS_OAM_NUM_SESSIONS != 0)) {
            rv = shr_idxres_list_create(&oc->mpls_oam_ma_idx_pool,
                                        _BCM_ETH_OAM_NUM_SESSIONS,
                                        _BCM_ETH_OAM_NUM_SESSIONS + _BCM_MPLS_OAM_NUM_SESSIONS - 1,
                                        _BCM_ETH_OAM_NUM_SESSIONS,
                                        _BCM_ETH_OAM_NUM_SESSIONS + _BCM_MPLS_OAM_NUM_SESSIONS - 1,
                                        "mpls oam ma_idx pool");
        }
    } else {
        /* Unknown allocation scheme. */
        rv = BCM_E_INTERNAL;
    }
    return rv;
}
/*
 * Function:
 *     _bcm_kt2_oam_ma_idx_pool_destroy
 * Purpose:
 *     Destroy the MA_INDEX allocation pool(s) if they were created.
 * Parameters:
 *     oc - (IN) OAM control structure
 * Returns:
 *     BCM_E_NONE
 */
STATIC int
_bcm_kt2_oam_ma_idx_pool_destroy(_bcm_oam_control_t *oc)
{
    /* Release the combined/Ethernet MA_INDEX pool, when present. */
    if (oc->ma_idx_pool != NULL) {
        shr_idxres_list_destroy(oc->ma_idx_pool);
        oc->ma_idx_pool = NULL;
    }
    /* Release the MPLS OAM MA_INDEX pool, when present. */
    if (oc->mpls_oam_ma_idx_pool != NULL) {
        shr_idxres_list_destroy(oc->mpls_oam_ma_idx_pool);
        oc->mpls_oam_ma_idx_pool = NULL;
    }
    return BCM_E_NONE;
}
/* * * * * * * * * * * * * * * * * * * *
* OAM BCM APIs *
*/
/*
* Function: bcm_kt2_oam_init
*
* Purpose:
* Initialize OAM module.
* Parameters:
* unit - (IN) BCM device number
* Returns:
* BCM_E_UNIT - Invalid BCM unit number.
* BCM_E_UNAVAIL - OAM not support on this device.
* BCM_E_MEMORY - Allocation failure
* CM_E_XXX - Error code from bcm_XX_oam_init()
* BCM_E_NONE - Success
*/
int
bcm_kt2_oam_init(int unit)
{
_bcm_oam_control_t *oc = NULL; /* OAM control structure. */
int rv; /* Operation return value. */
uint32 size; /* Size of memory allocation. */
bcm_port_t port; /* Port number. */
int index = 0;
bcm_oam_endpoint_t ep_index; /* Endpoint index. */
bcm_pbmp_t all_pbmp;
#if defined(INCLUDE_BHH)
int uc;
uint8 carrier_code[] = {0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF};
char *carrier_code_str;
uint32 node_id = 0;
bhh_sdk_msg_ctrl_init_t msg_init;
uint8 *buffer, *buffer_ptr;
uint16 buffer_len, reply_len;
int priority;
int ukernel_not_ready = 0;
uint8 bhh_consolidate_final_event = 0;
#endif
/* Ensure that the unit has OAM support. */
if (!soc_feature(unit, soc_feature_oam)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: OAM not supported \n")));
return (BCM_E_UNAVAIL);
}
/* Detach first if the module has been previously initialized. */
if (NULL != _kt2_oam_control[unit]) {
_kt2_oam_control[unit]->init = FALSE;
rv = bcm_kt2_oam_detach(unit);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Module deinit - %s.\n"),
bcm_errmsg(rv)));
return (rv);
}
}
/* Allocate OAM control memeory for this unit. */
_BCM_OAM_ALLOC(oc, _bcm_oam_control_t, sizeof (_bcm_oam_control_t),
"OAM control");
if (NULL == oc) {
return (BCM_E_MEMORY);
}
if (soc_feature(unit, soc_feature_bhh)) {
#ifdef INCLUDE_BHH
/*
* Initialize uController side
*/
/*
* Start BHH application in BTE (Broadcom Task Engine) uController,
* if not already running (warm boot).
* Determine which uController is running BHH by choosing the first
* uC that returns successfully.
*/
for (uc = 0; uc < SOC_INFO(unit).num_ucs; uc++) {
if (!soc_uc_in_reset(unit, uc)) {
rv = soc_cmic_uc_appl_init(unit, uc, MOS_MSG_CLASS_BHH,
_BHH_UC_MSG_TIMEOUT_USECS,
BHH_SDK_VERSION,
BHH_UC_MIN_VERSION,
_bcm_oam_bhh_appl_callback, NULL);
if (SOC_E_NONE == rv) {
/* BHH started successfully */
oc->uc_num = uc;
break;
}
}
}
if (uc >= SOC_INFO(unit).num_ucs) { /* Could not find or start BHH appl */
ukernel_not_ready = 1;
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"uKernel Not Ready, bhh not started\n")));
}
#endif
}
/* Get number of endpoints and groups supported by this unit. */
rv = _bcm_kt2_oam_group_endpoint_count_init(unit, oc);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
if (soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
oc->unit = unit;
oc->ukernel_not_ready = ukernel_not_ready;
/* Get SOC properties for BHH */
oc->bhh_endpoint_count = soc_property_get(unit, spn_BHH_NUM_SESSIONS, 128);
oc->bhh_max_encap_length =
soc_property_get(unit,
spn_BHH_ENCAP_MAX_LENGTH,
_BCM_OAM_BHH_DEFAULT_ENCAP_LENGTH);
if(oc->bhh_max_encap_length > _BCM_OAM_BHH_MAX_ENCAP_LENGTH) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"Invalid max encap_length=%u\n"),
oc->bhh_max_encap_length));
return BCM_E_CONFIG;
}
carrier_code_str = soc_property_get_str(unit, spn_BHH_CARRIER_CODE);
if (carrier_code_str != NULL) {
/*
* Note that the carrier code is specified in colon separated
* MAC address format.
*/
if (_shr_parse_macaddr(carrier_code_str, carrier_code) < 0) {
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_INTERNAL);
}
}
node_id = soc_property_get(unit, spn_BHH_NODE_ID, 0);
bhh_consolidate_final_event = soc_property_get(unit,
spn_BHH_CONSOLIDATED_FINAL_EVENT, 0);
oc->ep_count += oc->bhh_endpoint_count;
if(oc->ep_count >= _BCM_OAM_KATANA2_ENDPOINT_MAX) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: OAM EP count %d not supported \n"),
oc->ep_count));
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_PARAM);
}
#endif /* INCLUDE_BHH */
}
_BCM_MPLS_OAM_NUM_SESSIONS = soc_property_get(unit, spn_MPLS_OAM_NUM_SESSIONS,
_BCM_MPLS_OAM_NUM_SESSIONS_INVALID);
if(_BCM_MPLS_OAM_NUM_SESSIONS ==
_BCM_MPLS_OAM_NUM_SESSIONS_INVALID) {
/* Entry does not exist. Revert to old scheme */
oc->ma_idx_alloc_scheme = _BCM_OAM_MA_IDX_ALLOC_COMBINED_POOL;
} else if (_BCM_MPLS_OAM_NUM_SESSIONS > soc_mem_index_max(unit, MA_INDEXm)){
/* If it is greater than max index of MA_INDEX table,
* set it to max_index value and set it to newer index alloc scheme.
*/
_BCM_MPLS_OAM_NUM_SESSIONS = soc_mem_index_max(unit, MA_INDEXm);
_BCM_ETH_OAM_NUM_SESSIONS = 0;
oc->ma_idx_alloc_scheme = _BCM_OAM_MA_IDX_ALLOC_SEPARATE_POOL;
} else {
_BCM_ETH_OAM_NUM_SESSIONS =
soc_mem_index_count(unit, MA_INDEXm) - _BCM_MPLS_OAM_NUM_SESSIONS;
oc->ma_idx_alloc_scheme = _BCM_OAM_MA_IDX_ALLOC_SEPARATE_POOL;
}
/* Mem_1: Allocate hash data memory */
/* size = sizeof(_bcm_oam_hash_data_t) * oc->ep_count; */
size = sizeof(_bcm_oam_hash_data_t) * _BCM_OAM_KATANA2_ENDPOINT_MAX;
_BCM_OAM_ALLOC(oc->oam_hash_data, _bcm_oam_hash_data_t, size, "Hash data");
if (NULL == oc->oam_hash_data) {
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_MEMORY);
}
/* Mem_2: Allocate group memory */
size = sizeof(_bcm_oam_group_data_t) * oc->group_count;
_BCM_OAM_ALLOC(oc->group_info, _bcm_oam_group_data_t, size, "Group Info");
if (NULL == oc->group_info) {
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_MEMORY);
}
/* Allocate RMEP H/w to logical index mapping memory */
size = sizeof(bcm_oam_endpoint_t) * oc->rmep_count;
_BCM_OAM_ALLOC(oc->remote_endpoints, bcm_oam_endpoint_t, size, "RMEP Mapping");
if (NULL == oc->remote_endpoints) {
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_MEMORY);
}
/* Initialize the mapping to BCM_OAM_ENDPOINT_INVALID */
for (ep_index = 0; ep_index < oc->rmep_count; ++ep_index) {
oc->remote_endpoints[ep_index] = BCM_OAM_ENDPOINT_INVALID;
}
/* Mem_3: Create application endpoint list. */
rv = shr_idxres_list_create(&oc->mep_pool, 0, oc->ep_count - 1,
0, oc->ep_count -1, "endpoint pool");
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Mem_4: Create local MEP endpoint list. */
rv = shr_idxres_list_create(&oc->lmep_pool, 0, oc->lmep_count - 1,
0, oc->lmep_count -1, "lmep pool");
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Mem_5: Create Remote MEP endpoint list. */
rv = shr_idxres_list_create(&oc->rmep_pool, 0, oc->rmep_count - 1,
0, oc->rmep_count -1, "rmep pool");
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Mem_6: Create MEP Rx state tracker table index endpoint list - DnMEP */
rv =_bcm_kt2_oam_ma_idx_pool_create(oc);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Mem_7: Create MEP Rx state tracker table index endpoint list -UpMEP */
rv = shr_idxres_list_create(&oc->egr_ma_idx_pool, 0,
oc->egr_ma_idx_count - 1,
0, oc->egr_ma_idx_count -1, "egr_ma_idx pool");
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Initialize to domain specific key by default, WB recovery might
* overwrite it, depending on older SDK.
*/
oc->eth_oam_mp_group_vlan_key = _BCM_KT2_ETH_OAM_MP_GROUP_KEY_DOMAIN_SPECIFIC;
if(soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
/* Create BHH endpoint list. */
rv = shr_idxres_list_create(&oc->bhh_pool, 0, oc->bhh_endpoint_count - 1,
0, oc->bhh_endpoint_count - 1, "bhh pool");
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Creating BHH pool failed \n")));
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Reserve ep pool from mep_pool for BHH, manange BHH mep using bhh_pool */
rv = shr_idxres_list_reserve(oc->mep_pool, _BCM_OAM_BHH_KT2_ENDPOINT_OFFSET,
_BCM_OAM_BHH_KT2_ENDPOINT_OFFSET + oc->bhh_endpoint_count - 1);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Reserving BHH endpoints from mep_pool "
"failed \n")));
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
#endif /* INCLUDE_BHH */
}
/* Mem_4: Create group list. */
rv = shr_idxres_list_create(&oc->group_pool, 0, oc->group_count - 1,
0, oc->group_count - 1, "group pool");
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
if(soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
if(ukernel_not_ready == 0){
int bhh_num_lm_sec_meps = soc_property_get(unit, spn_BHH_NUM_LM_ENABLED_SECTION_MEPS, 0);
int bhh_num_lm_sec_counters = bhh_num_lm_sec_meps * 8;
if( bhh_num_lm_sec_counters > oc->lm_counter_cnt) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Number of LM enabled section meps cannot be more than "
"available lm counter count \n")));
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_PARAM);
}
if (bhh_num_lm_sec_counters) {
oc->lm_counter_cnt = oc->lm_counter_cnt - bhh_num_lm_sec_counters;
rv = shr_aidxres_list_create(&oc->ing_lm_sec_mep_ctr_pool, 0,
bhh_num_lm_sec_counters - 1, 0,
bhh_num_lm_sec_counters - 1, 8, "ing_lm_sec_mep_ctr_pool");
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
}
}
#endif
}
/* Index management for Ingress LM counter pool 1 */
rv = shr_aidxres_list_create(&oc->ing_lm_ctr_pool[0], 0,
oc->lm_counter_cnt - 1, 0,
oc->lm_counter_cnt - 1, 8, "ing_lm_idx pool1");
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Index management for Ingress LM counter pool 2 */
rv = shr_aidxres_list_create(&oc->ing_lm_ctr_pool[1], 0,
oc->lm_counter_cnt - 1, 0,
oc->lm_counter_cnt - 1, 8, "ing_lm_idx pool2");
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Mem_5: Create MA group and MEP hash table. */
rv = shr_htb_create(&oc->ma_mep_htbl, _BCM_OAM_KATANA2_ENDPOINT_MAX,
sizeof(_bcm_oam_hash_key_t), "MA/MEP Hash");
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Create protection mutex. */
oc->oc_lock = sal_mutex_create("oam_control.lock");
if (NULL == oc->oc_lock) {
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_MEMORY);
}
/* Register device OAM interrupt handler & SER handler call back routine.*/
soc_kt2_oam_handler_register(unit, _bcm_kt2_oam_handle_interrupt);
soc_kt2_oam_ser_handler_register(unit, _bcm_kt2_oam_ser_handler);
/* Set up the unit OAM control structure. */
_kt2_oam_control[unit] = oc;
#if defined (INCLUDE_BHH)
if (soc_feature(unit, soc_feature_oam_pm)) {
BCM_IF_ERROR_RETURN(_bcm_kt2_oam_pm_init(unit));
}
#endif
/* Set up OAM module related profile tables. */
rv = _bcm_kt2_oam_profile_tables_init(unit, oc);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Initialize all the TPID tables */
rv = _bcm_kt2_outer_tpid_init(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
rv = _bcm_kt2_inner_tpid_init(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
rv = _bcm_kt2_subport_tpid_init(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_init(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return rv;
}
#if defined(BCM_WARM_BOOT_SUPPORT)
if(!SOC_WARM_BOOT(unit)) {
bcm_kt2_oam_scache_alloc(unit);
}
if (SOC_WARM_BOOT(unit)) {
rv = _bcm_kt2_oam_reinit(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
} else
#endif
{
BCM_PBMP_CLEAR(all_pbmp);
BCM_PBMP_ASSIGN(all_pbmp, PBMP_ALL(unit));
if (soc_feature(unit, soc_feature_flex_port)) {
BCM_IF_ERROR_RETURN(_bcm_kt2_flexio_pbmp_update(unit, &all_pbmp));
}
if (soc_feature(unit, soc_feature_linkphy_coe) ||
soc_feature(unit, soc_feature_subtag_coe)) {
_bcm_kt2_subport_pbmp_update(unit, &all_pbmp);
}
/* Enable OAM processing on all ports of this unit. */
PBMP_ITER(all_pbmp, port) {
rv = bcm_esw_port_control_set(unit, port, bcmPortControlOAMEnable,
TRUE);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
}
/* Enable CCM Rx timeouts. */
rv = _bcm_kt2_oam_ccm_rx_timeout_set(unit, 1);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Enable CCM Tx control. */
rv = _bcm_kt2_oam_ccm_tx_config_set(unit, 1);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/*
* Misc config: Enable IFP lookup on the CPU port.
*/
rv = _bcm_kt2_oam_misc_config(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Set all the non CFM opcodes in opcode group -1 */
oc->opcode_grp_bmp[0] = ~(_BCM_OAM_OPCODE_TYPE_CFM_MASK);
oc->opcode_grp_1_bmp[0] = 0;
oc->opcode_grp_2_bmp[0] = 0;
for (index = 1; index < 8; index ++) {
oc->opcode_grp_bmp[index] = _BCM_OAM_OPCODE_TYPE_NON_CFM_MASK;
oc->opcode_grp_1_bmp[index] = 0;
oc->opcode_grp_2_bmp[index] = 0;
}
/* Init OAM opcode group */
rv =_bcm_kt2_oam_opcode_group_init(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Set default values for flexible oam domain control */
rv = _bcm_kt2_oam_flexible_oam_domain_ctrl_set(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Set default values for flexible ingress and egress drop control */
rv = _bcm_kt2_oam_flexible_drop_ctrl_set(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Set default values for s-interface passive processing control */
rv = _bcm_kt2_oam_s_intf_passive_proc_ctrl_set(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Enable OLP handling on HG ports */
rv = _bcm_kt2_oam_hg_olp_enable(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Set default olp header type mapping */
rv = _bcm_kt2_oam_olp_header_type_mapping_set(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Set OAM drop control to not to drop wrong version OAM packets */
rv = _bcm_kt2_oam_drop_ctrl_set(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Set OAM LM cng and cpu data control*/
rv = _bcm_kt2_oam_lm_cng_cpu_ctrl_set_default(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Set Magic port used in OLP-XGS communication */
rv = _bcm_kt2_oam_olp_magic_port_set(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
/* Disable MACSA Zero check */
rv = _bcm_kt2_oam_macsa_zero_check_disable(unit);
if (BCM_FAILURE(rv)) {
_bcm_kt2_oam_control_free(unit, oc);
return (rv);
}
}
/*
* BHH init
*/
if(soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
if(ukernel_not_ready == 0){
/*
* Initialize HOST side
*/
oc->cpu_cosq = soc_property_get(unit, spn_BHH_COSQ, BHH_COSQ_INVALID);
/*
* Allocate DMA buffers
*
* DMA buffer will be used to send and receive 'long' messages
* between SDK Host and uController (BTE).
*/
oc->dma_buffer_len = sizeof(shr_bhh_msg_ctrl_t);
/* Adding feature specific for dma_buf_len as currently
* required buffer length for faults_get can be configurable
* by user during init. So, taking the max value of either of
* them for the purpose.Putting it under fature_check for
* giving flexibility to user.
*/
#ifdef INCLUDE_OAM_FAULTS_MULTI_GET
if(soc_feature(unit, soc_feature_faults_multi_ep_get)) {
oc->dma_buffer_len = MAX((oc->bhh_endpoint_count * sizeof(uint32)), oc->dma_buffer_len);
}
#endif
oc->dma_buffer = soc_cm_salloc(unit, oc->dma_buffer_len,
"BHH DMA buffer");
if (!oc->dma_buffer) {
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_MEMORY);
}
sal_memset(oc->dma_buffer, 0, oc->dma_buffer_len);
oc->dmabuf_reply = soc_cm_salloc(unit, oc->dma_buffer_len,
"BHH uC reply");
if (!oc->dmabuf_reply) {
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_MEMORY);
}
sal_memset(oc->dmabuf_reply, 0, oc->dma_buffer_len);
/* RX DMA channel (0..3) local to the uC */
oc->rx_channel = BCM_KT_BHH_RX_CHANNEL;
#if defined(BCM_WARM_BOOT_SUPPORT)
if (!SOC_WARM_BOOT(unit)) {
#endif
/* Set control message data */
sal_memset(&msg_init, 0, sizeof(msg_init));
msg_init.num_sessions = oc->bhh_endpoint_count;
msg_init.rx_channel = oc->rx_channel;
msg_init.node_id = node_id;
sal_memcpy(msg_init.carrier_code, carrier_code, SHR_BHH_CARRIER_CODE_LEN);
msg_init.max_encap_length = oc->bhh_max_encap_length;
if (bhh_consolidate_final_event) {
msg_init.flags |= BHH_SDK_MSG_CTRL_INIT_ONLY_FINAL_EVENT;
}
msg_init.data_collection_mode = oc->pm_bhh_lmdm_data_collection_mode;
/* Pack control message data into DMA buffer */
buffer = oc->dma_buffer;
buffer_ptr = bhh_sdk_msg_ctrl_init_pack(unit, buffer, &msg_init);
buffer_len = buffer_ptr - buffer;
/* Send BHH Init message to uC */
rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
MOS_MSG_SUBCLASS_BHH_INIT,
buffer_len, 0,
MOS_MSG_SUBCLASS_BHH_INIT_REPLY,
&reply_len);
if (BCM_FAILURE(rv) || (reply_len != 0)) {
_bcm_kt2_oam_control_free(unit, oc);
return (BCM_E_INTERNAL);
}
#if defined(BCM_WARM_BOOT_SUPPORT)
}
#endif
/*
* Start event message callback thread
*/
priority = soc_property_get(unit, spn_BHH_THREAD_PRI, BHH_THREAD_PRI_DFLT);
if (oc->event_thread_id == NULL) {
if ((oc->event_thread_id =
sal_thread_create("bcmBHH", SAL_THREAD_STKSZ,
priority,
_bcm_kt2_oam_bhh_callback_thread,
(void*)oc)) == SAL_THREAD_ERROR) {
oc->event_thread_id = NULL;
BCM_IF_ERROR_RETURN(bcm_kt2_oam_detach(unit));
return (BCM_E_MEMORY);
}
}
/*
* End BHH init
*/
}
/*
* Initialize HW
*/
#ifdef BCM_WARM_BOOT_SUPPORT
if (SOC_WARM_BOOT(unit)) {
rv = _bcm_kt2_oam_bhh_cos_map_recover(unit);
} else
#endif
{
rv = _bcm_kt2_oam_bhh_hw_init(unit);
}
if (BCM_FAILURE(rv)) {
BCM_IF_ERROR_RETURN(bcm_kt2_oam_detach(unit));
return rv;
}
#endif /* INCLUDE_BHH */
}
/* OAM initialization complete. */
_kt2_oam_control[unit]->init = TRUE;
return (BCM_E_NONE);
}
/*
 * Function:
 *     bcm_kt2_oam_detach
 * Purpose:
 *     Shut down OAM subsystem: disable OAM processing in hardware, stop the
 *     BHH uKernel application and its event thread, destroy all groups and
 *     endpoints, and free all module resources.
 * Parameters:
 *     unit - (IN) BCM device number
 * Returns:
 *     BCM_E_XXX
 */
int
bcm_kt2_oam_detach(int unit)
{
    _bcm_oam_control_t *oc; /* Pointer to OAM control structure. */
    bcm_port_t port;        /* Port number.                      */
    int rv;                 /* Operation return status.          */
    bcm_pbmp_t all_pbmp;
#if defined(INCLUDE_BHH)
    uint16 reply_len;
    sal_usecs_t timeout = 0;
#endif
    /* Get the device OAM module handle. */
    oc = _kt2_oam_control[unit];
    if (NULL == oc) {
        /* Module already uninitialized. */
        return (BCM_E_NONE);
    }
    /* Unregister all OAM interrupt event handlers and SER handler. */
    soc_kt2_oam_handler_register(unit, NULL);
    soc_kt2_oam_ser_handler_register(unit, NULL);
    /* The mutex may not exist if init failed part-way; guard every lock and
     * unlock on it below. */
    if (NULL != oc->oc_lock) {
        _BCM_OAM_LOCK(oc);
    }
    rv = _bcm_kt2_oam_events_unregister(unit, oc);
    if (BCM_FAILURE(rv)) {
        if (NULL != oc->oc_lock) {
            _BCM_OAM_UNLOCK(oc);
        }
        return (rv);
    }
    /* Disable CCM Rx Timeouts. */
    rv = _bcm_kt2_oam_ccm_rx_timeout_set(unit, 0);
    if (BCM_FAILURE(rv)) {
        if (NULL != oc->oc_lock) {
            _BCM_OAM_UNLOCK(oc);
        }
        return (rv);
    }
    /* Disable CCM Tx control for the device. */
    rv = _bcm_kt2_oam_ccm_tx_config_set(unit, 0);
    if (BCM_FAILURE(rv)) {
        if (NULL != oc->oc_lock) {
            _BCM_OAM_UNLOCK(oc);
        }
        return (rv);
    }
    BCM_PBMP_CLEAR(all_pbmp);
    BCM_PBMP_ASSIGN(all_pbmp, PBMP_ALL(unit));
    if (soc_feature(unit, soc_feature_flex_port)) {
        rv = _bcm_kt2_flexio_pbmp_update(unit, &all_pbmp);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            return rv;
        }
    }
    if (soc_feature(unit, soc_feature_linkphy_coe) ||
        soc_feature(unit, soc_feature_subtag_coe)) {
        _bcm_kt2_subport_pbmp_update(unit, &all_pbmp);
    }
    /* Disable OAM PDU Rx processing on all ports. */
    PBMP_ITER(all_pbmp, port) {
        rv = bcm_esw_port_control_set(unit, port,
                                      bcmPortControlOAMEnable, 0);
        if (BCM_FAILURE(rv)) {
            if (NULL != oc->oc_lock) {
                _BCM_OAM_UNLOCK(oc);
            }
            return (rv);
        }
    }
    /*
     * BHH specific
     */
    if (soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
        if (oc->ukernel_not_ready == 0) {
            /*
             * Event Handler thread exit signal: cancel the uC message
             * receive and poll (up to ~5s) for the callback thread to
             * clear its thread id on exit.
             */
            timeout = sal_time_usecs() + 5000000;
            while (NULL != oc->event_thread_id) {
                soc_cmic_uc_msg_receive_cancel(unit, oc->uc_num,
                                               MOS_MSG_CLASS_BHH_EVENT);
                if (sal_time_usecs() < timeout) {
                    /* give some time to already running bhh callback thread
                     * to schedule and exit */
                    sal_usleep(10000);
                } else {
                    /* timeout */
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "BHH event thread did not exit.\n")));
                    _BCM_OAM_UNLOCK(oc);
                    return BCM_E_INTERNAL;
                }
            }
            /*
             * Send BHH Uninit message to uC
             * Ignore error since that may indicate uKernel was reloaded.
             */
#if defined(BCM_WARM_BOOT_SUPPORT)
            if (!SOC_WARM_BOOT(unit)) {
#endif
                rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                                       MOS_MSG_SUBCLASS_BHH_UNINIT,
                                                       0, 0,
                                                       MOS_MSG_SUBCLASS_BHH_UNINIT_REPLY,
                                                       &reply_len);
                /* A nonzero reply length on success indicates a protocol
                 * error from the uC. */
                if (BCM_SUCCESS(rv) && (reply_len != 0)) {
                    if (NULL != oc->oc_lock) {
                        _BCM_OAM_UNLOCK(oc);
                    }
                    return BCM_E_INTERNAL;
                }
#if defined(BCM_WARM_BOOT_SUPPORT)
            }
#endif
            /*
             * Delete CPU COS queue mapping entries for BHH packets
             */
            if (!SOC_WARM_BOOT(unit)) {
                if (oc->cpu_cosq_ach_error_index >= 0) {
                    rv = bcm_esw_rx_cosq_mapping_delete(unit,
                                                        oc->cpu_cosq_ach_error_index);
                    if (BCM_FAILURE(rv)) {
                        if (NULL != oc->oc_lock) {
                            _BCM_OAM_UNLOCK(oc);
                        }
                        return (rv);
                    }
                    oc->cpu_cosq_ach_error_index = -1;
                }
                if (oc->cpu_cosq_invalid_error_index >= 0) {
                    rv = bcm_esw_rx_cosq_mapping_delete(unit,
                                                        oc->cpu_cosq_invalid_error_index);
                    if (BCM_FAILURE(rv)) {
                        if (NULL != oc->oc_lock) {
                            _BCM_OAM_UNLOCK(oc);
                        }
                        return (rv);
                    }
                    oc->cpu_cosq_invalid_error_index = -1;
                }
                if (oc->bhh_lb_index >= 0) {
                    rv = bcm_esw_rx_cosq_mapping_delete(unit,
                                                        oc->bhh_lb_index);
                    if (BCM_FAILURE(rv)) {
                        if (NULL != oc->oc_lock) {
                            _BCM_OAM_UNLOCK(oc);
                        }
                        return (rv);
                    }
                    oc->bhh_lb_index = -1;
                }
            }
        }
#endif /* INCLUDE_BHH */
    }
    /* Destroy all groups and associated endpoints and free the resources. */
    rv = bcm_kt2_oam_group_destroy_all(unit);
    if (BCM_FAILURE(rv)) {
        if (NULL != oc->oc_lock) {
            _BCM_OAM_UNLOCK(oc);
        }
        return (rv);
    }
    rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_destroy(unit);
    if (BCM_FAILURE(rv)) {
        if (NULL != oc->oc_lock) {
            _BCM_OAM_UNLOCK(oc);
        }
        return (rv);
    }
    /* Release the protection mutex. */
    _BCM_OAM_UNLOCK(oc);
    /* Free OAM module allocated resources. */
    _bcm_kt2_oam_control_free(unit, oc);
    return (BCM_E_NONE);
}
/*
 * Function:
 *     bcm_kt2_oam_group_create
 * Purpose:
 *     Create or replace an OAM group object.  Programs the CRC32-reduced
 *     group name (MAID), software RDI bit and lowest alarm priority into
 *     the MAID_REDUCTION and MA_STATE hardware tables.  For BHH-capable
 *     devices, pushes the SW RDI state to the uKernel for every BHH
 *     endpoint already attached to the group.
 * Parameters:
 *     unit       - (IN) BCM device number
 *     group_info - (IN/OUT) Pointer to an OAM group information.
 *                  When created without BCM_OAM_GROUP_WITH_ID,
 *                  group_info->id is filled with the allocated index.
 * Returns:
 *     BCM_E_XXX
 */
int
bcm_kt2_oam_group_create(int unit, bcm_oam_group_info_t *group_info)
{
    ma_state_entry_t ma_state_entry; /* MA State table entry. */
    uint8 grp_name_hw_buf[BCM_OAM_GROUP_NAME_LENGTH]; /* Grp name (HW order). */
    maid_reduction_entry_t maid_reduction_entry; /* MA ID reduction table */
                                                 /* entry. */
    _bcm_oam_group_data_t *ma_group; /* Pointer to group info. */
    _bcm_oam_control_t *oc; /* OAM control structure. */
    int rv; /* Operation return status. */
    uint8 sw_rdi; /* Remote defect indicator. */
#if defined(BCM_KATANA2_SUPPORT) && defined(INCLUDE_BHH)
    bhh_sdk_msg_ctrl_sess_set_t msg_sess;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    uint32 session_flags = 0;
    _bcm_oam_hash_data_t *h_data_p;
    _bcm_oam_ep_list_t *cur;
#endif
    /* Validate input parameter. */
    if (NULL == group_info) {
        return (BCM_E_PARAM);
    }
    /* Get OAM control structure handle. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Validate group id. */
    if (group_info->flags & BCM_OAM_GROUP_WITH_ID) {
        _BCM_OAM_GROUP_INDEX_VALIDATE(group_info->id);
    }
    _BCM_OAM_LOCK(oc);
    /*
     * If MA group create is called with replace flag bit set.
     * - Check and return error if a group does not exist with the ID.
     */
    if (group_info->flags & BCM_OAM_GROUP_REPLACE) {
        if (group_info->flags & BCM_OAM_GROUP_WITH_ID) {
            /* Search the list with the MA Group ID value. */
            rv = shr_idxres_list_elem_state(oc->group_pool, group_info->id);
            if (BCM_E_EXISTS != rv) {
                _BCM_OAM_UNLOCK(oc);
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Group does not exist.\n")));
                return (BCM_E_PARAM);
            }
        } else {
            /* REPLACE without WITH_ID is ambiguous - reject it. */
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Replace command needs a "
                                   "valid Group ID.\n")));
            return (BCM_E_PARAM);
        }
    } else if (group_info->flags & BCM_OAM_GROUP_WITH_ID) {
        /*
         * If MA group create is called with ID flag bit set.
         * - Check and return error if the ID is already in use.
         */
        rv = shr_idxres_list_reserve(oc->group_pool, group_info->id,
                                     group_info->id);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            /* Map the pool's RESOURCE error to EXISTS for the caller. */
            return ((rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv);
        }
    } else {
        /* Reserve the next available group index. */
        rv = shr_idxres_list_alloc(oc->group_pool,
                                   (shr_idxres_element_t *) &group_info->id);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Group allocation (GID=%d)"
                                   " %s\n"), group_info->id, bcm_errmsg(rv)));
            return (rv);
        }
    }
    /* Get this group memory to store group information. */
    ma_group = &oc->group_info[group_info->id];
    /* Store the group name. */
    sal_memcpy(&ma_group->name, &group_info->name, BCM_OAM_GROUP_NAME_LENGTH);
    /* Store Lowest Alarm Priority */
    ma_group->lowest_alarm_priority = group_info->lowest_alarm_priority;
    if(!(group_info->flags & BCM_OAM_GROUP_REPLACE)) {
        /* Fresh group: allocate the endpoint-list head pointer.
         * NOTE(review): allocation result is not NULL-checked here --
         * _BCM_OAM_ALLOC presumably handles failure; confirm. */
        _BCM_OAM_ALLOC((ma_group->ep_list),_bcm_oam_ep_list_t *,
                       sizeof(_bcm_oam_ep_list_t *), "EP list head");
        /* Initialize head to NULL.*/
        *ma_group->ep_list = NULL;
    }
    /*
     * Maintenance Association ID - Reduction table update.
     */
    /* Prepare group name for hardware write. */
    _bcm_kt2_oam_group_name_mangle(ma_group->name, grp_name_hw_buf);
    sal_memset(&maid_reduction_entry, 0, sizeof(maid_reduction_entry_t));
    /* Calculate CRC32 value for the group name string and set in entry. */
    soc_MAID_REDUCTIONm_field32_set
        (unit, &maid_reduction_entry, REDUCED_MAIDf,
         soc_draco_crc32(grp_name_hw_buf, BCM_OAM_GROUP_NAME_LENGTH));
    /* Check if software RDI flag bit needs to be set in hardware. */
    sw_rdi = ((group_info->flags & BCM_OAM_GROUP_REMOTE_DEFECT_TX) ? 1 : 0);
    /* Set RDI status for out going CCM PDUs. */
    soc_MAID_REDUCTIONm_field32_set(unit, &maid_reduction_entry, SW_RDIf,
                                    sw_rdi);
    if (soc_feature(unit, soc_feature_bhh)) {
#if defined(BCM_KATANA2_SUPPORT) && defined(INCLUDE_BHH)
        /* Send message to uC to set the Soft RDI */
        if (group_info->flags & BCM_OAM_GROUP_REMOTE_DEFECT_TX) {
            /* Get the endpoint list head pointer. */
            cur = *(ma_group->ep_list);
            /* Walk every endpoint already attached to this group and push
             * the RDI state to the uKernel for the BHH ones. */
            while (NULL != cur) {
                h_data_p = cur->ep_data_p;
                if (NULL == h_data_p) {
                    LOG_ERROR(BSL_LS_BCM_OAM, (BSL_META_U(unit,
                              "OAM Error: Group=%d endpoints access failed -"
                               " %s.\n"), group_info->id,
                               bcm_errmsg(BCM_E_INTERNAL)));
                    _BCM_OAM_UNLOCK(oc);
                    return (BCM_E_INTERNAL);
                }
                if (h_data_p->oam_domain == _BCM_OAM_DOMAIN_BHH) {
                    /* Set the RDI flag in session bits */
                    session_flags |= SHR_BHH_SESS_SET_F_RDI;
                    /* Get the session is from endpoint */
                    msg_sess.sess_id =
                        BCM_OAM_BHH_GET_UKERNEL_EP(h_data_p->ep_id);
                    /* Pack control message data into DMA buffer */
                    msg_sess.flags = session_flags;
                    buffer     = oc->dma_buffer;
                    buffer_ptr =
                         bhh_sdk_msg_ctrl_sess_set_pack(buffer, &msg_sess);
                    buffer_len = buffer_ptr - buffer;
                    /* Send BHH Session Update message to uC */
                    rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                            MOS_MSG_SUBCLASS_BHH_SESS_SET,
                                                           buffer_len, 0,
                                      MOS_MSG_SUBCLASS_BHH_SESS_SET_REPLY,
                                                           &reply_len);
                    if (BCM_FAILURE(rv)) {
                        LOG_ERROR(BSL_LS_BCM_OAM, (BSL_META_U(unit,
                                  "OAM Error: ukernel msg failed for"
                                  "%s.\n"), bcm_errmsg(rv)));
                        _BCM_OAM_UNLOCK(oc);
                        return (BCM_E_INTERNAL);
                    }
                }
                cur = cur->next;
            }
        }
#endif
    }
    /* Enable hardware lookup for this entry. */
    soc_MAID_REDUCTIONm_field32_set(unit, &maid_reduction_entry, VALIDf, 1);
    /* Write entry to hardware.
     * NOTE(review): on failure below, a freshly reserved group index is
     * left allocated in group_pool (not released) -- confirm whether the
     * caller is expected to destroy the group on error. */
    rv = WRITE_MAID_REDUCTIONm(unit, MEM_BLOCK_ALL, group_info->id,
                               &maid_reduction_entry);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        return rv;
    }
    /*
     * Maintenance Association State table update.
     */
    sal_memset(&ma_state_entry, 0, sizeof(ma_state_entry_t));
    /*
     * If it is a group info replace operation, retain previous group
     * defect status.
     */
    if (group_info->flags & BCM_OAM_GROUP_REPLACE) {
        rv = READ_MA_STATEm(unit, MEM_BLOCK_ALL, group_info->id,
                            &ma_state_entry);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            return rv;
        }
    }
    /* Set the lowest alarm priority info. */
    soc_MA_STATEm_field32_set(unit, &ma_state_entry, LOWESTALARMPRIf,
                              group_info->lowest_alarm_priority);
    /* Mark the entry as valid. */
    soc_MA_STATEm_field32_set(unit, &ma_state_entry, VALIDf, 1);
    /* Write group information to hardware table. */
    rv = WRITE_MA_STATEm(unit, MEM_BLOCK_ALL, group_info->id,
                         &ma_state_entry);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        return rv;
    }
    /* Make the group as in used status. */
    ma_group->in_use = 1;
    _BCM_OAM_UNLOCK(oc);
#ifdef BCM_WARM_BOOT_SUPPORT
    /* Mark persistent state dirty so warm-boot sync picks it up. */
    SOC_CONTROL_LOCK(unit);
    SOC_CONTROL(unit)->scache_dirty = 1;
    SOC_CONTROL_UNLOCK(unit);
#endif
    return BCM_E_NONE;
}
#if defined(INCLUDE_BHH)
/*
 * Function:
 *     _bcm_kt2_oam_get_group_sw_rdi
 * Purpose:
 *     Fetch the software RDI (Remote Defect Indicator) bit programmed for
 *     an OAM group in the MAID_REDUCTION hardware table.
 * Parameters:
 *     unit         - (IN) BCM device number
 *     group_index  - (IN) Group hardware table index
 *     group_sw_rdi - (OUT) Set to 1 when SW RDI is enabled; left
 *                    untouched otherwise, so callers must pre-initialize.
 * Returns:
 *     BCM_E_NONE - No errors.
 *     BCM_E_XXX  - Otherwise.
 */
STATIC int
_bcm_kt2_oam_get_group_sw_rdi(int unit, bcm_oam_group_t group_index,
                              uint8 *group_sw_rdi)
{
    maid_reduction_entry_t entry; /* MAID reduction table entry buffer. */
    uint32 rdi;                   /* SW RDI field value read from HW.   */

    /* Read this group's MAID_REDUCTION entry from hardware. */
    BCM_IF_ERROR_RETURN(READ_MAID_REDUCTIONm(unit, MEM_BLOCK_ANY,
                                             group_index, &entry));

    rdi = soc_MAID_REDUCTIONm_field32_get(unit, &entry, SW_RDIf);
    if (rdi == 1) {
        *group_sw_rdi = 1;
    }
    return (BCM_E_NONE);
}
#endif
/*
 * Function:
 *     bcm_kt2_oam_group_get
 * Purpose:
 *     Get an OAM group object.  For BHH-capable devices with a running
 *     uKernel, additionally queries each BHH endpoint's session and
 *     aggregates its fault flags into group_info->faults.
 * Parameters:
 *     unit       - (IN) BCM device number
 *     group      - (IN) OAM Group ID.
 *     group_info - (OUT) Pointer to group information buffer.
 * Returns:
 *     BCM_E_XXX
 */
int
bcm_kt2_oam_group_get(int unit, bcm_oam_group_t group,
                      bcm_oam_group_info_t *group_info)
{
    _bcm_oam_control_t *oc;      /* Pointer to OAM control structure. */
    _bcm_oam_group_data_t *group_p; /* Pointer to group list.         */
    int rv;                      /* Operation return status.          */
#if defined(INCLUDE_BHH)
    bhh_sdk_msg_ctrl_sess_get_t msg;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    _bcm_oam_hash_data_t *h_data_p;
    _bcm_oam_ep_list_t *cur = NULL;
    int sess_id = 0;
#endif
    /* Validate input parameter. */
    if (NULL == group_info) {
        return (BCM_E_PARAM);
    }
    /* Get OAM device control structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Validate group index. */
    _BCM_OAM_GROUP_INDEX_VALIDATE(group);
    _BCM_OAM_LOCK(oc);
    /* Check if the group is in use. */
    rv = shr_idxres_list_elem_state(oc->group_pool, group);
    if (BCM_E_EXISTS != rv) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: GID=%d - %s.\n"),
                   group, bcm_errmsg(rv)));
        return (rv);
    }
    /* Get pointer to this group in the group list. */
    group_p = oc->group_info;
    /* Get the group information. */
    rv = _bcm_kt2_oam_get_group(unit, group, group_p, group_info);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: bcm_kt2_oam_group_get Group ID=%d "
                               "- Failed.\n"), group));
        return (rv);
    }
    if (soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
        /* uKernel must be up to query BHH session fault state. */
        if (!oc->ukernel_not_ready) {
            group_p = &oc->group_info[group_info->id];
            /* Get the endpoint list head pointer. */
            if (group_p->ep_list != NULL) {
                cur = *(group_p->ep_list);
            }
            /* Aggregate fault flags from every BHH endpoint session. */
            while (NULL != cur) {
                h_data_p = cur->ep_data_p;
                if (NULL == h_data_p) {
                    LOG_ERROR(BSL_LS_BCM_OAM, (BSL_META_U(unit,
                              "OAM(unit %d) Error: Group=%d endpoints access failed -"
                              " %s.\n"), unit, group_info->id,
                              bcm_errmsg(BCM_E_INTERNAL)));
                    _BCM_OAM_UNLOCK(oc);
                    return (BCM_E_INTERNAL);
                }
                /* Skip non-BHH endpoints and CPU-redirected sessions,
                 * which have no uKernel session to query. */
                if ((h_data_p->oam_domain == _BCM_OAM_DOMAIN_BHH) &&
                    !(h_data_p->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU)) {
                    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(h_data_p->ep_id);
                    rv = _bcm_kt2_oam_bhh_msg_send_receive(
                                             unit,
                                             MOS_MSG_SUBCLASS_BHH_SESS_GET,
                                             sess_id, 0,
                                             MOS_MSG_SUBCLASS_BHH_SESS_GET_REPLY,
                                             &reply_len);
                    if (BCM_FAILURE(rv)) {
                        LOG_ERROR(BSL_LS_BCM_OAM,
                                  (BSL_META_U(unit,
                                              "OAM(unit %d) Error: ukernel msg failed for"
                                              "EP=%d %s.\n"), unit, h_data_p->ep_id,
                                   bcm_errmsg(rv)));
                        _BCM_OAM_UNLOCK(oc);
                        return (rv);
                    }
                    /* Unpack reply from DMA buffer; length mismatch with
                     * the uKernel's reply indicates a protocol error. */
                    buffer = oc->dma_buffer;
                    buffer_ptr = bhh_sdk_msg_ctrl_sess_get_unpack(buffer, &msg);
                    buffer_len = buffer_ptr - buffer;
                    if (reply_len != buffer_len) {
                        rv =  BCM_E_INTERNAL;
                        LOG_ERROR(BSL_LS_BCM_OAM,
                                  (BSL_META_U(unit,
                                              "OAM(unit %d) Error: ukernel msg failed for"
                                              " EP=%d %s.\n"), unit, h_data_p->ep_id,
                                   bcm_errmsg(rv)));
                        _BCM_OAM_UNLOCK(oc);
                        return (rv);
                    } else {
                        /* Translate uKernel fault bits to BCM fault flags. */
                        if(msg.fault_flags & BHH_BTE_EVENT_CCM_TIMEOUT){
                            group_info->faults |= BCM_OAM_BHH_FAULT_CCM_TIMEOUT;
                        }
                        if(msg.fault_flags & BHH_BTE_EVENT_CCM_RDI){
                            group_info->faults |= BCM_OAM_BHH_FAULT_CCM_RDI;
                        }
                        if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_LEVEL){
                            group_info->faults |=
                                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_MEG_LEVEL;
                        }
                        if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_ID){
                            group_info->faults |=
                                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_MEG_ID;
                        }
                        if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_MEP_ID){
                            group_info->faults |=
                                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_MEP_ID;
                        }
                        if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_PERIOD){
                            group_info->faults |=
                                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_PERIOD;
                        }
                        if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_PRIORITY){
                            group_info->faults |=
                                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_PRIORITY;
                        }
                        if(msg.fault_flags & BHH_BTE_EVENT_CSF_LOS) {
                            group_info->faults |=
                                        BCM_OAM_ENDPOINT_BHH_FAULT_CSF_LOS;
                        }
                        if(msg.fault_flags & BHH_BTE_EVENT_CSF_FDI) {
                            group_info->faults |=
                                        BCM_OAM_ENDPOINT_BHH_FAULT_CSF_FDI;
                        }
                        if(msg.fault_flags & BHH_BTE_EVENT_CSF_RDI) {
                            group_info->faults |=
                                        BCM_OAM_ENDPOINT_BHH_FAULT_CSF_RDI;
                        }
                    }
                }
                cur = cur->next;
            }
        }
#endif
    }
    _BCM_OAM_UNLOCK(oc);
    return (BCM_E_NONE);
}
/*
 * Function:
 *     bcm_kt2_oam_group_destroy
 * Purpose:
 *     Destroy an OAM group object and its associated endpoints.
 *     Clears the group's MAID_REDUCTION and MA_STATE hardware entries
 *     and returns the group index to the free pool.
 * Parameters:
 *     unit  - (IN) BCM device number
 *     group - (IN) The ID of the OAM group object to destroy
 * Returns:
 *     BCM_E_XXX
 */
int
bcm_kt2_oam_group_destroy(int unit,
                          bcm_oam_group_t group)
{
    _bcm_oam_control_t    *oc;       /* Pointer to OAM control structure. */
    _bcm_oam_group_data_t *g_info_p; /* Pointer to group list.            */
    int                   rv;        /* Operation return status.          */
    maid_reduction_entry_t maid_reduction_entry; /* MAID_REDUCTION entry. */
    ma_state_entry_t       ma_state_entry;       /* MA_STATE table entry. */
    /* Get OAM device control structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    /* Validate group index. */
    _BCM_OAM_GROUP_INDEX_VALIDATE(group);
    _BCM_OAM_LOCK(oc);
    /* Check if the group is in use. */
    rv = shr_idxres_list_elem_state(oc->group_pool, group);
    if (BCM_E_EXISTS != rv) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: GID=%d - %s.\n"),
                   group, bcm_errmsg(rv)));
        return (rv);
    }
    /* Get pointer to this group in the group list. */
    g_info_p = &oc->group_info[group];
    /* Tear down every endpoint attached to the group first. */
    rv = _bcm_kt2_oam_group_endpoints_destroy(unit, g_info_p);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: bcm_kt2_oam_endpoint_destroy_all"
                               " (GID=%d) - %s.\n"), group, bcm_errmsg(rv)));
        return (rv);
    }
    /* Invalidate the group's MAID_REDUCTION entry (all-zero = invalid). */
    sal_memset(&maid_reduction_entry, 0, sizeof(maid_reduction_entry_t));
    rv = WRITE_MAID_REDUCTIONm(unit, MEM_BLOCK_ALL, group,
                               &maid_reduction_entry);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MAID REDUCTION write "
                               "(GID=%d) - %s.\n"), group, bcm_errmsg(rv)));
        return (rv);
    }
    /*
     * Maintenance Association State table update.
     */
    sal_memset(&ma_state_entry, 0, sizeof(ma_state_entry_t));
    rv = WRITE_MA_STATEm(unit, MEM_BLOCK_ALL, group, &ma_state_entry);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MA STATE write "
                               "(GID=%d) - %s.\n"), group, bcm_errmsg(rv)));
        return (rv);
    }
    /* Return Group ID back to free group. */
    rv = shr_idxres_list_free(oc->group_pool, group);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
#ifdef BCM_WARM_BOOT_SUPPORT
    /* Mark persistent state dirty so warm-boot sync picks it up. */
    SOC_CONTROL_LOCK(unit);
    SOC_CONTROL(unit)->scache_dirty = 1;
    SOC_CONTROL_UNLOCK(unit);
#endif
    _BCM_OAM_UNLOCK(oc);
    return (BCM_E_NONE);
}
/*
 * Function:
 *     bcm_kt2_oam_group_destroy_all
 * Purpose:
 *     Destroy all OAM group objects and their associated endpoints.
 * Parameters:
 *     unit - (IN) BCM device number
 * Returns:
 *     BCM_E_XXX
 * Notes:
 *     NOTE(review): this routine holds the OAM lock while calling
 *     bcm_kt2_oam_group_destroy, which takes the same lock again; this
 *     relies on _BCM_OAM_LOCK being re-entrant -- confirm against the
 *     lock implementation.
 */
int
bcm_kt2_oam_group_destroy_all(int unit)
{
    _bcm_oam_control_t *oc;    /* Pointer to OAM control structure.     */
    int                group;  /* Maintenance Association group index.  */
    int                rv;     /* Operation return status .             */
    /* Get device OAM control structure handle. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_LOCK(oc);
    for (group = 0; group < oc->group_count; group++) {
        /* Check if the group is in use; skip unallocated indices. */
        rv = shr_idxres_list_elem_state(oc->group_pool, group);
        if (BCM_E_EXISTS != rv) {
            continue;
        }
        rv = bcm_kt2_oam_group_destroy(unit, group);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Group destroy failed "
                                   "(GID=%d) - %s.\n"), group, bcm_errmsg(rv)));
            return (rv);
        }
    }
#ifdef BCM_WARM_BOOT_SUPPORT
    /* Mark persistent state dirty so warm-boot sync picks it up. */
    SOC_CONTROL_LOCK(unit);
    SOC_CONTROL(unit)->scache_dirty = 1;
    SOC_CONTROL_UNLOCK(unit);
#endif
    _BCM_OAM_UNLOCK(oc);
    return (BCM_E_NONE);
}
/*
 * Function:
 *     bcm_kt2_oam_group_traverse
 * Purpose:
 *     Walk every allocated OAM group on the device, invoking the supplied
 *     callback once per group with that group's information.
 * Parameters:
 *     unit      - (IN) BCM device number
 *     cb        - (IN) Pointer to call back function.
 *     user_data - (IN) Pointer to user supplied data.
 * Returns:
 *     BCM_E_XXX
 */
int
bcm_kt2_oam_group_traverse(int unit, bcm_oam_group_traverse_cb cb,
                           void *user_data)
{
    _bcm_oam_control_t    *oc;        /* OAM control structure pointer.   */
    _bcm_oam_group_data_t *grp_array; /* Base pointer of the group table. */
    bcm_oam_group_info_t  grp_info;   /* Per-group info passed to 'cb'.   */
    bcm_oam_group_t       gid;        /* Group index being visited.       */
    int                   rv;         /* Operation return status.         */

    /* A traverse without a callback is meaningless. */
    if (NULL == cb) {
        return (BCM_E_PARAM);
    }

    /* Get device OAM control structure handle. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    grp_array = oc->group_info;

    for (gid = 0; gid < oc->group_count; gid++) {
        /* Visit only group indices that are currently allocated. */
        if (BCM_E_EXISTS
            != shr_idxres_list_elem_state(oc->group_pool, gid)) {
            continue;
        }

        bcm_oam_group_info_t_init(&grp_info);

        /* Populate grp_info from software state and hardware tables. */
        rv = _bcm_kt2_oam_get_group(unit, gid, grp_array, &grp_info);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: _bcm_kt2_oam_get_group "
                                   "(GID=%d) - %s.\n"), gid, bcm_errmsg(rv)));
            return (rv);
        }

        /* Hand the group to the user's callback; abort on its error. */
        rv = cb(unit, &grp_info, user_data);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: User call back routine "
                                   "(GID=%d) - %s.\n"), gid, bcm_errmsg(rv)));
            return (rv);
        }
    }
    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
 * Function:
 *     bcm_kt2_oam_endpoint_lm_update
 * Purpose:
 *     Update loss-measurement (LM) parameters of an existing local MEP.
 *     Determines whether loss measurement is being added, deleted, or
 *     re-configured (counter pool / priority map change), and applies
 *     the corresponding counter clear/set operations to the L3 and
 *     EGR_MP_GROUP tables.  Also refreshes the CCM Tx counter action
 *     for Ethernet endpoints when requested.
 * Parameters:
 *     unit    - (IN) BCM device number
 *     ep_info - (IN/OUT) Pointer to endpoint information buffer.
 * Returns:
 *     BCM_E_PARAM     - Remote or intermediate endpoint given, or a
 *                       priority map offset out of range.
 *     BCM_E_NOT_FOUND - Endpoint id not in use.
 *     BCM_E_INTERNAL  - Loss configured but priority map index invalid.
 *     BCM_E_XXX       - Otherwise.
 * Notes:
 *     Assumes the caller (bcm_kt2_oam_endpoint_create) holds the OAM
 *     control lock.
 */
STATIC int
bcm_kt2_oam_endpoint_lm_update(int unit, bcm_oam_endpoint_info_t *ep_info)
{
    _bcm_oam_hash_data_t *h_data_p;
    _bcm_oam_control_t *oc;
    int loss_add, loss_del;
    int loss_configured;
    soc_profile_mem_t pri_map_prof_mem;
    uint32 pri_ent[BCM_OAM_INTPRI_MAX]; /* ing profile */
    /* ing profile copy from HW*/
    uint32 pri_ent_copy[BCM_OAM_INTPRI_MAX];
    int i;
    void *entries[1];
    int lm_ctr_pool_id = -1;
    int mem_index;
    egr_mp_group_entry_t egr_mp_group;
    l3_entry_1_entry_t l3_entry; /* LMEP view table entry.*/
    int l3_index = -1; /* L3 table hardware index.*/
    uint32 profile_index = -1;
    int rv = BCM_E_NONE;
    /* Fixed: stray double semicolon after the initializer. */
    soc_mem_t mem = ING_SERVICE_PRI_MAPm;
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    if (ep_info->flags & BCM_OAM_ENDPOINT_REMOTE) {
        /* Only LMEPs can update Loss parameters */
        return BCM_E_PARAM;
    }
    if (ep_info->flags & BCM_OAM_ENDPOINT_INTERMEDIATE) {
        /* Only MEPs can update LM */
        return BCM_E_PARAM;
    }
    /* Validate endpoint index value. */
    _BCM_OAM_EP_INDEX_VALIDATE(ep_info->id);
    h_data_p = &oc->oam_hash_data[ep_info->id];
    if (0 == h_data_p->in_use) {
        return BCM_E_NOT_FOUND;
    }
    /* Figure out if loss is being added or deleted */
    loss_add = loss_del = 0;
    loss_configured = 1; /* Indicates if BCM_OAM_ENDPOINT_LOSS_MEASUREMENT flag is
                          * present in new & old config
                          */
    if (h_data_p->flags & BCM_OAM_ENDPOINT_LOSS_MEASUREMENT) {
        if (!(ep_info->flags & BCM_OAM_ENDPOINT_LOSS_MEASUREMENT)) {
            loss_del = 1;
        }
    } else {
        if (ep_info->flags & BCM_OAM_ENDPOINT_LOSS_MEASUREMENT) {
            loss_add = 1;
        } else {
            /* BCM_OAM_ENDPOINT_LOSS_MEASUREMENT flag is not present in old/new
             * config.
             */
            loss_configured = 0;
        }
    }
    /* If loss configuration is present in either old & new configuration, then
     * find out if something is being modified in loss, updates will result in
     * delete and add.
     */
    if (1 == loss_configured && 0 == loss_add && 0 == loss_del) {
        /* Counter pool id is stored in bits [31:24] of rx_ctr. */
        lm_ctr_pool_id = (h_data_p->rx_ctr >> 24) & 0xFF;
        if (lm_ctr_pool_id != ep_info->lm_ctr_pool_id) {
            loss_del = loss_add = 1;
        }
        if(h_data_p->pri_map_index != _BCM_OAM_INVALID_INDEX) {
            pri_map_prof_mem = oc->ing_service_pri_map;
            /* Build the requested priority map profile entry set. */
            for (i = 0; i < BCM_OAM_INTPRI_MAX; i++) {
                if (ep_info->pri_map[i] > _BCM_OAM_SERVICE_PRI_MAX_OFFSET) {
                    return BCM_E_PARAM;
                }
                pri_ent[i] = ep_info->pri_map[i];
                if (SOC_MEM_FIELD_VALID(unit, mem, OFFSET_VALIDf)) {
                    soc_mem_field32_set(unit, mem, &pri_ent[i], OFFSET_VALIDf, 1);
                }
            }
            soc_mem_lock(unit, mem);
            entries[0] = &pri_ent_copy;
            /* Read the currently programmed profile for comparison. */
            rv = soc_profile_mem_get(unit, &pri_map_prof_mem,
                                     (h_data_p->pri_map_index * BCM_OAM_INTPRI_MAX),
                                     BCM_OAM_INTPRI_MAX, (void *) entries);
            if (BCM_FAILURE(rv)) {
                soc_mem_unlock(unit, mem);
                return rv;
            }
            /* If the requested map differs, drop the old profile and force
             * a delete/add cycle to install the new one. */
            if (sal_memcmp(pri_ent, pri_ent_copy,
                           sizeof(uint32)*BCM_OAM_INTPRI_MAX)){
                rv = soc_profile_mem_delete(unit, &pri_map_prof_mem,
                                            (h_data_p->pri_map_index * BCM_OAM_INTPRI_MAX));
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Profile table update error (idx=%d)"
                                          "- %s.\n"), h_data_p->pri_map_index, bcm_errmsg(rv)));
                    soc_mem_unlock(unit, mem);
                    return (rv);
                }
                /* Invalidate the cached index (profile_index == -1). */
                h_data_p->pri_map_index = profile_index;
                loss_del = loss_add = 1;
            }
            soc_mem_unlock(unit, mem);
        } else {
            /* If for some reason i.e. warmboot,index became invalid
             * though loss is configured */
            return BCM_E_INTERNAL;
        }
    }
    if (1 == loss_del) {
        /* Clear LM counters on both ingress (L3) and egress views. */
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_find_lmep(unit, h_data_p, &l3_index,
                                                   &l3_entry));
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_clear_counter(unit, L3_ENTRY_IPV4_UNICASTm,
                                                       l3_index, h_data_p,
                                                       (void *)&l3_entry));
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_find_egr_lmep(unit, h_data_p,
                                                       &mem_index,
                                                       &egr_mp_group));
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_clear_counter(unit, EGR_MP_GROUPm,
                                                       mem_index, h_data_p,
                                                       (void *)&egr_mp_group));
        h_data_p->flags &= ~BCM_OAM_ENDPOINT_LOSS_MEASUREMENT;
    }
    if (1 == loss_add) {
        h_data_p->flags |= BCM_OAM_ENDPOINT_LOSS_MEASUREMENT;
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_lmep_counters_set(unit, ep_info));
    }
    /* Refresh CCM Tx counter action for local-Tx Ethernet endpoints. */
    if(ep_info->type == bcmOAMEndpointTypeEthernet) {
        if((ep_info->ccm_tx_update_lm_counter_size) &&
           (_BCM_OAM_INVALID_INDEX != h_data_p->local_tx_index)) {
            BCM_IF_ERROR_RETURN(bcm_kt2_oam_hw_ccm_tx_ctr_update(unit, ep_info));
        }
    }
    return BCM_E_NONE;
}
/*
 * Function:
 *     bcm_kt2_oam_endpoint_create
 * Purpose:
 *     Create or replace an OAM endpoint object.  Validates parameters,
 *     allocates MEP/RMEP/LMEP table indices, programs the Rx and Tx
 *     hardware paths, updates PORT/EGR_PORT OAM key types and links the
 *     endpoint into its group's endpoint list.  BHH endpoint types are
 *     delegated to bcm_kt2_oam_bhh_endpoint_create.
 * Parameters:
 *     unit          - (IN) BCM device number
 *     endpoint_info - (IN/OUT) Pointer to endpoint information buffer.
 *                     When created without BCM_OAM_ENDPOINT_WITH_ID,
 *                     endpoint_info->id is filled with the new index.
 * Returns:
 *     BCM_E_XXX
 * Notes:
 *     Fixes vs. previous revision: the NULL check on endpoint_info now
 *     precedes the LOG_DEBUG that dereferences it, and the debug-only
 *     KEY_PRINT dump now runs after the hash key is constructed.
 */
int
bcm_kt2_oam_endpoint_create(int unit, bcm_oam_endpoint_info_t *endpoint_info)
{
    _bcm_oam_hash_data_t *hash_data = NULL; /* Endpoint hash data pointer. */
    _bcm_oam_hash_key_t hash_key; /* Hash Key buffer. */
    int ep_req_index; /* Requested endpoint index. */
    int rv; /* Operation return status. */
    _bcm_oam_control_t *oc; /* Pointer to OAM control */
                            /* structure. */
    uint32 sglp = 0; /* Source global logical port. */
    uint32 dglp = 0; /* Dest global logical port. */
    uint32 svp = 0; /* Source virtual port */
    bcm_port_t src_pp_port = 0; /* Source pp port. */
    bcm_port_t dst_pp_port = 0; /* Dest pp port. */
    int mep_ccm_tx = 0; /* Endpoint CCM Tx status. */
    int mep_ccm_rx = 0; /* Endpoint CCM Rx status. */
    int remote = 0; /* Remote endpoint status. */
    int up_mep = 0; /* Endpoint is an upMep */
    int key_type = 0;
    port_tab_entry_t port_entry;
    int oam_key1 = 0;
    int oam_key2 = 0;
    bcm_trunk_t trunk_id = BCM_TRUNK_INVALID;
    int is_vp_valid = 0;
    bcm_gport_t tx_gport = BCM_GPORT_INVALID;
    /* Validate input parameter before any dereference (bug fix: the
     * debug log below used to read endpoint_info->id first). */
    if (NULL == endpoint_info) {
        return (BCM_E_PARAM);
    }
    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: bcm_kt2_oam_endpoint_create "
                           "Endpoint ID=%d.\n"), endpoint_info->id));
    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_LOCK(oc);
    /* Calculate the hash key for given enpoint input parameters. */
    _bcm_kt2_oam_ep_hash_key_construct(unit, oc, endpoint_info, &hash_key);
#if defined(KEY_PRINT)
    /* Debug dump of the constructed key (bug fix: previously printed
     * before construction, showing uninitialized stack data). */
    _bcm_oam_hash_key_print(&hash_key);
#endif
    /* Validate endpoint input parameters. */
    rv = _bcm_kt2_oam_endpoint_params_validate(unit, oc, &hash_key,
                                               endpoint_info);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: (EP=%d) - %s.\n"),
                   endpoint_info->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    /* Pure LM-parameter update path: no endpoint re-creation needed. */
    if ((endpoint_info->flags & BCM_OAM_ENDPOINT_REPLACE) &&
        (endpoint_info->flags2 & BCM_OAM_ENDPOINT2_UPDATE_COUNTER_ACTION)) {
        /* Update LM parameters */
        rv = bcm_kt2_oam_endpoint_lm_update(unit, endpoint_info);
        _BCM_OAM_UNLOCK(oc);
        return rv;
    }
    if(soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
        /* BHH endpoints are managed by the uKernel; delegate. */
        if(BHH_EP_TYPE(endpoint_info))
        {
            if ((oc->ukernel_not_ready == 1) &&
                (!(endpoint_info->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU))){
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM(unit %d) Error: BTE(ukernel) "
                                      "not ready.\n"), unit));
                _BCM_OAM_UNLOCK(oc);
                return (BCM_E_INIT);
            }
            rv = bcm_kt2_oam_bhh_endpoint_create(unit, endpoint_info, &hash_key);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: BHH Endpoint create (EP=%d) - %s.\n"),
                           endpoint_info->id, bcm_errmsg(rv)));
            }
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
#endif /* INCLUDE_BHH */
    }
    /* Get MEP remote endpoint status. */
    remote = (endpoint_info->flags & BCM_OAM_ENDPOINT_REMOTE) ? 1 : 0;
    /* For remote endpoints gport is not required.
       Only group and MEP ID info is required. */
    if (!remote) {
        /* Resolve given endpoint gport value to Source GLP and Dest GLP values. */
        rv = _bcm_kt2_oam_endpoint_gport_resolve(unit, endpoint_info, &sglp, &dglp,
                                                 &src_pp_port, &dst_pp_port,
                                                 &svp, &trunk_id, &is_vp_valid,
                                                 &tx_gport);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Gport resolve (EP=%d) - %s.\n"),
                       endpoint_info->id, bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
    }
    /* Get MEP CCM Tx status. */
    mep_ccm_tx
        = ((endpoint_info->ccm_period != BCM_OAM_ENDPOINT_CCM_PERIOD_DISABLED)
           ? 1 : 0);
    /* Get MEP CCM Rx status. */
    mep_ccm_rx
        = ((endpoint_info->flags & _BCM_OAM_EP_RX_ENABLE) ? 1 : 0);
    /* Check whether up/down MEP */
    if (endpoint_info->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        up_mep = 1;
    }
    /* Set the search key type. */
    if (is_vp_valid) {
        key_type = _BCM_OAM_DOMAIN_VP;
    } else if(endpoint_info->flags & BCM_OAM_ENDPOINT_MATCH_INNER_VLAN) {
        key_type = _BCM_OAM_DOMAIN_CVLAN;
    } else if(endpoint_info->flags &
              BCM_OAM_ENDPOINT_MATCH_OUTER_AND_INNER_VLAN) {
        key_type = _BCM_OAM_DOMAIN_S_PLUS_CVLAN;
    } else if((endpoint_info->vlan == 0) && (endpoint_info->inner_vlan == 0)) {
        key_type = _BCM_OAM_DOMAIN_PORT;
    } else {
        key_type = _BCM_OAM_DOMAIN_SVLAN;
    }
    /* We need to set the OAM_KEY1 and OAM_KEY2 fields of the PORT_TABLE and
       EGR_PORT_TABLE based on the key_type of OAM */
    if (trunk_id != BCM_TRUNK_INVALID) {
        rv = soc_mem_read(unit, PORT_TABm, MEM_BLOCK_ANY,
                          src_pp_port, &port_entry);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint create (EP=%d) - %s.\n"),
                       endpoint_info->id, bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
        oam_key1 = soc_PORT_TABm_field32_get(unit, &port_entry, OAM_KEY1f);
        oam_key2 = soc_PORT_TABm_field32_get(unit, &port_entry, OAM_KEY2f);
        /* Both hardware key slots are occupied; the new domain must match
         * one of them or it cannot be resolved on this trunk port. */
        if (key_type != _BCM_OAM_DOMAIN_PORT) {
            if ((oam_key1 > 0) && (oam_key2 > 0)) {
                if ((oam_key1 == key_type) || (oam_key2 == key_type)) {
                } else {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: Invalid OAM domain to "
                                          "resolve (EP=%d) - %s.\n"),
                               endpoint_info->id, bcm_errmsg(rv)));
                    _BCM_OAM_UNLOCK(oc);
                    return (BCM_E_PARAM);
                }
            }
        }
    }
    /* For Downmep CCM tx purposes, int_pri variable is used like a cosq.
     * Hence verify if it is in the valid range, Else return error.
     * For UPMEP CCM tx purposes, int_pri is used as internal priority only.
     * Hence verify if it is in valid range of internal priority
     */
    if (mep_ccm_tx && !up_mep &&
        (!BCM_COSQ_QUEUE_VALID(unit, endpoint_info->int_pri))) {
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_PARAM);
    } else if (mep_ccm_tx && up_mep &&
               (!(endpoint_info->int_pri >= 0 && endpoint_info->int_pri < BCM_OAM_INTPRI_MAX))) {
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_PARAM);
    }
    /* Replace an existing endpoint. */
    if (endpoint_info->flags & BCM_OAM_ENDPOINT_REPLACE) {
        /* Replace is implemented as destroy + re-create. */
        rv = _bcm_kt2_oam_endpoint_destroy(unit, endpoint_info->id);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint destroy (EP=%d) - %s.\n"),
                       endpoint_info->id, bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
    }
    /* Create a new endpoint with the requested ID. */
    if (endpoint_info->flags & BCM_OAM_ENDPOINT_WITH_ID) {
        ep_req_index = endpoint_info->id;
        rv = shr_idxres_list_reserve(oc->mep_pool, ep_req_index,
                                     ep_req_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint reserve (EP=%d) - %s.\n"),
                       endpoint_info->id, bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return ((rv == BCM_E_RESOURCE) ? BCM_E_EXISTS : rv);
        }
    } else {
        /* Allocate the next available endpoint index. */
        rv = shr_idxres_list_alloc(oc->mep_pool,
                                   (shr_idxres_element_t *)&ep_req_index);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint alloc failed - %s.\n"),
                       bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
        /* Set the allocated endpoint id value. */
        endpoint_info->id =  ep_req_index;
    }
    /* Get the hash data pointer where the data is to be stored. */
    hash_data = &oc->oam_hash_data[ep_req_index];
    /* Clear the hash data element contents before storing the values. */
    _BCM_OAM_HASH_DATA_CLEAR(hash_data);
    /* Cache the endpoint's software view for later lookup/destroy. */
    hash_data->type = endpoint_info->type;
    hash_data->ep_id = endpoint_info->id;
    hash_data->is_remote = remote;
    hash_data->local_tx_enabled = mep_ccm_tx;
    hash_data->local_rx_enabled = mep_ccm_rx;
    hash_data->group_index = endpoint_info->group;
    hash_data->name = endpoint_info->name;
    hash_data->level = endpoint_info->level;
    hash_data->vlan = endpoint_info->vlan;
    hash_data->inner_vlan = endpoint_info->inner_vlan;
    hash_data->gport = endpoint_info->gport;
    hash_data->sglp = sglp;
    hash_data->dglp = dglp;
    hash_data->src_pp_port = src_pp_port;
    hash_data->dst_pp_port = dst_pp_port;
    hash_data->flags = endpoint_info->flags;
    hash_data->opcode_flags = ( endpoint_info->opcode_flags &
                                _BCM_KT2_OAM_OPCODE_MASK );
    hash_data->period = endpoint_info->ccm_period;
    hash_data->in_use = 1;
    hash_data->oam_domain = key_type;
    hash_data->outer_tpid = endpoint_info->outer_tpid;
    hash_data->inner_tpid = endpoint_info->inner_tpid;
    hash_data->subport_tpid = endpoint_info->subport_tpid;
    hash_data->int_pri = endpoint_info->int_pri;
    hash_data->trunk_index = endpoint_info->trunk_index;
    if (is_vp_valid) {
        hash_data->vp = svp;
        if (endpoint_info->flags & BCM_OAM_ENDPOINT_UP_FACING) {
            /* For implementation of VP based lookup on egress side (UP MEP)
             * We need to set EGR_DVP_ATTRIBUTE OAM_KEY3 = 1.
             */
            rv = bcm_esw_port_control_set(unit, endpoint_info->gport,
                                          bcmPortControlOamLookupWithDvp,
                                          1);
            if (BCM_FAILURE(rv)) {
                _BCM_OAM_UNLOCK(oc);
                return (rv);
            }
        }
        hash_data->int_flags |= _BCM_OAM_ENDPOINT_IS_VP_BASED;
    }
    if(endpoint_info->flags & BCM_OAM_ENDPOINT_MATCH_OUTER_AND_INNER_VLAN) {
        hash_data->inner_vlan = endpoint_info->inner_vlan;
    }
    hash_data->trunk_id = trunk_id;
    if (BCM_GPORT_IS_TRUNK(endpoint_info->gport)) {
        hash_data->resolved_trunk_gport = tx_gport;
    }
    /* Initialize hardware index as invalid indices. */
    hash_data->local_tx_index = _BCM_OAM_INVALID_INDEX;
    hash_data->local_rx_index = _BCM_OAM_INVALID_INDEX;
    hash_data->remote_index = _BCM_OAM_INVALID_INDEX;
    hash_data->rx_ctr = _BCM_OAM_INVALID_INDEX;
    hash_data->tx_ctr = _BCM_OAM_INVALID_INDEX;
    hash_data->profile_index = _BCM_OAM_INVALID_INDEX;
    hash_data->dglp1_profile_index = _BCM_OAM_INVALID_INDEX;
    hash_data->dglp2_profile_index = _BCM_OAM_INVALID_INDEX;
    hash_data->pri_map_index = _BCM_OAM_INVALID_INDEX;
    hash_data->egr_pri_map_index = _BCM_OAM_INVALID_INDEX;
    hash_data->outer_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
    hash_data->subport_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
    hash_data->inner_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
    hash_data->ma_base_index = _BCM_OAM_INVALID_INDEX;
    if (1 == remote) {
        /* Allocate the next available index for RMEP table. */
        rv = shr_idxres_list_alloc
                (oc->rmep_pool,
                 (shr_idxres_element_t *)&hash_data->remote_index);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: RMEP index alloc failed EP:%d %s.\n"),
                       endpoint_info->id, bcm_errmsg(rv)));
            return (rv);
        }
    } else {
        /* Allocate the next available index for LMEP table. */
        if (1 == mep_ccm_tx) {
            rv = shr_idxres_list_alloc
                    (oc->lmep_pool,
                     (shr_idxres_element_t *)&hash_data->local_tx_index);
            if (BCM_FAILURE(rv)) {
                _BCM_OAM_UNLOCK(oc);
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: LMEP Tx index alloc failed EP:%d "
                                       "%s.\n"), endpoint_info->id, bcm_errmsg(rv)));
                return (rv);
            }
        }
        /* Allocate the next available index for MA_INDEX table. */
        if (1 == mep_ccm_rx) {
            /* check up or down MEP */
            if (1 == up_mep) {
                rv = _bcm_kt2_oam_upmep_rx_endpoint_reserve(unit,
                                                            endpoint_info);
                if (BCM_FAILURE(rv)) {
                    if (1 == mep_ccm_tx) {
                        /* Return Tx index to the LMEP pool. */
                        shr_idxres_list_free(oc->lmep_pool,
                                             hash_data->local_tx_index);
                    }
                    /* Return endpoint index to MEP pool. */
                    shr_idxres_list_free(oc->mep_pool, endpoint_info->id);
                    _BCM_OAM_UNLOCK(oc);
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: LMEP Rx index alloc failed "
                                           "EP:%d " "%s.\n"),
                               endpoint_info->id, bcm_errmsg(rv)));
                    return (rv);
                }
            } else { /* down Mep */
                rv = _bcm_kt2_oam_downmep_rx_endpoint_reserve(unit,
                                                              endpoint_info);
                if (BCM_FAILURE(rv)) {
                    if (1 == mep_ccm_tx) {
                        /* Return Tx index to the LMEP pool. */
                        shr_idxres_list_free(oc->lmep_pool,
                                             hash_data->local_tx_index);
                    }
                    /* Return endpoint index to MEP pool. */
                    shr_idxres_list_free(oc->mep_pool, endpoint_info->id);
                    _BCM_OAM_UNLOCK(oc);
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: LMEP Rx index alloc "
                                           "failed EP:%d " "%s.\n"), endpoint_info->id,
                               bcm_errmsg(rv)));
                    return (rv);
                }
            }
        }
    }
    /* Index the endpoint by its hash key for fast duplicate detection. */
    rv = shr_htb_insert(oc->ma_mep_htbl, hash_key, hash_data);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Hash table insert failed EP=%d %s.\n"),
                   endpoint_info->id, bcm_errmsg(rv)));
        _bcm_kt2_oam_endpoint_cleanup(unit, up_mep, hash_key, hash_data);
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    if (1 == remote) {
        rv = _bcm_oam_kt2_remote_mep_hw_set(unit, endpoint_info);
        if (BCM_FAILURE(rv)) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Remote MEP set failed EP=%d %s.\n"),
                       endpoint_info->id, bcm_errmsg(rv)));
            _bcm_kt2_oam_endpoint_cleanup(unit, up_mep, hash_key, hash_data);
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
    } else {
        if (mep_ccm_rx) {
            rv = _bcm_kt2_oam_local_rx_mep_hw_set(unit, endpoint_info);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Rx config failed for EP=%d %s.\n"),
                           endpoint_info->id, bcm_errmsg(rv)));
                _bcm_kt2_oam_endpoint_cleanup(unit, up_mep,
                                              hash_key, hash_data);
                _BCM_OAM_UNLOCK(oc);
                return (rv);
            }
        }
        if (mep_ccm_tx) {
            rv = _bcm_oam_kt2_local_tx_mep_hw_set(unit, endpoint_info);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Tx config failed for EP=%d %s.\n"),
                           endpoint_info->id, bcm_errmsg(rv)));
                _bcm_kt2_oam_endpoint_cleanup(unit, up_mep,
                                              hash_key, hash_data);
                _BCM_OAM_UNLOCK(oc);
                return (rv);
            }
        }
        /* Program OAM key types on both ingress and egress port tables. */
        rv = _bcm_kt2_oam_port_table_key_update(unit, PORT_TABm, hash_data);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint create (EP=%d) - %s.\n"),
                       endpoint_info->id, bcm_errmsg(rv)));
            _bcm_kt2_oam_endpoint_cleanup(unit, up_mep, hash_key, hash_data);
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
        rv = _bcm_kt2_oam_port_table_key_update(unit, EGR_PORTm, hash_data);
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint create (EP=%d) - %s.\n"),
                       endpoint_info->id, bcm_errmsg(rv)));
            _bcm_kt2_oam_endpoint_cleanup(unit, up_mep, hash_key, hash_data);
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
        /* Set OAM LM cng and cpu data control from LM flags*/
        if(hash_data->flags & BCM_OAM_ENDPOINT_LOSS_MEASUREMENT) {
            _bcm_kt2_oam_lm_cng_cpu_ctrl_set(unit, endpoint_info);
        }
    }
    rv = _bcm_kt2_oam_group_ep_list_add(unit, endpoint_info->group,
                                        endpoint_info->id);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: failed to add entry to ep list "
                              "for EP=%d %s.\n"), endpoint_info->id, bcm_errmsg(rv)));
        _bcm_kt2_oam_endpoint_cleanup(unit, up_mep, hash_key, hash_data);
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_add(unit, hash_data);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: failed to add mapping to ma_idx_to_ep_id list "
                              "for EP=%d %s.\n"), unit, endpoint_info->id, bcm_errmsg(rv)));
        _bcm_kt2_oam_endpoint_cleanup(unit, up_mep, hash_key, hash_data);
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    _BCM_OAM_UNLOCK(oc);
#ifdef BCM_WARM_BOOT_SUPPORT
    /* Mark persistent state dirty so warm-boot sync picks it up. */
    SOC_CONTROL_LOCK(unit);
    SOC_CONTROL(unit)->scache_dirty = 1;
    SOC_CONTROL_UNLOCK(unit);
#endif
    return (BCM_E_NONE);
}
/*
 * Function:
 *      bcm_kt2_oam_hw_ccm_tx_ctr_get
 * Purpose:
 *      Recover the CCM Tx LM counter parameters from a hardware LMEP_1
 *      entry into the endpoint information structure.
 * Parameters:
 *      unit          - (IN) Unit number.
 *      lmep_1_entry  - (IN) LMEP_1 entry read from HW.
 *      endpoint_info - (OUT) Endpoint info updated with counter base ids,
 *                      offsets and the number of active counters.
 * Returns:
 *      None.
 * Notes:
 *      A counter is considered active only when the corresponding
 *      COUNTER_x_ACTION field is non-zero.
 */
void
bcm_kt2_oam_hw_ccm_tx_ctr_get(int unit,
                              lmep_1_entry_t *lmep_1_entry,
                              bcm_oam_endpoint_info_t *endpoint_info)
{
    uint32 ctr_id = 0;    /* Raw counter id read from hardware.   */
    int    ctr_count = 0; /* Number of active Tx LM counters.     */

    /* Counter 1. */
    if (soc_LMEP_1m_field32_get(unit, lmep_1_entry, COUNTER_1_ACTIONf)) {
        ctr_id = soc_LMEP_1m_field32_get(unit, lmep_1_entry, COUNTER_1_IDf);
        /* Base id is the counter id with the low 3 bits masked off;
         * offset is the position within that group of 8. */
        endpoint_info->ccm_tx_update_lm_counter_base_id[0] = (ctr_id & 0xFFF8);
        endpoint_info->ccm_tx_update_lm_counter_offset[0] = ctr_id % 8;
        ctr_count++;
    }

    /* Counter 2: base id additionally carries bit 24 set — presumably a
     * second-pool marker; confirm against counter allocation code. */
    if (soc_LMEP_1m_field32_get(unit, lmep_1_entry, COUNTER_2_ACTIONf)) {
        ctr_id = soc_LMEP_1m_field32_get(unit, lmep_1_entry, COUNTER_2_IDf);
        endpoint_info->ccm_tx_update_lm_counter_base_id[1] =
            (ctr_id & 0xFFF8) + (1 << 24);
        endpoint_info->ccm_tx_update_lm_counter_offset[1] = ctr_id % 8;
        ctr_count++;
    }

    endpoint_info->ccm_tx_update_lm_counter_size = ctr_count;
}
/*
 * Function:
 *      bcm_kt2_oam_endpoint_get
 * Purpose:
 *      Get an OAM endpoint object
 * Parameters:
 *      unit - (IN) BCM device number
 *      endpoint - (IN) Endpoint ID
 *      endpoint_info - (OUT) Pointer to OAM endpoint information buffer.
 * Returns:
 *      BCM_E_XXX
 * Notes:
 *      For Ethernet endpoints the information is read back from the
 *      RMEP/LMEP/LMEP_1 hardware tables; for BHH endpoints it is fetched
 *      from the uKernel (unless redirected to CPU, in which case the
 *      software state is used).
 */
int
bcm_kt2_oam_endpoint_get(int unit, bcm_oam_endpoint_t endpoint,
                         bcm_oam_endpoint_info_t *endpoint_info)
{
    _bcm_oam_hash_data_t *h_data_p;    /* Pointer to endpoint hash data. */
    int rv = BCM_E_NONE;/* Operation return status. */
    _bcm_oam_control_t *oc;            /* Pointer to OAM control structure. */
    rmep_entry_t rmep_entry;           /* Remote MEP entry buffer. */
    lmep_entry_t lmep_entry;           /* Local MEP entry buffer. */
    lmep_1_entry_t lmep_1_entry;       /* Local MEP entry buffer. */
    void *entries[1];
    uint32 mem_entries[BCM_OAM_INTPRI_MAX];
    int i = 0;
    uint16 inner_vlan_pri = 0;
#if defined(INCLUDE_BHH)
    bhh_sdk_msg_ctrl_sess_get_t msg;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int sess_id = 0;
    bcm_l3_egress_t l3_egress;
    bcm_l3_intf_t l3_intf;
#endif
    uint16 glp = 0;
    bcm_gport_t tx_gport = 0; /* Gport value. */

    if (NULL == endpoint_info) {
        return (BCM_E_PARAM);
    }

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_EP_INDEX_VALIDATE(endpoint);

    _BCM_OAM_LOCK(oc);

    h_data_p = &oc->oam_hash_data[endpoint];
    if (NULL == h_data_p) {
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_INTERNAL);
    }

    if (0 == h_data_p->in_use) {
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_NOT_FOUND);
    }

    LOG_DEBUG(BSL_LS_BCM_OAM,
              (BSL_META_U(unit,
                          "OAM Info: Endpoint (EP=%d) remote=%d local_tx=%d"
                          "local_tx_idx=%d local_rx_en=%d local_rx_idx=%d oam_domain=%d\n"),
               endpoint, h_data_p->is_remote, h_data_p->local_tx_enabled,
               h_data_p->local_tx_index, h_data_p->local_rx_enabled,
               h_data_p->local_rx_index, h_data_p->oam_domain));

    if (bcmOAMEndpointTypeEthernet == h_data_p->type) {
        if (1 == h_data_p->is_remote) {
            /* Remote MEP: read fault/state info from the RMEP table. */
            sal_memset(&rmep_entry, 0, sizeof(rmep_entry_t));

            /* Get hardware table entry information. */
            rv = READ_RMEPm(unit, MEM_BLOCK_ANY, h_data_p->remote_index,
                            &rmep_entry);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: RMEP table read failed for"
                                      " EP=%d %s.\n"), endpoint, bcm_errmsg(rv)));
                _BCM_OAM_UNLOCK(oc);
                return (rv);
            }

            rv = _bcm_kt2_oam_read_clear_faults(unit, h_data_p->remote_index,
                                                RMEPm, (uint32 *) &rmep_entry,
                                                (void *) endpoint_info);
            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: RMEP table read failed for"
                                      " EP=%d %s.\n"), endpoint, bcm_errmsg(rv)));
                _BCM_OAM_UNLOCK(oc);
                return (rv);
            }

            /* Faults-only query: return before filling the remaining
             * endpoint fields. */
            if (endpoint_info->flags2 & BCM_OAM_ENDPOINT_FLAGS2_GET_FAULTS_ONLY) {
                endpoint_info->flags2 &= ~BCM_OAM_ENDPOINT_FLAGS2_GET_FAULTS_ONLY;
                _BCM_OAM_UNLOCK(oc);
                return BCM_E_NONE;
            }
        } else {
            if (1 == h_data_p->local_tx_enabled) {
                /* Local MEP with CCM Tx: read LMEP and LMEP_1 tables. */
                sal_memset(&lmep_entry, 0, sizeof(lmep_entry_t));

                /* Get hardware table entry information. */
                rv = READ_LMEPm(unit, MEM_BLOCK_ANY, h_data_p->local_tx_index,
                                &lmep_entry);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: LMEP table read failed for EP=%d"
                                          " %s.\n"), endpoint, bcm_errmsg(rv)));
                    _BCM_OAM_UNLOCK(oc);
                    return (rv);
                }

                soc_LMEPm_mac_addr_get(unit, &lmep_entry, SAf,
                                       endpoint_info->src_mac_address);

                endpoint_info->pkt_pri
                    = soc_LMEPm_field32_get(unit, &lmep_entry, PRIORITYf);

                endpoint_info->port_state
                    = (soc_LMEPm_field32_get
                       (unit, &lmep_entry, PORT_TLVf)
                       ? BCM_OAM_PORT_TLV_UP : BCM_OAM_PORT_TLV_BLOCKED);

                /* CVLAN_TAG packs vlan id (bits 0-11) and priority
                 * (bits 13-15). */
                inner_vlan_pri = soc_LMEPm_field32_get(unit, &lmep_entry, CVLAN_TAGf);
                endpoint_info->inner_vlan = 0xFFF & inner_vlan_pri;
                endpoint_info->inner_pkt_pri = inner_vlan_pri >> 13;

                sal_memset(&lmep_1_entry, 0, sizeof(lmep_1_entry_t));

                /* Get hardware table entry information. */
                rv = READ_LMEP_1m(unit, MEM_BLOCK_ANY, h_data_p->local_tx_index,
                                  &lmep_1_entry);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: LMEP_1 table read failed "
                                          "for EP=%d"" %s.\n"), endpoint, bcm_errmsg(rv)));
                    _BCM_OAM_UNLOCK(oc);
                    return (rv);
                }

                soc_LMEP_1m_mac_addr_get(unit, &lmep_1_entry, DAf,
                                         endpoint_info->dst_mac_address);

                /* Down-MEP int_pri is kept only in SW state. */
                if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
                    endpoint_info->int_pri = soc_LMEP_1m_field32_get(unit,
                                                &lmep_1_entry, INT_PRIf);
                } else {
                    endpoint_info->int_pri = h_data_p->int_pri;
                }

                endpoint_info->interface_state
                    = soc_LMEP_1m_field32_get(unit, &lmep_1_entry, INTERFACE_TLVf);

                bcm_kt2_oam_hw_ccm_tx_ctr_get(unit, &lmep_1_entry, endpoint_info);

                /* Reconstruct the Tx gport for general pp ports. */
                glp = soc_LMEP_1m_field32_get(unit, &lmep_1_entry, PP_PORTf);
                _BCM_KT2_SUBPORT_PORT_ID_SET(tx_gport, glp);

                if (BCM_PBMP_MEMBER(SOC_INFO(unit).general_pp_port_pbm, glp)) {
                    _BCM_KT2_SUBPORT_PORT_TYPE_SET(tx_gport, _BCM_KT2_SUBPORT_TYPE_GENERAL);
                    endpoint_info->tx_gport = tx_gport;
                    endpoint_info->flags2 |= BCM_OAM_ENDPOINT_FLAGS2_VLAN_VP_UP_MEP_IN_HW;
                }
            }
        }
    }
    else if (soc_feature(unit, soc_feature_bhh) &&
             BHH_EP_TYPE(h_data_p)) {
#if defined(INCLUDE_BHH)
        if (!(h_data_p->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU)) {
            /* Session state lives in the uKernel - query it. */
            sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(endpoint);
            rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                                   MOS_MSG_SUBCLASS_BHH_SESS_GET,
                                                   sess_id, 0,
                                                   MOS_MSG_SUBCLASS_BHH_SESS_GET_REPLY,
                                                   &reply_len);

            if (BCM_FAILURE(rv)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM(unit %d) Error: ukernel msg failed for"
                                      " EP=%d %s.\n"), unit, endpoint, bcm_errmsg(rv)));
                _BCM_OAM_UNLOCK(oc);
                return (rv);
            }

            /* Pack control message data into DMA buffer */
            buffer = oc->dma_buffer;
            buffer_ptr = bhh_sdk_msg_ctrl_sess_get_unpack(buffer, &msg);
            buffer_len = buffer_ptr - buffer;

            if (reply_len != buffer_len) {
                rv = BCM_E_INTERNAL;
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM(unit %d) Error: ukernel msg failed for"
                                      " EP=%d %s.\n"), unit, endpoint, bcm_errmsg(rv)));
                _BCM_OAM_UNLOCK(oc);
                return (rv);
            } else {
                endpoint_info->int_pri = msg.tx_cos;
                endpoint_info->pkt_pri = msg.tx_pri;
                endpoint_info->mpls_exp = msg.priority;
                if(endpoint_info->flags & BCM_OAM_ENDPOINT_REMOTE){
                    endpoint_info->name = msg.remote_mep_id;
                    endpoint_info->ccm_period = msg.remote_period;
                } else {
                    endpoint_info->name = msg.mep_id;
                    endpoint_info->ccm_period = msg.local_period;
                }

                /* Translate uKernel fault bits to API fault flags. */
                if(msg.fault_flags & BHH_BTE_EVENT_CCM_TIMEOUT){
                    endpoint_info->faults |= BCM_OAM_BHH_FAULT_CCM_TIMEOUT;
                }
                if(msg.fault_flags & BHH_BTE_EVENT_CCM_RDI){
                    endpoint_info->faults |= BCM_OAM_BHH_FAULT_CCM_RDI;
                }
                if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_LEVEL){
                    endpoint_info->faults |=
                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_MEG_LEVEL;
                }
                if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_ID){
                    endpoint_info->faults |=
                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_MEG_ID;
                }
                if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_MEP_ID){
                    endpoint_info->faults |=
                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_MEP_ID;
                }
                if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_PERIOD){
                    endpoint_info->faults |=
                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_PERIOD;
                }
                if(msg.fault_flags & BHH_BTE_EVENT_CCM_UNKNOWN_PRIORITY){
                    endpoint_info->faults |=
                        BCM_OAM_BHH_FAULT_CCM_UNKNOWN_PRIORITY;
                }
                if(msg.fault_flags & BHH_BTE_EVENT_CSF_LOS) {
                    endpoint_info->faults |= BCM_OAM_ENDPOINT_BHH_FAULT_CSF_LOS;
                }
                if(msg.fault_flags & BHH_BTE_EVENT_CSF_FDI) {
                    endpoint_info->faults |= BCM_OAM_ENDPOINT_BHH_FAULT_CSF_FDI;
                }
                if(msg.fault_flags & BHH_BTE_EVENT_CSF_RDI) {
                    endpoint_info->faults |= BCM_OAM_ENDPOINT_BHH_FAULT_CSF_RDI;
                }
            }
        }else {
            /* Redirect-to-CPU endpoints are handled purely in SW. */
            endpoint_info->ccm_period = h_data_p->period;
            endpoint_info->int_pri = h_data_p->int_pri;
            endpoint_info->pkt_pri = h_data_p->vlan_pri;
        }
        endpoint_info->intf_id = h_data_p->egress_if;
        endpoint_info->cpu_qid = h_data_p->cpu_qid;
        endpoint_info->mpls_label = h_data_p->label;
        endpoint_info->gport = h_data_p->gport;
        endpoint_info->vpn = h_data_p->vpn;
        endpoint_info->vccv_type = h_data_p->vccv_type;
        endpoint_info->egress_label.label = h_data_p->egr_label;
        endpoint_info->egress_label.exp = h_data_p->egr_label_exp;
        endpoint_info->egress_label.ttl = h_data_p->egr_label_ttl;
        endpoint_info->inner_pkt_pri = h_data_p->inner_vlan_pri;

        if (!BHH_EP_MPLS_SECTION_TYPE(h_data_p)) {
            /*
             * Get MAC address
             */
            bcm_l3_egress_t_init(&l3_egress);
            bcm_l3_intf_t_init(&l3_intf);

            if (BCM_FAILURE
                (bcm_esw_l3_egress_get(unit, h_data_p->egress_if, &l3_egress))) {
                _BCM_OAM_UNLOCK(oc);
                return (BCM_E_INTERNAL);
            }

            l3_intf.l3a_intf_id = l3_egress.intf;
            if (BCM_FAILURE(bcm_esw_l3_intf_get(unit, &l3_intf))) {
                _BCM_OAM_UNLOCK(oc);
                return (BCM_E_INTERNAL);
            }

            sal_memcpy(endpoint_info->src_mac_address, l3_intf.l3a_mac_addr,
                       _BHH_MAC_ADDR_LENGTH);
        } else {
            /* MPLS section endpoints carry MACs in SW state. */
            sal_memcpy(endpoint_info->src_mac_address, h_data_p->src_mac_address,
                       _BHH_MAC_ADDR_LENGTH);
            sal_memcpy(endpoint_info->dst_mac_address, h_data_p->dst_mac_address,
                       _BHH_MAC_ADDR_LENGTH);
        }
#else
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_UNAVAIL);
#endif /* INCLUDE_BHH */
    }
    else {
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_PARAM);
    }

    /* BHH MEP Id & period already filled from uC msg */
    if (!BHH_EP_TYPE(h_data_p)) {
        endpoint_info->name = h_data_p->name;
        endpoint_info->ccm_period = h_data_p->period;
    }

    /* Common fields sourced from the software endpoint state. */
    endpoint_info->id = endpoint;
    endpoint_info->group = h_data_p->group_index;
    endpoint_info->vlan = h_data_p->vlan;
    endpoint_info->inner_vlan = h_data_p->inner_vlan;
    endpoint_info->level = h_data_p->level;
    endpoint_info->gport = h_data_p->gport;
    endpoint_info->trunk_index = h_data_p->trunk_index;
    endpoint_info->flags |= h_data_p->flags;
    endpoint_info->flags &= ~(BCM_OAM_ENDPOINT_WITH_ID);
    endpoint_info->opcode_flags = h_data_p->opcode_flags;
    endpoint_info->type = h_data_p->type;
    endpoint_info->lm_counter_base_id = h_data_p->rx_ctr;
    endpoint_info->outer_tpid = h_data_p->outer_tpid;
    endpoint_info->inner_tpid = h_data_p->inner_tpid;
    endpoint_info->subport_tpid = h_data_p->subport_tpid;
    endpoint_info->timestamp_format = h_data_p->ts_format;

    entries[0] = &mem_entries;
    if (h_data_p->pri_map_index != _BCM_OAM_INVALID_INDEX) {
        rv = soc_profile_mem_get(unit, &oc->ing_service_pri_map,
                                 (h_data_p->pri_map_index * BCM_OAM_INTPRI_MAX),
                                 BCM_OAM_INTPRI_MAX, (void *)entries);
        /* Fix: check the profile read status before consuming the
         * (otherwise uninitialized) mem_entries buffer. */
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Service pri map profile get "
                                  "failed for EP=%d %s.\n"),
                       endpoint, bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
        for (i = 0; i < BCM_OAM_INTPRI_MAX ; i++) {
            if (SOC_MEM_FIELD_VALID(unit, ING_SERVICE_PRI_MAPm, OFFSET_VALIDf)) {
                /* Strip the internal OFFSET_VALID bit before handing the
                 * raw entry back to the caller. */
                soc_mem_field32_set(unit, ING_SERVICE_PRI_MAPm, &mem_entries[i],
                                    OFFSET_VALIDf, 0);
                endpoint_info->pri_map[i] = mem_entries[i];
            }
        }
        endpoint_info->pri_map_id = h_data_p->pri_map_index;
    }

    _BCM_OAM_UNLOCK(oc);
    return (BCM_E_NONE);
}
/*
 * Function:
 *      bcm_kt2_oam_endpoint_destroy
 * Purpose:
 *      Destroy an OAM endpoint object.
 * Parameters:
 *      unit     - (IN) BCM device number
 *      endpoint - (IN) Endpoint ID to destroy.
 * Returns:
 *      BCM_E_XXX
 */
int
bcm_kt2_oam_endpoint_destroy(int unit, bcm_oam_endpoint_t endpoint)
{
    int                rv;  /* Operation return status.       */
    _bcm_oam_control_t *oc; /* OAM control structure pointer. */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Reject out-of-range endpoint ids before taking the lock. */
    _BCM_OAM_EP_INDEX_VALIDATE(endpoint);

    _BCM_OAM_LOCK(oc);

    /* Delegate the actual teardown to the internal helper. */
    rv = _bcm_kt2_oam_endpoint_destroy(unit, endpoint);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint destroy EP=%d failed - "
                              "%s.\n"), endpoint, bcm_errmsg(rv)));
    }

    _BCM_OAM_UNLOCK(oc);

    return (rv);
}
/*
 * Function:
 *      bcm_kt2_oam_endpoint_destroy_all
 * Purpose:
 *      Destroy all OAM endpoint objects associated with a group.
 * Parameters:
 *      unit  - (IN) BCM device number
 *      group - (IN) The OAM group whose endpoints should be destroyed
 * Returns:
 *      BCM_E_XXX
 */
int
bcm_kt2_oam_endpoint_destroy_all(int unit, bcm_oam_group_t group)
{
    _bcm_oam_control_t    *oc;       /* OAM control structure pointer. */
    _bcm_oam_group_data_t *g_info_p; /* Group data pointer.            */
    int                   rv;        /* Operation return status.       */

    /* Get OAM device control structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Validate group index. */
    _BCM_OAM_GROUP_INDEX_VALIDATE(group);

    _BCM_OAM_LOCK(oc);

    /* The group must already exist in the index pool. */
    rv = shr_idxres_list_elem_state(oc->group_pool, group);
    if (BCM_E_EXISTS != rv) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Group ID=%d does not exist.\n"),
                   group));
        return (rv);
    }

    /* Tear down every endpoint hanging off this group. */
    g_info_p = &oc->group_info[group];
    rv = _bcm_kt2_oam_group_endpoints_destroy(unit, g_info_p);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Group (GID=%d) endpoints destroy"
                              " failed - %s.\n"), group, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }

    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Function:
 *      bcm_kt2_oam_endpoint_traverse
 * Purpose:
 *      Traverse the set of OAM endpoints associated with the
 *      specified group, calling a specified callback for each one
 * Parameters:
 *      unit - (IN) BCM device number
 *      group - (IN) The OAM group whose endpoints should be traversed
 *      cb - (IN) A pointer to the callback function to call for each OAM
 *            endpoint in the specified group
 *      user_data - (IN) Pointer to user data to supply in the callback
 * Returns:
 *      BCM_E_XXX
 * Notes:
 *      The group endpoint list is first walked head-to-tail, then
 *      iterated tail-to-head via the prev pointers. New endpoints are
 *      presumably inserted at the list head, so the backward walk
 *      visits endpoints in creation order -- TODO confirm against
 *      _bcm_kt2_oam_group_ep_list_add.
 */
int
bcm_kt2_oam_endpoint_traverse(int unit, bcm_oam_group_t group,
                              bcm_oam_endpoint_traverse_cb cb,
                              void *user_data)
{
    _bcm_oam_control_t *oc;        /* Pointer to OAM control structure. */
    int rv;                        /* Operation return status. */
    bcm_oam_endpoint_info_t ep_info;  /* Per-endpoint info for callback. */
    _bcm_oam_hash_data_t *h_data_p;   /* Current endpoint hash data. */
    _bcm_oam_ep_list_t *cur;          /* Endpoint list cursor. */
    _bcm_oam_group_data_t *g_info_p;  /* Group data pointer. */

    /* Validate input parameter. */
    if (NULL == cb) {
        return (BCM_E_PARAM);
    }

    /* Get OAM device control structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Validate group index. */
    _BCM_OAM_GROUP_INDEX_VALIDATE(group);

    _BCM_OAM_LOCK(oc);

    /* Check if the group is in use. */
    rv = shr_idxres_list_elem_state(oc->group_pool, group);
    if (BCM_E_EXISTS != rv) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Group ID=%d does not exist.\n"),
                   group));
        return (rv);
    }

    /* Get the group data pointer. */
    g_info_p = &oc->group_info[group];

    /* Get the endpoint list head pointer. */
    cur = *(g_info_p->ep_list);
    if (NULL == cur) {
        /* Empty group: nothing to traverse, report success. */
        _BCM_OAM_UNLOCK(oc);
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Info: No endpoints in group GID=%d.\n"),
                   group));
        return (BCM_E_NONE);
    }

    /* Traverse to the tail of the list. */
    while (NULL != cur->next) {
        LOG_DEBUG(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Info: GID=%d EP:%d.\n"),
                   cur->ep_data_p->group_index, cur->ep_data_p->ep_id));
        cur = cur->next;
    }

    /* Walk back from the tail, invoking the callback per endpoint.
     * rv from the last bcm_kt2_oam_endpoint_get/cb call is what is
     * ultimately returned. */
    while (NULL != cur) {
        h_data_p = cur->ep_data_p;
        if (NULL == h_data_p) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Group=%d endpoints access failed -"
                                  " %s.\n"), group, bcm_errmsg(BCM_E_INTERNAL)));
            _BCM_OAM_UNLOCK(oc);
            return (BCM_E_INTERNAL);
        }

        bcm_oam_endpoint_info_t_init(&ep_info);

        rv = bcm_kt2_oam_endpoint_get(unit, h_data_p->ep_id, &ep_info);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: EP=%d info get failed %s.\n"),
                       h_data_p->ep_id, bcm_errmsg(rv)));
            return (rv);
        }

        rv = cb(unit, &ep_info, user_data);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: EP=%d callback failed - %s.\n"),
                       h_data_p->ep_id, bcm_errmsg(rv)));
            return (rv);
        }

        cur = cur->prev;
    }

    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Function:
 *      bcm_kt2_oam_event_register
 * Purpose:
 *      Register a callback for handling OAM events
 * Parameters:
 *      unit        - (IN) BCM device number
 *      event_types - (IN) The set of OAM events for which the specified
 *                    callback should be called.
 *      cb          - (IN) A pointer to the callback function to call for
 *                    the specified OAM events
 *      user_data   - (IN) Pointer to user data to supply in the callback
 * Returns:
 *      BCM_E_XXX
 * Notes:
 *      BHH events are serviced by the uKernel; all other events are
 *      enabled via the CCM_INTERRUPT_CONTROL register. Events with no
 *      corresponding interrupt field are silently skipped here (unlike
 *      unregister, which returns BCM_E_UNAVAIL).
 */
int
bcm_kt2_oam_event_register(int unit, bcm_oam_event_types_t event_types,
                           bcm_oam_event_cb cb, void *user_data)
{
    _bcm_oam_control_t       *oc;             /* OAM control structure.    */
    _bcm_oam_event_handler_t *event_h_p;      /* Event handler cursor.     */
    _bcm_oam_event_handler_t *prev_p = NULL;  /* Previous handler in list. */
    bcm_oam_event_type_t     e_type;          /* Event type iterator.      */
    uint32                   rval;            /* Interrupt control value.  */
    int                      hw_update = 0;   /* HW write needed flag.     */
    uint32                   event_bmp;       /* Requested events bitmap.  */
    int                      rv;              /* Operation return status.  */

    /* Validate event callback input parameter. */
    if (NULL == cb) {
        return (BCM_E_PARAM);
    }

    /* Check if an event is set for register in the events bitmap. */
    SHR_BITTEST_RANGE(event_types.w, 0, bcmOAMEventCount, event_bmp);
    if (0 == event_bmp) {
        /* No events specified. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: No events specified for register.\n")));
        return (BCM_E_PARAM);
    }

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* Find an existing handler entry for this callback. */
    for (event_h_p = oc->event_handler_list_p; event_h_p != NULL;
         event_h_p = event_h_p->next_p) {
        if (event_h_p->cb == cb) {
            break;
        }
        prev_p = event_h_p;
    }

    if (NULL == event_h_p) {
        /* First registration for this callback: allocate and link a new
         * handler at the tail of the list. */
        _BCM_OAM_ALLOC(event_h_p, _bcm_oam_event_handler_t,
                       sizeof(_bcm_oam_event_handler_t), "OAM event handler");
        if (NULL == event_h_p) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Event handler alloc failed -"
                                  " %s.\n"), bcm_errmsg(BCM_E_MEMORY)));
            _BCM_OAM_UNLOCK(oc);
            return (BCM_E_MEMORY);
        }

        event_h_p->next_p = NULL;
        event_h_p->cb = cb;

        SHR_BITCLR_RANGE(event_h_p->event_types.w, 0, bcmOAMEventCount);

        if (prev_p != NULL) {
            prev_p->next_p = event_h_p;
        } else {
            oc->event_handler_list_p = event_h_p;
        }
    }

    rv = READ_CCM_INTERRUPT_CONTROLr(unit, &rval);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: CCM interrupt control read failed -"
                              " %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }

    for (e_type = 0; e_type < bcmOAMEventCount; ++e_type) {
        if (SHR_BITGET(event_types.w, e_type)) {
            if(soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
                /*
                 * BHH events are generated by the uKernel
                 */
                if ((e_type == bcmOAMEventBHHLBTimeout) ||
                    (e_type == bcmOAMEventBHHLBDiscoveryUpdate) ||
                    (e_type == bcmOAMEventBHHCCMTimeout) ||
                    (e_type == bcmOAMEventBHHCCMTimeoutClear) ||
                    (e_type == bcmOAMEventBHHCCMState) ||
                    (e_type == bcmOAMEventBHHCCMRdi) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMegLevel) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMegId) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMepId) ||
                    (e_type == bcmOAMEventBHHCCMUnknownPeriod) ||
                    (e_type == bcmOAMEventBHHCCMUnknownPriority) ||
                    (e_type == bcmOAMEventBHHCCMRdiClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMegLevelClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMegIdClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMepIdClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownPeriodClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownPriorityClear) ||
                    (e_type == bcmOAMEventCsfLos) ||
                    (e_type == bcmOAMEventCsfFdi) ||
                    (e_type == bcmOAMEventCsfRdi) ||
                    (e_type == bcmOAMEventBhhPmCounterRollover) ||
                    (e_type == bcmOAMEventCsfDci)) {
                    /* Fix: only count the event once per handler -
                     * a repeated register of the same event used to
                     * increment the handler count unconditionally. */
                    if (!SHR_BITGET(event_h_p->event_types.w, e_type)) {
                        SHR_BITSET(event_h_p->event_types.w, e_type);
                        oc->event_handler_cnt[e_type] += 1;
                    }
                    continue;
                }
#endif
            }
            if (!soc_reg_field_valid
                    (unit, CCM_INTERRUPT_CONTROLr,
                     _kt2_oam_intr_en_fields[e_type].field)) {
                /* No interrupt field for this event - skip it. */
                continue;
            }
            if (!SHR_BITGET(event_h_p->event_types.w, e_type)) {
                /* Add this event to the registered events list. */
                SHR_BITSET(event_h_p->event_types.w, e_type);
                oc->event_handler_cnt[e_type] += 1;
                if (1 == oc->event_handler_cnt[e_type]) {
                    /* First subscriber: enable the interrupt in HW. */
                    hw_update = 1;
                    soc_reg_field_set
                        (unit, CCM_INTERRUPT_CONTROLr, &rval,
                         _kt2_oam_intr_en_fields[e_type].field, 1);
                }
            }
        }
    }

    event_h_p->user_data = user_data;

    if (1 == hw_update) {
        rv = WRITE_CCM_INTERRUPT_CONTROLr(unit, rval);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: CCM interrupt control write failed -"
                                  " %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
    }

    if (soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
        /*
         * Update BHH Events mask
         */
        rv = _bcm_kt2_oam_bhh_event_mask_set(unit);
        /* Fix: propagate the mask-set status instead of discarding it. */
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: BHH event mask set failed -"
                                  " %s.\n"), bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
#endif /* INCLUDE_BHH */
    }

    _BCM_OAM_UNLOCK(oc);
    return (BCM_E_NONE);
}
/*
 * Function:
 *      bcm_kt2_oam_event_unregister
 * Purpose:
 *      Remove a registered event from the event handler list.
 * Parameters:
 *      unit        - (IN) BCM device number
 *      event_types - (IN) The set of OAM events for which the specified
 *                    callback should not be called
 *      cb          - (IN) A pointer to the callback function to unregister
 *                    from the specified OAM events
 * Returns:
 *      BCM_E_XXX
 * Notes:
 *      When the last subscriber of an event is removed, the matching
 *      interrupt enable in CCM_INTERRUPT_CONTROL is cleared. When the
 *      handler has no events left, its list node is freed.
 */
int
bcm_kt2_oam_event_unregister(int unit, bcm_oam_event_types_t event_types,
                             bcm_oam_event_cb cb)
{
    _bcm_oam_control_t       *oc;             /* OAM control structure.    */
    _bcm_oam_event_handler_t *event_h_p;      /* Event handler cursor.     */
    _bcm_oam_event_handler_t *prev_p = NULL;  /* Previous handler in list. */
    bcm_oam_event_type_t     e_type;          /* Event type iterator.      */
    uint32                   rval;            /* Interrupt control value.  */
    int                      hw_update = 0;   /* HW write needed flag.     */
    uint32                   event_bmp;       /* Requested events bitmap.  */
    int                      rv;              /* Operation return status.  */

    /* Validate event callback input parameter. */
    if (NULL == cb) {
        return (BCM_E_PARAM);
    }

    /* Check if an event is set for unregister in the events bitmap. */
    SHR_BITTEST_RANGE(event_types.w, 0, bcmOAMEventCount, event_bmp);
    if (0 == event_bmp) {
        /* No events specified. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: No events specified for register.\n")));
        return (BCM_E_PARAM);
    }

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* Locate the handler entry for this callback. */
    for (event_h_p = oc->event_handler_list_p; event_h_p != NULL;
         event_h_p = event_h_p->next_p) {
        if (event_h_p->cb == cb) {
            break;
        }
        prev_p = event_h_p;
    }

    if (NULL == event_h_p) {
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_NOT_FOUND);
    }

    rv = READ_CCM_INTERRUPT_CONTROLr(unit, &rval);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: CCM interrupt control read failed -"
                              " %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }

    for (e_type = 0; e_type < bcmOAMEventCount; ++e_type) {
        if (SHR_BITGET(event_types.w, e_type)) {
            if(soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
                /*
                 * BHH events are generated by the uKernel
                 */
                if ((e_type == bcmOAMEventBHHLBTimeout) ||
                    (e_type == bcmOAMEventBHHLBDiscoveryUpdate) ||
                    (e_type == bcmOAMEventBHHCCMTimeout) ||
                    (e_type == bcmOAMEventBHHCCMTimeoutClear) ||
                    (e_type == bcmOAMEventBHHCCMState) ||
                    (e_type == bcmOAMEventBHHCCMRdi) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMegLevel) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMegId) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMepId) ||
                    (e_type == bcmOAMEventBHHCCMUnknownPeriod) ||
                    (e_type == bcmOAMEventBHHCCMUnknownPriority) ||
                    (e_type == bcmOAMEventBHHCCMRdiClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMegLevelClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMegIdClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownMepIdClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownPeriodClear) ||
                    (e_type == bcmOAMEventBHHCCMUnknownPriorityClear) ||
                    (e_type == bcmOAMEventCsfLos) ||
                    (e_type == bcmOAMEventCsfFdi) ||
                    (e_type == bcmOAMEventCsfRdi) ||
                    (e_type == bcmOAMEventCsfDci) ||
                    (e_type == bcmOAMEventBhhPmCounterRollover)) {
                    /* Fix: only clear/decrement when this handler had the
                     * event registered - the unconditional decrement could
                     * underflow event_handler_cnt for events never
                     * registered on this callback. */
                    if ((oc->event_handler_cnt[e_type] > 0)
                        && SHR_BITGET(event_h_p->event_types.w, e_type)) {
                        SHR_BITCLR(event_h_p->event_types.w, e_type);
                        oc->event_handler_cnt[e_type] -= 1;
                    }
                    continue;
                }
#endif
            }
            if (!soc_reg_field_valid
                    (unit, CCM_INTERRUPT_CONTROLr,
                     _kt2_oam_intr_en_fields[e_type].field)) {
                _BCM_OAM_UNLOCK(oc);
                return (BCM_E_UNAVAIL);
            }

            if ((oc->event_handler_cnt[e_type] > 0)
                && SHR_BITGET(event_h_p->event_types.w, e_type)) {
                /* Remove this event from the registered events list. */
                SHR_BITCLR(event_h_p->event_types.w, e_type);
                oc->event_handler_cnt[e_type] -= 1;
                if (0 == oc->event_handler_cnt[e_type]) {
                    /* Last subscriber gone: disable the interrupt. */
                    hw_update = 1;
                    soc_reg_field_set
                        (unit, CCM_INTERRUPT_CONTROLr, &rval,
                         _kt2_oam_intr_en_fields[e_type].field, 0);
                }
            }
        }
    }

    if (1 == hw_update) {
        rv = WRITE_CCM_INTERRUPT_CONTROLr(unit, rval);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: CCM interrupt control write failed -"
                                  " %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
    }

    /* Free the handler node if it no longer has any registered events. */
    SHR_BITTEST_RANGE(event_h_p->event_types.w, 0, bcmOAMEventCount, event_bmp);
    if (0 == event_bmp) {
        if (NULL != prev_p) {
            prev_p->next_p = event_h_p->next_p;
        } else {
            oc->event_handler_list_p = event_h_p->next_p;
        }
        sal_free(event_h_p);
    }

    if(soc_feature(unit, soc_feature_bhh)) {
#if defined(INCLUDE_BHH)
        /*
         * Update BHH Events mask
         */
        rv = _bcm_kt2_oam_bhh_event_mask_set(unit);
        /* Fix: propagate the mask-set status instead of discarding it. */
        if (BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: BHH event mask set failed -"
                                  " %s.\n"), bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }
#endif /* INCLUDE_BHH */
    }

    _BCM_OAM_UNLOCK(oc);
    return (BCM_E_NONE);
}
/*
 * Function:
 *      bcm_kt2_oam_endpoint_action_set
 * Purpose:
 *      Set OAM endpoint actions for a set of specified opcodes.
 * Parameters:
 *      unit     - (IN) BCM device number
 *      endpoint - (IN) Endpoint id
 *      action   - (IN) Set of OAM endpoint actions, for a set of
 *                 opcodes specified.
 * Returns:
 *      BCM_E_XXX
 * Notes:
 * In order to put the opcodes into opcode group1 and group2, first call the
 * action set API with opcode action and set of opcodes that need to use
 * this action. This will put all these opcodes under group1. To put
 * the opcode to group2, call the set API with an action(with all the opcodes
 * that need this action) which is differing compared to group1. This will
 * classify the opcode to group2
 */
int bcm_kt2_oam_endpoint_action_set(int unit, bcm_oam_endpoint_t endpoint,
                                    bcm_oam_endpoint_action_t *action)
{
    int rv = BCM_E_NONE;            /* Operation return status. */
    _bcm_oam_hash_data_t *h_data_p; /* Pointer to endpoint hash data. */
    _bcm_oam_control_t *oc;         /* Pointer to OAM control structure. */
    void *entries[1];
    oam_opcode_control_profile_entry_t     opcode_profile;
    egr_oam_opcode_control_profile_entry_t egr_opcode_profile;
    soc_profile_mem_t  *profile_mem_ptr;
    soc_mem_t          ma_index_mem = MA_INDEXm;
    ma_index_entry_t     ma_idx_entry;     /* MA_INDEX table entry buffer. */
    egr_ma_index_entry_t egr_ma_idx_entry; /* EGR_MA_INDEX table entry buffer*/
    void               *ma_idx_ptr;
    int                ma_offset = 0;
    uint32             old_profile_index = 0;
    char               free_old_profile = 0;
    /* Defaults to 0; set to 1 if the opcode control profile is touched. */
    uint8              opcode_profile_changed = 0;

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_EP_INDEX_VALIDATE(endpoint);

    sal_memset(&opcode_profile, 0, sizeof(oam_opcode_control_profile_entry_t));
    sal_memset(&egr_opcode_profile, 0,
               sizeof(egr_oam_opcode_control_profile_entry_t));

    _BCM_OAM_LOCK(oc);

    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->mep_pool, endpoint);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   endpoint, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }

    h_data_p =  &oc->oam_hash_data[endpoint];
    if (NULL == h_data_p) {
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_INTERNAL);
    }

    /* Up-facing MEPs use the egress tables; down-facing the ingress ones. */
    if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        entries[0] = &egr_opcode_profile;
        profile_mem_ptr = &oc->egr_oam_opcode_control_profile;
        ma_index_mem = EGR_MA_INDEXm;
        ma_idx_ptr = &egr_ma_idx_entry;
    } else {
        entries[0] = &opcode_profile;
        profile_mem_ptr = &oc->oam_opcode_control_profile;
        ma_idx_ptr = &ma_idx_entry;
    }

    /* Get MA_INDEX offset */
    rv = _bcm_kt2_oam_ma_index_offset_get(unit, h_data_p, &ma_offset);
    /* Fix: the offset-get status was previously ignored. */
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MA index offset get failed -"
                              " %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    h_data_p->local_rx_index = h_data_p->ma_base_index + ma_offset;

    /* Using the profile index, get the profile */
    if ((h_data_p->profile_index == _BCM_OAM_INVALID_INDEX) ||
        (h_data_p->profile_index == 0)) {
        /* No profile exists or default profile is used, create one */
        /* convert action into opcode profile entry */
        rv = _bcm_kt2_oam_convert_action_to_opcode_entry(unit, action,
                                                         endpoint,
                                                         entries[0],
                                                      &opcode_profile_changed);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Failed to convert action to "
                                  "profile -" " %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
        if(opcode_profile_changed) {
            rv = soc_profile_mem_add(unit, profile_mem_ptr,
                                     (void *)entries, 1,
                                     (uint32 *) &h_data_p->profile_index);
            if (BCM_FAILURE(rv)) {
                _BCM_OAM_UNLOCK(oc);
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM Error: Soc profile mem add failed -"
                                      " %s.\n"), bcm_errmsg(rv)));
                return (rv);
            }
        }
    } else {
        /* Existing non-default profile: read it, apply the new action on
         * top, and add the result as a new profile entry. */
        free_old_profile = 1;
        old_profile_index = h_data_p->profile_index;
        rv = soc_profile_mem_get(unit, profile_mem_ptr,
                                 old_profile_index, 1, entries);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Soc profile mem get failed -"
                                  " %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
        /* convert action into opcode profile entry */
        rv = _bcm_kt2_oam_convert_action_to_opcode_entry(unit, action,
                                                         endpoint,
                                                         entries[0],
                                                      &opcode_profile_changed);
        /* Fix: this conversion status was previously ignored, unlike the
         * identical call on the create path above. */
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Failed to convert action to "
                                  "profile -" " %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
        /* Add new profile */
        rv = soc_profile_mem_add(unit, profile_mem_ptr,
                                 (void *)entries, 1,
                                 (uint32 *) &h_data_p->profile_index);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Soc profile mem add failed -"
                                  " %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
    }

    /* Set OAM opcode control profile table index. */
    rv = soc_mem_read(unit, ma_index_mem, MEM_BLOCK_ANY,
                      h_data_p->local_rx_index, ma_idx_ptr);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MA index table entry read failed "
                              "%s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    soc_mem_field32_set(unit, ma_index_mem, ma_idx_ptr,
                        OAM_OPCODE_CONTROL_PROFILE_PTRf,
                        h_data_p->profile_index);
    rv = soc_mem_write(unit, ma_index_mem, MEM_BLOCK_ALL,
                       h_data_p->local_rx_index, ma_idx_ptr);
    /* Fix: do not delete the old profile if the MA index write failed -
     * the table may still reference the old profile index. */
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MA index table entry write failed "
                              "%s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    /* Delete old profile once ma_index is pointing to new one*/
    if(free_old_profile) {
        rv = soc_profile_mem_delete(unit, profile_mem_ptr,
                                    old_profile_index);
        if (BCM_FAILURE(rv)) {
            _BCM_OAM_UNLOCK(oc);
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Soc profile mem delete failed -"
                                  " %s.\n"), bcm_errmsg(rv)));
            return (rv);
        }
    }
    _BCM_OAM_UNLOCK(oc);
    return rv;
}
/*
 * Function:
 *     _kt2_oam_opcode_control_field_set
 * Purpose:
 *     Set the fields of opcode profile entry
 * Parameters:
 *     unit        - (IN) BCM device number
 *     field_index - (IN) Opcode profile action to be set
 *     mem         - (IN) Opcode profile memory - ing/egr
 *     oam_opcode  - (IN) Oam opcode for which action needs to be set
 *     value       - (IN) value of the opcode action
 *     profile     - (IN/OUT) Pointer to the opcode profile entry buffer,
 *                   updated in place by this routine.
 * Returns:
 *     BCM_E_XXX
 * Notes:
 *     Non-CFM opcodes share action settings through two opcode groups.
 *     This routine assigns the opcode to group 1 or group 2 (preferring a
 *     group that already carries the same action value) and programs the
 *     group membership table when the assignment changes.
 */
int
_kt2_oam_opcode_control_field_set(int unit, bcm_oam_action_type_t field_index,
                                  soc_mem_t mem, int oam_opcode, int value,
                                  void *profile)
{
    _bcm_oam_control_t *oc;         /* Pointer to OAM control structure. */
    ingress_oam_opcode_group_entry_t entry;
    int grp_1_modified = 0;         /* Opcode newly placed in group 1. */
    int grp_2_modified = 0;         /* Opcode newly placed in group 2. */
    uint32 opcode_bmp_1 = 0;        /* Non-zero if group 1 has any opcode. */
    uint32 opcode_bmp_2 = 0;        /* Non-zero if group 2 has any opcode. */
    /*
     * Field table for the well-known CFM opcodes, indexed by
     * [action][opcode].  Column 0 is unused (opcode values start at 1);
     * a 0 field means the action is not applicable to that opcode.
     */
    soc_field_t cfm_field[bcmOAMActionCount+1][_BCM_OAM_OPCODE_TYPE_NON_CFM_FIRST] = {
        /* bcmOAMActionCountEnable */
        { 0, 0, 0, 0, 0, 0},
        /* bcmOAMActionMeterEnable */
        { 0, 0, 0, 0, 0, 0},
        /* bcmOAMActionDrop */
        { 0, CCM_DROPf, LBR_DROPf, LBM_UC_DROPf,
          LTR_DROPf, LTM_DROPf },
        /* bcmOAMActionCopyToCpu */
        { 0, CCM_COPYTO_CPUf, LBR_COPYTO_CPUf, LBM_UC_COPYTO_CPUf,
          LTR_COPYTO_CPUf, LTM_COPYTO_CPUf},
        /* bcmOAMActionFwdAsData */
        { 0, 0, 0, 0, 0, 0 },
        /* bcmOAMActionFwd */
        { 0, FWD_LMEP_PKTf, LBR_FWDf, LBM_ACTIONf, LTR_FWDf, LTM_FWDf},
        /* bcmOAMActionUcDrop */
        { 0, 0, 0, LBM_UC_DROPf, 0, 0},
        /* bcmOAMActionUcCopyToCpu */
        { 0, 0, 0, LBM_UC_COPYTO_CPUf, 0, 0},
        /* bcmOAMActionUcFwdAsData */
        { 0, 0, 0, 0, 0, 0 },
        /* bcmOAMActionUcFwd */
        { 0, 0, 0, LBM_ACTIONf, 0, 0},
        /* bcmOAMActionMcDrop */
        { 0, 0, 0, LBM_MC_DROPf, 0, 0},
        /* bcmOAMActionMcCopyToCpu */
        { 0, 0, 0, LBM_MC_COPYTO_CPUf, 0, 0},
        /* bcmOAMActionMcFwdAsData */
        { 0, 0, 0, 0, 0, 0},
        /* bcmOAMActionMcFwd*/
        { 0, 0, 0, LBM_MC_FWDf, 0, 0},
        /* bcmOAMActionLowMdlDrop */
        { 0, LOW_MDL_CCM_FWD_ACTIONf, LOW_MDL_LB_LT_DROPf,
          LOW_MDL_LB_LT_DROPf, LOW_MDL_LB_LT_DROPf,
          LOW_MDL_LB_LT_DROPf },
        /* bcmOAMActionLowMdlCopyToCpu */
        { 0, LOW_MDL_CCM_COPYTO_CPUf, LOW_MDL_LB_LT_COPYTO_CPUf,
          LOW_MDL_LB_LT_COPYTO_CPUf,
          LOW_MDL_LB_LT_COPYTO_CPUf, LOW_MDL_LB_LT_COPYTO_CPUf},
        /* bcmOAMActionLowMdlFwdAsData */
        { 0, LOW_MDL_CCM_FWD_ACTIONf, LOW_MDL_LB_LT_DROPf,
          LOW_MDL_LB_LT_DROPf,
          LOW_MDL_LB_LT_DROPf, LOW_MDL_LB_LT_DROPf},
        /* bcmOAMActionLowMdlFwd*/
        { 0, 0, LOW_MDL_LB_LT_FWDf, LOW_MDL_LB_LT_FWDf, 0, 0},
        /* bcmOAMActionMyStationMissCopyToCpu */
        { 0, 0, LB_LT_UC_MY_STATION_MISS_COPYTO_CPUf,
          LB_LT_UC_MY_STATION_MISS_COPYTO_CPUf,
          LB_LT_UC_MY_STATION_MISS_COPYTO_CPUf,
          LB_LT_UC_MY_STATION_MISS_COPYTO_CPUf},
        /* bcmOAMActionMyStationMissDrop */
        { 0, 0, LB_LT_UC_MY_STATION_MISS_DROPf,
          LB_LT_UC_MY_STATION_MISS_DROPf,
          LB_LT_UC_MY_STATION_MISS_DROPf,
          LB_LT_UC_MY_STATION_MISS_DROPf},
        /* bcmOAMActionMyStationMissFwdAsData */
        { 0, 0, 0, 0, 0, 0},
        /* bcmOAMActionMyStationMissFwd */
        { 0, 0, LB_LT_UC_MY_STATION_MISS_FWDf,
          LB_LT_UC_MY_STATION_MISS_FWDf,
          LB_LT_UC_MY_STATION_MISS_FWDf,
          LB_LT_UC_MY_STATION_MISS_FWDf},
        /* bcmOAMActionProcessInHw */
        { 0, CCM_PROCESS_IN_HWf, 0, 0, 0, 0},
        /* bcmOAMActionLowMdlCcmFwdAsRegularCcm */
        { 0, LOW_MDL_CCM_FWD_ACTIONf, 0, 0, 0, 0},
        { 0, 0, 0, 0, 0, 0}
    };
    /*
     * Field table for non-CFM opcodes, indexed by [action][group].
     * Column 0 is the group-1 field and column 1 the group-2 field.
     */
    soc_field_t non_cfm_field[bcmOAMActionCount+1][2]= {
        /* bcmOAMActionCountEnable */
        { 0, 0 },
        /* bcmOAMActionMeterEnable */
        { 0, 0 },
        /* bcmOAMActionDrop */
        { OTHER_OPCODE_GROUP_1_UC_MY_STATION_HIT_ACTIONf,
          OTHER_OPCODE_GROUP_2_UC_MY_STATION_HIT_ACTIONf },
        /* bcmOAMActionCopyToCpu */
        { OTHER_OPCODE_GROUP_1_UC_MY_STATION_HIT_COPYTO_CPUf,
          OTHER_OPCODE_GROUP_2_UC_MY_STATION_HIT_COPYTO_CPUf },
        /* bcmOAMActionFwdAsData */
        { OTHER_OPCODE_GROUP_1_UC_MY_STATION_HIT_ACTIONf,
          OTHER_OPCODE_GROUP_2_UC_MY_STATION_HIT_ACTIONf },
        /* bcmOAMActionFwd */
        { OTHER_OPCODE_GROUP_1_UC_MY_STATION_HIT_ACTIONf,
          OTHER_OPCODE_GROUP_2_UC_MY_STATION_HIT_ACTIONf },
        /* bcmOAMActionUcDrop */
        { 0, 0 },
        /* bcmOAMActionUcCopyToCpu */
        { 0, 0 },
        /* bcmOAMActionUcFwdAsData */
        { 0, 0 },
        /* bcmOAMActionUcFwd */
        { 0, 0 },
        /* bcmOAMActionMcDrop */
        { OTHER_OPCODE_GROUP_1_MC_ACTIONf,
          OTHER_OPCODE_GROUP_2_MC_ACTIONf },
        /* bcmOAMActionMcCopyToCpu */
        { OTHER_OPCODE_GROUP_1_MC_COPYTO_CPUf,
          OTHER_OPCODE_GROUP_2_MC_COPYTO_CPUf },
        /* bcmOAMActionMcFwdAsData */
        { OTHER_OPCODE_GROUP_1_MC_ACTIONf,
          OTHER_OPCODE_GROUP_2_MC_ACTIONf },
        /* bcmOAMActionMcFwd*/
        { OTHER_OPCODE_GROUP_1_MC_ACTIONf,
          OTHER_OPCODE_GROUP_2_MC_ACTIONf },
        /* bcmOAMActionLowMdlDrop */
        { OTHER_OPCODE_GROUP_1_LOW_MDL_ACTIONf,
          OTHER_OPCODE_GROUP_2_LOW_MDL_ACTIONf},
        /* bcmOAMActionLowMdlCopyToCpu */
        { OTHER_OPCODE_GROUP_1_LOW_MDL_COPYTO_CPUf,
          OTHER_OPCODE_GROUP_2_LOW_MDL_COPYTO_CPUf },
        /* bcmOAMActionLowMdlFwdAsData */
        { OTHER_OPCODE_GROUP_1_LOW_MDL_ACTIONf,
          OTHER_OPCODE_GROUP_2_LOW_MDL_ACTIONf },
        /* bcmOAMActionLowMdlFwd*/
        { OTHER_OPCODE_GROUP_1_LOW_MDL_ACTIONf,
          OTHER_OPCODE_GROUP_2_LOW_MDL_ACTIONf},
        /* bcmOAMActionMyStationMissCopyToCpu */
        { OTHER_OPCODE_GROUP_1_UC_MY_STATION_MISS_COPYTO_CPUf,
          OTHER_OPCODE_GROUP_2_UC_MY_STATION_MISS_COPYTO_CPUf },
        /* bcmOAMActionMyStationMissDrop */
        { OTHER_OPCODE_GROUP_1_UC_MY_STATION_MISS_ACTIONf,
          OTHER_OPCODE_GROUP_2_UC_MY_STATION_MISS_ACTIONf },
        /* bcmOAMActionMyStationMissFwdAsData */
        { OTHER_OPCODE_GROUP_1_UC_MY_STATION_MISS_ACTIONf,
          OTHER_OPCODE_GROUP_2_UC_MY_STATION_MISS_ACTIONf },
        /* bcmOAMActionMyStationMissFwd */
        { OTHER_OPCODE_GROUP_1_UC_MY_STATION_MISS_ACTIONf,
          OTHER_OPCODE_GROUP_2_UC_MY_STATION_MISS_ACTIONf},
        /* bcmOAMActionProcessInHw */
        { 0, 0 },
        /* bcmOAMActionLowMdlCcmFwdAsRegularCcm */
        { 0, 0 },
        { 0, 0 }
    };

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /*
     * Zero the opcode group entry buffer up front.  Only the
     * OPCODE_GROUP_VALIDf/OPCODE_GROUPf fields are set below, so without
     * this memset the remaining bits written to hardware would be
     * uninitialized stack data.
     */
    sal_memset(&entry, 0, sizeof(entry));

    /* Determine whether either non-CFM opcode group is already in use. */
    SHR_BITTEST_RANGE(oc->opcode_grp_1_bmp, 0,
                      _BCM_OAM_OPCODE_TYPE_NON_CFM_LAST, opcode_bmp_1);
    SHR_BITTEST_RANGE(oc->opcode_grp_2_bmp, 0,
                      _BCM_OAM_OPCODE_TYPE_NON_CFM_LAST, opcode_bmp_2);

    if (oam_opcode < _BCM_OAM_OPCODE_TYPE_NON_CFM_FIRST) {
        /* CFM opcode: the action maps directly to a profile field. */
        if (cfm_field[field_index][oam_opcode] != 0) {
            soc_mem_field32_set(unit, mem, profile,
                                cfm_field[field_index][oam_opcode], value);
        }
    } else {
        /* Non-CFM opcode: resolve its opcode group first. */
        if ((opcode_bmp_1 == 0) && (opcode_bmp_2 == 0)) {
            /* Neither group in use yet - seed group 1 with this opcode. */
            if (SHR_BITGET(oc->opcode_grp_bmp, oam_opcode)) {
                SHR_BITSET(oc->opcode_grp_1_bmp, oam_opcode);
                SHR_BITCLR(oc->opcode_grp_bmp, oam_opcode);
                opcode_bmp_1 = 1;
                grp_1_modified = 1;
            }
        }
        /* Changing the action for same opcode */
        if (SHR_BITGET(oc->opcode_grp_1_bmp, oam_opcode)) {
            if (non_cfm_field[field_index][0] != 0) {
                soc_mem_field32_set(unit, mem, profile,
                                    non_cfm_field[field_index][0], value);
            }
        } else if (SHR_BITGET(oc->opcode_grp_2_bmp, oam_opcode)) {
            if (non_cfm_field[field_index][1] != 0) {
                soc_mem_field32_set(unit, mem, profile,
                                    non_cfm_field[field_index][1], value);
            }
        } else if (SHR_BITGET(oc->opcode_grp_bmp, oam_opcode)) {
            /*
             * Opcode is not yet assigned to a group: prefer a group that
             * already carries the requested action value, then an empty
             * group, then group 1 as a last resort.
             */
            if (value == soc_mem_field32_get(unit, mem, profile,
                                             non_cfm_field[field_index][0])) {
                /* Add this opcode to group 1 */
                grp_1_modified = 1;
                SHR_BITSET(oc->opcode_grp_1_bmp, oam_opcode);
                SHR_BITCLR(oc->opcode_grp_bmp, oam_opcode);
                if (non_cfm_field[field_index][0] != 0) {
                    soc_mem_field32_set(unit, mem, profile,
                                        non_cfm_field[field_index][0], value);
                }
            } else if (value == soc_mem_field32_get(unit, mem, profile,
                                             non_cfm_field[field_index][1])) {
                /* Add this opcode to group 2 */
                grp_2_modified = 1;
                SHR_BITSET(oc->opcode_grp_2_bmp, oam_opcode);
                SHR_BITCLR(oc->opcode_grp_bmp, oam_opcode);
                if (non_cfm_field[field_index][1] != 0) {
                    soc_mem_field32_set(unit, mem, profile,
                                        non_cfm_field[field_index][1], value);
                }
            } else if (opcode_bmp_1 == 0) {
                grp_1_modified = 1;
                SHR_BITSET(oc->opcode_grp_1_bmp, oam_opcode);
                SHR_BITCLR(oc->opcode_grp_bmp, oam_opcode);
                if (non_cfm_field[field_index][0] != 0) {
                    soc_mem_field32_set(unit, mem, profile,
                                        non_cfm_field[field_index][0], value);
                }
            } else if (opcode_bmp_2 == 0) {
                grp_2_modified = 1;
                SHR_BITSET(oc->opcode_grp_2_bmp, oam_opcode);
                SHR_BITCLR(oc->opcode_grp_bmp, oam_opcode);
                if (non_cfm_field[field_index][1] != 0) {
                    soc_mem_field32_set(unit, mem, profile,
                                        non_cfm_field[field_index][1], value);
                }
            } else {
                /* Add the opcode to group 1 by default */
                grp_1_modified = 1;
                SHR_BITSET(oc->opcode_grp_1_bmp, oam_opcode);
                SHR_BITCLR(oc->opcode_grp_bmp, oam_opcode);
                if (non_cfm_field[field_index][0] != 0) {
                    soc_mem_field32_set(unit, mem, profile,
                                        non_cfm_field[field_index][0], value);
                }
            }
        } else {
            /* Opcode is not a valid configurable non-CFM opcode. */
            return (BCM_E_PARAM);
        }
    }

    /* Select the group membership table matching the profile memory. */
    if (mem == OAM_OPCODE_CONTROL_PROFILEm) {
        mem = INGRESS_OAM_OPCODE_GROUPm;
    } else {
        mem = EGR_OAM_OPCODE_GROUPm;
    }
    /* Persist the new group assignment, if any, to hardware. */
    if (grp_1_modified) {
        soc_mem_field32_set(unit, mem, &entry, OPCODE_GROUP_VALIDf, 1);
        soc_mem_field32_set(unit, mem, &entry, OPCODE_GROUPf, 0);
        SOC_IF_ERROR_RETURN(soc_mem_write(unit, mem,
                                          MEM_BLOCK_ANY, oam_opcode, &entry));
    } else if(grp_2_modified) {
        soc_mem_field32_set(unit, mem, &entry, OPCODE_GROUP_VALIDf, 1);
        soc_mem_field32_set(unit, mem, &entry, OPCODE_GROUPf, 1);
        SOC_IF_ERROR_RETURN(soc_mem_write(unit, mem,
                                          MEM_BLOCK_ANY, oam_opcode, &entry));
    }
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_oam_resolve_dglp
 * Purpose:
 *     Add an dglp profile entry
 * Parameters:
 *     unit          - (IN) BCM device number
 *     mem           - (IN) dglp profile memory - ing/egr
 *     dglp          - (IN) DGLP value
 *     profile_index - (OUT) Pointer to the dglp profile table index.
 *     olp_enable    - (OUT) Pointer to field indicating whether olp is
 *                     enabled on the port.
 * Returns:
 *     BCM_E_XXX
 */
int
_bcm_kt2_oam_resolve_dglp(int unit, soc_mem_t mem, uint32 dglp,
                          uint32 *profile_index, int *olp_enable)
{
    int rv = BCM_E_NONE;            /* Operation return status. */
    egr_olp_dgpp_config_entry_t *buf;
    egr_olp_dgpp_config_entry_t *entry;
    egr_oam_dglp_profile_entry_t egr_dglp_profile_entry;
    ing_oam_dglp_profile_entry_t ing_dglp_profile_entry;
    _bcm_oam_control_t *oc;         /* Pointer to OAM control structure. */
    int index_max = 0;
    int index = 0;
    uint32 configured_dglp = 0;
    void *entries[1];
    void *profile_entry;
    soc_mem_t profile_mem;
    soc_profile_mem_t profile_ptr;
    int entry_mem_size = 0;         /* Size of table entry, in bytes. */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /*
     * Zero both candidate profile entry buffers: only the DGLPf field is
     * set below, and soc_profile_mem_add() matches entries bit-for-bit,
     * so residual stack data would defeat profile sharing.
     */
    sal_memset(&egr_dglp_profile_entry, 0, sizeof(egr_dglp_profile_entry));
    sal_memset(&ing_dglp_profile_entry, 0, sizeof(ing_dglp_profile_entry));

    /* Pick the ingress or egress DGLP profile based on the caller's mem. */
    if (mem == EGR_OAM_OPCODE_CONTROL_PROFILEm) {
        profile_entry = (void *)&egr_dglp_profile_entry;
        profile_mem = EGR_OAM_DGLP_PROFILEm;
        profile_ptr = oc->egr_oam_dglp_profile;
    } else {
        profile_entry = (void *)&ing_dglp_profile_entry;
        profile_mem = ING_OAM_DGLP_PROFILEm;
        profile_ptr = oc->ing_oam_dglp_profile;
    }

    /* Check whether this DGLP is already configured as OLP port */
    entry_mem_size = sizeof(egr_olp_dgpp_config_entry_t);
    /* Allocate buffer to store the DMAed table entries. */
    index_max = soc_mem_index_max(unit, EGR_OLP_DGPP_CONFIGm);
    buf = soc_cm_salloc(unit, entry_mem_size * (index_max + 1),
                        "OLP dglp config table entry buffer");
    if (NULL == buf) {
        return (BCM_E_MEMORY);
    }
    /*
     * Initialize the entry buffer.  Note: must clear entry_mem_size bytes
     * per entry, not sizeof(entry_mem_size) - the latter only zeroed
     * sizeof(int) bytes per entry and left the rest of the buffer dirty.
     */
    sal_memset(buf, 0, entry_mem_size * (index_max + 1));
    /* Read the table entries into the buffer. */
    rv = soc_mem_read_range(unit, EGR_OLP_DGPP_CONFIGm, MEM_BLOCK_ALL,
                            0, index_max, buf);
    if (BCM_FAILURE(rv)) {
        if (buf) {
            soc_cm_sfree(unit, buf);
        }
        return rv;
    }
    /* Iterate over the table entries looking for a matching DGLP. */
    for (index = 0; index <= index_max; index++) {
        entry = soc_mem_table_idx_to_pointer
                    (unit, EGR_OLP_DGPP_CONFIGm, egr_olp_dgpp_config_entry_t *,
                     buf, index);
        soc_mem_field_get(unit, EGR_OLP_DGPP_CONFIGm,
                          (uint32 *)entry, DGLPf, &configured_dglp);
        if (dglp == configured_dglp) {
            /* DGLP is an OLP port - caller must add the OLP header. */
            *olp_enable = 1;
            break;
        }
    }
    if (buf) {
        soc_cm_sfree(unit, buf);
    }
    /* Add entry to dglp profile table. */
    soc_mem_field32_set(unit, profile_mem, profile_entry, DGLPf, dglp);
    entries[0] = profile_entry;
    rv = soc_profile_mem_add(unit, &profile_ptr,
                             (void *)entries, 1, profile_index);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM DGLP profile table is full - %s.\n"),
                   bcm_errmsg(rv)));
        return rv;
    }
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _bcm_kt2_ma_index_entry_modify
 * Purpose:
 *     Set dglp profile pointer index in the MA index table entry
 * Parameters:
 *     unit          - (IN) BCM device number
 *     index         - (IN) MA index table index
 *     mem           - (IN) MA index memory - ing/egr
 *     profile_index - (IN) DGLP profile index
 *     olp_enable    - (IN) indicates whether olp is enabled for this entry
 *     dglp_ptr      - (OUT) Pointer to dglp1/2 profile.
 * Returns:
 *     BCM_E_XXX
 * Notes:
 *     The first free DGLP profile pointer slot (DGLP1 then DGLP2) is
 *     used.  If both slots are already occupied, *dglp_ptr is left
 *     untouched and the entry is written back unchanged.
 */
int
_bcm_kt2_ma_index_entry_modify(int unit, int index, soc_mem_t mem,
                   int profile_index, int olp_enable, int *dglp_ptr)
{
    ma_index_entry_t ing_entry;      /* MA_INDEX table entry buffer. */
    egr_ma_index_entry_t egr_entry;  /* EGR_MA_INDEX table entry buffer. */
    void *entry_ptr;
    int rv;

    /* Choose the buffer matching the memory being modified. */
    entry_ptr = (mem == EGR_MA_INDEXm) ? (void *)&egr_entry
                                       : (void *)&ing_entry;

    rv = soc_mem_read(unit, mem, MEM_BLOCK_ANY, index, entry_ptr);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: MA index table entry read failed "
                              "%s.\n"), bcm_errmsg(rv)));
        return (rv);
    }

    if (soc_mem_field32_get(unit, mem, (uint32 *)entry_ptr,
                            DGLP1_PROFILE_PTRf) == 0) {
        /* DGLP1 slot is free - claim it. */
        soc_mem_field32_set(unit, mem, entry_ptr,
                            DGLP1_PROFILE_PTRf, profile_index);
        if (olp_enable) {
            soc_mem_field32_set(unit, mem, entry_ptr, DGLP1_OLP_HDR_ADDf, 1);
        }
        *dglp_ptr = _BCM_OAM_DGLP1_PROFILE_PTR;
    } else if (soc_mem_field32_get(unit, mem, (uint32 *)entry_ptr,
                                   DGLP2_PROFILE_PTRf) == 0) {
        /* DGLP1 taken, DGLP2 slot is free - claim it instead. */
        soc_mem_field32_set(unit, mem, entry_ptr,
                            DGLP2_PROFILE_PTRf, profile_index);
        if (olp_enable) {
            soc_mem_field32_set(unit, mem, entry_ptr, DGLP2_OLP_HDR_ADDf, 1);
        }
        *dglp_ptr = _BCM_OAM_DGLP2_PROFILE_PTR;
    }

    SOC_IF_ERROR_RETURN(soc_mem_write(unit, mem, MEM_BLOCK_ANY,
                                      index, entry_ptr));
    return (BCM_E_NONE);
}
/*
 * Function:
 *     _kt2_oam_counter_control_set
 * Purpose:
 *     Enable/disable LM counter for a particular opcode, in both the
 *     ingress (LM_COUNTER_CONTROL) and egress (EGR_LM_COUNTER_CONTROL)
 *     tables.
 * Parameters:
 *     unit     - (IN) BCM device number
 *     opcode_i - (IN) OAM opcode for which counter needs
 *                to be enabled/disabled
 *     value    - (IN) Counter enable/disable
 * Returns:
 *     BCM_E_XXX
 */
int
_kt2_oam_counter_control_set(int unit, int opcode_i, int value)
{
    int rv = BCM_E_NONE;                    /* Operation return status. */
    egr_lm_counter_control_entry_t egr_ctr; /* Egress entry buffer. */
    lm_counter_control_entry_t ing_ctr;     /* Ingress entry buffer. */

    /* Read-modify-write the ingress counter control entry. */
    rv = soc_mem_read(unit, LM_COUNTER_CONTROLm, MEM_BLOCK_ANY,
                      opcode_i, &ing_ctr);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Counter control table entry read "
                              "failed %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    soc_LM_COUNTER_CONTROLm_field32_set(unit, &ing_ctr, COUNT_ENABLEf, value);
    rv = soc_mem_write(unit, LM_COUNTER_CONTROLm, MEM_BLOCK_ALL,
                       opcode_i, &ing_ctr);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Counter control table entry write "
                              "failed %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    /* Read-modify-write the egress counter control entry. */
    rv = soc_mem_read(unit, EGR_LM_COUNTER_CONTROLm, MEM_BLOCK_ANY,
                      opcode_i, &egr_ctr);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Counter control table entry read "
                              "failed %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    soc_EGR_LM_COUNTER_CONTROLm_field32_set(unit, &egr_ctr,
                                            COUNT_ENABLEf, value);
    /*
     * Write the modified egress entry (egr_ctr).  Previously this wrote
     * ing_ctr, silently discarding the egress COUNT_ENABLE update.
     */
    rv = soc_mem_write(unit, EGR_LM_COUNTER_CONTROLm, MEM_BLOCK_ALL,
                       opcode_i, &egr_ctr);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Counter control table entry write "
                              "failed %s.\n"), bcm_errmsg(rv)));
        return (rv);
    }
    return rv;
}
/*
 * Function:
 *     _bcm_kt2_oam_convert_action_to_opcode_entry
 * Purpose:
 *     Convert endpoint action to opcode profile entry action.
 *     For every (action, opcode) pair selected in 'action', the matching
 *     field of the opcode control profile entry is updated in place, and
 *     side tables (LM counter control, MA_INDEX DGLP pointers) are
 *     programmed where the action requires it.
 * Parameters:
 *     unit    - (IN) BCM device number
 *     action  - (IN) Pointer to endpoint action
 *     ep_id   - (IN) Endpoint id
 *     profile - (IN/OUT) Opcode profile table entry, modified in place
 *     opcode_profile_changed - (OUT) Set to 1 if 'profile' was modified
 * Returns:
 *     BCM_E_XXX
 */
int
_bcm_kt2_oam_convert_action_to_opcode_entry(int unit,
                                bcm_oam_endpoint_action_t *action,
                                bcm_oam_endpoint_t ep_id,
                                void *profile,
                                uint8 *opcode_profile_changed)
{
    int rv = BCM_E_NONE;            /* Operation return status. */
    int action_i = 0;               /* Action iterator. */
    int opcode_i = 0;               /* Opcode iterator. */
    int break_the_loop = 0;         /* Stop opcode loop for this action. */
    int profile_index = _BCM_OAM_INVALID_INDEX; /* DGLP profile index. */
    int olp_enable = 0;             /* Destination is an OLP port. */
    _bcm_oam_control_t *oc;         /* Pointer to OAM control structure. */
    _bcm_oam_hash_data_t *h_data_p; /* Pointer to endpoint hash data. */
    soc_mem_t mem = OAM_OPCODE_CONTROL_PROFILEm;
    soc_mem_t ma_index_mem = MA_INDEXm;
    int dglp_ptr = 0;               /* Which DGLP slot (1 or 2) is in use. */
    int skip_ma_index_modify = 0;   /* MA_INDEX already points at DGLP. */
    int value = 0;                  /* Field value to program. */
    bcm_module_t module_id;
    bcm_port_t port_id;
    bcm_trunk_t trunk_id = BCM_TRUNK_INVALID;
    int local_id;
    uint32 dglp = 0;                /* Encoded destination GLP. */
    int reset = 0;                  /* 1 = action is being disabled. */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    h_data_p = &oc->oam_hash_data[ep_id];
    /* Up MEPs are handled in the egress pipeline tables. */
    if (h_data_p->flags & BCM_OAM_ENDPOINT_UP_FACING) {
        ma_index_mem = EGR_MA_INDEXm;
        mem = EGR_OAM_OPCODE_CONTROL_PROFILEm;
    }
    reset = (action->flags & BCM_OAM_ENDPOINT_ACTION_DISABLE) ? 1 : 0;
    /* Walk every action, and for each action every selected opcode. */
    for (action_i = 0; action_i < bcmOAMActionCount; action_i++) {
        if (BCM_OAM_ACTION_GET(*action, action_i)) {
            for (opcode_i = 0; opcode_i <= _BCM_OAM_OPCODE_TYPE_NON_CFM_LAST;
                 opcode_i++) {
                if (BCM_OAM_OPCODE_GET(*action, opcode_i)) {
                    switch (action_i) {
                    case bcmOAMActionCountEnable:
                        /* LM counters live in a separate control table. */
                        rv = _kt2_oam_counter_control_set(unit, opcode_i,
                                                          !reset);
                        if (BCM_FAILURE(rv)) {
                            LOG_ERROR(BSL_LS_BCM_OAM,
                                      (BSL_META_U(unit,
                                                  "OAM Error:Counter control "
                                                  "set - %s.\n"), bcm_errmsg(rv)));
                            return (rv);
                        }
                        break;
                    case bcmOAMActionDrop:
                    case bcmOAMActionUcDrop:
                    case bcmOAMActionMcDrop:
                    case bcmOAMActionLowMdlDrop:
                    case bcmOAMActionMyStationMissDrop:
                        /* Low-MDL CCM drop uses a multi-valued action
                         * field rather than a plain enable bit. */
                        if ((action_i == bcmOAMActionLowMdlDrop) &&
                            (opcode_i == _BCM_OAM_OPCODE_TYPE_CCM)) {
                            value = reset ? 0 : _BCM_OAM_LOW_MDL_DROP_PACKET;
                        } else {
                            value = !reset;
                        }
                        rv = _kt2_oam_opcode_control_field_set(unit,
                                          action_i, mem, opcode_i,
                                          value, profile);
                        *opcode_profile_changed = 1;
                        break;
                    case bcmOAMActionCopyToCpu:
                    case bcmOAMActionUcCopyToCpu:
                    case bcmOAMActionMcCopyToCpu:
                    case bcmOAMActionLowMdlCopyToCpu:
                    case bcmOAMActionMyStationMissCopyToCpu:
                        rv = _kt2_oam_opcode_control_field_set(unit,
                                          action_i, mem, opcode_i,
                                          !reset, profile);
                        *opcode_profile_changed = 1;
                        break;
                    case bcmOAMActionFwdAsData:
                        if (opcode_i == _BCM_OAM_OPCODE_TYPE_CCM) {
                            /* Need not check reset in this case
                             * since this is default case
                             */
                            soc_mem_field32_set(unit, mem, profile,
                                                LOW_MDL_CCM_FWD_ACTIONf, 0);
                            break_the_loop = 1;
                        }
                        *opcode_profile_changed = 1;
                        break;
                    case bcmOAMActionFwd:
                    case bcmOAMActionUcFwd:
                    case bcmOAMActionMcFwd:
                    case bcmOAMActionLowMdlFwd:
                    case bcmOAMActionMyStationMissFwd:
                        /* forward packets to DGLP1 or DGLP2 */
                        if (!skip_ma_index_modify) {
                            /* Resolve the action destination gport into a
                             * DGLP (module/port or LAG) encoding. */
                            SOC_IF_ERROR_RETURN(_bcm_esw_gport_resolve(unit,
                                    action->destination, &module_id,
                                    &port_id, &trunk_id, &local_id));
                            if (BCM_GPORT_IS_TRUNK(action->destination) &&
                                (trunk_id != BCM_TRUNK_INVALID)) {
                                /* Set LAG ID indicator bit */
                                dglp |= (1 << DGLP_LAG_ID_INDICATOR_SHIFT_BITS);
                            }
                            dglp |= ((module_id << DGLP_MODULE_ID_SHIFT_BITS) + port_id);
                            /* Reuse an existing DGLP slot if this endpoint
                             * already forwards to the same destination. */
                            if (dglp == h_data_p->dglp1) {
                                skip_ma_index_modify = 1;
                                dglp_ptr = _BCM_OAM_DGLP1_PROFILE_PTR;
                            } else if (dglp == h_data_p->dglp2) {
                                skip_ma_index_modify = 1;
                                dglp_ptr = _BCM_OAM_DGLP2_PROFILE_PTR;
                                /* if both dglp1 and dglp2 are already set,
                                   return error */
                            } else if ((h_data_p->dglp1 > 0) &&
                                       (h_data_p->dglp2 > 0)) {
                                LOG_ERROR(BSL_LS_BCM_OAM,
                                          (BSL_META_U(unit,
                                                      "OAM Error: Invalid action"
                                                      "destination - %s.\n"), bcm_errmsg(rv)));
                                return BCM_E_PARAM;
                            }
                            if (!skip_ma_index_modify) {
                                /* New destination: allocate a DGLP profile
                                 * and point the MA_INDEX entry at it. */
                                rv = _bcm_kt2_oam_resolve_dglp(unit, mem,
                                           dglp, (uint32 *)&profile_index,
                                           &olp_enable);
                                if (BCM_FAILURE(rv)) {
                                    LOG_ERROR(BSL_LS_BCM_OAM,
                                              (BSL_META_U(unit,
                                                          "OAM Error: Gport "
                                                          "to dglp-%s\n"), bcm_errmsg(rv)));
                                    return (rv);
                                }
                                if (profile_index != _BCM_OAM_INVALID_INDEX) {
                                    /* modify MA_INDEX table entry */
                                    rv = _bcm_kt2_ma_index_entry_modify(unit,
                                              h_data_p->local_rx_index,
                                              ma_index_mem, profile_index,
                                              olp_enable, &dglp_ptr);
                                    if (BCM_FAILURE(rv)) {
                                        return rv;
                                    }
                                    /* Record which slot was claimed so the
                                     * endpoint can release it on delete. */
                                    if (dglp_ptr ==
                                        _BCM_OAM_DGLP1_PROFILE_PTR) {
                                        h_data_p->dglp1 = dglp;
                                        h_data_p->dglp1_profile_index =
                                                                profile_index;
                                    } else {
                                        h_data_p->dglp2 = dglp;
                                        h_data_p->dglp2_profile_index =
                                                                profile_index;
                                    }
                                    skip_ma_index_modify = 1;
                                }
                            }
                        }
                        /* No DGLP slot resolved: nothing to program. */
                        if (dglp_ptr == 0) {
                            break;
                        }
                        /* Opcode control value is to bet set to 2 for
                           DGLP1 and 3 for DGLP2 */
                        if (((opcode_i == _BCM_OAM_OPCODE_TYPE_LBM) &&
                             (action_i != bcmOAMActionMcFwd)) ||
                            (opcode_i >= _BCM_OAM_OPCODE_TYPE_NON_CFM_FIRST)) {
                            value = dglp_ptr + 1;
                        } else {
                            value = dglp_ptr;
                        }
                        _kt2_oam_opcode_control_field_set(unit, action_i,
                                          mem, opcode_i, value, profile);
                        *opcode_profile_changed = 1;
                        break;
                    case bcmOAMActionUcFwdAsData:
                    case bcmOAMActionMcFwdAsData:
                    case bcmOAMActionLowMdlFwdAsData:
                    case bcmOAMActionMyStationMissFwdAsData:
                        /* Need not check reset in this case
                         * since this is default case
                         */
                        rv = _kt2_oam_opcode_control_field_set(unit,
                                          action_i, mem, opcode_i,
                                          _BCM_OAM_FWD_AS_DATA, profile);
                        *opcode_profile_changed = 1;
                        break;
                    case bcmOAMActionProcessInHw:
                        if (opcode_i == _BCM_OAM_OPCODE_TYPE_CCM) {
                            soc_mem_field32_set(unit, mem, profile,
                                                CCM_PROCESS_IN_HWf, !reset);
                        } else if (opcode_i == _BCM_OAM_OPCODE_TYPE_LBM) {
                            soc_mem_field32_set(unit, mem, profile,
                                                LBM_ACTIONf, !reset);
                        }
                        break_the_loop = 1;
                        /* TBD - When CCM processing is done in HW, MA_PTR
                           value in MA_INDEX table should be less than 2k.
                           If not, we should return error */
                        *opcode_profile_changed = 1;
                        break;
                    case bcmOAMActionLowMdlCcmFwdAsRegularCcm:
                        if (opcode_i == _BCM_OAM_OPCODE_TYPE_CCM) {
                            soc_mem_field32_set(unit, mem, profile,
                                                LOW_MDL_CCM_FWD_ACTIONf, !reset);
                            break_the_loop = 1;
                        }
                        *opcode_profile_changed = 1;
                        break;
                    default:
                        break;
                    }
                }
                /* This action applies to a single opcode only - reset the
                 * per-action DGLP state and move on to the next action. */
                if (break_the_loop) {
                    dglp_ptr = 0;
                    break_the_loop = 0;
                    olp_enable = 0;
                    profile_index = _BCM_OAM_INVALID_INDEX;
                    break;
                }
            }
        }
    }
    return rv;
}
/*
 * Function:
 *     _bcm_kt2_oam_flex_drop_get
 * Purpose:
 *     Query the value of the device-wide OAM control.
 * Parameters:
 *     unit - (IN) BCM device number
 *     type - (IN) OAM control type
 *     arg  - (OUT) A pointer to value of the control
 * Returns:
 *     BCM_E_XXX
 */
int _bcm_kt2_oam_flex_drop_get(int unit, bcm_oam_control_type_t type,
                               uint64 *arg)
{
    soc_reg_t drop_reg;
    int reg_is_64bit = 0;
    uint32 val32 = 0;
    int rv;

    /* Map the control type onto the matching drop-control register;
     * egress registers are 64 bits wide, ingress registers 32 bits. */
    switch (type) {
    case bcmOamControlFlexibleDropPort:
        drop_reg = OAM_PORT_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropCVlan:
        drop_reg = OAM_C_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropSVlan:
        drop_reg = OAM_S_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropCPlusSVlan:
        drop_reg = OAM_S_C_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropVP:
        drop_reg = OAM_SVP_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropEgressPort:
        drop_reg = EGR_OAM_PORT_INTERFACE_DROP_CONTROL_64r;
        reg_is_64bit = 1;
        break;
    case bcmOamControlFlexibleDropEgressCVlan:
        drop_reg = EGR_OAM_C_INTERFACE_DROP_CONTROL_64r;
        reg_is_64bit = 1;
        break;
    case bcmOamControlFlexibleDropEgressSVlan:
        drop_reg = EGR_OAM_S_INTERFACE_DROP_CONTROL_64r;
        reg_is_64bit = 1;
        break;
    case bcmOamControlFlexibleDropEgressCPlusSVlan:
        drop_reg = EGR_OAM_S_C_INTERFACE_DROP_CONTROL_64r;
        reg_is_64bit = 1;
        break;
    case bcmOamControlFlexibleDropEgressVP:
        drop_reg = EGR_OAM_DVP_INTERFACE_DROP_CONTROL_64r;
        reg_is_64bit = 1;
        break;
    case bcmOamControlFlexibleDropPasssiveSap:
        drop_reg = OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr;
        break;
    case bcmOamControlFlexibleDropEgressPasssiveSap:
        drop_reg = EGR_OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr;
        break;
    default:
        return (BCM_E_UNAVAIL);
    }

    if (reg_is_64bit) {
        rv = soc_reg64_get(unit, drop_reg, REG_PORT_ANY, 0, arg);
    } else {
        /* 32-bit register: widen the value into the caller's uint64. */
        rv = soc_reg32_get(unit, drop_reg, REG_PORT_ANY, 0, &val32);
        if (BCM_SUCCESS(rv)) {
            COMPILER_64_SET(*arg, 0, val32);
        }
    }
    return rv;
}
/*
 * Function:
 *     _bcm_kt2_oam_flex_drop_set
 * Purpose:
 *     Set the value of the device-wide OAM flexible-drop control.
 * Parameters:
 *     unit - (IN) BCM device number
 *     type - (IN) OAM control type
 *     arg  - (IN) value to be set for the control
 * Returns:
 *     BCM_E_XXX
 */
int _bcm_kt2_oam_flex_drop_set(int unit, bcm_oam_control_type_t type,
                               uint64 arg)
{
    uint32 rval_32;
    soc_reg_t reg = 0;
    uint32 is_64bit_reg = 0;  /* Egress registers are 64 bits wide. */
    int rv = BCM_E_NONE;

    /* Map the control type onto the matching drop-control register. */
    switch(type) {
    case bcmOamControlFlexibleDropPort:
        reg = OAM_PORT_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropCVlan:
        reg = OAM_C_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropSVlan:
        reg = OAM_S_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropCPlusSVlan:
        reg = OAM_S_C_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropVP:
        reg = OAM_SVP_INTERFACE_DROP_CONTROLr;
        break;
    case bcmOamControlFlexibleDropEgressPort:
        reg = EGR_OAM_PORT_INTERFACE_DROP_CONTROL_64r;
        is_64bit_reg = 1;
        break;
    case bcmOamControlFlexibleDropEgressCVlan:
        reg = EGR_OAM_C_INTERFACE_DROP_CONTROL_64r;
        is_64bit_reg = 1;
        break;
    case bcmOamControlFlexibleDropEgressSVlan:
        reg = EGR_OAM_S_INTERFACE_DROP_CONTROL_64r;
        is_64bit_reg = 1;
        break;
    case bcmOamControlFlexibleDropEgressCPlusSVlan:
        reg = EGR_OAM_S_C_INTERFACE_DROP_CONTROL_64r;
        is_64bit_reg = 1;
        break;
    case bcmOamControlFlexibleDropEgressVP:
        reg = EGR_OAM_DVP_INTERFACE_DROP_CONTROL_64r;
        is_64bit_reg = 1;
        break;
    case bcmOamControlFlexibleDropPasssiveSap:
        reg = OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr;
        break;
    case bcmOamControlFlexibleDropEgressPasssiveSap:
        reg = EGR_OAM_S_INTERFACE_PASSIVE_PROCESSING_CONTROLr;
        break;
    default:
        return (BCM_E_UNAVAIL);
        break;
    }
    if (is_64bit_reg == 0) {
        /* 32-bit register: only the low word of 'arg' is significant. */
        COMPILER_64_TO_32_LO(rval_32, arg);
        rv = soc_reg32_set(unit, reg, REG_PORT_ANY, 0, rval_32);
    } else {
        rv = soc_reg64_set(unit, reg, REG_PORT_ANY, 0, arg);
    }
    return rv;
}
/*
 * Function:
 *     bcm_kt2_oam_control_get
 * Purpose:
 *     Query the value of the device-wide OAM control.
 * Parameters:
 *     unit - (IN) BCM device number
 *     type - (IN) OAM control type
 *     arg  - (OUT) A pointer to value of the control
 * Returns:
 *     BCM_E_XXX
 */
int bcm_kt2_oam_control_get(int unit, bcm_oam_control_type_t type,
                            uint64 *arg)
{
    /* All flexible-drop controls share the same register-backed getter;
     * any other control type is not supported on this device. */
    switch (type) {
    case bcmOamControlFlexibleDropPort:
    case bcmOamControlFlexibleDropCVlan:
    case bcmOamControlFlexibleDropSVlan:
    case bcmOamControlFlexibleDropCPlusSVlan:
    case bcmOamControlFlexibleDropVP:
    case bcmOamControlFlexibleDropEgressPort:
    case bcmOamControlFlexibleDropEgressCVlan:
    case bcmOamControlFlexibleDropEgressSVlan:
    case bcmOamControlFlexibleDropEgressCPlusSVlan:
    case bcmOamControlFlexibleDropEgressVP:
    case bcmOamControlFlexibleDropPasssiveSap:
    case bcmOamControlFlexibleDropEgressPasssiveSap:
        return _bcm_kt2_oam_flex_drop_get(unit, type, arg);
    default:
        break;
    }
    return BCM_E_UNAVAIL;
}
/*
 * Function:
 *     bcm_kt2_oam_control_set
 * Purpose:
 *     Set the value of the device-wide OAM control.
 * Parameters:
 *     unit - (IN) BCM device number
 *     type - (IN) OAM control type
 *     arg  - (IN) value to be set for control
 * Returns:
 *     BCM_E_XXX
 */
int bcm_kt2_oam_control_set(int unit, bcm_oam_control_type_t type,
                            uint64 arg)
{
    /* All flexible-drop controls share the same register-backed setter;
     * any other control type is not supported on this device. */
    switch (type) {
    case bcmOamControlFlexibleDropPort:
    case bcmOamControlFlexibleDropCVlan:
    case bcmOamControlFlexibleDropSVlan:
    case bcmOamControlFlexibleDropCPlusSVlan:
    case bcmOamControlFlexibleDropVP:
    case bcmOamControlFlexibleDropEgressPort:
    case bcmOamControlFlexibleDropEgressCVlan:
    case bcmOamControlFlexibleDropEgressSVlan:
    case bcmOamControlFlexibleDropEgressCPlusSVlan:
    case bcmOamControlFlexibleDropEgressVP:
    case bcmOamControlFlexibleDropPasssiveSap:
    case bcmOamControlFlexibleDropEgressPasssiveSap:
        return _bcm_kt2_oam_flex_drop_set(unit, type, arg);
    default:
        break;
    }
    return BCM_E_UNAVAIL;
}
/*
 * Function:
 *     bcm_kt2_oam_upmep_cosq_set
 * Purpose:
 *     Set the value of the device-wide UP MEP PDU type CPU queue.
 * Parameters:
 *     unit           - (IN) BCM device number
 *     upmep_pdu_type - (IN) UP MEP PDU type
 *     cosq           - (IN) value to be set for CPU queue
 * Returns:
 *     BCM_E_XXX
 */
int bcm_kt2_oam_upmep_cosq_set(int unit, bcm_oam_upmep_pdu_type_t upmep_pdu_type,
                               bcm_cos_queue_t cosq)
{
    uint64 reg_val;
    uint64 cos_val;
    soc_field_t cos_field;

    SOC_IF_ERROR_RETURN(
        READ_EGR_CPU_CONTROL_2_64r(unit, &reg_val));

    /* Resolve the PDU type to its CPU COS field in EGR_CPU_CONTROL_2. */
    switch (upmep_pdu_type) {
    case bcmOamUpmepPduTypeCcm:
        cos_field = OAM_CCM_SLOWPATH_CPU_COSf;
        break;
    case bcmOamUpmepPduTypeLmDm:
        cos_field = OAM_LMDM_CPU_COSf;
        break;
    case bcmOamUpmepPduTypeSlowpath:
        cos_field = OAM_SLOWPATH_CPU_COSf;
        break;
    case bcmOamUpmepPduTypeCount:
        return BCM_E_PARAM;
    default:
        return BCM_E_UNAVAIL;
    }

    COMPILER_64_SET(cos_val, 0, cosq);
    soc_reg64_field_set(unit, EGR_CPU_CONTROL_2_64r, &reg_val,
                        cos_field, cos_val);
    SOC_IF_ERROR_RETURN(
        WRITE_EGR_CPU_CONTROL_2_64r(unit, reg_val));
    return BCM_E_NONE;
}
/*
 * Function:
 *     bcm_kt2_oam_upmep_cosq_get
 * Purpose:
 *     Query the value of the device-wide UP MEP PDU type CPU queue.
 * Parameters:
 *     unit           - (IN) BCM device number
 *     upmep_pdu_type - (IN) UP MEP PDU type
 *     cosq           - (OUT) pointer to CPU queue to be get
 * Returns:
 *     BCM_E_XXX
 */
int bcm_kt2_oam_upmep_cosq_get(int unit, bcm_oam_upmep_pdu_type_t upmep_pdu_type,
                               bcm_cos_queue_t *cosq)
{
    uint64 reg_val;
    soc_field_t cos_field;

    SOC_IF_ERROR_RETURN(
        READ_EGR_CPU_CONTROL_2_64r(unit, &reg_val));

    /* Resolve the PDU type to its CPU COS field in EGR_CPU_CONTROL_2.
     * On failure *cosq is left untouched. */
    switch (upmep_pdu_type) {
    case bcmOamUpmepPduTypeCcm:
        cos_field = OAM_CCM_SLOWPATH_CPU_COSf;
        break;
    case bcmOamUpmepPduTypeLmDm:
        cos_field = OAM_LMDM_CPU_COSf;
        break;
    case bcmOamUpmepPduTypeSlowpath:
        cos_field = OAM_SLOWPATH_CPU_COSf;
        break;
    case bcmOamUpmepPduTypeCount:
        return BCM_E_PARAM;
    default:
        return BCM_E_UNAVAIL;
    }

    *cosq = soc_reg64_field32_get(unit, EGR_CPU_CONTROL_2_64r,
                                  reg_val, cos_field);
    return BCM_E_NONE;
}
#if defined(INCLUDE_BHH)
/*
 * Function:
 *     bcm_kt2_oam_mpls_tp_channel_type_tx_get
 * Purpose:
 *     Get MPLS TP(BHH) ACH Channel type
 * Parameters:
 *     unit          (IN)  BCM device number
 *     channel_type  (IN)  Channel type is ignored in XGS devices
 *     value         (OUT) User define MPLS TP(BHH) ACH channel type
 *
 * Returns:
 *     BCM_E_NONE    No error
 *     BCM_E_XXXX    Error
 */
int
bcm_kt2_oam_mpls_tp_channel_type_tx_get(int unit,
                          bcm_oam_mpls_tp_channel_type_t channel_type,
                          int *value)
{
    /* Guard against a NULL out-pointer from the caller. */
    if (NULL == value) {
        return (BCM_E_PARAM);
    }
    *value = kt2_oam_mpls_tp_ach_channel_type;
    return BCM_E_NONE;
}
/*
 * Function:
 *     bcm_kt2_oam_mpls_tp_channel_type_tx_set
 * Purpose:
 *     Update MPLS TP(BHH) ACH Channel type with user defined value
 * Parameters:
 *     unit          (IN) BCM device number
 *     channel_type  (IN) Channel type is ignored in XGS devices
 *     value         (IN) User define MPLS TP(BHH) ACH channel type
 *
 * Returns:
 *     BCM_E_NONE    No error
 *     BCM_E_XXXX    Error
 */
int
bcm_kt2_oam_mpls_tp_channel_type_tx_set(int unit,
                          bcm_oam_mpls_tp_channel_type_t channel_type,
                          int value)
{
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int rv = BCM_E_NONE;
    uint32 *msg;
    _bcm_oam_control_t *oc;             /* OAM control structure. */

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);
    /* The channel type is programmed into the uKernel; it must be up. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }
    /* Only the two standard ACH channel type values are accepted. */
    if ((value != SHR_BHH_ACH_CHANNEL_TYPE) &&
        (value != SHR_BHH_ACH_CHANNEL_TYPE_1)) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_CONFIG;
    }
    kt2_oam_mpls_tp_ach_channel_type = value;
    msg = (uint32 *)&kt2_oam_mpls_tp_ach_channel_type;
    /* Set BHH ACH Type in BHH_RX_ACH_TYPE register */
    rv = WRITE_BHH_RX_ACH_TYPEr(unit, kt2_oam_mpls_tp_ach_channel_type);
    /* Previously this status was silently overwritten by the message
     * send below; a failed register write must abort the update. */
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    /* Pack control message data into DMA buffer */
    buffer = oc->dma_buffer;
    buffer_ptr = bhh_sdk_msg_ctrl_ach_channel_type_msg_pack(buffer, msg);
    buffer_len = buffer_ptr - buffer;
    /* Notify the BHH uKernel application of the new channel type. */
    rv = _bcm_kt2_oam_bhh_msg_send_receive(
                                      unit,
                                      MOS_MSG_SUBCLASS_BHH_ACH_CHANNEL_TYPE,
                                      buffer_len, 0,
                                      MOS_MSG_SUBCLASS_BHH_ACH_CHANNEL_TYPE_REPLY,
                                      &reply_len);
    if (BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
* Function:
* _bcm_kt2_oam_bhh_encap_hw_set
* Purpose:
* Sets BHH encapsulation type in HW device.
* Parameters:
* unit - (IN) Unit number.
* module_id - (IN) Module id.
* port_id - (IN) Port.
* is_local - (IN) Indicates if module id is local.
* endpoint_config - (IN) Pointer to BHH endpoint structure.
* Returns:
* BCM_E_XXX
* Notes:
*/
STATIC int
_bcm_kt2_oam_bhh_encap_hw_set(int unit, _bcm_oam_hash_data_t *h_data_p,
                              bcm_module_t module_id, bcm_port_t port_id,
                              int is_local, bcm_oam_endpoint_info_t *endpoint_info)
{
    /*
     * Marks the MPLS_ENTRY for this endpoint's label as a BHH session
     * and, for VCCV endpoints, programs the PW control-channel type.
     * module_id/port_id/is_local/endpoint_info are currently unused in
     * this body; presumably kept for API symmetry — confirm with callers.
     */
    int rv = BCM_E_NONE;
#if defined(BCM_KATANA2_SUPPORT) && defined(BCM_MPLS_SUPPORT)
    bcm_l3_egress_t l3_egress;       /* Egress object of the endpoint. */
    int ingress_if;                  /* L3 ingress i/f read from an entry. */
    int i;
    int num_entries;                 /* MPLS_ENTRY table size. */
    mpls_entry_entry_t mpls_entry;   /* Entry read, modified, written back. */
    mpls_entry_entry_t mpls_key;     /* Search key (label field only). */
    int vccv_type = 0;               /* PW control-channel type found in HW. */
    int mpls_index = 0;              /* Table index of the matched entry. */
#endif /* BCM_KATANA2_SUPPORT && BCM_MPLS_SUPPORT */
    switch(h_data_p->type) {
    case bcmOAMEndpointTypeBHHMPLS:
    case bcmOAMEndpointTypeBHHMPLSVccv:
#if defined(BCM_KATANA2_SUPPORT) && defined(BCM_MPLS_SUPPORT)
        /* Serialize against other writers of the MPLS tables. */
        SOC_IF_ERROR_RETURN(bcm_tr_mpls_lock (unit));
        /* Look up the MPLS_ENTRY keyed by the endpoint's label. */
        sal_memset(&mpls_key, 0, sizeof(mpls_key));
        soc_MPLS_ENTRYm_field32_set(unit, &mpls_key, MPLS__MPLS_LABELf,
                                    h_data_p->label);
        rv = soc_mem_search(unit, MPLS_ENTRYm, MEM_BLOCK_ANY, &mpls_index,
                            &mpls_key, &mpls_entry, 0);
        if (SOC_FAILURE(rv)) {
            /* Always release the MPLS lock on every exit path. */
            bcm_tr_mpls_unlock (unit);
            return rv;
        }
        if ((soc_MPLS_ENTRYm_field32_get(unit, &mpls_entry,
                                         VALIDf) != 0x1)) {
            bcm_tr_mpls_unlock (unit);
            return (BCM_E_PARAM);
        }
        if (h_data_p->type == bcmOAMEndpointTypeBHHMPLSVccv) {
            /* Get the control channel type */
            vccv_type = soc_MPLS_ENTRYm_field32_get(unit, &mpls_entry,
                                                    MPLS__PW_CC_TYPEf);
            if (vccv_type == 0) { /* No VCCV type configured */
                /* HW encodes CC type as (API enum + 1); 0 means unset. */
                soc_MPLS_ENTRYm_field32_set(unit, &mpls_entry,
                                            MPLS__PW_CC_TYPEf,
                                            (h_data_p->vccv_type + 1));
            } else if (vccv_type != (h_data_p->vccv_type + 1)) {
                /* Endpoint add configuration conflicts with MPLS port add config */
                bcm_tr_mpls_unlock(unit);
                return BCM_E_PARAM;
            }
        }
        /* Flag the entry for BHH processing and write it back. */
        soc_MPLS_ENTRYm_field32_set(unit, &mpls_entry,
                                    MPLS__SESSION_IDENTIFIER_TYPEf, 1);
        soc_MPLS_ENTRYm_field32_set(unit, &mpls_entry, MPLS__BHH_ENABLEf, 1);
        rv = soc_mem_write(unit, MPLS_ENTRYm,
                           MEM_BLOCK_ANY, mpls_index,
                           &mpls_entry);
        if (rv != SOC_E_NONE) {
            bcm_tr_mpls_unlock(unit);
            return rv;
        }
        /*
         * PW CC-3
         *
         * Set MPLS entry DECAP_USE_TTL=0 for corresponding
         * Tunnel Terminator label.
         */
        if (h_data_p->type == bcmOAMEndpointTypeBHHMPLSVccv) {
            if (h_data_p->vccv_type == bcmOamBhhVccvTtl) {
                rv = bcm_esw_l3_egress_get(unit, h_data_p->egress_if,
                                           &l3_egress);
                if (rv != BCM_E_NONE) {
                    bcm_tr_mpls_unlock(unit);
                    return rv;
                }
                /* Look for Tunnel Terminator label */
                num_entries = soc_mem_index_count(unit, MPLS_ENTRYm);
                /* Linear scan of the whole table for the entry whose
                 * L3_IIF matches the egress object's interface. */
                for (i = 0; i < num_entries; i++) {
                    rv = READ_MPLS_ENTRYm(unit, MEM_BLOCK_ANY, i, &mpls_entry);
                    if (rv != SOC_E_NONE) {
                        bcm_tr_mpls_unlock(unit);
                        return rv;
                    }
                    if (!soc_MPLS_ENTRYm_field32_get(unit, &mpls_entry, VALIDf)) {
                        continue;
                    }
                    if (soc_MPLS_ENTRYm_field32_get(unit, &mpls_entry,
                                    MPLS__MPLS_ACTION_IF_BOSf) == 0x1) {
                        continue; /* L2_SVP */
                    }
                    ingress_if = soc_MPLS_ENTRYm_field32_get(unit, &mpls_entry,
                                                             MPLS__L3_IIFf);
                    if (ingress_if == l3_egress.intf) {
                        /* Label found */
                        soc_MPLS_ENTRYm_field32_set(unit, &mpls_entry,
                                                    MPLS__DECAP_USE_TTLf, 0);
                        rv = soc_mem_write(unit, MPLS_ENTRYm,
                                           MEM_BLOCK_ALL, i,
                                           &mpls_entry);
                        if (rv != SOC_E_NONE) {
                            bcm_tr_mpls_unlock(unit);
                            return rv;
                        }
                        break;
                    }
                }
            }
        }
        bcm_tr_mpls_unlock (unit);
#else
        rv = BCM_E_UNAVAIL;
#endif /* BCM_KATANA2_SUPPORT && BCM_MPLS_SUPPORT */
        break;
    default:
        /* Only MPLS LSP/PW endpoint types carry HW encap state here. */
        rv = BCM_E_UNAVAIL;
        break;
    }
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_bhh_encap_data_dump
* Purpose:
* Dumps buffer contents.
* Parameters:
* buffer - (IN) Buffer to dump data.
* length - (IN) Length of buffer.
* Returns:
* None
*/
/*
 * Hex-dump a BHH encapsulation buffer to the CLI, 16 bytes per row.
 * Debug aid only; produces no return value.
 */
void
_bcm_kt2_oam_bhh_encap_data_dump(uint8 *buffer, int length)
{
    int byte_idx;

    LOG_CLI((BSL_META("\nBHH encapsulation (length=%d):\n"), length));

    for (byte_idx = 0; byte_idx < length; byte_idx++) {
        /* Start a new output line at every 16-byte boundary. */
        if ((byte_idx % 16) == 0) {
            LOG_CLI((BSL_META("\n")));
        }
        LOG_CLI((BSL_META(" %02x"), buffer[byte_idx]));
    }
    LOG_CLI((BSL_META("\n")));
    return;
}
/*
 * Populate the Associated Channel Header (ACH) fields for a BHH
 * control packet.  The channel type is taken from the module-level
 * configured value; packet_flags is currently not consulted.
 */
STATIC int
_bcm_kt2_oam_bhh_ach_header_get(uint32 packet_flags, _ach_header_t *ach)
{
    /* Start from a zeroed header; 'reserved' must remain 0. */
    sal_memset(ach, 0, sizeof(*ach));

    ach->channel_type = kt2_oam_mpls_tp_ach_channel_type;
    ach->version      = SHR_BHH_ACH_VERSION;
    ach->f_nibble     = SHR_BHH_ACH_FIRST_NIBBLE;
    ach->reserved     = 0;

    return (BCM_E_NONE);
}
/*
 * Fill an _mpls_label_t descriptor from discrete label fields.
 * EXP is masked to its 3-bit field; a zero TTL selects the default.
 */
STATIC int
_bcm_kt2_oam_bhh_mpls_label_get(uint32 label, uint8 exp, uint8 s, uint8 ttl,
                                _mpls_label_t *mpls)
{
    sal_memset(mpls, 0, sizeof(*mpls));

    mpls->label = label;
    mpls->exp   = exp & 0x07;    /* EXP/TC is only 3 bits wide. */
    mpls->s     = s;
    /* TTL of 0 means "unspecified" — substitute the module default. */
    mpls->ttl   = (ttl != 0) ? ttl : _BHH_MPLS_DFLT_TTL;

    return (BCM_E_NONE);
}
/*
 * Build the GAL (Generic Associated Channel Label) descriptor.
 * The GAL is always bottom-of-stack (s=1) and carried with TTL 1.
 */
STATIC int
_bcm_kt2_oam_bhh_mpls_gal_label_get(_mpls_label_t *mpls, uint8 oam_exp)
{
    int rv;

    rv = _bcm_kt2_oam_bhh_mpls_label_get(SHR_BHH_MPLS_GAL_LABEL,
                                         oam_exp, 1, 1, mpls);
    return rv;
}
/*
 * Build the MPLS label stack for a BHH endpoint in wire order.
 *
 * The stack is assembled from (in order): the tunnel-initiator labels
 * on the egress interface (reversed into packet order), an optional
 * label from the L3 egress object (LSP endpoints), an optional Router
 * Alert label, and the PW/VC label from the MPLS port (PW endpoints).
 *
 * Parameters:
 *     unit        - (IN)  Unit number.
 *     h_data_p    - (IN)  Endpoint hash data (egress_if, gport, vpn, ...).
 *     packet_flags- (IN)  _BHH_ENCAP_PKT_* flags controlling RAL/GAL.
 *     max_count   - (IN)  Capacity of 'mpls' (not checked here — TODO confirm
 *                         callers always pass _BHH_MPLS_MAX_LABELS).
 *     pw_label    - (OUT) Unused in this implementation.
 *     mpls        - (OUT) Wire-order label descriptors.
 *     mpls_count  - (OUT) Number of labels written to 'mpls'.
 *     l3_intf_id  - (OUT) Unused in this implementation.
 * Returns:
 *     BCM_E_NONE on success, BCM_E_PARAM on lookup failures.
 */
STATIC int
_bcm_kt2_oam_bhh_mpls_labels_get(int unit, _bcm_oam_hash_data_t *h_data_p,
                                 uint32 packet_flags,
                                 int max_count,
                                 _mpls_label_t *pw_label,
                                 _mpls_label_t *mpls,
                                 int *mpls_count, bcm_if_t *l3_intf_id)
{
    int count = 0;
    bcm_l3_egress_t l3_egress;
    bcm_mpls_port_t mpls_port;
    bcm_mpls_egress_label_t label_array[_BHH_MPLS_MAX_LABELS];
    bcm_mpls_egress_label_t tmp_label;
    /*
     * BUG FIX: label_count must start at 0.  If no tunnel initiator is
     * configured on the interface, the lookup below fails quietly and
     * label_count would otherwise be used uninitialized as an index
     * into label_array[] by the Router Alert and MPLS-port paths.
     */
    int label_count = 0;
    int i = 0;
    /* Get L3 objects */
    bcm_l3_egress_t_init(&l3_egress);
    if (BCM_FAILURE(bcm_esw_l3_egress_get(unit, h_data_p->egress_if, &l3_egress))) {
        return (BCM_E_PARAM);
    }
    /* Look for a tunnel associated with this interface */
    if (BCM_SUCCESS
        (bcm_esw_mpls_tunnel_initiator_get(unit, l3_egress.intf,
                                           _BHH_MPLS_MAX_LABELS,
                                           label_array, &label_count))) {
        /* We need to swap the labels configured in tunnel initiator as
         * it returns labels in reverse order than how it should be in pkt.
         */
        for (i = 0; i < (label_count / 2); i++) {
            sal_memcpy(&tmp_label, &label_array[i], sizeof(tmp_label));
            sal_memcpy(&label_array[i], &label_array[label_count - 1 - i],
                       sizeof(tmp_label));
            sal_memcpy(&label_array[label_count - 1 - i], &tmp_label,
                       sizeof(tmp_label));
        }
        /* Add VRL Label */
        if (h_data_p->type == bcmOAMEndpointTypeBHHMPLS) {
            if ((l3_egress.mpls_label != BCM_MPLS_LABEL_INVALID) &&
                (h_data_p->egr_label != BCM_MPLS_LABEL_INVALID) &&
                (label_count < _BHH_MPLS_MAX_LABELS) ) {
                label_array[label_count].label = l3_egress.mpls_label;
                label_array[label_count].exp = l3_egress.mpls_exp;
                label_array[label_count].ttl = l3_egress.mpls_ttl;
                label_count++;
            }
            /* Traverse through Label stack and match on control label, this
             * label should be the one just above GAL in the label stack, any
             * following labels should not be added.
             */
            for (i = 0; i < label_count; i++) {
                if (label_array[i].label == h_data_p->egr_label) {
                    /* Truncate the stack just past the control label. */
                    label_count = i + 1;
                    label_array[i].exp = h_data_p->egr_label_exp;
                    if (soc_property_get(unit, spn_MPLS_OAM_EGRESS_LABEL_TTL, 0) &&
                        h_data_p->egr_label_ttl) {
                        label_array[i].ttl = h_data_p->egr_label_ttl;
                    }
                    break;
                }
            }
        }
    }
    /* MPLS Router Alert */
    if (packet_flags & _BHH_ENCAP_PKT_MPLS_ROUTER_ALERT) {
        /* Ignore overrun error as RAL is only added for PW MEPs and so overrun
         * condition will not exist.
         */
        /* coverity[overrun-local] */
        label_array[label_count].label = SHR_BHH_MPLS_ROUTER_ALERT_LABEL;
        label_array[label_count].exp = 0;
        label_array[label_count].ttl = 0;
        label_count++;
    }
    /* Use GPORT to resolve interface */
    if (BCM_GPORT_IS_MPLS_PORT(h_data_p->gport)) {
        /* Get mpls port and label info */
        bcm_mpls_port_t_init(&mpls_port);
        mpls_port.mpls_port_id = h_data_p->gport;
        if (BCM_FAILURE(bcm_esw_mpls_port_get(unit, h_data_p->vpn,
                                              &mpls_port))) {
            return (BCM_E_PARAM);
        }
        /* VCCV TTL-expiry (CC-3) sends the VC label with TTL=1. */
        if (h_data_p->type == bcmOAMEndpointTypeBHHMPLSVccv &&
            h_data_p->vccv_type == bcmOamBhhVccvTtl) {
            mpls_port.egress_label.ttl = 0x1;
        }
        label_array[label_count].label = mpls_port.egress_label.label;
        if (h_data_p->egr_label == mpls_port.egress_label.label) {
            /* Control label matches the VC label: apply overrides. */
            label_array[label_count].exp = h_data_p->egr_label_exp;
            if (soc_property_get(unit, spn_MPLS_OAM_EGRESS_LABEL_TTL, 0) &&
                h_data_p->egr_label_ttl) {
                label_array[label_count].ttl = h_data_p->egr_label_ttl;
            }
        } else {
            label_array[label_count].exp = mpls_port.egress_label.exp;
            label_array[label_count].ttl = mpls_port.egress_label.ttl;
        }
        label_count++;
    }
    /* Convert BCM label structures into wire-format descriptors. */
    for (i = 0; i < label_count; i++) {
        _bcm_kt2_oam_bhh_mpls_label_get(label_array[i].label,
                                        label_array[i].exp,
                                        0,
                                        label_array[i].ttl,
                                        &mpls[count++]);
    }
    /* Set Bottom of Stack if there is no GAL label.  Guard count == 0
     * (possible when no tunnel, no RAL and no MPLS gport). */
    if ((count > 0) && !(packet_flags & _BHH_ENCAP_PKT_GAL)) {
        mpls[count-1].s = 1;
    }
    *mpls_count = count;
    return (BCM_E_NONE);
}
STATIC int
_bcm_kt2_oam_bhh_l2_header_get(int unit, _bcm_oam_hash_data_t *h_data_p,
                               bcm_port_t port, uint16 etype,
                               _l2_header_t *l2)
{
    /*
     * Build the L2 header for a BHH packet.  For LSP/PW endpoints the
     * MACs/VLAN come from the L3 egress object and L3 interface; for
     * MPLS-TP section endpoints they come from the endpoint itself.
     * A tpid of 0 in the result marks an untagged outer/inner tag.
     */
    uint16 tpid;
    bcm_l3_egress_t l3_egress;
    bcm_l3_intf_t l3_intf;
    bcm_vlan_control_vlan_t vc;
    int tpid_select;
    bcm_pbmp_t pbmp, ubmp;
    sal_memset(l2, 0, sizeof(*l2));
    if (!BHH_EP_MPLS_SECTION_TYPE(h_data_p)) {
        /* LSP/PW endpoint: derive addressing from L3 objects. */
        /* Get L3 interfaces */
        bcm_l3_egress_t_init(&l3_egress);
        bcm_l3_intf_t_init(&l3_intf);
        if (BCM_FAILURE
            (bcm_esw_l3_egress_get(unit, h_data_p->egress_if, &l3_egress))) {
            return (BCM_E_PARAM);
        }
        l3_intf.l3a_intf_id = l3_egress.intf;
        if (BCM_FAILURE(bcm_esw_l3_intf_get(unit, &l3_intf))) {
            return (BCM_E_PARAM);
        }
        /* Get TPID: per-port override, else per-VLAN outer TPID. */
        BCM_IF_ERROR_RETURN(bcm_esw_vlan_control_port_get(unit,
                                                          port,
                                                          bcmVlanPortOuterTpidSelect,
                                                          &tpid_select));
        if (tpid_select == BCM_PORT_OUTER_TPID) {
            BCM_IF_ERROR_RETURN(bcm_esw_port_tpid_get(unit, port, &tpid));
        } else {
            BCM_IF_ERROR_RETURN(
                bcm_esw_vlan_control_vlan_get(unit, l3_intf.l3a_vid, &vc));
            tpid = vc.outer_tpid;
        }
        /* DA from the egress next hop, SA from the L3 interface. */
        sal_memcpy(l2->dst_mac, l3_egress.mac_addr, _BHH_MAC_ADDR_LENGTH);
        sal_memcpy(l2->src_mac, l3_intf.l3a_mac_addr, _BHH_MAC_ADDR_LENGTH);
        l2->vlan_tag.tpid = tpid;
        l2->vlan_tag.tci.prio = h_data_p->vlan_pri;
        l2->vlan_tag.tci.cfi = 0;
        l2->vlan_tag.tci.vid = l3_intf.l3a_vid;
        /* If the port egresses this VLAN untagged, suppress the tag. */
        BCM_IF_ERROR_RETURN(bcm_esw_vlan_port_get(unit,
                                                  l2->vlan_tag.tci.vid,
                                                  &pbmp,
                                                  &ubmp));
        if (BCM_PBMP_MEMBER(ubmp, port)) {
            l2->vlan_tag.tpid = 0;    /* Set to 0 to indicate untagged */
        }
        /* Inner tag only when the L3 interface is double-tagged. */
        if (l3_intf.l3a_inner_vlan != 0) {
            BCM_IF_ERROR_RETURN(bcm_esw_port_inner_tpid_get(unit, port, &tpid));
            l2->vlan_tag_inner.tpid = tpid;
            l2->vlan_tag_inner.tci.prio = h_data_p->inner_vlan_pri;
            l2->vlan_tag_inner.tci.cfi = 0;
            l2->vlan_tag_inner.tci.vid = l3_intf.l3a_inner_vlan;
        }
        l2->etype = etype;
    } else {
        /* MPLS-TP section endpoint: MACs/VLANs come from the endpoint. */
        sal_memcpy(l2->dst_mac, h_data_p->dst_mac_address, _BHH_MAC_ADDR_LENGTH);
        sal_memcpy(l2->src_mac, h_data_p->src_mac_address, _BHH_MAC_ADDR_LENGTH);
        if((h_data_p->type == bcmOAMEndpointTypeBhhSectionOuterVlan) ||
           (h_data_p->type == bcmOAMEndpointTypeBhhSectionInnervlan) ||
           (h_data_p->type == bcmOAMEndpointTypeBhhSectionOuterPlusInnerVlan)){
            if (h_data_p->vlan != 0) {
                l2->vlan_tag.tpid = h_data_p->outer_tpid;
                l2->vlan_tag.tci.prio = h_data_p->vlan_pri;
                l2->vlan_tag.tci.cfi = 0;
                l2->vlan_tag.tci.vid = h_data_p->vlan;
            }
        }
        if((h_data_p->type == bcmOAMEndpointTypeBhhSectionInnervlan) ||
           (h_data_p->type == bcmOAMEndpointTypeBhhSectionOuterPlusInnerVlan)){
            l2->vlan_tag_inner.tpid = h_data_p->inner_tpid;
            l2->vlan_tag_inner.tci.prio = h_data_p->inner_vlan_pri;
            l2->vlan_tag_inner.tci.cfi = 0;
            l2->vlan_tag_inner.tci.vid = h_data_p->inner_vlan;
        }
        /* Section OAM is always carried as MPLS unicast. */
        l2->etype = SHR_BHH_L2_ETYPE_MPLS_UCAST;
    }
    return BCM_E_NONE;
}
/*
 * Serialize an ACH into the buffer in network order and return the
 * advanced buffer pointer.
 */
STATIC uint8 *
_bcm_kt2_oam_bhh_ach_header_pack(uint8 *buffer, _ach_header_t *ach)
{
    uint32 word;

    /*
     * ACH word layout:
     *   [31:28] first nibble   [27:24] version
     *   [23:16] reserved       [15:0]  channel type
     */
    word  = (ach->f_nibble & 0xf) << 28;
    word |= (ach->version & 0xf) << 24;
    word |= ach->reserved << 16;
    word |= ach->channel_type;

    _BHH_ENCAP_PACK_U32(buffer, word);
    return (buffer);
}
/*
 * Serialize one MPLS shim word into the buffer in network order and
 * return the advanced buffer pointer.
 */
STATIC uint8 *
_bcm_kt2_oam_bhh_mpls_label_pack(uint8 *buffer, _mpls_label_t *mpls)
{
    uint32 word;

    /*
     * MPLS shim layout:
     *   [31:12] label   [11:9] EXP   [8] bottom-of-stack   [7:0] TTL
     */
    word  = (mpls->label & 0xfffff) << 12;
    word |= (mpls->exp & 0x7) << 9;
    word |= (mpls->s & 0x1) << 8;
    word |= mpls->ttl;

    _BHH_ENCAP_PACK_U32(buffer, word);
    return (buffer);
}
/*
 * Serialize the L2 header (MACs, optional outer/inner VLAN tags,
 * EtherType) into the buffer and return the advanced buffer pointer.
 */
STATIC uint8 *
_bcm_kt2_oam_bhh_l2_header_pack(uint8 *buffer, _l2_header_t *l2)
{
    uint32 tag_word;
    int idx;

    /* Destination MAC followed by source MAC, one byte at a time. */
    for (idx = 0; idx < _BHH_MAC_ADDR_LENGTH; idx++) {
        _BHH_ENCAP_PACK_U8(buffer, l2->dst_mac[idx]);
    }
    for (idx = 0; idx < _BHH_MAC_ADDR_LENGTH; idx++) {
        _BHH_ENCAP_PACK_U8(buffer, l2->src_mac[idx]);
    }

    /* Outer VLAN tag; tpid == 0 means the packet goes out untagged. */
    if (0 != l2->vlan_tag.tpid) {
        tag_word = (l2->vlan_tag.tpid << 16) |
                   ((l2->vlan_tag.tci.prio & 0x7) << 13) |
                   ((l2->vlan_tag.tci.cfi & 0x1) << 12) |
                   (l2->vlan_tag.tci.vid & 0xfff);
        _BHH_ENCAP_PACK_U32(buffer, tag_word);
    }

    /* Inner VLAN tag; inner tpid == 0 means single-tagged. */
    if (0 != l2->vlan_tag_inner.tpid) {
        tag_word = (l2->vlan_tag_inner.tpid << 16) |
                   ((l2->vlan_tag_inner.tci.prio & 0x7) << 13) |
                   ((l2->vlan_tag_inner.tci.cfi & 0x1) << 12) |
                   (l2->vlan_tag_inner.tci.vid & 0xfff);
        _BHH_ENCAP_PACK_U32(buffer, tag_word);
    }

    _BHH_ENCAP_PACK_U16(buffer, l2->etype);
    return (buffer);
}
/*
* Function:
* _bcm_kt2_oam_bhh_encap_build_pack
* Purpose:
* Builds and packs the BHH packet encapsulation for a given
* BHH tunnel type.
* Parameters:
* unit - (IN) Unit number.
* port - (IN) Port.
* endpoint_config - (IN/OUT) Pointer to BHH endpoint structure.
* packet_flags - (IN) Flags for building packet.
* buffer - (OUT) Buffer returning BHH encapsulation.
* Returns:
* BCM_E_XXX
* Notes:
* The returning BHH encapsulation includes only all the
* encapsulation headers/labels and does not include
* the BHH control packet.
*/
STATIC int
_bcm_kt2_oam_bhh_encap_build_pack(int unit, bcm_port_t port,
                                  _bcm_oam_hash_data_t *h_data_p,
                                  uint32 packet_flags,
                                  uint8 *buffer,
                                  uint32 *encap_length)
{
    uint8 *cur_ptr = buffer;      /* Write cursor into 'buffer'. */
    uint16 etype = 0;             /* 0 until the MPLS flag selects one. */
    bcm_if_t l3_intf_id = -1;
    _ach_header_t ach;
    _mpls_label_t mpls_gal;
    _mpls_label_t mpls_labels[_BHH_MPLS_MAX_LABELS];
    _mpls_label_t pw_label;
    int mpls_count = 0;
    _l2_header_t l2;
    int i;
    /*
     * Get necessary headers/labels information.
     *
     * Following order is important since some headers/labels
     * may depend on previous header/label information.
     */
    if (packet_flags & _BHH_ENCAP_PKT_G_ACH) {
        BCM_IF_ERROR_RETURN
            (_bcm_kt2_oam_bhh_ach_header_get(packet_flags, &ach));
    }
    if (packet_flags & _BHH_ENCAP_PKT_GAL) {
        /* Propagate the endpoint's EXP only if its control label IS
         * the GAL itself; otherwise the GAL gets EXP 0. */
        if (h_data_p->egr_label == SHR_BHH_MPLS_GAL_LABEL) {
            BCM_IF_ERROR_RETURN
                (_bcm_kt2_oam_bhh_mpls_gal_label_get(&mpls_gal,
                                                     h_data_p->egr_label_exp));
        } else {
            BCM_IF_ERROR_RETURN
                (_bcm_kt2_oam_bhh_mpls_gal_label_get(&mpls_gal, 0));
        }
    }
    if (packet_flags & _BHH_ENCAP_PKT_MPLS) {
        etype = SHR_BHH_L2_ETYPE_MPLS_UCAST;
        BCM_IF_ERROR_RETURN
            (_bcm_kt2_oam_bhh_mpls_labels_get(unit, h_data_p,
                                              packet_flags,
                                              _BHH_MPLS_MAX_LABELS,
                                              &pw_label,
                                              mpls_labels,
                                              &mpls_count,
                                              &l3_intf_id));
    }
    /* Always build L2 Header */
    BCM_IF_ERROR_RETURN
        (_bcm_kt2_oam_bhh_l2_header_get(unit,
                                        h_data_p,
                                        port,
                                        etype,
                                        &l2));
    /*
     * Pack header/labels into given buffer (network packet format).
     *
     * Following packing order must be followed to correctly
     * build the packet encapsulation.
     */
    cur_ptr = buffer;
    /* L2 Header is always present */
    cur_ptr = _bcm_kt2_oam_bhh_l2_header_pack(cur_ptr, &l2);
    /* Label stack in wire order, then GAL, then ACH. */
    if (packet_flags & _BHH_ENCAP_PKT_MPLS) {
        for (i = 0;i < mpls_count;i++) {
            cur_ptr = _bcm_kt2_oam_bhh_mpls_label_pack(cur_ptr, &mpls_labels[i]);
        }
    }
    if (packet_flags & _BHH_ENCAP_PKT_GAL) {
        cur_ptr = _bcm_kt2_oam_bhh_mpls_label_pack(cur_ptr, &mpls_gal);
    }
    if (packet_flags & _BHH_ENCAP_PKT_G_ACH) {
        cur_ptr = _bcm_kt2_oam_bhh_ach_header_pack(cur_ptr, &ach);
    }
    /* Set BHH encapsulation length */
    *encap_length = cur_ptr - buffer;
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_bhh_encap_create
* Purpose:
* Creates a BHH packet encapsulation.
* Parameters:
* unit - (IN) Unit number.
* port_id - (IN) Port.
* endpoint_config - (IN/OUT) Pointer to BHH endpoint structure.
* encap_data - (OUT) Buffer returning BHH encapsulation.
* Returns:
* BCM_E_XXX
* Notes:
* The returning BHH encapsulation buffer includes all the
* corresponding headers/labels EXCEPT for the BHH control packet.
*/
STATIC int
_bcm_kt2_oam_bhh_encap_create(int unit, bcm_port_t port_id,
                              _bcm_oam_hash_data_t *h_data_p,
                              uint8 *encap_data,
                              uint8 *encap_type,
                              uint32 *encap_length)
{
    uint32 packet_flags;
    _bcm_oam_control_t *oc;
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    packet_flags = 0;
    /*
     * Get BHH encapsulation packet format flags
     *
     * Also, perform the following for each BHH tunnel type:
     *    - Check for valid parameter values
     *    - Set specific values required by the BHH tunnel definition
     *      (e.g. such as ttl=1,...)
     */
    switch (h_data_p->type) {
    case bcmOAMEndpointTypeBHHMPLS:
        /* LSP MEP: label stack + GAL + ACH. */
        packet_flags |=
            (_BHH_ENCAP_PKT_MPLS |
             _BHH_ENCAP_PKT_GAL  |
             _BHH_ENCAP_PKT_G_ACH);
        break;
    case bcmOAMEndpointTypeBHHMPLSVccv:
        /* PW MEP: flag set depends on the VCCV control channel. */
        switch(h_data_p->vccv_type) {
        case bcmOamBhhVccvChannelAch:
            packet_flags |=
                (_BHH_ENCAP_PKT_MPLS |
                 _BHH_ENCAP_PKT_PW   |
                 _BHH_ENCAP_PKT_G_ACH);
            break;
        case bcmOamBhhVccvRouterAlert:
            packet_flags |=
                (_BHH_ENCAP_PKT_MPLS |
                 _BHH_ENCAP_PKT_PW   |
                 _BHH_ENCAP_PKT_MPLS_ROUTER_ALERT |
                 _BHH_ENCAP_PKT_G_ACH);
            break;
        case bcmOamBhhVccvTtl:
            packet_flags |=
                (_BHH_ENCAP_PKT_MPLS |
                 _BHH_ENCAP_PKT_PW   |
                 _BHH_ENCAP_PKT_G_ACH);
            break;
        case bcmOamBhhVccvGal13:
            packet_flags |=
                (_BHH_ENCAP_PKT_MPLS |
                 _BHH_ENCAP_PKT_PW   |
                 _BHH_ENCAP_PKT_GAL  |
                 _BHH_ENCAP_PKT_G_ACH);
            break;
        default:
            return (BCM_E_PARAM);
            break;
        }
        break;
    case bcmOAMEndpointTypeBhhSection:
    case bcmOAMEndpointTypeBhhSectionInnervlan:
    case bcmOAMEndpointTypeBhhSectionOuterVlan:
    case bcmOAMEndpointTypeBhhSectionOuterPlusInnerVlan:
        /* Section MEP: no label stack, just GAL + ACH over L2. */
        packet_flags |=
            (_BHH_ENCAP_PKT_GAL  |
             _BHH_ENCAP_PKT_G_ACH);
        break;
    default:
        return (BCM_E_PARAM);
    }
    /* Build header/labels and pack in buffer */
    BCM_IF_ERROR_RETURN
        (_bcm_kt2_oam_bhh_encap_build_pack(unit, port_id,
                                           h_data_p,
                                           packet_flags,
                                           encap_data,
                                           encap_length));
    /* Set encap type (indicates uC side that checksum is required) */
    *encap_type = SHR_BHH_ENCAP_TYPE_RAW;
#ifdef _BHH_DEBUG_DUMP
    _bcm_kt2_oam_bhh_encap_data_dump(encap_data, *encap_length);
#endif
    /*
     * NOTE(review): encap_data is written BEFORE this length check, so
     * the caller must size encap_data for the worst-case encapsulation;
     * the check only rejects the session, it cannot prevent an overrun
     * of a too-small buffer — verify caller buffer sizing.
     */
    if (*encap_length > oc->bhh_max_encap_length) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Encap length greater than max,"
                              "encap_length=%u max=%u\n"),
                   unit, *encap_length, oc->bhh_max_encap_length));
        return BCM_E_CONFIG;
    }
    return (BCM_E_NONE);
}
/*
* Function:
* _bcm_kt2_oam_bhh_appl_callback
* Purpose:
* Update FW BHH appl state
* Parameters:
* unit - (IN) Unit number.
* uC - (IN) core number.
* stage - (IN) core reset stage.
* user_data - (IN) data pointer.
* Returns:
* BCM_E_XXX
* Notes:
*/
/*
 * uC shutdown callback: mark the BHH uKernel application as not ready
 * so subsequent OAM calls bail out with BCM_E_INIT instead of
 * messaging a dead core.  uC/stage/user_data are unused here.
 */
int _bcm_kt2_oam_bhh_appl_callback(int unit,
                                   int uC,
                                   soc_cmic_uc_shutdown_stage_t stage,
                                   void *user_data) {
    _bcm_oam_control_t *oam_ctrl = NULL;

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oam_ctrl));

    /* Flip the flag under the OAM lock to serialize with API calls. */
    _BCM_OAM_LOCK(oam_ctrl);
    oam_ctrl->ukernel_not_ready = 1;
    _BCM_OAM_UNLOCK(oam_ctrl);

    return BCM_E_NONE;
}
/*
* Function:
* bcm_kt2_oam_bhh_endpoint_create
* Purpose:
* Initialize the HW for BHH packet processing.
* Configure:
* - Copy to CPU BHH error packets
* - CPU COS Queue for BHH packets
* - RX DMA channel
* Parameters:
* unit - (IN) Unit number.
* Returns:
* BCM_E_XXX
* Notes:
*/
int
bcm_kt2_oam_bhh_endpoint_create(int unit,
bcm_oam_endpoint_info_t *endpoint_info,
_bcm_oam_hash_key_t *hash_key)
{
_bcm_oam_control_t *oc;
_bcm_oam_hash_data_t *hash_data = NULL; /* Endpoint hash data pointer. */
_bcm_oam_group_data_t *group_p; /* Pointer to group data. */
int ep_req_index; /* Requested endpoint index. */
int rv = BCM_E_NONE; /* Operation return status. */
int is_remote = 0; /* Remote endpoint status. */
int is_replace;
int is_local = 0;
uint32 sglp = 0; /* Source global logical port. */
uint32 dglp = 0; /* Dest global logical port. */
bcm_module_t module_id; /* Module ID */
bcm_port_t port_id;
bcm_trunk_t trunk_id;
int local_id;
uint32 svp = 0; /* Source virtual port */
bcm_port_t src_pp_port = 0; /* Source pp port. */
bcm_port_t dst_pp_port = 0; /* Dest pp port. */
bhh_sdk_msg_ctrl_sess_set_t msg_sess;
bhh_sdk_msg_ctrl_rmep_create_t msg_rmep_create;
uint8 *buffer, *buffer_ptr;
uint16 buffer_len, reply_len;
int encap = 0;
uint32 session_flags;
int bhh_pool_ep_idx = 0;
egr_l3_next_hop_entry_t egr_nh_entry;
int egr_nh_index;
int is_vp_valid = 0;
uint8 group_sw_rdi = 0;
bcm_gport_t tx_gport;
LOG_DEBUG(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Info: bcm_kt2_oam_bhh_endpoint_create"
"Endpoint ID=%d.\n"), endpoint_info->id));
_BCM_OAM_BHH_IS_VALID(unit);
/* Validate input parameter. */
if (NULL == endpoint_info) {
return (BCM_E_PARAM);
}
/* Lock already taken by the calling routine. */
BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
/* Perform BHH specific validation checks */
/* BHH EP can only be at the MAX Level */
if ( endpoint_info->level != _BCM_OAM_EP_LEVEL_MAX ) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: EP Level should be equal to %d\n"),
_BCM_OAM_EP_LEVEL_MAX));
return (BCM_E_PARAM);
}
/* Get MEP remote endpoint status. */
is_remote = (endpoint_info->flags & BCM_OAM_ENDPOINT_REMOTE) ? 1 : 0;
is_replace = ((endpoint_info->flags & BCM_OAM_ENDPOINT_REPLACE) != 0);
/* Validate EP Id if BCM_OAM_ENDPOINT_WITH_ID flag is set */
if (endpoint_info->flags & BCM_OAM_ENDPOINT_WITH_ID) {
if((endpoint_info->id < _BCM_OAM_BHH_KT2_ENDPOINT_OFFSET) ||
(endpoint_info->id >= (_BCM_OAM_BHH_KT2_ENDPOINT_OFFSET
+ oc->bhh_endpoint_count))) {
return (BCM_E_PARAM);
}
}
/* Validate remote EP Id if BCM_OAM_ENDPOINT_REMOTE flag is set */
if (is_remote) {
if((endpoint_info->local_id < _BCM_OAM_BHH_KT2_ENDPOINT_OFFSET) ||
(endpoint_info->local_id >= (_BCM_OAM_BHH_KT2_ENDPOINT_OFFSET
+ oc->bhh_endpoint_count))) {
return (BCM_E_PARAM);
}
}
if (is_replace) {
hash_data = &oc->oam_hash_data[endpoint_info->id];
if (!hash_data->in_use) {
return (BCM_E_NOT_FOUND);
}
/* Delete original and recreate new EP */
rv = _bcm_kt2_oam_endpoint_destroy(unit, endpoint_info->id);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Endpoint destroy (EP=%d) - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
return (rv);
}
}
/*
BHH API have been using L3_EGRESS type in
bcm_oam_endpoint_info_t.intf field.
However for supporting LM/DM on Katana2 this should be DVP_EGRESS type for PW,
only CCM will work with L3_EGRESS type, retaining the old type for
backward compatibility
*/
if(!BHH_EP_MPLS_SECTION_TYPE(endpoint_info)){
if ((endpoint_info->type == bcmOAMEndpointTypeBHHMPLSVccv) &&
(endpoint_info->flags & (BCM_OAM_ENDPOINT_LOSS_MEASUREMENT |
BCM_OAM_ENDPOINT_DELAY_MEASUREMENT))) {
if (!BCM_XGS3_DVP_EGRESS_IDX_VALID(unit, endpoint_info->intf_id)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: bcm_oam_endpoint_info_t.intf type"
" not valid. It should be DVP_EGRESS type for LM/DM support.\n")));
return (BCM_E_PARAM);
}
} else {
if ( !BCM_XGS3_L3_EGRESS_IDX_VALID(unit, endpoint_info->intf_id) &
!BCM_XGS3_DVP_EGRESS_IDX_VALID(unit, endpoint_info->intf_id) ) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: bcm_oam_endpoint_info_t.intf type"
" not valid. It should be DVP_EGRESS or L3_EGRESS type.\n")));
return (BCM_E_PARAM);
}
}
}
/* Now that we passed the validation checks
* Create a new endpoint with the requested ID. */
if ( !is_remote && (endpoint_info->flags & BCM_OAM_ENDPOINT_WITH_ID) ) {
hash_data = &oc->oam_hash_data[endpoint_info->id];
if (!is_replace && hash_data->in_use) {
return (BCM_E_EXISTS);
}
ep_req_index = endpoint_info->id;
bhh_pool_ep_idx = BCM_OAM_BHH_GET_UKERNEL_EP(ep_req_index);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: Endpoint check (EP=%d) - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
return (rv);
}
if(!is_replace) {
rv = shr_idxres_list_reserve(oc->bhh_pool, bhh_pool_ep_idx,
bhh_pool_ep_idx);
if (BCM_FAILURE(rv)) {
rv = (rv == BCM_E_RESOURCE) ? (BCM_E_EXISTS) : rv;
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: Endpoint reserve (EP=%d) - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
return (rv);
}
}
} else {
/* BHH uses local and remote same index */
if(endpoint_info->flags & BCM_OAM_ENDPOINT_REMOTE) {
ep_req_index = endpoint_info->local_id;
bhh_pool_ep_idx = BCM_OAM_BHH_GET_UKERNEL_EP(ep_req_index);
}
else {
/* Allocate the next available endpoint index. */
rv = shr_idxres_list_alloc(oc->bhh_pool,
(shr_idxres_element_t *)&bhh_pool_ep_idx);
if (BCM_FAILURE(rv)) {
rv = (rv == BCM_E_RESOURCE) ? (BCM_E_FULL) : rv;
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: Endpoint alloc failed - %s.\n"),
bcm_errmsg(rv)));
return (rv);
}
/* Set the allocated endpoint id value. */
ep_req_index = BCM_OAM_BHH_GET_SDK_EP(bhh_pool_ep_idx);
}
endpoint_info->id = ep_req_index;
}
/* Get the hash data pointer where the data is to be stored. */
hash_data = &oc->oam_hash_data[ep_req_index];
group_p = &oc->group_info[endpoint_info->group];
/*
* The uKernel is not provisioned until both endpoints (local and remote)
* are provisioned in the host.
*/
if (is_remote) {
if (!hash_data->in_use) {
return (BCM_E_NOT_FOUND);
} else if (hash_data->flags & BCM_OAM_ENDPOINT_REMOTE) {
return (BCM_E_EXISTS);
}
if(!(endpoint_info->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU)){
/*
* Now that both ends are provisioned the uKernel can be
* configured.
*/
msg_rmep_create.sess_id = bhh_pool_ep_idx;
msg_rmep_create.flags = 0;
msg_rmep_create.enable = 1;
msg_rmep_create.remote_mep_id = endpoint_info->name;
msg_rmep_create.period = _kt2_ccm_intervals[
_bcm_kt2_oam_ccm_msecs_to_hw_encode(endpoint_info->ccm_period)];
/* Pack control message data into DMA buffer */
buffer = oc->dma_buffer;
buffer_ptr = bhh_sdk_msg_ctrl_rmep_create_pack(buffer, &msg_rmep_create);
buffer_len = buffer_ptr - buffer;
/* Send BHH Session Update message to uC */
rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
MOS_MSG_SUBCLASS_BHH_RMEP_CREATE,
buffer_len, 0,
MOS_MSG_SUBCLASS_BHH_RMEP_CREATE_REPLY,
&reply_len);
if (BCM_FAILURE(rv) || (reply_len != 0)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH(unit %d) Error: Endpoint destroy (EP=%d) - %s.\n"),
unit, endpoint_info->id, bcm_errmsg(rv)));
return (BCM_E_INTERNAL);
}
}
} else {
/* Resolve given endpoint gport value to SGLP and DGLP values. */
rv = _bcm_kt2_oam_endpoint_gport_resolve(unit, endpoint_info, &sglp,
&dglp, &src_pp_port,
&dst_pp_port, &svp, &trunk_id,
&is_vp_valid, &tx_gport);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Gport resolve (EP=%d) - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
/* Return endpoint index to MEP pool. */
shr_idxres_list_free(oc->bhh_pool, bhh_pool_ep_idx);
return (rv);
}
/* Clear the hash data element contents before storing the values. */
_BCM_OAM_HASH_DATA_CLEAR(hash_data);
hash_data->oam_domain = _BCM_OAM_DOMAIN_BHH;
hash_data->type = endpoint_info->type;
hash_data->ep_id = endpoint_info->id;
hash_data->group_index = endpoint_info->group;
hash_data->level = endpoint_info->level;
hash_data->vlan = endpoint_info->vlan;
hash_data->inner_vlan = endpoint_info->inner_vlan;
hash_data->outer_tpid = endpoint_info->outer_tpid;
hash_data->inner_tpid = endpoint_info->inner_tpid;
hash_data->vlan_pri = endpoint_info->pkt_pri;
hash_data->inner_vlan_pri = endpoint_info->inner_pkt_pri;
hash_data->gport = endpoint_info->gport;
hash_data->trunk_index = endpoint_info->trunk_index;
hash_data->sglp = sglp;
hash_data->dglp = dglp;
hash_data->flags = endpoint_info->flags;
hash_data->flags2 = endpoint_info->flags2;
hash_data->period = endpoint_info->ccm_period;
hash_data->vccv_type = endpoint_info->vccv_type;
hash_data->vpn = endpoint_info->vpn;
hash_data->name = endpoint_info->name;
hash_data->egress_if = endpoint_info->intf_id;
hash_data->cpu_qid = endpoint_info->cpu_qid;
hash_data->int_pri = endpoint_info->int_pri;
hash_data->label = endpoint_info->mpls_label;
hash_data->local_tx_enabled = 0;
hash_data->local_rx_enabled = 1;
hash_data->ts_format = endpoint_info->timestamp_format;
hash_data->egr_label = endpoint_info->egress_label.label;
hash_data->egr_label_exp = endpoint_info->egress_label.exp;
hash_data->egr_label_ttl = endpoint_info->egress_label.ttl;
/* Initialize hardware index as invalid indices. */
hash_data->local_tx_index = _BCM_OAM_INVALID_INDEX;
hash_data->local_rx_index = _BCM_OAM_INVALID_INDEX;
hash_data->remote_index = _BCM_OAM_INVALID_INDEX;
hash_data->dglp1_profile_index = _BCM_OAM_INVALID_INDEX;
hash_data->dglp2_profile_index = _BCM_OAM_INVALID_INDEX;
hash_data->rx_ctr = _BCM_OAM_INVALID_INDEX;
hash_data->tx_ctr = _BCM_OAM_INVALID_INDEX;
hash_data->profile_index = _BCM_OAM_INVALID_INDEX;
hash_data->pri_map_index = _BCM_OAM_INVALID_INDEX;
hash_data->egr_pri_map_index = _BCM_OAM_INVALID_INDEX;
hash_data->outer_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
hash_data->subport_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
hash_data->inner_tpid_profile_index = _BCM_OAM_INVALID_INDEX;
hash_data->ma_base_index = _BCM_OAM_INVALID_INDEX;
hash_data->pm_profile_attached = _BCM_OAM_INVALID_INDEX;
sal_memcpy(hash_data->dst_mac_address, endpoint_info->dst_mac_address,
_BHH_MAC_ADDR_LENGTH);
sal_memcpy(hash_data->src_mac_address, endpoint_info->src_mac_address,
_BHH_MAC_ADDR_LENGTH);
hash_data->in_use = 1;
if(!BHH_EP_MPLS_SECTION_TYPE(endpoint_info)) {
/* Now that we have reserved the EP index, We can reserve MA INDEX */
rv = _bcm_kt2_oam_downmep_rx_endpoint_reserve(unit, endpoint_info);
if (BCM_FAILURE(rv)) {
/* Return endpoint index to MEP pool. */
shr_idxres_list_free(oc->bhh_pool, bhh_pool_ep_idx);
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: EP MA INDEX alloc"
"failed EP:%d %s.\n"), endpoint_info->id,
bcm_errmsg(rv)));
return (rv);
}
rv = _bcm_kt2_oam_local_rx_mep_hw_set(unit, endpoint_info);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Rx config failed for EP=%d %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
/* Enable BHH in the EGR_L3_NEXT_HOP table */
if (BCM_XGS3_DVP_EGRESS_IDX_VALID(unit, endpoint_info->intf_id)) {
egr_nh_index = endpoint_info->intf_id -
BCM_XGS3_DVP_EGRESS_IDX_MIN(unit);
}
else {
egr_nh_index = endpoint_info->intf_id -
BCM_XGS3_EGRESS_IDX_MIN(unit);
}
rv = soc_mem_read(unit, EGR_L3_NEXT_HOPm, MEM_BLOCK_ANY,
egr_nh_index, &egr_nh_entry);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: reading EGR_L3_NEXT_HOP,"
"for EP=%d %s.\n"), endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
soc_mem_field32_set(unit, EGR_L3_NEXT_HOPm, &egr_nh_entry, MPLS__BHH_ENABLEf, 1);
rv = soc_mem_write(unit, EGR_L3_NEXT_HOPm, MEM_BLOCK_ALL,
egr_nh_index, &egr_nh_entry);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: writing EGR_L3_NEXT_HOP,"
"for EP=%d %s.\n"), endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
} else {
if(hash_data->flags & BCM_OAM_ENDPOINT_LOSS_MEASUREMENT) {
shr_idxres_element_t ctr_index;
rv = _bcm_kt2_oam_bhh_sec_mep_alloc_counter(unit, &ctr_index);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: LM counter block alloc - %s.\n"),
bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
hash_data->rx_ctr = (0 << 24) | (ctr_index); /* Pool id 0 for ingress counters*/
hash_data->tx_ctr = (1 << 24) | (ctr_index); /* Pool id 1 for egress counters */
}
}
/* Get the Trunk, Port and Modid info for this Gport */
rv = _bcm_esw_gport_resolve(unit, tx_gport, &module_id, &port_id,
&trunk_id, &local_id);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: Gport resolve (EP=%d) - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
/* Get local port used for TX BHH packet */
rv = _bcm_esw_modid_is_local(unit, module_id, &is_local);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: local port for "
"BHH TX failed(EP=%d) - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
if (!is_local) { /* HG port */
rv = bcm_esw_stk_modport_get(unit, module_id, &port_id);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: HG port get failed "
"(EP=%d) - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
}
/* Create or Update */
session_flags = (is_replace) ? 0 : SHR_BHH_SESS_SET_F_CREATE;
/* Set Endpoint config entry */
hash_data->bhh_endpoint_index = ep_req_index;
/* Set Encapsulation in HW */
if ((!BHH_EP_MPLS_SECTION_TYPE(endpoint_info)) &&
((!is_replace) || (endpoint_info->flags & BCM_BHH_ENDPOINT_ENCAP_SET))) {
rv = _bcm_kt2_oam_bhh_encap_hw_set(unit, hash_data, module_id,
port_id, is_local, endpoint_info);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: HW encap set failed (EP=%d)"
" - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
}
encap = 1;
if(!(endpoint_info->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU)) {
/* Set control message data */
sal_memset(&msg_sess, 0, sizeof(msg_sess));
/*
* Set the BHH encapsulation data
*
* The function _bcm_kt2_oam_bhh_encap_create() is called first
* since this sets some fields in 'hash_data' which are
* used in the message.
*/
if (encap) {
rv = _bcm_kt2_oam_bhh_encap_create(unit,
port_id,
hash_data,
msg_sess.encap_data,
&msg_sess.encap_type,
&msg_sess.encap_length);
if(BCM_FAILURE(rv))
{
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: Endpoint destroy (EP=%d) - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
}
/*
* Endpoint can be one of: CCM, LB, LM or DM. The default is CCM,
* all others modes must be specified via a config flag.
*/
if (endpoint_info->flags & BCM_OAM_ENDPOINT_LOOPBACK)
session_flags |= SHR_BHH_SESS_SET_F_LB;
else if (endpoint_info->flags & BCM_OAM_ENDPOINT_DELAY_MEASUREMENT)
session_flags |= SHR_BHH_SESS_SET_F_DM;
else if (endpoint_info->flags & BCM_OAM_ENDPOINT_LOSS_MEASUREMENT)
session_flags |= SHR_BHH_SESS_SET_F_LM;
else
session_flags |= SHR_BHH_SESS_SET_F_CCM;
if (endpoint_info->flags & BCM_OAM_ENDPOINT_INTERMEDIATE)
session_flags |= SHR_BHH_SESS_SET_F_MIP;
_bcm_kt2_oam_get_group_sw_rdi (unit, endpoint_info->group,
&group_sw_rdi);
if (group_sw_rdi) {
session_flags |= SHR_BHH_SESS_SET_F_RDI;
}
/*if (!local_tx_enabled)
session_flags |= SHR_BHH_SESS_SET_F_PASSIVE;*/
msg_sess.sess_id = bhh_pool_ep_idx;
msg_sess.flags = session_flags;
msg_sess.mel = hash_data->level;
msg_sess.mep_id = hash_data->name;
sal_memcpy(msg_sess.meg_id, group_p->name, BCM_OAM_GROUP_NAME_LENGTH);
msg_sess.period = _kt2_ccm_intervals[
_bcm_kt2_oam_ccm_msecs_to_hw_encode(endpoint_info->ccm_period)];
msg_sess.if_num = endpoint_info->intf_id;
msg_sess.tx_port = port_id;
msg_sess.tx_cos = endpoint_info->int_pri;
msg_sess.tx_pri = endpoint_info->pkt_pri;
msg_sess.tx_qnum = SOC_INFO(unit).port_uc_cosq_base[port_id] + endpoint_info->int_pri;
msg_sess.lm_counter_index
= hash_data->tx_ctr;
msg_sess.mpls_label = endpoint_info->mpls_label;
switch (endpoint_info->type) {
case bcmOAMEndpointTypeBHHMPLS:
msg_sess.endpoint_type = _SHR_BHH_EP_TYPE_LSP;
msg_sess.priority = endpoint_info->mpls_exp;
break;
case bcmOAMEndpointTypeBHHMPLSVccv:
switch(endpoint_info->vccv_type) {
case bcmOamBhhVccvChannelAch:
msg_sess.endpoint_type = _SHR_BHH_EP_TYPE_PW_VCCV_1;
msg_sess.priority = endpoint_info->mpls_exp;
break;
case bcmOamBhhVccvRouterAlert:
msg_sess.endpoint_type = _SHR_BHH_EP_TYPE_PW_VCCV_2;
msg_sess.priority = endpoint_info->mpls_exp;
break;
case bcmOamBhhVccvTtl:
msg_sess.endpoint_type = _SHR_BHH_EP_TYPE_PW_VCCV_3;
msg_sess.priority = endpoint_info->mpls_exp;
break;
case bcmOamBhhVccvGal13:
msg_sess.endpoint_type = _SHR_BHH_EP_TYPE_PW_VCCV_4;
msg_sess.priority = endpoint_info->mpls_exp;
break;
default:
return BCM_E_PARAM;
break;
}
break;
case bcmOAMEndpointTypeBhhSection:
msg_sess.endpoint_type = _SHR_BHH_EP_TYPE_PORT_SECTION;
msg_sess.priority = 0;
break;
case bcmOAMEndpointTypeBhhSectionInnervlan:
msg_sess.endpoint_type = _SHR_BHH_EP_TYPE_PORT_SECTION_INNERVLAN;
msg_sess.priority = endpoint_info->inner_pkt_pri;
break;
case bcmOAMEndpointTypeBhhSectionOuterVlan:
msg_sess.endpoint_type = _SHR_BHH_EP_TYPE_PORT_SECTION_OUTERVLAN;
msg_sess.priority = endpoint_info->pkt_pri;
break;
case bcmOAMEndpointTypeBhhSectionOuterPlusInnerVlan:
msg_sess.endpoint_type = _SHR_BHH_EP_TYPE_PORT_SECTION_OUTER_PLUS_INNERVLAN;
msg_sess.priority = endpoint_info->pkt_pri;
break;
default:
return BCM_E_PARAM;
}
/* Pack control message data into DMA buffer */
buffer = oc->dma_buffer;
buffer_ptr = bhh_sdk_msg_ctrl_sess_set_pack(buffer, &msg_sess);
buffer_len = buffer_ptr - buffer;
/* Send BHH Session Update message to uC */
rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
MOS_MSG_SUBCLASS_BHH_SESS_SET,
buffer_len, 0,
MOS_MSG_SUBCLASS_BHH_SESS_SET_REPLY,
&reply_len);
if (BCM_FAILURE(rv) || (reply_len != 0)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: Endpoint destroy (EP=%d) - %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
}
if (!is_replace) {
rv = _bcm_kt2_oam_group_ep_list_add(unit, endpoint_info->group,
endpoint_info->id);
}
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Tx config failed for EP=%d %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
rv = _bcm_kt2_oam_ma_idx_to_ep_id_mapping_add(unit, hash_data);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM(unit %d) Error: Add mapping from ma_idx_to_ep_id list (EP=%d) -"
" %s.\n"), unit, hash_data->ep_id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
/* hash collision */
rv = shr_htb_insert(oc->ma_mep_htbl, hash_key, hash_data);
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"OAM Error: Hash table insert failed EP=%d %s.\n"),
endpoint_info->id, bcm_errmsg(rv)));
_bcm_kt2_oam_endpoint_cleanup(unit, 0, *hash_key, hash_data);
return (rv);
}
}
hash_data->in_use = 1;
#ifdef BCM_WARM_BOOT_SUPPORT
SOC_CONTROL_LOCK(unit);
SOC_CONTROL(unit)->scache_dirty = 1;
SOC_CONTROL_UNLOCK(unit);
#endif
return (rv);
}
/*
* Function:
* _bcm_kt2_oam_bhh_hw_init
* Purpose:
* Initialize the HW for BHH packet processing.
* Configure:
* - Copy to CPU BHH error packets
* - CPU COS Queue for BHH packets
* - RX DMA channel
* Parameters:
* unit - (IN) Unit number.
* Returns:
* BCM_E_XXX
* Notes:
*/
STATIC int
_bcm_kt2_oam_bhh_hw_init(int unit)
{
int rv = BCM_E_NONE;
_bcm_oam_control_t *oc; /* OAM control structure. */
uint32 val=0;
int index;
int ach_error_index;
int invalid_error_index;
int bhh_lb_index;
int cosq_map_size;
bcm_rx_reasons_t reasons, reasons_mask;
uint8 int_prio, int_prio_mask;
uint32 packet_type, packet_type_mask;
bcm_cos_queue_t cosq;
bcm_rx_chan_t chan_id;
int num_cosq = 0;
int min_cosq, max_cosq;
int i;
/* Get OAM Control Structure. */
BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
_BCM_OAM_LOCK(oc);
/*
* Send BHH lookup failure packet to CPU
* Configure CPU_CONTROL_M register
* Field BHH_SESSION_NOT_FOUND_TO_CPU
*/
rv = READ_CPU_CONTROL_Mr(unit, &val);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error:hw init. Read CPU Control Reg %s.\n"),
bcm_errmsg(rv)));
_BCM_OAM_UNLOCK(oc);
return (rv);
}
/* Steer lookup failure packet to CPU */
soc_reg_field_set(unit, CPU_CONTROL_Mr, &val,
BHH_SESSION_NOT_FOUND_TO_CPUf, 1);
rv = WRITE_CPU_CONTROL_Mr(unit, val);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error:hw init. Write CPU_CONTROL_M Reg %s.\n"),
bcm_errmsg(rv)));
_BCM_OAM_UNLOCK(oc);
return (rv);
}
/* Set BHH ACH Type in BHH_RX_ACH_TYPE register */
rv = WRITE_BHH_RX_ACH_TYPEr(unit, SHR_BHH_ACH_CHANNEL_TYPE);
if(oc->ukernel_not_ready == 0){
/*
* Get COSQ for BHH
*/
min_cosq = 0;
for (i = 0; i < SOC_CMCS_NUM(unit); i++) {
num_cosq = NUM_CPU_ARM_COSQ(unit, i);
if (i == oc->uc_num + 1) {
break;
}
min_cosq += num_cosq;
}
max_cosq = min_cosq + num_cosq - 1;
if(max_cosq < min_cosq) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: "
"No BHH COS Queue available from uC%d - %s\n"),
oc->uc_num, bcm_errmsg(BCM_E_CONFIG)));
_BCM_OAM_UNLOCK(oc);
return (BCM_E_CONFIG);
}
/* check user configured COSq */
if (oc->cpu_cosq != BHH_COSQ_INVALID) {
if ((oc->cpu_cosq < min_cosq) ||
(oc->cpu_cosq > max_cosq)) {
_BCM_OAM_UNLOCK(oc);
return (BCM_E_CONFIG);
}
min_cosq = max_cosq = oc->cpu_cosq;
}
/*
* Assign RX DMA channel to CPU COS Queue
* (This is the RX channel to listen on for BHH packets).
*
* DMA channels (12) are assigned 4 per processor:
* (see /src/bcm/common/rx.c)
* channels 0..3 --> PCI host
* channels 4..7 --> uController 0
* chnanels 8..11 --> uController 1
*
* The uControllers designate the 4 local DMA channels as follows:
* local channel 0 --> TX
* local channel 1..3 --> RX
*
* Each uController application needs to use a different
* RX DMA channel to listen on.
*/
chan_id = (BCM_RX_CHANNELS * (SOC_ARM_CMC(unit, oc->uc_num))) +
oc->rx_channel;
for (i = max_cosq; i >= min_cosq; i--) {
rv = _bcm_common_rx_queue_channel_set(unit, i, chan_id);
if(BCM_SUCCESS(rv)) {
oc->cpu_cosq = i;
break;
}
}
if (i < min_cosq) {
rv = BCM_E_RESOURCE;
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error: hw init."
" queue channel set %s.\n"),
bcm_errmsg(rv)));
_BCM_OAM_UNLOCK(oc);
return (rv);
}
/*
* Direct BHH packets to designated CPU COS Queue...or more accurately
* BFD error packets.
*
* Reasons:
* - bcmRxReasonBFD: BFD error
* - bcmRxReasonBfdUnknownVersion: BFD Unknown ACH
* NOTE:
* The user input 'cpu_qid' (bcm_BHH_endpoint_t) could be
* used to select different CPU COS queue. Currently,
* all priorities are mapped into the same CPU COS Queue.
*/
/* Find available entries in CPU COS queue map table */
ach_error_index = -1; /* COSQ map index for error packets */
invalid_error_index = -1; /* COSQ map index for error packets */
bhh_lb_index = -1; /* COSQ map index for LB packets */
rv = bcm_esw_rx_cosq_mapping_size_get(unit, &cosq_map_size);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error:hw init. cosq maps size %s.\n"),
bcm_errmsg(rv)));
_BCM_OAM_UNLOCK(oc);
return (rv);
}
for (index = cosq_map_size-1; index >= 0; index--) {
rv = bcm_esw_rx_cosq_mapping_get(unit, index,
&reasons, &reasons_mask,
&int_prio, &int_prio_mask,
&packet_type, &packet_type_mask,
&cosq);
if (rv == BCM_E_NOT_FOUND) {
/* Assign first available index before default entries*/
rv = BCM_E_NONE;
if (ach_error_index == -1) {
ach_error_index = index;
} else if (invalid_error_index == -1) {
invalid_error_index = index;
} else if (bhh_lb_index == -1) {
bhh_lb_index = index;
break;
}
}
if (BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error:hw init. cosq maps get %s.\n"),
bcm_errmsg(rv)));
_BCM_OAM_UNLOCK(oc);
return (rv);
}
}
if (ach_error_index == -1 || invalid_error_index == -1) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error:hw init. ACH error %s.\n"),
bcm_errmsg(rv)));
_BCM_OAM_UNLOCK(oc);
return (BCM_E_FULL);
}
/* Set CPU COS Queue mapping */
BCM_RX_REASON_CLEAR_ALL(reasons);
BCM_RX_REASON_SET(reasons, bcmRxReasonBHHOAM); /* BHH Packet */
BCM_RX_REASON_SET(reasons, bcmRxReasonOAMCCMSlowpath); /*OAM Slowpath CCM*/
rv = bcm_esw_rx_cosq_mapping_set(unit, ach_error_index,
reasons, reasons,
0, 0, /* Any Internal Prio */
0, 0, /* Any packet type */
oc->cpu_cosq);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error:hw init. cosq map set %s.\n"),
bcm_errmsg(rv)));
_BCM_OAM_UNLOCK(oc);
return (rv);
}
oc->cpu_cosq_ach_error_index = ach_error_index;
BCM_RX_REASON_CLEAR_ALL(reasons);
BCM_RX_REASON_SET(reasons, bcmRxReasonBHHOAM); /* BHH Packet */
BCM_RX_REASON_SET(reasons, bcmRxReasonOAMLMDM); /*OAM Slowpath(LB/LBR)*/
rv = bcm_esw_rx_cosq_mapping_set(unit, invalid_error_index,
reasons, reasons,
0, 0, /* Any Internal Prio */
0, 0, /* Any packet type */
oc->cpu_cosq);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH Error:hw init. cosq map set %s.\n"),
bcm_errmsg(rv)));
_BCM_OAM_UNLOCK(oc);
return (rv);
}
oc->cpu_cosq_invalid_error_index = invalid_error_index;
BCM_RX_REASON_CLEAR_ALL(reasons);
BCM_RX_REASON_SET(reasons, bcmRxReasonBHHOAM); /* BHH Packet */
BCM_RX_REASON_SET(reasons, bcmRxReasonOAMSlowpath); /*OAM Slowpath */
rv = bcm_esw_rx_cosq_mapping_set(unit, bhh_lb_index,
reasons, reasons,
0, 0, /* Any Internal Prio */
0, 0, /* Any packet type */
oc->cpu_cosq);
if(BCM_FAILURE(rv)) {
LOG_ERROR(BSL_LS_BCM_OAM,
(BSL_META_U(unit,
"BHH(unit %d) Error:hw init. cosq map set for"
" Loopback %s.\n"),
unit, bcm_errmsg(rv)));
_BCM_OAM_UNLOCK(oc);
return (rv);
}
oc->bhh_lb_index = bhh_lb_index;
}
_BCM_OAM_UNLOCK(oc);
return (rv);
}
/*
* Function:
* _bcm_kt2_oam_bhh_session_hw_delete
* Purpose:
* Delete BHH Session in HW device.
* Parameters:
* unit - (IN) Unit number.
* h_data_p- (IN) Pointer to BHH endpoint structure.
* Returns:
* BCM_E_XXX
* Notes:
*/
STATIC int
_bcm_kt2_oam_bhh_session_hw_delete(int unit, _bcm_oam_hash_data_t *h_data_p)
{
    int rv = BCM_E_NONE;
#if defined(BCM_KATANA2_SUPPORT) && defined(BCM_MPLS_SUPPORT)
    mpls_entry_entry_t mpls_entry;   /* Entry returned by the search. */
    mpls_entry_entry_t mpls_key;     /* Search key (label only).      */
    int mpls_index = 0;              /* Table index of the hit entry. */
#endif /* BCM_KATANA2_SUPPORT && BCM_MPLS_SUPPORT */

    /* Only LSP and PW (VCCV) endpoints have MPLS_ENTRY state to undo. */
    switch(h_data_p->type) {
    case bcmOAMEndpointTypeBHHMPLS:
    case bcmOAMEndpointTypeBHHMPLSVccv:
#if defined(BCM_KATANA2_SUPPORT) && defined(BCM_MPLS_SUPPORT)
        SOC_IF_ERROR_RETURN(bcm_tr_mpls_lock (unit));

        /* Look up the MPLS_ENTRY keyed by the endpoint's label. */
        sal_memset(&mpls_key, 0, sizeof(mpls_key));
        soc_MPLS_ENTRYm_field32_set(unit, &mpls_key,
                                    MPLS__MPLS_LABELf, h_data_p->label);
        rv = soc_mem_search(unit, MPLS_ENTRYm, MEM_BLOCK_ANY, &mpls_index,
                            &mpls_key, &mpls_entry, 0);
        /* It can so happen that this function is called when the switch is
           re-initailizing, then MPLS ENTRY will not be found, as MPLS init
           happens before OAM init and it would have cleared the MPLS Entry, as
           such its not an error scenario. Hence dont return failure from here.*/
        if (BCM_SUCCESS(rv)) {
            if ((soc_MPLS_ENTRYm_field32_get(unit, &mpls_entry, VALIDf) == 0x1))
            {
                /* Clear OAM processing on the label entry. */
                soc_MPLS_ENTRYm_field32_set(unit, &mpls_entry,
                                            MPLS__BFD_ENABLEf, 0);
                rv = soc_mem_write(unit, MPLS_ENTRYm, MEM_BLOCK_ANY,
                                   mpls_index, &mpls_entry);
                if (BCM_FAILURE(rv)) {
                    LOG_ERROR(BSL_LS_BCM_OAM,
                              (BSL_META_U(unit,
                                          "OAM Error: MPLS_ENTRY write failed - "
                                          "%s.\n"), bcm_errmsg(rv)));
                    bcm_tr_mpls_unlock(unit);
                    return (rv);
                }
            }
        } else if (rv == BCM_E_NOT_FOUND) {
            /* FIX: per the comment above, a missing entry during re-init is
             * not an error, but the old code still propagated
             * BCM_E_NOT_FOUND to the caller.  Normalize it to success. */
            rv = BCM_E_NONE;
        }
        bcm_tr_mpls_unlock (unit);
#endif /* BCM_KATANA2_SUPPORT && BCM_MPLS_SUPPORT */
        break;
    default:
        break;
    }
    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_bhh_msg_send_receive
* Purpose:
* Sends given BHH control message to the uController.
* Receives and verifies expected reply.
* Performs DMA operation if required.
* Parameters:
* unit - (IN) Unit number.
* s_subclass - (IN) BHH message subclass.
* s_len - (IN) Value for 'len' field in message struct.
* Length of buffer to flush if DMA send is required.
* s_data - (IN) Value for 'data' field in message struct.
* Ignored if message requires a DMA send/receive
* operation.
* r_subclass - (IN) Expected reply message subclass.
* r_len - (OUT) Returns value in 'len' reply message field.
* Returns:
* BCM_E_XXX
* Notes:
* - The uc_msg 'len' and 'data' fields of mos_msg_data_t
* can take any arbitrary data.
*
* BHH Long Control message:
* - BHH control messages that require send/receive of information
* that cannot fit in the uc_msg 'len' and 'data' fields need to
* use DMA operations to exchange information (long control message).
*
* - BHH convention for long control messages for
* 'mos_msg_data_t' fields:
* 'len' size of the DMA buffer to send to uController
* 'data' physical DMA memory address to send or receive
*
* DMA Operations:
* - DMA read/write operation is performed when a long BHH control
* message is involved.
*
* - Messages that require DMA operation (long control message)
* is indicated by MOS_MSG_DMA_MSG().
*
* - Callers must 'pack' and 'unpack' corresponding information
* into/from DMA buffer indicated by BHH_INFO(unit)->dma_buffer.
*/
STATIC int
_bcm_kt2_oam_bhh_msg_send_receive(int unit, uint8 s_subclass,
                                  uint16 s_len, uint32 s_data,
                                  uint8 r_subclass, uint16 *r_len)
{
    _bcm_oam_control_t *oc;
    mos_msg_data_t tx_msg, rx_msg;   /* Request / reply control messages. */
    uint8 *dma_buf;
    int dma_buf_len;
    uint32 uc_status;
    int rv;

    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* Build the request header. */
    sal_memset(&tx_msg, 0, sizeof(tx_msg));
    sal_memset(&rx_msg, 0, sizeof(rx_msg));
    tx_msg.s.mclass   = MOS_MSG_CLASS_BHH;
    tx_msg.s.subclass = s_subclass;
    tx_msg.s.len      = bcm_htons(s_len);

    /*
     * Long control messages exchange their payload through the shared DMA
     * buffer; in that case 'data' carries the buffer's physical address.
     * Short messages carry the caller-supplied word directly.
     */
    dma_buf     = oc->dma_buffer;
    dma_buf_len = oc->dma_buffer_len;
    if (MOS_MSG_DMA_MSG(s_subclass) ||
        MOS_MSG_DMA_MSG(r_subclass)) {
        tx_msg.s.data = bcm_htonl(soc_cm_l2p(unit, dma_buf));
    } else {
        tx_msg.s.data = bcm_htonl(s_data);
    }

    /* Flush outbound payload / invalidate cache for inbound payload. */
    if (MOS_MSG_DMA_MSG(s_subclass)) {
        soc_cm_sflush(unit, dma_buf, s_len);
    }
    if (MOS_MSG_DMA_MSG(r_subclass)) {
        soc_cm_sinval(unit, dma_buf, dma_buf_len);
    }

    rv = soc_cmic_uc_msg_send_receive(unit, oc->uc_num,
                                      &tx_msg, &rx_msg,
                                      _BHH_UC_MSG_TIMEOUT_USECS);

    /* The reply must come back on the BHH class with the expected subclass. */
    if ((rv != SOC_E_NONE) ||
        (rx_msg.s.mclass != MOS_MSG_CLASS_BHH) ||
        (rx_msg.s.subclass != r_subclass)) {
        return (BCM_E_INTERNAL);
    }

    /* Translate the uController status word into a BCM error code. */
    uc_status = bcm_ntohl(rx_msg.s.data);
    if (uc_status == SHR_BHH_UC_E_NONE) {
        rv = BCM_E_NONE;
    } else if (uc_status == SHR_BHH_UC_E_INTERNAL) {
        rv = BCM_E_INTERNAL;
    } else if (uc_status == SHR_BHH_UC_E_MEMORY) {
        rv = BCM_E_MEMORY;
    } else if (uc_status == SHR_BHH_UC_E_PARAM) {
        rv = BCM_E_PARAM;
    } else if (uc_status == SHR_BHH_UC_E_RESOURCE) {
        rv = BCM_E_RESOURCE;
    } else if (uc_status == SHR_BHH_UC_E_EXISTS) {
        rv = BCM_E_EXISTS;
    } else if (uc_status == SHR_BHH_UC_E_NOT_FOUND) {
        rv = BCM_E_NOT_FOUND;
    } else if (uc_status == SHR_BHH_UC_E_INIT) {
        rv = BCM_E_INIT;
    } else {
        rv = BCM_E_INTERNAL;
    }

    *r_len = bcm_ntohs(rx_msg.s.len);

    return (rv);
}
/*
* Function:
* _bcm_kt2_oam_bhh_callback_thread
* Purpose:
* Thread to listen for event messages from uController.
* Parameters:
* param - Pointer to BFD info structure.
* Returns:
* None
*/
STATIC void
_bcm_kt2_oam_bhh_callback_thread(void *param)
{
    int rv;
    _bcm_oam_control_t *oc = (_bcm_oam_control_t *)param;
    bcm_oam_event_types_t events;          /* Bitset of events to dispatch. */
    bcm_oam_event_type_t event_type;
    bhh_msg_event_t event_msg;             /* Raw event message from the uC. */
    int sess_id;                           /* uKernel BHH session id. */
    uint32 event_mask, flags = 0;
    _bcm_oam_event_handler_t *event_handler_p;
    _bcm_oam_hash_data_t *h_data_p;
    int ep_id = 0;                         /* SDK endpoint id derived from sess_id. */
    char thread_name[SAL_THREAD_NAME_MAX_LEN];
    thread_name[0] = 0;
    sal_thread_name(oc->event_thread_id, thread_name, sizeof (thread_name));
    /* Event loop: block on the uC message queue, translate each received
     * BHH event mask into BCM OAM events, and invoke registered callbacks.
     * The loop exits (and the thread dies) when the receive fails. */
    while (1) {
        /* Wait on notifications from uController */
        rv = soc_cmic_uc_msg_receive(oc->unit, oc->uc_num,
                                     MOS_MSG_CLASS_BHH_EVENT, &event_msg,
                                     sal_sem_FOREVER);
        if (BCM_FAILURE(rv)) {
            break;   /*  Thread exit */
        }
        event_handler_p = oc->event_handler_list_p;
        /* Get data from event message: 'len' carries the session id,
         * 'data' carries the event mask (both in network byte order). */
        sess_id = (int)bcm_ntohs(event_msg.s.len);
        ep_id = BCM_OAM_BHH_GET_SDK_EP(sess_id);
        if (sess_id < 0 ||
            sess_id >= oc->ep_count) {
            LOG_CLI((BSL_META_U(oc->unit,
                                "Invalid sess_id:%d \n"), sess_id));
            continue;
        }
        /* Check endpoint status. */
        rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
        if ((BCM_E_EXISTS != rv)) {
            /* Endpoint not in use.
             * NOTE(review): this branch only logs and does NOT 'continue';
             * events for an unused endpoint are still dispatched below.
             * Confirm whether that is intentional. */
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "OAM Error: Endpoint EP=%d %s.\n"),
                       ep_id, bcm_errmsg(rv)));
        }
        h_data_p = &oc->oam_hash_data[ep_id];
        event_mask = bcm_ntohl(event_msg.s.data);
        /* Set events: translate each uC mask bit into the corresponding
         * BCM OAM event, but only if at least one handler is registered
         * for that event (event_handler_cnt[] gate). */
        sal_memset(&events, 0, sizeof(events));
        if (event_mask & BHH_BTE_EVENT_LB_TIMEOUT) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM BHH LB Timeout ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBHHLBTimeout] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHLBTimeout);
        }
        if (event_mask & BHH_BTE_EVENT_LB_DISCOVERY_UPDATE) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM BHH LB Discovery Update ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBHHLBDiscoveryUpdate] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHLBDiscoveryUpdate);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_TIMEOUT) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM BHH CCM Timeout ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMTimeout] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMTimeout);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_TIMEOUT_CLEAR) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM BHH CCM Timeout Clear ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMTimeoutClear] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMTimeoutClear);
        }
        if (event_mask & BHH_BTE_EVENT_STATE) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM BHH State ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMState] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMState);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_RDI) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM BHH CCM RDI ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMRdi] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMRdi);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_LEVEL) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM BHH CCM Unknown MEG LEVEL ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownMegLevel] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownMegLevel);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_ID) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM BHH CCM Unknown MEG ID ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownMegId] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownMegId);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_MEP_ID) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM BHH CCM Unknown MEP ID ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownMepId] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownMepId);
        }
        /* The remaining CCM defect / clear events have no debug trace. */
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_PERIOD) {
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownPeriod] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownPeriod);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_PRIORITY) {
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownPriority] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownPriority);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_RDI_CLEAR) {
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMRdiClear] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMRdiClear);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_LEVEL_CLEAR) {
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownMegLevelClear] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownMegLevelClear);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_ID_CLEAR) {
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownMegIdClear] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownMegIdClear);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_MEP_ID_CLEAR) {
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownMepIdClear] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownMepIdClear);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_PERIOD_CLEAR) {
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownPeriodClear] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownPeriodClear);
        }
        if (event_mask & BHH_BTE_EVENT_CCM_UNKNOWN_PRIORITY_CLEAR) {
            if (oc->event_handler_cnt[bcmOAMEventBHHCCMUnknownPriorityClear] > 0)
                SHR_BITSET(events.w, bcmOAMEventBHHCCMUnknownPriorityClear);
        }
        /* Client Signal Fail events. */
        if (event_mask & BHH_BTE_EVENT_CSF_LOS) {
            if (oc->event_handler_cnt[bcmOAMEventCsfLos] > 0) {
                SHR_BITSET(events.w, bcmOAMEventCsfLos);
            }
        }
        if (event_mask & BHH_BTE_EVENT_CSF_FDI) {
            if (oc->event_handler_cnt[bcmOAMEventCsfFdi] > 0) {
                SHR_BITSET(events.w, bcmOAMEventCsfFdi);
            }
        }
        if (event_mask & BHH_BTE_EVENT_CSF_RDI) {
            if (oc->event_handler_cnt[bcmOAMEventCsfRdi] > 0) {
                SHR_BITSET(events.w, bcmOAMEventCsfRdi);
            }
        }
        if (event_mask & BHH_BTE_EVENT_CSF_DCI) {
            if (oc->event_handler_cnt[bcmOAMEventCsfDci] > 0) {
                SHR_BITSET(events.w, bcmOAMEventCsfDci);
            }
        }
        if (event_mask & BHH_BTE_EVENT_PM_STATS_COUNTER_ROLLOVER) {
            LOG_DEBUG(BSL_LS_BCM_OAM,
                      (BSL_META_U(oc->unit,
                                  "****** OAM PM BHH Counter Rollover ******\n")));
            if (oc->event_handler_cnt[bcmOAMEventBhhPmCounterRollover] > 0) {
                SHR_BITSET(events.w, bcmOAMEventBhhPmCounterRollover);
            }
        }
        /* Loop over registered callbacks,
         * If any match the events field, then invoke
         */
        for (event_handler_p = oc->event_handler_list_p;
             event_handler_p != NULL;
             event_handler_p = event_handler_p->next_p) {
            for (event_type = bcmOAMEventBHHLBTimeout; event_type < bcmOAMEventCount; ++event_type) {
                if (SHR_BITGET(events.w, event_type)) {
                    if (SHR_BITGET(event_handler_p->event_types.w,
                                   event_type)) {
                        /* Callback signature: (unit, flags, event, group,
                         * endpoint, user_data).  'flags' is always 0 here. */
                        event_handler_p->cb(oc->unit,
                                            flags,
                                            event_type,
                                            h_data_p->group_index, /* Group index */
                                            ep_id, /* Endpoint index */
                                            event_handler_p->user_data);
                    }
                }
            }
        }
    }
    /* Receive failed: clear the thread handle and terminate. */
    oc->event_thread_id = NULL;
    LOG_VERBOSE(BSL_LS_BCM_OAM,
                (BSL_META_U(oc->unit,
                            "Thread Exit:%s\n"), thread_name));
    sal_thread_exit(0);
}
/* BHH Event Handler list node.
 * NOTE(review): this local type parallels _bcm_oam_event_handler_t (which the
 * dispatch code above actually uses); confirm it is still referenced. */
typedef struct _event_handler_s {
    struct _event_handler_s *next;     /* Next handler in the singly linked list. */
    bcm_oam_event_types_t event_types; /* Bitset of events this handler wants. */
    bcm_oam_event_cb cb;               /* User callback to invoke. */
    void *user_data;                   /* Opaque cookie passed back to cb. */
} _event_handler_t;
/*
* Function:
* _bcm_kt2_oam_bhh_event_mask_set
* Purpose:
* Set the BHH Events mask.
* Events are set per BHH module.
* Parameters:
* unit - (IN) Unit number.
* Returns:
* BCM_E_NONE Operation completed successfully
* BCM_E_XXX Operation failed
* Notes:
*/
STATIC int
_bcm_kt2_oam_bhh_event_mask_set(int unit)
{
_bcm_oam_control_t *oc;
_bcm_oam_event_handler_t *event_handler_p;
uint32 event_mask = 0;
uint16 reply_len;
int rv = BCM_E_NONE;
/* Lock already taken by the calling routine. */
BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
/* Get event mask from all callbacks */
for (event_handler_p = oc->event_handler_list_p;
event_handler_p != NULL;
event_handler_p = event_handler_p->next_p) {
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHLBTimeout)) {
event_mask |= BHH_BTE_EVENT_LB_TIMEOUT;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHLBDiscoveryUpdate)) {
event_mask |= BHH_BTE_EVENT_LB_DISCOVERY_UPDATE;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMTimeout)) {
event_mask |= BHH_BTE_EVENT_CCM_TIMEOUT;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMTimeoutClear)) {
event_mask |= BHH_BTE_EVENT_CCM_TIMEOUT_CLEAR;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMState)) {
event_mask |= BHH_BTE_EVENT_STATE;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMRdi)) {
event_mask |= BHH_BTE_EVENT_CCM_RDI;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownMegLevel)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_MEG_LEVEL;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownMegId)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_MEG_ID;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownMepId)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_MEP_ID;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownPeriod)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_PERIOD;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownPriority)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_PRIORITY;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMRdiClear)) {
event_mask |= BHH_BTE_EVENT_CCM_RDI_CLEAR;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownMegLevelClear)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_MEG_LEVEL_CLEAR;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownMegIdClear)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_MEG_ID_CLEAR;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownMepIdClear)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_MEP_ID_CLEAR;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownPeriodClear)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_PERIOD_CLEAR;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBHHCCMUnknownPriorityClear)) {
event_mask |= BHH_BTE_EVENT_CCM_UNKNOWN_PRIORITY_CLEAR;
}
if (SHR_BITGET(event_handler_p->event_types.w, bcmOAMEventCsfLos)) {
event_mask |= BHH_BTE_EVENT_CSF_LOS;
}
if (SHR_BITGET(event_handler_p->event_types.w, bcmOAMEventCsfFdi)) {
event_mask |= BHH_BTE_EVENT_CSF_FDI;
}
if (SHR_BITGET(event_handler_p->event_types.w, bcmOAMEventCsfRdi)) {
event_mask |= BHH_BTE_EVENT_CSF_RDI;
}
if (SHR_BITGET(event_handler_p->event_types.w, bcmOAMEventCsfDci)) {
event_mask |= BHH_BTE_EVENT_CSF_DCI;
}
if (SHR_BITGET(event_handler_p->event_types.w,
bcmOAMEventBhhPmCounterRollover)) {
event_mask |= BHH_BTE_EVENT_PM_STATS_COUNTER_ROLLOVER;
}
}
/* Update BHH event mask in uKernel */
if (event_mask != oc->event_mask) {
/* Send BHH Event Mask message to uC */
rv = _bcm_kt2_oam_bhh_msg_send_receive
(unit,
MOS_MSG_SUBCLASS_BHH_EVENT_MASK_SET,
0, event_mask,
MOS_MSG_SUBCLASS_BHH_EVENT_MASK_SET_REPLY,
&reply_len);
if(BCM_SUCCESS(rv) && (reply_len != 0)) {
rv = BCM_E_INTERNAL;
}
}
oc->event_mask = event_mask;
return (rv);
}
/*
* Function:
* bcm_kt2_oam_loopback_add
* Purpose:
*
* Parameters:
* unit - Device unit number
* Returns:
* None
*/
int
bcm_kt2_oam_loopback_add(int unit, bcm_oam_loopback_t *loopback_p)
{
    bhh_sdk_msg_ctrl_loopback_add_t lb_msg;   /* uC LOOPBACK_ADD message.  */
    _bcm_oam_hash_data_t *h_data_p;
    _bcm_oam_control_t *oc;
    uint8 *dma_buf, *pack_end;
    uint16 pack_len, reply_len;
    uint32 uc_flags = 0;                      /* uKernel-space flag word.  */
    int sess_id = 0;
    int rv = BCM_E_NONE;

    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    /* NOTE(review): if this macro returns early on a bad id, the OAM lock
     * is still held — confirm the macro's semantics. */
    BCM_OAM_BHH_VALIDATE_EP(loopback_p->id);
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(loopback_p->id);

    /* The endpoint must already exist in the BHH session pool. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if (BCM_E_EXISTS != rv) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   loopback_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }

    h_data_p = &oc->oam_hash_data[loopback_p->id];

    if (h_data_p->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }

    /* Only BHH endpoint types are supported. */
    if (!BHH_EP_TYPE(h_data_p)) {
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_UNAVAIL);
    }

    /* Translate host-space loopback flags into uKernel-space flags. */
    if (loopback_p->flags & BCM_OAM_LOOPBACK_TX_ENABLE) {
        uc_flags |= BCM_BHH_TX_ENABLE;
    }
    if (loopback_p->flags & BCM_OAM_BHH_INC_REQUESTING_MEP_TLV) {
        uc_flags |= BCM_BHH_INC_REQUESTING_MEP_TLV;
    }
    /* Exactly one LBM target TLV is selected; ICC MEP is the default. */
    if (loopback_p->flags & BCM_OAM_BHH_LBM_INGRESS_DISCOVERY_MEP_TLV) {
        uc_flags |= BCM_BHH_LBM_INGRESS_DISCOVERY_MEP_TLV;
    } else if (loopback_p->flags & BCM_OAM_BHH_LBM_EGRESS_DISCOVERY_MEP_TLV) {
        uc_flags |= BCM_BHH_LBM_EGRESS_DISCOVERY_MEP_TLV;
    } else if (loopback_p->flags & BCM_OAM_BHH_LBM_ICC_MEP_TLV) {
        uc_flags |= BCM_BHH_LBM_ICC_MEP_TLV;
    } else if (loopback_p->flags & BCM_OAM_BHH_LBM_ICC_MIP_TLV) {
        uc_flags |= BCM_BHH_LBM_ICC_MIP_TLV;
    } else {
        uc_flags |= BCM_BHH_LBM_ICC_MEP_TLV; /* Default */
    }
    if (loopback_p->flags & BCM_OAM_BHH_LBR_ICC_MEP_TLV) {
        uc_flags |= BCM_BHH_LBR_ICC_MEP_TLV;
    } else if (loopback_p->flags & BCM_OAM_BHH_LBR_ICC_MIP_TLV) {
        uc_flags |= BCM_BHH_LBR_ICC_MIP_TLV;
    }

    /* Build the loopback-add control message. */
    sal_memset(&lb_msg, 0, sizeof(lb_msg));
    lb_msg.flags   = uc_flags;
    lb_msg.sess_id = sess_id;
    lb_msg.int_pri = loopback_p->int_pri;
    lb_msg.pkt_pri = loopback_p->pkt_pri;
    /* LBM period uses the same HW encoding table as CCM intervals. */
    lb_msg.period  = _kt2_ccm_intervals[
                         _bcm_kt2_oam_ccm_msecs_to_hw_encode(
                             loopback_p->period)];

    /* TTL must fit in one octet and may not be zero. */
    if (loopback_p->ttl == 0 || loopback_p->ttl > 255) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }
    if (BHH_EP_MPLS_SECTION_TYPE(h_data_p) ||
        (h_data_p->type == bcmOAMEndpointTypeBHHMPLSVccv &&
         h_data_p->vccv_type == bcmOamBhhVccvTtl)) {
        lb_msg.ttl = 1; /* Only TTL 1 is valid */
    } else {
        lb_msg.ttl = loopback_p->ttl;
    }

    /* Pack the message into the shared DMA buffer and hand it to the uC. */
    dma_buf  = oc->dma_buffer;
    pack_end = bhh_sdk_msg_ctrl_loopback_add_pack(dma_buf, &lb_msg);
    pack_len = pack_end - dma_buf;

    rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                           MOS_MSG_SUBCLASS_BHH_LOOPBACK_ADD,
                                           pack_len, 0,
                                           MOS_MSG_SUBCLASS_BHH_LOOPBACK_ADD_REPLY,
                                           &reply_len);
    if (BCM_SUCCESS(rv) && (reply_len != 0)) {
        rv = BCM_E_INTERNAL;
    }

    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Function:
 *     bcm_kt2_oam_loopback_get
 * Purpose:
 *     Read back the loopback (LB) session configuration, discovery
 *     results and counters of a BHH endpoint from the uController.
 * Parameters:
 *     unit       - Device unit number
 *     loopback_p - (IN/OUT) loopback_p->id selects the endpoint; on
 *                  success the remaining fields are filled from the
 *                  uController reply.
 * Returns:
 *     BCM_E_NONE     - Success
 *     BCM_E_INIT     - uKernel (BTE) not ready
 *     BCM_E_UNAVAIL  - Endpoint is not a BHH endpoint
 *     BCM_E_XXX      - Other failures
 */
int
bcm_kt2_oam_loopback_get(int unit, bcm_oam_loopback_t *loopback_p)
{
    _bcm_oam_control_t *oc;
    int rv = BCM_E_NONE;
    bhh_sdk_msg_ctrl_loopback_get_t msg;
    _bcm_oam_hash_data_t *h_data_p;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int sess_id = 0;

    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* BHH sessions are owned by the uKernel; fail early if it is down. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    /* NOTE(review): macro may return without releasing the OAM lock -- verify. */
    BCM_OAM_BHH_VALIDATE_EP(loopback_p->id);

    /* Map the API endpoint id to the uKernel session id. */
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(loopback_p->id);

    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   loopback_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[loopback_p->id];

    /*
     * Only BHH is supported
     */
    if(BHH_EP_TYPE(h_data_p)) {
        sal_memset(&msg, 0, sizeof(msg));

        /* Send BHH Session Update message to uC */
        rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                               MOS_MSG_SUBCLASS_BHH_LOOPBACK_GET,
                                               sess_id, 0,
                                               MOS_MSG_SUBCLASS_BHH_LOOPBACK_GET_REPLY,
                                               &reply_len);
        if(BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint EP=%d %s.\n"),
                       loopback_p->id, bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }

        /* Pack control message data into DMA buffer */
        buffer = oc->dma_buffer;
        buffer_ptr = bhh_sdk_msg_ctrl_loopback_get_unpack(buffer, &msg);
        buffer_len = buffer_ptr - buffer;

        /* The reply length must match the unpacked structure size. */
        if (reply_len != buffer_len) {
            rv = BCM_E_INTERNAL;
        } else {
            /*
             * Convert kernel space flags to host space flags
             */
            if (msg.flags & BCM_BHH_INC_REQUESTING_MEP_TLV) {
                loopback_p->flags |= BCM_OAM_BHH_INC_REQUESTING_MEP_TLV;
            }
            /* The LBM TLV flavors are mutually exclusive; first match wins. */
            if (msg.flags & BCM_BHH_LBM_INGRESS_DISCOVERY_MEP_TLV) {
                loopback_p->flags |= BCM_OAM_BHH_LBM_INGRESS_DISCOVERY_MEP_TLV;
            } else if (msg.flags & BCM_BHH_LBM_EGRESS_DISCOVERY_MEP_TLV) {
                loopback_p->flags |= BCM_OAM_BHH_LBM_EGRESS_DISCOVERY_MEP_TLV;
            } else if (msg.flags & BCM_BHH_LBM_ICC_MEP_TLV) {
                loopback_p->flags |= BCM_OAM_BHH_LBM_ICC_MEP_TLV;
            } else if (msg.flags & BCM_BHH_LBM_ICC_MIP_TLV) {
                loopback_p->flags |= BCM_OAM_BHH_LBM_ICC_MIP_TLV;
            }
            if (msg.flags & BCM_BHH_LBR_ICC_MEP_TLV) {
                loopback_p->flags |= BCM_OAM_BHH_LBR_ICC_MEP_TLV;
            } else if (msg.flags & BCM_BHH_LBR_ICC_MIP_TLV) {
                loopback_p->flags |= BCM_OAM_BHH_LBR_ICC_MIP_TLV;
            }
            if (msg.flags & BCM_BHH_TX_ENABLE) {
                loopback_p->flags |= BCM_OAM_LOOPBACK_TX_ENABLE;
            }
            /* Copy session settings, discovery results and counters. */
            loopback_p->period = msg.period;
            loopback_p->ttl = msg.ttl;
            loopback_p->discovered_me.flags = msg.discovery_flags;
            loopback_p->discovered_me.name = msg.discovery_id;
            loopback_p->discovered_me.ttl = msg.discovery_ttl;
            loopback_p->rx_count = msg.rx_count;
            loopback_p->tx_count = msg.tx_count;
            loopback_p->drop_count = msg.drop_count;
            loopback_p->unexpected_response = msg.unexpected_response;
            loopback_p->out_of_sequence = msg.out_of_sequence;
            loopback_p->local_mipid_missmatch = msg.local_mipid_missmatch;
            loopback_p->remote_mipid_missmatch = msg.remote_mipid_missmatch;
            loopback_p->invalid_target_mep_tlv = msg.invalid_target_mep_tlv;
            loopback_p->invalid_mep_tlv_subtype = msg.invalid_mep_tlv_subtype;
            loopback_p->invalid_tlv_offset = msg.invalid_tlv_offset;
            loopback_p->int_pri = msg.int_pri;
            loopback_p->pkt_pri = msg.pkt_pri;
            rv = BCM_E_NONE;
        }
    }
    else {
        rv = BCM_E_UNAVAIL;
    }
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Function:
 *     bcm_kt2_oam_loopback_delete
 * Purpose:
 *     Tear down the loopback (LB) session of a BHH endpoint by sending
 *     a loopback-delete control message to the uController.
 * Parameters:
 *     unit       - Device unit number
 *     loopback_p - (IN) loopback_p->id selects the endpoint.
 * Returns:
 *     BCM_E_NONE     - Success
 *     BCM_E_INIT     - uKernel (BTE) not ready
 *     BCM_E_UNAVAIL  - Endpoint is not a BHH endpoint
 *     BCM_E_XXX      - Other failures
 */
int
bcm_kt2_oam_loopback_delete(int unit, bcm_oam_loopback_t *loopback_p)
{
    _bcm_oam_control_t *oc;
    int rv = BCM_E_NONE;
    shr_bhh_msg_ctrl_loopback_delete_t msg;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int sess_id = 0;
    _bcm_oam_hash_data_t *h_data_p;

    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* BHH sessions live in the uKernel; nothing to delete if it is down. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    BCM_OAM_BHH_VALIDATE_EP(loopback_p->id);

    /* Map the API endpoint id to the uKernel session id. */
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(loopback_p->id);

    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   loopback_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[loopback_p->id];

    /*
     * Only BHH is supported
     */
    if(BHH_EP_TYPE(h_data_p)) {
        /* Only the session id is needed for the delete message. */
        msg.sess_id = sess_id;

        /* Pack control message data into DMA buffer */
        buffer = oc->dma_buffer;
        buffer_ptr = shr_bhh_msg_ctrl_loopback_delete_pack(buffer, &msg);
        buffer_len = buffer_ptr - buffer;

        /* Send BHH Session Update message to uC */
        rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                               MOS_MSG_SUBCLASS_BHH_LOOPBACK_DELETE,
                                               buffer_len, 0,
                                               MOS_MSG_SUBCLASS_BHH_LOOPBACK_DELETE_REPLY,
                                               &reply_len);
        /* A successful delete reply is expected to carry zero length. */
        if(BCM_SUCCESS(rv) && (reply_len != 0))
            rv = BCM_E_INTERNAL;
    }
    else {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Function:
 *     bcm_kt2_oam_loss_add
 * Purpose:
 *     Loss Measurement add: configure an LM session on a BHH endpoint
 *     in the uController.
 * Parameters:
 *     unit   - Device unit number
 *     loss_p - (IN) loss_p->id selects the endpoint; flags/period/
 *              priorities describe the LM session.
 * Returns:
 *     BCM_E_NONE     - Success
 *     BCM_E_INIT     - uKernel (BTE) not ready
 *     BCM_E_PARAM    - Endpoint is redirect-to-CPU
 *     BCM_E_UNAVAIL  - Endpoint is not a BHH endpoint
 *     BCM_E_XXX      - Other failures
 */
int
bcm_kt2_oam_loss_add(int unit, bcm_oam_loss_t *loss_p)
{
    _bcm_oam_control_t *oc;
    _bcm_oam_hash_data_t *h_data_p;
    int rv = BCM_E_NONE;
    bhh_sdk_msg_ctrl_loss_add_t msg;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    uint32 flags = 0;
    int sess_id = 0;

    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* LM runs in the uKernel; fail early if it is down. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    BCM_OAM_BHH_VALIDATE_EP(loss_p->id);

    /* Map the API endpoint id to the uKernel session id. */
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(loss_p->id);

    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   loss_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[loss_p->id];

    /* LM is not supported on endpoints whose OAM is redirected to CPU. */
    if (h_data_p->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }

    /*
     * Only BHH is supported
     */
    if(BHH_EP_TYPE(h_data_p)) {
        /*
         * Convert host space flags to uKernel space flags
         */
        if (loss_p->flags & BCM_OAM_LOSS_SINGLE_ENDED)
            flags |= BCM_BHH_LM_SINGLE_ENDED;
        if (loss_p->flags & BCM_OAM_LOSS_TX_ENABLE)
            flags |= BCM_BHH_LM_TX_ENABLE;

        sal_memset(&msg, 0, sizeof(msg));
        msg.flags = flags;
        msg.sess_id = sess_id;

        /*
         * Set period
         */
        /* Quantize the requested period to a supported CCM interval code. */
        msg.period = _kt2_ccm_intervals[_bcm_kt2_oam_ccm_msecs_to_hw_encode(loss_p->period)];
        msg.int_pri = loss_p->int_pri;
        msg.pkt_pri = loss_p->pkt_pri;

        /* Pack control message data into DMA buffer */
        buffer = oc->dma_buffer;
        buffer_ptr = bhh_sdk_msg_ctrl_loss_add_pack(buffer, &msg);
        buffer_len = buffer_ptr - buffer;

        /* Send BHH Session Update message to uC */
        rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                               MOS_MSG_SUBCLASS_BHH_LOSS_MEASUREMENT_ADD,
                                               buffer_len, 0,
                                               MOS_MSG_SUBCLASS_BHH_LOSS_MEASUREMENT_ADD_REPLY,
                                               &reply_len);
        /* A successful add reply is expected to carry zero length. */
        if(BCM_SUCCESS(rv) && (reply_len != 0))
            rv = BCM_E_INTERNAL;
    }
    else {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Convert a (seconds, nanoseconds) pair produced by the uController into
 * a bcm_time_spec_t.  The two components may arrive with different signs
 * (each is the difference of two non-negative timestamps); they are first
 * normalized to a common sign, then stored as sign + magnitudes.
 * Returns BCM_E_INTERNAL for a NULL output pointer or an unnormalizable
 * nanoseconds value, BCM_E_NONE otherwise.
 */
int
bcm_kt2_oam_bhh_convert_ep_to_time_spec(bcm_time_spec_t* bts, int sec, int ns)
{
    int result = BCM_E_NONE;

    if (bts == NULL) {
        return BCM_E_INTERNAL;
    }

    /* Borrow one second's worth of nanoseconds when the signs differ,
     * so both components end up with the same sign.
     */
    if ((sec < 0) && (ns > 0)) {
        ns -= 1000000000;
        sec += 1;
    } else if ((sec > 0) && (ns < 0)) {
        ns += 1000000000;
        sec -= 1;
    }

    if (ns < 0) {
        /* Still negative after normalization: the input pair was not a
         * valid difference of two non-negative timestamps.
         */
        result = BCM_E_INTERNAL;
    }

    /* Record the overall sign, then store magnitudes. */
    bts->isnegative  = (sec < 0) ? 1 : 0;
    bts->seconds     = BCM_OAM_BHH_ABS(sec);
    bts->nanoseconds = BCM_OAM_BHH_ABS(ns);

    return result;
}
#ifdef _KATANA2_DEBUG
/*
 * d (difference) = m (minuend) - s (subtrahend)
 *
 * Computes the difference of two bcm_time_spec_t values into 'd',
 * borrowing from the seconds field when the nanoseconds subtraction
 * underflows.  Inputs 'm' and 's' are read-only; the previous version
 * decremented m->seconds in the borrow path, silently corrupting the
 * caller's minuend.  Returns BCM_E_INTERNAL on NULL arguments or an
 * impossible nanoseconds difference, BCM_E_NONE otherwise.
 */
int
bcm_oam_bhh_time_spec_subtract(bcm_time_spec_t* d, bcm_time_spec_t* m, bcm_time_spec_t* s)
{
    int rv = BCM_E_NONE;
    int32 d_ns = 0;
    int32 d_s = 0;
    int32 m_s = 0;

    if ((d != NULL) && (m != NULL) && (s != NULL)) {
        /* Work on a local copy of the minuend seconds so the borrow
         * does not mutate the caller's structure.
         */
        m_s = m->seconds;

        /* subtract the nanoseconds first, then borrow if necessary. */
        d_ns = m->nanoseconds - s->nanoseconds;
        if (d_ns < 0) {
            m_s = m_s - 1;
            d_ns = 1000000000 + d_ns;
        }
        if (d_ns < 0) {
            /* if still negative, then error */
            rv = BCM_E_INTERNAL;
            d_ns = abs(d_ns);
        }
        d->nanoseconds = d_ns;

        /* subtract the seconds next, check for negative. */
        d_s = m_s - s->seconds;
        if (d_s < 0) {
            d->isnegative = TRUE;
            d_s = abs(d_s);
        } else {
            d->isnegative = FALSE;
        }
        d->seconds = d_s;
    } else {
        rv = BCM_E_INTERNAL;
    }
    return rv;
}
#endif
/*
 * Function:
 *     bcm_kt2_oam_loss_get
 * Purpose:
 *     Loss Measurement get: read back the LM session configuration and
 *     counters of a BHH endpoint from the uController.
 * Parameters:
 *     unit   - Device unit number
 *     loss_p - (IN/OUT) loss_p->id selects the endpoint; remaining
 *              fields are filled from the uController reply.
 * Returns:
 *     BCM_E_NONE     - Success
 *     BCM_E_INIT     - uKernel (BTE) not ready
 *     BCM_E_UNAVAIL  - Endpoint is not a BHH endpoint
 *     BCM_E_XXX      - Other failures
 */
int
bcm_kt2_oam_loss_get(int unit, bcm_oam_loss_t *loss_p)
{
    _bcm_oam_control_t *oc;
    int rv = BCM_E_NONE;
    bhh_sdk_msg_ctrl_loss_get_t msg;
    _bcm_oam_hash_data_t *h_data_p;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int sess_id = 0;

    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* LM runs in the uKernel; fail early if it is down. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    BCM_OAM_BHH_VALIDATE_EP(loss_p->id);

    /* Map the API endpoint id to the uKernel session id. */
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(loss_p->id);

    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   loss_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[loss_p->id];

    /*
     * Only BHH is supported
     */
    if(BHH_EP_TYPE(h_data_p)) {
        sal_memset(&msg, 0, sizeof(msg));

        /* Send BHH Session Update message to uC */
        rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                               MOS_MSG_SUBCLASS_BHH_LOSS_MEASUREMENT_GET,
                                               sess_id, 0,
                                               MOS_MSG_SUBCLASS_BHH_LOSS_MEASUREMENT_GET_REPLY,
                                               &reply_len);
        if(BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint EP=%d %s.\n"),
                       loss_p->id, bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }

        /* Pack control message data into DMA buffer */
        buffer = oc->dma_buffer;
        buffer_ptr = bhh_sdk_msg_ctrl_loss_get_unpack(buffer, &msg);
        buffer_len = buffer_ptr - buffer;

        /* The reply length must match the unpacked structure size. */
        if (reply_len != buffer_len) {
            rv = BCM_E_INTERNAL;
        } else {
            /*
             * Convert kernel space flags to host space flags
             */
            if (msg.flags & BCM_BHH_LM_SINGLE_ENDED) {
                loss_p->flags |= BCM_OAM_LOSS_SINGLE_ENDED;
            }
            if (msg.flags & BCM_BHH_LM_TX_ENABLE) {
                loss_p->flags |= BCM_OAM_LOSS_TX_ENABLE;
            }
            if (msg.flags & BCM_BHH_LM_SLM) {
                loss_p->flags |= BCM_OAM_LOSS_SLM;
            }
            /* Copy session settings and near-end/far-end counters. */
            loss_p->period = msg.period;
            loss_p->loss_threshold = msg.loss_threshold;
            loss_p->loss_nearend = msg.loss_nearend;
            loss_p->loss_farend = msg.loss_farend;
            loss_p->tx_nearend = msg.tx_nearend;
            loss_p->rx_nearend = msg.rx_nearend;
            loss_p->tx_farend = msg.tx_farend;
            loss_p->rx_farend = msg.rx_farend;
            loss_p->rx_oam_packets = msg.rx_oam_packets;
            loss_p->tx_oam_packets = msg.tx_oam_packets;
            loss_p->int_pri = msg.int_pri;
            loss_p->pkt_pri = msg.pkt_pri;
            rv = BCM_E_NONE;
        }
    }
    else {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Function:
 *     bcm_kt2_oam_loss_delete
 * Purpose:
 *     Loss Measurement Delete: tear down the LM session of a BHH
 *     endpoint in the uController.
 * Parameters:
 *     unit   - Device unit number
 *     loss_p - (IN) loss_p->id selects the endpoint.
 * Returns:
 *     BCM_E_NONE     - Success
 *     BCM_E_INIT     - uKernel (BTE) not ready
 *     BCM_E_UNAVAIL  - Endpoint is not a BHH endpoint
 *     BCM_E_XXX      - Other failures
 */
int
bcm_kt2_oam_loss_delete(int unit, bcm_oam_loss_t *loss_p)
{
    _bcm_oam_control_t *oc;
    int rv = BCM_E_NONE;
    shr_bhh_msg_ctrl_loss_delete_t msg;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int sess_id = 0;
    _bcm_oam_hash_data_t *h_data_p;

    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* LM runs in the uKernel; nothing to delete if it is down. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    BCM_OAM_BHH_VALIDATE_EP(loss_p->id);

    /* Map the API endpoint id to the uKernel session id. */
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(loss_p->id);

    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   loss_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[loss_p->id];

    /*
     * Only BHH is supported
     */
    if(BHH_EP_TYPE(h_data_p)) {
        /* Only the session id is needed for the delete message. */
        msg.sess_id = sess_id;

        /* Pack control message data into DMA buffer */
        buffer = oc->dma_buffer;
        buffer_ptr = shr_bhh_msg_ctrl_loss_delete_pack(buffer, &msg);
        buffer_len = buffer_ptr - buffer;

        /* Send BHH Session Update message to uC */
        rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                               MOS_MSG_SUBCLASS_BHH_LOSS_MEASUREMENT_DELETE,
                                               buffer_len, 0,
                                               MOS_MSG_SUBCLASS_BHH_LOSS_MEASUREMENT_DELETE_REPLY,
                                               &reply_len);
        /* A successful delete reply is expected to carry zero length. */
        if(BCM_SUCCESS(rv) && (reply_len != 0))
            rv = BCM_E_INTERNAL;
    }
    else {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Function:
 *     bcm_kt2_oam_delay_add
 * Purpose:
 *     Delay Measurement add: configure a DM session on a BHH endpoint
 *     in the uController.
 * Parameters:
 *     unit    - Device unit number
 *     delay_p - (IN) delay_p->id selects the endpoint; flags/period/
 *               priorities/timestamp format describe the DM session.
 * Returns:
 *     BCM_E_NONE     - Success
 *     BCM_E_INIT     - uKernel (BTE) not ready
 *     BCM_E_PARAM    - Endpoint is redirect-to-CPU
 *     BCM_E_UNAVAIL  - Endpoint is not a BHH endpoint
 *     BCM_E_XXX      - Other failures
 */
int
bcm_kt2_oam_delay_add(int unit, bcm_oam_delay_t *delay_p)
{
    _bcm_oam_control_t *oc;
    _bcm_oam_hash_data_t *h_data_p;
    int rv = BCM_E_NONE;
    bhh_sdk_msg_ctrl_delay_add_t msg;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    uint32 flags = 0;
    int sess_id = 0;

    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* DM runs in the uKernel; fail early if it is down. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    BCM_OAM_BHH_VALIDATE_EP(delay_p->id);

    /* Map the API endpoint id to the uKernel session id. */
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(delay_p->id);

    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   delay_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[delay_p->id];

    /* DM is not supported on endpoints whose OAM is redirected to CPU. */
    if (h_data_p->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }

    /*
     * Only BHH is supported
     */
    if(BHH_EP_TYPE(h_data_p)) {
        /*
         * Convert host space flags to uKernel space flags
         */
        if (delay_p->flags & BCM_OAM_DELAY_ONE_WAY)
            flags |= BCM_BHH_DM_ONE_WAY;
        if (delay_p->flags & BCM_OAM_DELAY_TX_ENABLE)
            flags |= BCM_BHH_DM_TX_ENABLE;

        sal_memset(&msg, 0, sizeof(msg));
        msg.flags = flags;
        msg.sess_id = sess_id;
        msg.int_pri = delay_p->int_pri;
        msg.pkt_pri = delay_p->pkt_pri;

        /* Remember the requested timestamp format for later gets. */
        h_data_p->ts_format = delay_p->timestamp_format;
        if(delay_p->timestamp_format == bcmOAMTimestampFormatIEEE1588v1)
            msg.dm_format = BCM_BHH_DM_TYPE_PTP;
        else
            msg.dm_format = BCM_BHH_DM_TYPE_NTP;

        /*
         * Set period
         */
        /* Quantize the requested period to a supported CCM interval code. */
        msg.period = _kt2_ccm_intervals[_bcm_kt2_oam_ccm_msecs_to_hw_encode(delay_p->period)];

        /* Pack control message data into DMA buffer */
        buffer = oc->dma_buffer;
        buffer_ptr = bhh_sdk_msg_ctrl_delay_add_pack(buffer, &msg);
        buffer_len = buffer_ptr - buffer;

        /* Send BHH Session Update message to uC */
        rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                               MOS_MSG_SUBCLASS_BHH_DELAY_MEASUREMENT_ADD,
                                               buffer_len, 0,
                                               MOS_MSG_SUBCLASS_BHH_DELAY_MEASUREMENT_ADD_REPLY,
                                               &reply_len);
        /* A successful add reply is expected to carry zero length. */
        if(BCM_SUCCESS(rv) && (reply_len != 0))
            rv = BCM_E_INTERNAL;
    }
    else {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Function:
 *     bcm_kt2_oam_delay_get
 * Purpose:
 *     Delay Measurements get: read back the DM session configuration,
 *     measured delay and the four raw timestamps of a BHH endpoint
 *     from the uController.
 * Parameters:
 *     unit    - Device unit number
 *     delay_p - (IN/OUT) delay_p->id selects the endpoint; remaining
 *               fields are filled from the uController reply.
 * Returns:
 *     BCM_E_NONE     - Success
 *     BCM_E_INIT     - uKernel (BTE) not ready
 *     BCM_E_INTERNAL - Malformed reply or bad timestamp data
 *     BCM_E_UNAVAIL  - Endpoint is not a BHH endpoint
 *     BCM_E_XXX      - Other failures
 */
int
bcm_kt2_oam_delay_get(int unit, bcm_oam_delay_t *delay_p)
{
    _bcm_oam_control_t *oc;
    int rv = BCM_E_NONE;
    bhh_sdk_msg_ctrl_delay_get_t msg;
    _bcm_oam_hash_data_t *h_data_p;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int sess_id = 0;

    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* DM runs in the uKernel; fail early if it is down. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    BCM_OAM_BHH_VALIDATE_EP(delay_p->id);

    /* Map the API endpoint id to the uKernel session id. */
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(delay_p->id);

    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   delay_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[delay_p->id];

    /*
     * Only BHH is supported
     */
    if(BHH_EP_TYPE(h_data_p)) {
        sal_memset(&msg, 0, sizeof(msg));

        /* Send BHH Session Update message to uC */
        rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                               MOS_MSG_SUBCLASS_BHH_DELAY_MEASUREMENT_GET,
                                               sess_id, 0,
                                               MOS_MSG_SUBCLASS_BHH_DELAY_MEASUREMENT_GET_REPLY,
                                               &reply_len);
        if(BCM_FAILURE(rv)) {
            LOG_ERROR(BSL_LS_BCM_OAM,
                      (BSL_META_U(unit,
                                  "OAM Error: Endpoint EP=%d %s.\n"),
                       delay_p->id, bcm_errmsg(rv)));
            _BCM_OAM_UNLOCK(oc);
            return (rv);
        }

        /* Pack control message data into DMA buffer */
        buffer = oc->dma_buffer;
        buffer_ptr = bhh_sdk_msg_ctrl_delay_get_unpack(buffer, &msg);
        buffer_len = buffer_ptr - buffer;

        /* The reply length must match the unpacked structure size. */
        if (reply_len != buffer_len) {
            rv = BCM_E_INTERNAL;
        } else {
            /*
             * Convert kernel space flags to host space flags
             */
            if (msg.flags & BCM_BHH_DM_ONE_WAY)
                delay_p->flags |= BCM_OAM_DELAY_ONE_WAY;
            if (msg.flags & BCM_BHH_DM_TX_ENABLE)
                delay_p->flags |= BCM_OAM_DELAY_TX_ENABLE;

            delay_p->period = msg.period;
            if(msg.dm_format == BCM_BHH_DM_TYPE_PTP)
                delay_p->timestamp_format = bcmOAMTimestampFormatIEEE1588v1;
            else
                delay_p->timestamp_format = bcmOAMTimestampFormatNTP;

            /* Convert the measured delay and the four raw timestamps
             * (tx-forward, rx-forward, tx-backward, rx-backward).
             * Stop at the first conversion failure.
             */
            rv = bcm_kt2_oam_bhh_convert_ep_to_time_spec(&(delay_p->delay),
                                                         msg.delay_seconds, msg.delay_nanoseconds);
            if(BCM_SUCCESS(rv)) {
                rv = bcm_kt2_oam_bhh_convert_ep_to_time_spec(&(delay_p->txf),
                                                             msg.txf_seconds, msg.txf_nanoseconds);
            }
            if(BCM_SUCCESS(rv)) {
                rv = bcm_kt2_oam_bhh_convert_ep_to_time_spec(&(delay_p->rxf),
                                                             msg.rxf_seconds, msg.rxf_nanoseconds);
            }
            if(BCM_SUCCESS(rv)) {
                rv = bcm_kt2_oam_bhh_convert_ep_to_time_spec(&(delay_p->txb),
                                                             msg.txb_seconds, msg.txb_nanoseconds);
            }
            if(BCM_SUCCESS(rv)) {
                rv = bcm_kt2_oam_bhh_convert_ep_to_time_spec(&(delay_p->rxb),
                                                             msg.rxb_seconds, msg.rxb_nanoseconds);
            }
            delay_p->rx_oam_packets = msg.rx_oam_packets;
            delay_p->tx_oam_packets = msg.tx_oam_packets;
            delay_p->int_pri = msg.int_pri;
            delay_p->pkt_pri = msg.pkt_pri;
            /* Fix: do not overwrite 'rv' with BCM_E_NONE here -- the
             * previous unconditional assignment masked BCM_E_INTERNAL
             * errors reported by the timestamp conversions above.
             */
        }
    }
    else {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * Function:
 *     bcm_kt2_oam_delay_delete
 * Purpose:
 *     Delay Measurement Delete: tear down the DM session of a BHH
 *     endpoint in the uController.
 * Parameters:
 *     unit    - Device unit number
 *     delay_p - (IN) delay_p->id selects the endpoint.
 * Returns:
 *     BCM_E_NONE     - Success
 *     BCM_E_INIT     - uKernel (BTE) not ready
 *     BCM_E_UNAVAIL  - Endpoint is not a BHH endpoint
 *     BCM_E_XXX      - Other failures
 */
int
bcm_kt2_oam_delay_delete(int unit, bcm_oam_delay_t *delay_p)
{
    _bcm_oam_control_t *oc;
    int rv = BCM_E_NONE;
    shr_bhh_msg_ctrl_delay_delete_t msg;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int sess_id = 0;
    _bcm_oam_hash_data_t *h_data_p;

    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    /* DM runs in the uKernel; nothing to delete if it is down. */
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    BCM_OAM_BHH_VALIDATE_EP(delay_p->id);

    /* Map the API endpoint id to the uKernel session id. */
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(delay_p->id);

    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM Error: Endpoint EP=%d %s.\n"),
                   delay_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[delay_p->id];

    /*
     * Only BHH is supported
     */
    if(BHH_EP_TYPE(h_data_p)) {
        /* Only the session id is needed for the delete message. */
        msg.sess_id = sess_id;

        /* Pack control message data into DMA buffer */
        buffer = oc->dma_buffer;
        buffer_ptr = shr_bhh_msg_ctrl_delay_delete_pack(buffer, &msg);
        buffer_len = buffer_ptr - buffer;

        /* Send BHH Session Update message to uC */
        rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                               MOS_MSG_SUBCLASS_BHH_DELAY_MEASUREMENT_DELETE,
                                               buffer_len, 0,
                                               MOS_MSG_SUBCLASS_BHH_DELAY_MEASUREMENT_DELETE_REPLY,
                                               &reply_len);
        /* A successful delete reply is expected to carry zero length. */
        if(BCM_SUCCESS(rv) && (reply_len != 0))
            rv = BCM_E_INTERNAL;
    }
    else {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
/*
 * _bcm_kt2_oam_pm_init
 *
 * Initialize OAM Performance Monitoring (PM) support for the unit:
 * read the BHH data-collection mode from config, allocate the PM
 * profile control block (processed mode only) and the PM DMA buffer
 * used to exchange long messages with the uController.
 * Returns BCM_E_NONE, BCM_E_INIT or BCM_E_MEMORY.
 */
int _bcm_kt2_oam_pm_init(int unit)
{
    /* Profile data structure initialization */
    _bcm_oam_pm_profile_control_t *pmc = NULL;
    int rv = BCM_E_NONE;
    _bcm_oam_control_t *oc = NULL;

    rv = _bcm_kt2_oam_control_get(unit, &oc);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Oam control get failed "
                              "- %s.\n"), unit, bcm_errmsg(rv)));
        return BCM_E_INIT;
    }

    _BCM_OAM_LOCK(oc);

    /* Data-collection mode is a boot-time soc property. */
    oc->pm_bhh_lmdm_data_collection_mode = soc_property_get(unit,
            spn_BHH_DATA_COLLECTION_MODE, _BCM_OAM_PM_COLLECTION_MODE_NONE);

    /* Raw mode is not supported in KT2. Internally it will be taken as none. */
    if ( _BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_IS_RAW_DATA(oc)) {
        oc->pm_bhh_lmdm_data_collection_mode = _BCM_OAM_PM_COLLECTION_MODE_NONE;
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Raw mode is not supported in KT2."
                              "Selected mode is none \n"), unit));
    }

    /* Allocate profiles only if one of them has processed stats
     * collection enabled
     */
    if ((_BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_IS_PROCESSED(oc))) {
        _BCM_OAM_ALLOC(pmc, _bcm_oam_pm_profile_control_t,
                       sizeof (_bcm_oam_pm_profile_control_t),
                       "OAM PM profile control");
        if (!pmc) {
            _BCM_OAM_UNLOCK(oc);
            return BCM_E_MEMORY;
        }
        /* Stash the profile control block in the per-unit global. */
        kt2_pm_profile_control[unit] = pmc;
    }

    /*
     * Allocate DMA buffers and raw data buffers.
     *
     * 1) DMA buffer will be used to send and receive 'long' messages
     * between SDK Host and uController (BTE).
     */
    /* DMA buffer */
    if (_BCM_KT2_OAM_PM_BHH_DATA_COLLECTION_MODE_ENABLED(oc)) {
        oc->pm_bhh_dma_buffer_len = sizeof(shr_oam_pm_msg_ctrl_t);
        oc->pm_bhh_dma_buffer = soc_cm_salloc(unit, oc->pm_bhh_dma_buffer_len,
                                              "PM DMA buffer");
        if (!oc->pm_bhh_dma_buffer) {
            _BCM_OAM_UNLOCK(oc);
            return (BCM_E_MEMORY);
        }
        sal_memset(oc->pm_bhh_dma_buffer, 0, oc->pm_bhh_dma_buffer_len);
    }

    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
 * Function:
 *      _bcm_kt2_oam_pm_msg_send_receive
 * Purpose:
 *      Sends given PM control message to the uController.
 *      Receives and verifies expected reply.
 *      Performs DMA operation if required.
 * Parameters:
 *      unit       - (IN) Unit number.
 *      s_class    - (IN) PM message class.
 *      s_subclass - (IN) PM message subclass.
 *      s_len      - (IN) Value for 'len' field in message struct.
 *                        Length of buffer to flush if DMA send is required.
 *      s_data     - (IN) Value for 'data' field in message struct.
 *                        Ignored if message requires a DMA send/receive
 *                        operation.
 *      r_subclass - (IN) Expected reply message subclass.
 *      r_len      - (OUT) Returns value in 'len' reply message field.
 * Returns:
 *      BCM_E_XXX
 * Notes:
 *     - The uc_msg 'len' and 'data' fields of mos_msg_data_t
 *       can take any arbitrary data.
 *
 *     PM Long Control message:
 *     - PM control messages that require send/receive of information
 *       that cannot fit in the uc_msg 'len' and 'data' fields need to
 *       use DMA operations to exchange information (long control message).
 *
 *     - PM convention for long control messages for
 *       'mos_msg_data_t' fields:
 *          'len'    size of the DMA buffer to send to uController
 *          'data'   physical DMA memory address to send or receive
 *
 *     DMA Operations:
 *     - DMA read/write operation is performed when a long BHH control
 *       message is involved.
 *
 *     - Messages that require DMA operation (long control message)
 *       is indicated by MOS_MSG_DMA_MSG().
 *
 *     - Callers must 'pack' and 'unpack' corresponding information
 *       into/from DMA buffer indicated by dma_buffer.
 */
STATIC int
_bcm_kt2_oam_pm_msg_send_receive(int unit, uint8 s_class, uint8 s_subclass,
                                 uint16 s_len, uint32 s_data,
                                 uint8 r_subclass, uint16 *r_len)
{
    int rv = BCM_E_NONE;
    _bcm_oam_control_t *oc;
    mos_msg_data_t send, reply;
    uint8 *dma_buffer = NULL;
    int dma_buffer_len = 0;
    uint32 uc_rv;
    int uc_num;

    /* Lock already taken by the calling routine. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    sal_memset(&send, 0, sizeof(send));
    sal_memset(&reply, 0, sizeof(reply));
    send.s.mclass = s_class;
    send.s.subclass = s_subclass;
    /* Message header fields are exchanged in network byte order. */
    send.s.len = bcm_htons(s_len);

    /*
     * Set 'data' to DMA buffer address if a DMA operation is
     * required for send or receive.
     */
    dma_buffer = oc->pm_bhh_dma_buffer;
    dma_buffer_len = oc->pm_bhh_dma_buffer_len;
    if (MOS_MSG_DMA_MSG(s_subclass) ||
        MOS_MSG_DMA_MSG(r_subclass)) {
        send.s.data = bcm_htonl(soc_cm_l2p(unit, dma_buffer));
    } else {
        send.s.data = bcm_htonl(s_data);
    }

    /* Flush DMA memory */
    if (MOS_MSG_DMA_MSG(s_subclass)) {
        soc_cm_sflush(unit, dma_buffer, s_len);
    }
    /* Invalidate DMA memory to read */
    if (MOS_MSG_DMA_MSG(r_subclass)) {
        soc_cm_sinval(unit, dma_buffer, dma_buffer_len);
    }

    /* Select the uC core that runs the BHH application. */
    switch (s_class) {
    case MOS_MSG_CLASS_BHH:
        uc_num = oc->uc_num;
        break;
    default:
        return BCM_E_PARAM;
        break;
    }

    /* coverity[unreachable] */
    rv = soc_cmic_uc_msg_send_receive(unit, uc_num,
                                      &send, &reply,
                                      _PM_UC_MSG_TIMEOUT_USECS);

    /* Check reply class, subclass */
    if ((rv != SOC_E_NONE) ||
        (reply.s.mclass != s_class) || /* We can expect the same class that was sent */
        (reply.s.subclass != r_subclass)) {
        return (BCM_E_INTERNAL);
    }

    /* Convert OAM PM uController error code to BCM */
    uc_rv = bcm_ntohl(reply.s.data);
    switch(uc_rv) {
    case SHR_OAM_PM_UC_E_NONE:
        rv = BCM_E_NONE;
        break;
    case SHR_OAM_PM_UC_E_INTERNAL:
        rv = BCM_E_INTERNAL;
        break;
    case SHR_OAM_PM_UC_E_MEMORY:
        rv = BCM_E_MEMORY;
        break;
    case SHR_OAM_PM_UC_E_PARAM:
        rv = BCM_E_PARAM;
        break;
    case SHR_OAM_PM_UC_E_RESOURCE:
        rv = BCM_E_RESOURCE;
        break;
    case SHR_OAM_PM_UC_E_EXISTS:
        rv = BCM_E_EXISTS;
        break;
    case SHR_OAM_PM_UC_E_NOT_FOUND:
        rv = BCM_E_NOT_FOUND;
        break;
    case SHR_OAM_PM_UC_E_UNAVAIL:
        rv = BCM_E_UNAVAIL;
        break;
    case SHR_OAM_PM_UC_E_VERSION:
        rv = BCM_E_CONFIG;
        break;
    case SHR_OAM_PM_UC_E_INIT:
        rv = BCM_E_INIT;
        break;
    default:
        rv = BCM_E_INTERNAL;
        break;
    }

    *r_len = bcm_ntohs(reply.s.len);

    return (rv);
}
/*
 * Compare two PM profiles by their bin-edge arrays.
 * Returns 1 when all bin edges are identical, 0 otherwise.
 */
int _bcm_kt2_pm_profile_compare(bcm_oam_pm_profile_info_t *profile_info1,
                                bcm_oam_pm_profile_info_t *profile_info2)
{
    int edges_differ;

    edges_differ = sal_memcmp(profile_info1->bin_edges,
                              profile_info2->bin_edges,
                              BCM_OAM_MAX_PM_PROFILE_BIN_EDGES * sizeof(uint32));

    return (edges_differ == 0) ? 1 : 0;
}
/*
 * Check whether a PM profile with the same bin edges as 'profile_info'
 * is already stored for this unit.  Returns 1 on a match, 0 when no
 * match is found or PM profile control is not initialized.
 */
int _bcm_kt2_pm_profile_exists(int unit, bcm_oam_pm_profile_info_t *profile_info)
{
    _bcm_oam_pm_profile_control_t *pmc = kt2_pm_profile_control[unit];
    int idx;

    if (pmc == NULL) {
        return 0;
    }

    for (idx = 0; idx < _BCM_OAM_MAX_PM_PROFILES; idx++) {
        /* Skip free slots; only in-use profiles can match. */
        if (!_BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, idx)) {
            continue;
        }
        if (_bcm_kt2_pm_profile_compare(_BCM_KT2_PM_CTRL_PROFILE_PTR(pmc, idx),
                                        profile_info)) {
            return 1;
        }
    }
    return 0;
}
/*
 * Find the lowest unused PM profile id for this unit.
 * On success writes the id to *pm_profile_id and returns BCM_E_NONE;
 * returns BCM_E_INIT when PM profile control is not initialized and
 * BCM_E_RESOURCE when every slot is in use.
 */
int
_bcm_kt2_find_free_pm_profile_id(int unit, int *pm_profile_id)
{
    _bcm_oam_pm_profile_control_t *pmc = kt2_pm_profile_control[unit];
    int idx;

    if (pmc == NULL) {
        return BCM_E_INIT;
    }

    for (idx = 0; idx < _BCM_OAM_MAX_PM_PROFILES; idx++) {
        if (_BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, idx)) {
            continue;
        }
        /* First free slot wins. */
        *pm_profile_id = idx;
        return BCM_E_NONE;
    }
    return BCM_E_RESOURCE;
}
/*
 * Internal PM profile delete: zero the profile slot and mark it free.
 * 'all' suppresses the in-use check so a delete-all sweep can clear
 * every slot unconditionally.  Caller holds the OAM lock.
 * Returns BCM_E_NONE, BCM_E_INIT or BCM_E_NOT_FOUND.
 */
int
_bcm_kt2_oam_pm_profile_delete(int unit, bcm_oam_pm_profile_t profile_id, int all)
{
    _bcm_oam_pm_profile_control_t *pmc = kt2_pm_profile_control[unit];

    if (pmc == NULL) {
        return BCM_E_INIT;
    }

    /* For a single-profile delete the slot must currently be in use. */
    if (!all && !(_BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, profile_id))) {
        return BCM_E_NOT_FOUND;
    }

    /* Wipe the slot contents and release it. */
    sal_memset(_BCM_KT2_PM_CTRL_PROFILE_PTR(pmc, profile_id), 0,
               sizeof(bcm_oam_pm_profile_info_t));
    _BCM_KT2_SET_PM_CTRL_PROFILE_NOT_IN_USE(pmc, profile_id);

    return BCM_E_NONE;
}
/*
 * Public PM profile delete: validate the id and remove the profile
 * under the OAM lock.  Returns BCM_E_PARAM for an invalid id, else
 * the result of the internal delete.
 */
int
bcm_kt2_oam_pm_profile_delete(int unit, bcm_oam_pm_profile_t profile_id)
{
    _bcm_oam_control_t *oc = NULL;
    int rv;

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);
    if (_BCM_KT2_PM_PROFILE_ID_VALID(profile_id)) {
        rv = _bcm_kt2_oam_pm_profile_delete(unit, profile_id, 0);
    } else {
        rv = BCM_E_PARAM;
    }
    _BCM_OAM_UNLOCK(oc);

    return rv;
}
/*
 * Delete every PM profile on the unit.
 *
 * Fix: the previous version unlocked the OAM lock on a per-profile
 * delete failure but neither returned nor skipped the final unlock,
 * producing a double-unlock, and it discarded the error code (always
 * returning BCM_E_NONE).  Now the first failure releases the lock
 * exactly once and is propagated to the caller.
 *
 * Parameters:
 *     unit - Device unit number
 * Returns:
 *     BCM_E_NONE on success, first failing BCM_E_XXX otherwise.
 */
int
bcm_kt2_oam_pm_profile_delete_all(int unit)
{
    int id;
    _bcm_oam_control_t *oc = NULL;
    int rv = BCM_E_NONE;

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);
    for (id = 0; id < _BCM_OAM_MAX_PM_PROFILES; id++) {
        /* 'all' flag clears the slot whether or not it is in use. */
        rv = _bcm_kt2_oam_pm_profile_delete(unit, id, 1);
        if (rv != BCM_E_NONE) {
            _BCM_OAM_UNLOCK(oc);
            return rv;
        }
    }
    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
 * bcm_kt2_oam_pm_profile_create
 *
 * Create a PM profile on the unit.  Honors WITH_ID and REPLACE flags:
 * WITH_ID pins the profile to profile_info->id; REPLACE (which requires
 * WITH_ID) deletes the existing profile first.  Duplicate bin-edge sets
 * are rejected.  On success profile_info->id holds the assigned id.
 * Returns BCM_E_NONE, BCM_E_INIT, BCM_E_PARAM, BCM_E_NOT_FOUND,
 * BCM_E_EXISTS or BCM_E_RESOURCE.
 */
int
bcm_kt2_oam_pm_profile_create(int unit, bcm_oam_pm_profile_info_t *profile_info)
{
    _bcm_oam_pm_profile_control_t *pmc = NULL;
    bcm_oam_pm_profile_info_t *profile_ptr = NULL;
    int pm_profile_id = 0;
    _bcm_oam_control_t *oc = NULL;
    int rv=BCM_E_NONE;

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    _BCM_OAM_LOCK(oc);

    pmc = kt2_pm_profile_control[unit];
    if (!pmc) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    /* WITH_ID: the caller-supplied id must be in range. */
    if (_BCM_KT2_PM_PROFILE_WITH_ID_FLAG_SET(profile_info)) {
        if (!(_BCM_KT2_PM_PROFILE_ID_VALID(profile_info->id))) {
            _BCM_OAM_UNLOCK(oc);
            return BCM_E_PARAM;
        }
    }

    if (_BCM_KT2_PM_PROFILE_REPLACE_FLAG_SET(profile_info)) {
        if (!(_BCM_KT2_PM_PROFILE_WITH_ID_FLAG_SET(profile_info))) {
            /* replace needs the id to be present */
            _BCM_OAM_UNLOCK(oc);
            return BCM_E_PARAM;
        }
        if (!(_BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, profile_info->id))) {
            /* Trying to replace a non-existent PM profile */
            _BCM_OAM_UNLOCK(oc);
            return BCM_E_NOT_FOUND;
        }
        /* Delete the previous profile. */
        rv = _bcm_kt2_oam_pm_profile_delete(unit,
                                            profile_info->id, 0);
        if(rv != BCM_E_NONE) {
            _BCM_OAM_UNLOCK(oc);
            return rv;
        }
    }

    if (_BCM_KT2_PM_PROFILE_WITH_ID_FLAG_SET(profile_info)) {
        /* After a REPLACE delete, the slot is guaranteed free here. */
        if (_BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, profile_info->id)) {
            /* That profile id is in use. */
            _BCM_OAM_UNLOCK(oc);
            return BCM_E_EXISTS;
        }
        /* Else use that as the id */
        pm_profile_id = profile_info->id;
    } else {
        rv = _bcm_kt2_find_free_pm_profile_id(unit,
                                              &pm_profile_id);
        if(rv != BCM_E_NONE) {
            _BCM_OAM_UNLOCK(oc);
            return rv;
        }
    }

    /* Check if such a profile already exists. */
    if (_bcm_kt2_pm_profile_exists(unit, profile_info)) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_EXISTS;
    }

    /* Copy it to local profile information */
    profile_ptr = _BCM_KT2_PM_CTRL_PROFILE_PTR(pmc, pm_profile_id);
    sal_memcpy(profile_ptr, profile_info,
               sizeof(bcm_oam_pm_profile_info_t));

    /* Update the id in the profile */
    profile_ptr->id = profile_info->id = pm_profile_id;

    /* Current flags have only transient meaning, so clear them */
    profile_ptr->flags = 0;

    /* Mark that profile used */
    _BCM_KT2_SET_PM_CTRL_PROFILE_IN_USE(pmc, pm_profile_id);

    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
 * Retrieve a stored PM profile.  profile_info->id selects the profile;
 * on success the full profile contents are copied into *profile_info.
 * Returns BCM_E_NONE, BCM_E_INIT, BCM_E_PARAM or BCM_E_NOT_FOUND.
 */
int
bcm_kt2_oam_pm_profile_get(int unit, bcm_oam_pm_profile_info_t *profile_info)
{
    _bcm_oam_pm_profile_control_t *pmc = kt2_pm_profile_control[unit];
    int profile_id = profile_info->id;

    if (pmc == NULL) {
        return BCM_E_INIT;
    }
    if (!_BCM_KT2_PM_PROFILE_ID_VALID(profile_id)) {
        return BCM_E_PARAM;
    }
    if (!_BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, profile_id)) {
        /* Slot exists but holds no profile. */
        return BCM_E_NOT_FOUND;
    }

    /* Hand the stored profile back to the caller. */
    sal_memcpy(profile_info, _BCM_KT2_PM_CTRL_PROFILE_PTR(pmc, profile_id),
               sizeof(bcm_oam_pm_profile_info_t));
    return BCM_E_NONE;
}
/*
 * bcm_kt2_oam_pm_profile_attach
 *
 * Attach a PM profile to a BHH endpoint: validate endpoint and profile,
 * push the profile (id, flags, bin edges) to the uController, and record
 * the attachment in the endpoint's hash data.  The endpoint must be a
 * BHH endpoint with delay measurement enabled and no profile already
 * attached.  Returns BCM_E_XXX.
 */
int bcm_kt2_oam_pm_profile_attach(int unit, bcm_oam_endpoint_t endpoint_id,
                                  bcm_oam_pm_profile_t profile_id)
{
    _bcm_oam_pm_profile_control_t *pmc = NULL;
    bcm_oam_pm_profile_info_t *profile_info = NULL;
    _bcm_oam_control_t *oc = NULL;
    _bcm_oam_hash_data_t *h_data_p = NULL;
    int rv = BCM_E_NONE;
    shr_oam_pm_msg_ctrl_profile_attach_t msg_profile_attach;
    uint8 msg_class = 0;
    uint16 reply_len = 0;
    uint8 msg_subclass = 0;
    uint8 msg_reply_subclass = 0;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len;

    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));

    /* NOTE(review): validation macro runs before the lock is taken. */
    _BCM_OAM_EP_INDEX_VALIDATE(endpoint_id);

    _BCM_OAM_LOCK(oc);

    h_data_p = &oc->oam_hash_data[endpoint_id];

    /* PM profiles only apply to BHH endpoints. */
    if (!BHH_EP_TYPE(h_data_p)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: EP=%d is not BHH endpoint\n"),
                   unit, endpoint_id));
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_UNAVAIL);
    }

    if (0 == h_data_p->in_use) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: h_data_p not in use EP=%d\n"),
                   unit, endpoint_id));
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_NOT_FOUND);
    }

    pmc = kt2_pm_profile_control[unit];
    if (!pmc) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }

    if (!(_BCM_KT2_PM_PROFILE_ID_VALID(profile_id))) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }

    /* Check profile id exists or not */
    if (!(_BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, profile_id))) {
        /* It is not in use. Return error */
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_NOT_FOUND;
    }

    /* Check if a profile id is already attached to this endpoint */
    if (h_data_p->pm_profile_attached != _BCM_OAM_INVALID_INDEX) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_EXISTS;
    }

    /* Check if delay measurement flag is present for the endpoint */
    if (!(h_data_p->flags & BCM_OAM_ENDPOINT_DELAY_MEASUREMENT)) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }

    profile_info = _BCM_KT2_PM_CTRL_PROFILE_PTR(pmc, profile_id);

    /* Send info to u-Kernel about the profile attached with the endpoint */
    sal_memset(&msg_profile_attach, 0, sizeof(msg_profile_attach));
    msg_class = MOS_MSG_CLASS_BHH;
    msg_subclass = MOS_MSG_SUBCLASS_BHH_PM_PROFILE_ATTACH;
    msg_reply_subclass = MOS_MSG_SUBCLASS_BHH_PM_PROFILE_ATTACH_REPLY;

    msg_profile_attach.sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(endpoint_id);
    msg_profile_attach.profile_id = profile_id;
    msg_profile_attach.profile_flags = profile_info->flags;
    sal_memcpy(&(msg_profile_attach.profile_edges), &(profile_info->bin_edges),
               (sizeof(uint32) * SHR_OAM_PM_MAX_PM_BIN_EDGES));

    /* Pack control message data into DMA buffer */
    buffer = oc->pm_bhh_dma_buffer;
    buffer_ptr = shr_oam_pm_msg_ctrl_profile_attach_pack(buffer, &msg_profile_attach);
    buffer_len = buffer_ptr - buffer;

    rv = _bcm_kt2_oam_pm_msg_send_receive
        (unit, msg_class,
         msg_subclass,
         buffer_len, 0,
         msg_reply_subclass,
         &reply_len);
    if(BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INTERNAL;
    }

    /* Attach it to the endpoint */
    h_data_p->pm_profile_attached = profile_id;

    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
 * Function:
 *     bcm_kt2_oam_pm_profile_detach
 * Purpose:
 *     Detach a performance-measurement (PM) profile from a BHH endpoint
 *     and notify the uKernel BHH application of the detach.
 * Parameters:
 *     unit        - (IN) Unit number.
 *     endpoint_id - (IN) BHH endpoint the profile is attached to.
 *     profile_id  - (IN) PM profile to detach; must match the profile
 *                        currently attached to the endpoint.
 * Returns:
 *     BCM_E_XXX
 */
int bcm_kt2_oam_pm_profile_detach(int unit, bcm_oam_endpoint_t endpoint_id,
                                  bcm_oam_pm_profile_t profile_id)
{
    _bcm_oam_pm_profile_control_t *pmc = NULL;
    _bcm_oam_control_t *oc = NULL;
    _bcm_oam_hash_data_t *h_data_p = NULL;
    int rv = BCM_E_NONE;
    uint8 msg_class = 0;
    uint16 reply_len = 0;
    uint8 msg_subclass = 0;
    uint8 msg_reply_subclass = 0;
    uint16 sess_id = 0;
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_EP_INDEX_VALIDATE(endpoint_id);
    _BCM_OAM_LOCK(oc);
    h_data_p = &oc->oam_hash_data[endpoint_id];
    /* PM profiles are only supported on BHH (MPLS-TP) endpoints. */
    if (!BHH_EP_TYPE(h_data_p)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: EP=%d is not BHH endpoint\n"),
                   unit, endpoint_id));
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_UNAVAIL);
    }
    if (0 == h_data_p->in_use) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: h_data_p not in use EP=%d\n"),
                   unit, endpoint_id));
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_NOT_FOUND);
    }
    pmc = kt2_pm_profile_control[unit];
    if (!pmc) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }
    if (!(_BCM_KT2_PM_PROFILE_ID_VALID(profile_id))) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }
    /* Check profile id exists or not */
    if (!(_BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, profile_id))) {
        /* It is not in use. Return error */
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_NOT_FOUND;
    }
    /* Check if the passed profile id is the one that is attached */
    if (profile_id != h_data_p->pm_profile_attached) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }
    msg_class = MOS_MSG_CLASS_BHH;
    msg_subclass = MOS_MSG_SUBCLASS_BHH_PM_PROFILE_DETACH;
    msg_reply_subclass = MOS_MSG_SUBCLASS_BHH_PM_PROFILE_DETACH_REPLY;
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(endpoint_id);
    /* No DMA payload here: session id and profile id ride in the
     * message's data/length fields directly. */
    rv = _bcm_kt2_oam_pm_msg_send_receive
             (unit, msg_class,
              msg_subclass,
              sess_id, profile_id,
              msg_reply_subclass,
              &reply_len);
    if(BCM_FAILURE(rv)) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INTERNAL;
    }
    /* Detach the profile from endpoint */
    h_data_p->pm_profile_attached = _BCM_OAM_INVALID_INDEX;
    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
 * Function:
 *     bcm_kt2_oam_pm_profile_traverse
 * Purpose:
 *     Invoke a user callback for every PM profile currently in use.
 *     Traversal stops at the first callback failure, whose status is
 *     returned to the caller.
 * Parameters:
 *     unit      - (IN) Unit number.
 *     cb        - (IN) Callback invoked per in-use profile (mandatory).
 *     user_data - (IN) Opaque cookie passed through to the callback.
 * Returns:
 *     BCM_E_XXX
 */
int bcm_kt2_oam_pm_profile_traverse(int unit,
                bcm_oam_pm_profile_traverse_cb cb, void *user_data)
{
    _bcm_oam_pm_profile_control_t *profile_ctrl = NULL;
    bcm_oam_pm_profile_info_t *info = NULL;
    int id;
    int result = BCM_E_NONE;

    /* A callback is mandatory. */
    if (NULL == cb) {
        return (BCM_E_PARAM);
    }

    profile_ctrl = kt2_pm_profile_control[unit];
    if (!profile_ctrl) {
        return BCM_E_INIT;
    }

    for (id = 0; id < _BCM_OAM_MAX_PM_PROFILES; id++) {
        /* Visit only profile ids that are allocated. */
        if (_BCM_KT2_PM_CTRL_PROFILE_IN_USE(profile_ctrl, id)) {
            info = _BCM_KT2_PM_CTRL_PROFILE_PTR(profile_ctrl, id);
            result = cb(unit, info, user_data);
            if (BCM_FAILURE(result)) {
                LOG_ERROR(BSL_LS_BCM_OAM,
                          (BSL_META_U(unit,
                                      "OAM(unit %d) Error: EP=%d callback failed - %s.\n"),
                           unit, id, bcm_errmsg(result)));
                return (result);
            }
        }
    }
    return (result);
}
/*
 * Copy a PM-stats reply message received from the uKernel into the
 * caller-visible bcm_oam_pm_stats_t structure.  The extra element
 * fields are only valid (and only copied) when the uKernel firmware
 * advertises the PM_STAT_EXTRA_ELEM feature.
 */
void _bcm_kt2_oam_copy_stats_msg_to_stats_ptr(shr_oam_pm_msg_ctrl_pm_stats_get_t *stats_msg,
                                              bcm_oam_pm_stats_t *stats_ptr,
                                              uint8 pm_stat_extra_elem_feature)
{
    /* Far-end loss measurement results. */
    stats_ptr->far_loss      = stats_msg->far_loss;
    stats_ptr->far_loss_min  = stats_msg->far_loss_min;
    stats_ptr->far_loss_max  = stats_msg->far_loss_max;
    stats_ptr->far_tx_min    = stats_msg->far_tx_min;
    stats_ptr->far_tx_max    = stats_msg->far_tx_max;

    /* Near-end loss measurement results. */
    stats_ptr->near_loss     = stats_msg->near_loss;
    stats_ptr->near_loss_min = stats_msg->near_loss_min;
    stats_ptr->near_loss_max = stats_msg->near_loss_max;
    stats_ptr->near_tx_min   = stats_msg->near_tx_min;
    stats_ptr->near_tx_max   = stats_msg->near_tx_max;
    stats_ptr->lm_tx_count   = stats_msg->lm_tx_count;

    /* Delay measurement results.  The message carries an accumulated
     * delay; divide by the rx count to report the average.  When no DM
     * replies were received, DM_avg is deliberately left untouched. */
    stats_ptr->DM_min = stats_msg->DM_min;
    stats_ptr->DM_max = stats_msg->DM_max;
    if (stats_msg->dm_rx_count) {
        stats_ptr->DM_avg = stats_msg->DM_avg / stats_msg->dm_rx_count;
    }
    stats_ptr->dm_tx_count = stats_msg->dm_tx_count;

    stats_ptr->profile_id = stats_msg->profile_id;
    /* One bin counter per edge plus the final open-ended bin. */
    sal_memcpy(&(stats_ptr->bin_counters), &(stats_msg->bin_counters),
               sizeof(uint32) * (SHR_OAM_PM_MAX_PM_BIN_EDGES + 1));

    if (pm_stat_extra_elem_feature) {
        stats_ptr->lm_rx_count = stats_msg->lm_rx_count;
        stats_ptr->dm_rx_count = stats_msg->dm_rx_count;
        stats_ptr->far_total_tx_pkt_count = stats_msg->far_total_tx_pkt_count;
        stats_ptr->near_total_tx_pkt_count = stats_msg->near_total_tx_pkt_count;
    }
    if (stats_msg->flags & SHR_OAM_PM_STATS_FLAG_COUNTER_ROLLOVER) {
        stats_ptr->flags |= BCM_OAM_PM_STATS_PROCESSED_COUNTER_ROLLOVER;
    }
}
/*
 * Function:
 *     bcm_kt2_oam_pm_stats_get
 * Purpose:
 *     Retrieve processed PM statistics for a BHH endpoint from the
 *     uKernel and copy them into the caller's structure.
 * Parameters:
 *     unit        - (IN)  Unit number.
 *     endpoint_id - (IN)  BHH endpoint to query.
 *     stats_ptr   - (OUT) Filled with the endpoint's PM statistics.
 * Returns:
 *     BCM_E_XXX
 */
int bcm_kt2_oam_pm_stats_get(int unit,
        bcm_oam_endpoint_t endpoint_id, bcm_oam_pm_stats_t *stats_ptr)
{
    _bcm_oam_pm_profile_control_t *pmc = NULL;
    _bcm_oam_control_t *oc = NULL;
    _bcm_oam_hash_data_t *h_data_p = NULL;
    shr_oam_pm_msg_ctrl_pm_stats_get_t stats_msg;
    int rv = BCM_E_NONE;
    uint16 sess_id = 0;
    uint8 msg_class = 0;
    uint16 reply_len = 0;
    uint8 msg_subclass = 0;
    uint8 msg_reply_subclass = 0;
    uint8 *dma_buffer = NULL;
    uint8 pm_stat_extra_elem_feature = 0;
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_EP_INDEX_VALIDATE(endpoint_id);
    _BCM_OAM_LOCK(oc);
    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->mep_pool, endpoint_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Endpoint EP=%d %s.\n"),
                   unit, endpoint_id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[endpoint_id];
    /* PM statistics are only maintained for BHH endpoints. */
    if (!BHH_EP_TYPE(h_data_p)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: EP=%d is not BHH endpoint\n"),
                   unit, endpoint_id));
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_UNAVAIL);
    }
    if (0 == h_data_p->in_use) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: h_data_p not in use EP=%d\n"),
                   unit, endpoint_id));
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_INTERNAL);
    }
    pmc = kt2_pm_profile_control[unit];
    if (!pmc) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }
    msg_class = MOS_MSG_CLASS_BHH;
    msg_subclass = MOS_MSG_SUBCLASS_BHH_PM_STATS_GET;
    msg_reply_subclass = MOS_MSG_SUBCLASS_BHH_PM_STATS_GET_REPLY;
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(endpoint_id);
    pm_stat_extra_elem_feature = BHH_UC_FEATURE_CHECK(BHH_PM_STAT_EXTRA_ELEM);
    dma_buffer = oc->pm_bhh_dma_buffer;
    /* The uKernel writes the stats reply straight into the shared DMA
     * buffer; soc_cm_l2p translates its logical address to physical. */
    rv = _bcm_kt2_oam_pm_msg_send_receive
             (unit, msg_class,
              msg_subclass,
              /* Pass the buffer pointer in data and sess id in length fields.*/
              sess_id, soc_cm_l2p(unit, dma_buffer),
              msg_reply_subclass,
              &reply_len);
    if(BCM_FAILURE(rv) || reply_len == 0) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INTERNAL;
    }
    shr_oam_pm_msg_ctrl_pm_stats_get_unpack(dma_buffer, &stats_msg, pm_stat_extra_elem_feature);
    _bcm_kt2_oam_copy_stats_msg_to_stats_ptr(&stats_msg, stats_ptr, pm_stat_extra_elem_feature);
    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
 * Function:
 *     bcm_kt2_oam_pm_profile_attach_get
 * Purpose:
 *     Return the PM profile currently attached to an endpoint.
 * Parameters:
 *     unit        - (IN)  Unit number.
 *     endpoint_id - (IN)  Endpoint to query.
 *     profile_id  - (OUT) Attached profile id.
 * Returns:
 *     BCM_E_NOT_FOUND when no profile is attached; BCM_E_XXX otherwise.
 */
int
bcm_kt2_oam_pm_profile_attach_get(
    int unit,
    bcm_oam_endpoint_t endpoint_id,
    bcm_oam_pm_profile_t *profile_id)
{
    _bcm_oam_pm_profile_control_t *pmc = NULL;
    _bcm_oam_control_t *oc = NULL;
    _bcm_oam_hash_data_t *h_data_p = NULL;
    int rv = BCM_E_NONE;
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_EP_INDEX_VALIDATE(endpoint_id);
    _BCM_OAM_LOCK(oc);
    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->mep_pool, endpoint_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Endpoint EP=%d %s.\n"),
                   unit, endpoint_id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[endpoint_id];
    if (0 == h_data_p->in_use) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: h_data_p not in use EP=%d\n"),
                   unit, endpoint_id));
        _BCM_OAM_UNLOCK(oc);
        return (BCM_E_INTERNAL);
    }
    pmc = kt2_pm_profile_control[unit];
    if (!pmc) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }
    /* Get the attached profile */
    *profile_id = h_data_p->pm_profile_attached;
    if (*profile_id == _BCM_OAM_INVALID_INDEX) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_NOT_FOUND;
    }
    /* Check profile id is in use or not */
    if (!(_BCM_KT2_PM_CTRL_PROFILE_IN_USE(pmc, *profile_id))) {
        /* It is not in use. Return error */
        /* Ideally this should not happen: an attached profile should
         * always be marked in-use in the profile control. */
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INTERNAL;
    }
    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
 * Function:
 *      bcm_kt2_oam_csf_add
 * Purpose:
 *      Start CSF (Client Signal Fail) PDU transmission on a BHH
 *      endpoint by sending a CSF-add message to the uKernel.
 * Parameters:
 *      unit  - (IN) Unit number.
 *      csf_p - (INOUT) CSF object
 * Returns:
 *      BCM_E_XXX
 * Notes:
 *      Only 1-second and 1-minute transmission periods are accepted.
 */
int bcm_kt2_oam_csf_add(int unit, bcm_oam_csf_t *csf_p)
{
    _bcm_oam_control_t *oc;
    _bcm_oam_hash_data_t *h_data_p;
    bhh_sdk_msg_ctrl_csf_add_t msg;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int sess_id = 0;
    int rv = BCM_E_NONE;
    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_LOCK(oc);
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }
    /* NOTE(review): if BCM_OAM_BHH_VALIDATE_EP returns on failure, it
     * does so while oc is still locked - confirm the macro's semantics. */
    BCM_OAM_BHH_VALIDATE_EP(csf_p->id);
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(csf_p->id);
    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Endpoint EP=%d %s.\n"),
                   unit, csf_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    if ((csf_p->period != BCM_OAM_ENDPOINT_CCM_PERIOD_1S) &&
        (csf_p->period != BCM_OAM_ENDPOINT_CCM_PERIOD_1M)) {
        /* Only 1S and 1M are supported */
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }
    h_data_p = &oc->oam_hash_data[csf_p->id];
    /*
     * Only BHH is supported
     */
    if (!BHH_EP_TYPE(h_data_p)) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    sal_memset(&msg, 0, sizeof(msg));
    msg.sess_id = sess_id;
    msg.int_pri = csf_p->int_pri;
    msg.pkt_pri = csf_p->pkt_pri;
    /* Translate the API CSF type to the shared uKernel encoding. */
    switch(csf_p->type) {
    case BCM_OAM_CSF_LOS:
        msg.type = SHR_CSF_TYPE_LOS;
        break;
    case BCM_OAM_CSF_FDI:
        msg.type = SHR_CSF_TYPE_FDI;
        break;
    case BCM_OAM_CSF_RDI:
        msg.type = SHR_CSF_TYPE_RDI;
        break;
    case BCM_OAM_CSF_DCI:
        msg.type = SHR_CSF_TYPE_DCI;
        break;
    default:
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_PARAM;
    }
    /*
     * Set period
     */
    msg.period = csf_p->period;
    /* Pack control message data into DMA buffer */
    buffer = oc->dma_buffer;
    buffer_ptr = bhh_sdk_msg_ctrl_csf_add_pack(buffer, &msg);
    buffer_len = buffer_ptr - buffer;
    /* Send BHH Session Update message to uC */
    rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                           MOS_MSG_SUBCLASS_BHH_CSF_ADD,
                                           buffer_len, 0,
                                           MOS_MSG_SUBCLASS_BHH_CSF_ADD_REPLY,
                                           &reply_len);
    /* A successful add is expected to carry an empty reply payload. */
    if(BCM_SUCCESS(rv) && (reply_len != 0)) {
        rv = BCM_E_INTERNAL;
    }
    _BCM_OAM_UNLOCK(oc);
    return rv;
}
/*
 * Function:
 *      bcm_kt2_oam_csf_get
 * Purpose:
 *      Get CSF (Client Signal Fail) info for a BHH endpoint from the
 *      uKernel.
 * Parameters:
 *      unit  - (IN) Unit number.
 *      csf_p - (OUT) CSF object; csf_p->id selects the endpoint.
 * Returns:
 *      BCM_E_XXX
 * Notes:
 */
int bcm_kt2_oam_csf_get(int unit, bcm_oam_csf_t *csf_p)
{
    _bcm_oam_control_t *oc;
    bhh_sdk_msg_ctrl_csf_get_t msg;
    _bcm_oam_hash_data_t *h_data_p;
    uint8 *buffer, *buffer_ptr;
    uint16 buffer_len, reply_len;
    int sess_id = 0;
    int rv = BCM_E_NONE;
    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_LOCK(oc);
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }
    /* NOTE(review): if BCM_OAM_BHH_VALIDATE_EP returns on failure, it
     * does so while oc is still locked - confirm the macro's semantics. */
    BCM_OAM_BHH_VALIDATE_EP(csf_p->id);
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(csf_p->id);
    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Endpoint EP=%d %s.\n"),
                   unit, csf_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[csf_p->id];
    /* Only BHH endpoints support CSF. */
    if (!BHH_EP_TYPE(h_data_p)) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    sal_memset(&msg, 0, sizeof(msg));
    /* Send BHH Session Update message to uC */
    rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                           MOS_MSG_SUBCLASS_BHH_CSF_GET,
                                           sess_id, 0,
                                           MOS_MSG_SUBCLASS_BHH_CSF_GET_REPLY,
                                           &reply_len);
    if(BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Endpoint EP=%d %s.\n"),
                   unit, csf_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    /* Unpack the CSF reply from the DMA buffer and verify its length. */
    buffer = oc->dma_buffer;
    buffer_ptr = bhh_sdk_msg_ctrl_csf_get_unpack(buffer, &msg);
    buffer_len = buffer_ptr - buffer;
    if (reply_len != buffer_len) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INTERNAL;
    }
    /* Translate the shared uKernel CSF type back to the API encoding. */
    switch(msg.type) {
    case SHR_CSF_TYPE_LOS:
        csf_p->type = BCM_OAM_CSF_LOS;
        break;
    case SHR_CSF_TYPE_FDI:
        csf_p->type = BCM_OAM_CSF_FDI;
        break;
    case SHR_CSF_TYPE_RDI:
        csf_p->type = BCM_OAM_CSF_RDI;
        break;
    case SHR_CSF_TYPE_DCI:
        csf_p->type = BCM_OAM_CSF_DCI;
        break;
    default:
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INTERNAL;
    }
    csf_p->period = msg.period;
    csf_p->flags = msg.flags;
    csf_p->pkt_pri = msg.pkt_pri;
    csf_p->int_pri = msg.int_pri;
    _BCM_OAM_UNLOCK(oc);
    return BCM_E_NONE;
}
/*
 * Function:
 *      bcm_kt2_oam_csf_delete
 * Purpose:
 *      Stop CSF PDU transmission on a BHH endpoint.
 * Parameters:
 *      unit  - (IN) Unit number.
 *      csf_p - (IN) CSF object; csf_p->id selects the endpoint.
 * Returns:
 *      BCM_E_XXX
 * Notes:
 */
int bcm_kt2_oam_csf_delete(int unit, bcm_oam_csf_t *csf_p)
{
    _bcm_oam_control_t *oc;
    _bcm_oam_hash_data_t *h_data_p;
    uint16 reply_len;
    int sess_id;
    int rv = BCM_E_NONE;
    /* Get OAM Control Structure. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_LOCK(oc);
    if (oc->ukernel_not_ready) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_INIT;
    }
    /* NOTE(review): if BCM_OAM_BHH_VALIDATE_EP returns on failure, it
     * does so while oc is still locked - confirm the macro's semantics. */
    BCM_OAM_BHH_VALIDATE_EP(csf_p->id);
    sess_id = BCM_OAM_BHH_GET_UKERNEL_EP(csf_p->id);
    /* Check endpoint status. */
    rv = shr_idxres_list_elem_state(oc->bhh_pool, sess_id);
    if ((BCM_E_EXISTS != rv)) {
        /* Endpoint not in use. */
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: Endpoint EP=%d %s.\n"),
                   unit, csf_p->id, bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    h_data_p = &oc->oam_hash_data[csf_p->id];
    /* Only BHH endpoints support CSF. */
    if (!BHH_EP_TYPE(h_data_p)) {
        _BCM_OAM_UNLOCK(oc);
        return BCM_E_UNAVAIL;
    }
    /* Send BHH Session Update message to uC */
    rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                           MOS_MSG_SUBCLASS_BHH_CSF_DELETE,
                                           sess_id, 0,
                                           MOS_MSG_SUBCLASS_BHH_CSF_DELETE_REPLY,
                                           &reply_len);
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
#endif /* defined(INCLUDE_BHH) */
/*
* Function:
* _bcm_kt2_bhh_endpoint_faults_multi_get
* Purpose:
* Function to get faults for multiple BHH endpoints
* in a single call.
* Parameters:
* unit (IN) BCM device number
* flags (IN) Flags is kept unused as of now.
* This is for any future enhancement
* max_endpoints (IN) Number of max endpoint for the protocol
* faults (OUT) Pointer to faults for all endpoints
* endpoint_count (OUT) Pointer to Number of valid endpoints with faults,
* filled by get API.
*
* Returns:
* BCM_E_NONE No error
* BCM_E_XXXX Error
*/
#if defined (INCLUDE_BHH)
/*
 * Function:
 *      _bcm_kt2_bhh_endpoint_faults_multi_get
 * Purpose:
 *      Get faults for all BHH endpoints in a single uKernel round trip.
 *      The uKernel reply carries exactly one 32-bit fault word per BHH
 *      session (validated against reply_len below).
 * Parameters:
 *      unit           (IN)  BCM device number
 *      flags          (IN)  Unused; reserved for future enhancement
 *      max_endpoints  (IN)  Must equal the number of BHH sessions
 *      faults         (OUT) Array of per-endpoint fault entries
 *      endpoint_count (OUT) Number of valid entries written to faults
 * Returns:
 *      BCM_E_NONE  No error
 *      BCM_E_XXXX  Error
 */
int _bcm_kt2_bhh_endpoint_faults_multi_get(
    int unit,
    uint32 flags,
    uint32 max_endpoints,
    bcm_oam_endpoint_fault_t *faults,
    uint32 *endpoint_count)
{
    int rv = BCM_E_NONE;
    _bcm_oam_control_t *oc = NULL;
    bcm_oam_endpoint_fault_t *faults_temp = faults;
    uint8 *buffer;
    uint16 reply_len;
    uint32 sess_count;
    uint32 ep_id;
    uint32 ep_count = 0;
    uint32 faults_bitmap;
    _bcm_oam_hash_data_t *h_data_p; /* Pointer to endpoint hash data. */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_LOCK(oc);
    /* The caller must size the faults array for every BHH session. */
    if(max_endpoints != oc->bhh_endpoint_count) {
        rv = BCM_E_PARAM;
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    sal_memset(faults, 0, ((sizeof(bcm_oam_endpoint_fault_t))*max_endpoints));
    rv = _bcm_kt2_oam_bhh_msg_send_receive(unit,
                                           MOS_MSG_SUBCLASS_BHH_FAULTS_MULTI_GET,
                                           0, 0,
                                           MOS_MSG_SUBCLASS_BHH_FAULTS_MULTI_GET_REPLY,
                                           &reply_len);
    if (BCM_FAILURE(rv)) {
        LOG_ERROR(BSL_LS_BCM_OAM,
                  (BSL_META_U(unit,
                              "OAM(unit %d) Error: BHH uKernel msg failed for"
                              "faults multi get %s.\n"), unit,
                   bcm_errmsg(rv)));
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    /* Reply must contain one fault word for every BHH session. */
    if(reply_len != (oc->bhh_endpoint_count * sizeof(uint32))) {
        rv = BCM_E_INTERNAL;
        _BCM_OAM_UNLOCK(oc);
        return (rv);
    }
    /* Unpack control message data from the DMA buffer. */
    buffer = oc->dma_buffer;
    for(sess_count = 0;sess_count < oc->bhh_endpoint_count;sess_count++)
    {
        /* Always consume this session's fault word first.  Previously the
         * word was unpacked only for in-use sessions, so any skipped
         * session left the buffer cursor misaligned and every subsequent
         * endpoint read the wrong word. */
        _SHR_UNPACK_U32(buffer, faults_bitmap);
        ep_id = BCM_OAM_BHH_GET_SDK_EP(sess_count);
        /* Validate endpoint index value. */
        BCM_OAM_BHH_VALIDATE_EP(ep_id);
        h_data_p = &oc->oam_hash_data[ep_id];
        /* If endpoint not in use, or the session is running in the host
         * CPU, skip it (its fault word has already been consumed). */
        if(!(h_data_p->in_use) || (h_data_p->flags2 & BCM_OAM_ENDPOINT_FLAGS2_REDIRECT_TO_CPU)) {
            continue;
        }
        faults_temp->endpoint_id = ep_id;
        /* Translate uKernel event bits to API fault flags. */
        if(faults_bitmap & BHH_BTE_EVENT_CCM_TIMEOUT){
            faults_temp->faults |= BCM_OAM_BHH_FAULT_CCM_TIMEOUT;
        }
        if(faults_bitmap & BHH_BTE_EVENT_CCM_RDI){
            faults_temp->faults |= BCM_OAM_BHH_FAULT_CCM_RDI;
        }
        if(faults_bitmap & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_LEVEL){
            faults_temp->faults |=
                BCM_OAM_BHH_FAULT_CCM_UNKNOWN_MEG_LEVEL;
        }
        if(faults_bitmap & BHH_BTE_EVENT_CCM_UNKNOWN_MEG_ID){
            faults_temp->faults |=
                BCM_OAM_BHH_FAULT_CCM_UNKNOWN_MEG_ID;
        }
        if(faults_bitmap & BHH_BTE_EVENT_CCM_UNKNOWN_MEP_ID){
            faults_temp->faults |=
                BCM_OAM_BHH_FAULT_CCM_UNKNOWN_MEP_ID;
        }
        if(faults_bitmap & BHH_BTE_EVENT_CCM_UNKNOWN_PERIOD){
            faults_temp->faults |=
                BCM_OAM_BHH_FAULT_CCM_UNKNOWN_PERIOD;
        }
        if(faults_bitmap & BHH_BTE_EVENT_CCM_UNKNOWN_PRIORITY){
            faults_temp->faults |=
                BCM_OAM_BHH_FAULT_CCM_UNKNOWN_PRIORITY;
        }
        ep_count++;
        faults_temp++;
    }
    *endpoint_count = ep_count;
    _BCM_OAM_UNLOCK(oc);
    return (rv);
}
#endif
/* Function:
 *      bcm_kt2_oam_endpoint_faults_multi_get
 * Purpose:
 *      Get faults for multiple endpoints of a given protocol in a
 *      single call, dispatching to the protocol-specific helper.
 * Parameters:
 *      unit              (IN)  BCM device number
 *      flags             (IN)  Unused; reserved for future enhancement
 *      endpoint_protocol (IN)  Protocol type of the endpoints to retrieve faults
 *      max_endpoints     (IN)  Number of max endpoint for the protocol
 *      faults            (OUT) Pointer to faults for all endpoints
 *      endpoint_count    (OUT) Number of valid endpoints with faults,
 *                              filled by the get API
 * Returns:
 *      BCM_E_NONE  No error
 *      BCM_E_XXXX  Error
 */
int bcm_kt2_oam_endpoint_faults_multi_get(
    int unit,
    uint32 flags,
    bcm_oam_protocol_type_t endpoint_protocol,
    uint32 max_endpoints,
    bcm_oam_endpoint_fault_t *faults,
    uint32 *endpoint_count)
{
    int result = BCM_E_NONE;

    /* A destination buffer for the fault entries is mandatory. */
    if (!faults) {
        return (BCM_E_PARAM);
    }

    switch (endpoint_protocol) {
#if defined(INCLUDE_BHH)
    case bcmOamProtocolBhh:
        *endpoint_count = 0;
        result = _bcm_kt2_bhh_endpoint_faults_multi_get(unit,
                                                        flags,
                                                        max_endpoints,
                                                        faults,
                                                        endpoint_count);
        break;
#endif
    default:
        /* No other protocol supports the multi-get today. */
        result = BCM_E_UNAVAIL;
        break;
    }
    return result;
}
/* This function assumes passed endpoint is a PORT MEP configured over trunk.
 *
 * Re-resolves the endpoint's gports after a trunk membership change and
 * reprograms the per-port MDL bitmaps / STM entries for each port in
 * port_arr.  'add' selects whether the ports are being added (1) to or
 * removed (0) from the trunk.
 */
int _bcm_kt2_oam_ep_trunk_ports_add_del_internal(int unit, _bcm_oam_hash_data_t *h_data_p,
                                                 int max_ports, bcm_gport_t *port_arr, uint8 add)
{
    _bcm_oam_hash_data_t old_hash_data;
    bcm_oam_endpoint_info_t ep_info;
    uint32 sglp = 0, dglp = 0;
    bcm_port_t src_pp_port = 0, dst_pp_port = 0, pp_port = 0;
    uint32 svp = 0;
    int vp_valid = 0;
    bcm_trunk_t trunk_id = BCM_TRUNK_INVALID;
    bcm_gport_t tx_gport;
    int i, module_id = 0;
    uint8 active_mdl = 0;
    bcm_oam_endpoint_info_t_init(&ep_info);
    /* **********************************************
     * SW configurations
     */
    /* Keep a copy of old hash data for comparisons.
     */
    sal_memcpy(&old_hash_data, h_data_p, sizeof(_bcm_oam_hash_data_t));
    /* Call endpoint get to get the endpoint info */
    BCM_IF_ERROR_RETURN(bcm_kt2_oam_endpoint_get(unit, h_data_p->ep_id, &ep_info));
    /* First call endpoint gport resolve to correctly update new
     * sglp, dglp, src_pp_port, dst_pp_port variables.
     */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_endpoint_gport_resolve(unit, &ep_info, &sglp, &dglp,
                                                            &src_pp_port, &dst_pp_port, &svp,
                                                            &trunk_id, &vp_valid, &tx_gport));
    /* Update the h_data_p. */
    h_data_p->sglp = sglp;
    h_data_p->dglp = dglp;
    h_data_p->src_pp_port = src_pp_port;
    h_data_p->dst_pp_port = dst_pp_port;
    h_data_p->resolved_trunk_gport = tx_gport;
    /* ************************************************
     * HW configurations
     */
    for (i = 0; i < max_ports; i++) {
        BCM_IF_ERROR_RETURN
            (_bcm_kt2_pp_port_to_modport_get(unit, port_arr[i],
                                             &module_id, &pp_port));
        if (add) {
            /* _bcm_kt2_oam_trunk_port_mdl_config needs to be called so that
             * new trunk members are programmed with MDL_BITMAP correctly.
             */
            BCM_IF_ERROR_RETURN(_bcm_kt2_oam_stm_table_update(unit, module_id, pp_port, h_data_p));
            BCM_IF_ERROR_RETURN(_bcm_kt2_oam_port_mdl_update(unit, pp_port, 0, h_data_p, &active_mdl));
        } else {
            /* Delete the old ports' MDL BITMAP using _bcm_kt2_oam_port_mdl_update. And
             * clear STM table if this is the last endpoint on this port.
             */
            BCM_IF_ERROR_RETURN(_bcm_kt2_oam_port_mdl_update(unit, pp_port, 1, h_data_p, &active_mdl));
            if (active_mdl == 0) {
                /* Setting index = -1 so that rx indexes are not freed up, since there are
                 * other ports on the trunk on which this endpoint is placed.
                 */
                BCM_IF_ERROR_RETURN(_bcm_kt2_oam_stm_table_clear(unit, module_id, -1,
                                                                 pp_port,
                                                                 h_data_p));
            }
        }
    }
    if (h_data_p->dst_pp_port != old_hash_data.dst_pp_port) {
        /* If dst_pp_port has changed, delete the old EGR_PORT MDL_PASSIVE BITMAP and
         * program the new port's EGR_PORT MDL_PASSIVE_BITMAP.
         */
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_port_mdl_passive_update(unit,1, &old_hash_data, 0));
        BCM_IF_ERROR_RETURN(_bcm_kt2_oam_port_mdl_passive_update(unit, 0 /* not reset */,
                                                                 h_data_p, active_mdl));
        if (ep_info.ccm_period != BCM_OAM_ENDPOINT_CCM_PERIOD_DISABLED) {
            /* If dst_pp_port has changed, modify the LMEP table to reflect new
             * dst pp port and corresponding queue number.
             */
            BCM_IF_ERROR_RETURN(_bcm_kt2_oam_lmep_down_modify_new_dest_port(unit, h_data_p));
        }
    }
    return BCM_E_NONE;
}
/*
 * Walk every in-use ethernet PORT MEP configured on the given trunk
 * gport and apply the trunk-membership change (add=1 to add ports,
 * add=0 to delete them) via the per-endpoint helper above.
 */
int _bcm_kt2_oam_trunk_ports_add_del_internal(int unit, bcm_gport_t trunk_gport,
                                              int max_ports, bcm_gport_t *port_arr, uint8 add)
{
    int rv = BCM_E_NONE;
    bcm_trunk_t trunk_id = BCM_TRUNK_INVALID;
    _bcm_oam_control_t *oc = NULL;
    _bcm_oam_hash_data_t *h_data_p = NULL;
    int i;
    /* Validate port array and max ports inputs */
    if (port_arr == NULL || max_ports == 0) {
        return BCM_E_PARAM;
    }
    /* Check the gport is TRUNK */
    if (!(BCM_GPORT_IS_TRUNK(trunk_gport))) {
        return BCM_E_PARAM;
    }
    /* Get Trunk ID value from Gport */
    trunk_id = BCM_GPORT_TRUNK_GET(trunk_gport);
    if (trunk_id == BCM_TRUNK_INVALID) {
        return BCM_E_PARAM;
    }
    /* Get OAM control structure */
    BCM_IF_ERROR_RETURN(_bcm_kt2_oam_control_get(unit, &oc));
    _BCM_OAM_LOCK(oc);
    /* Scan the whole endpoint table; several MEPs may live on the trunk. */
    for (i = 0; i < oc->ep_count; i++) {
        h_data_p = &oc->oam_hash_data[i];
        /* This API is supported only for PORT MEPs on Trunks */
        if ((ETH_TYPE(h_data_p->type)) &&
            (h_data_p->in_use) &&
            (h_data_p->oam_domain == _BCM_OAM_DOMAIN_PORT) &&
            (h_data_p->gport == trunk_gport) &&
            (h_data_p->trunk_id == trunk_id)) {
            rv = _bcm_kt2_oam_ep_trunk_ports_add_del_internal(unit, h_data_p, max_ports, port_arr, add);
            if (BCM_FAILURE(rv)) {
                _BCM_OAM_UNLOCK(oc);
                return rv;
            }
        }
    }
    _BCM_OAM_UNLOCK(oc);
    return rv;
}
/* Add member ports to every port MEP residing on the given trunk. */
int bcm_kt2_oam_trunk_ports_add(int unit, bcm_gport_t trunk_gport,
                                int max_ports, bcm_gport_t *port_arr)
{
    const uint8 adding = 1;
    return _bcm_kt2_oam_trunk_ports_add_del_internal(unit, trunk_gport,
                                                     max_ports, port_arr,
                                                     adding);
}
/* Remove member ports from every port MEP residing on the given trunk. */
int bcm_kt2_oam_trunk_ports_delete(int unit, bcm_gport_t trunk_gport,
                                   int max_ports, bcm_gport_t *port_arr)
{
    const uint8 adding = 0;
    return _bcm_kt2_oam_trunk_ports_add_del_internal(unit, trunk_gport,
                                                     max_ports, port_arr,
                                                     adding);
}
#endif /* BCM_KATANA2_SUPPORT */
| 37.513456 | 134 | 0.543046 | [
"object"
] |
d4c76af7df2abac260f336a2a7c00e7ce30566ed | 5,045 | h | C | os/sdl/gl_config.h | tj90241/cen64 | 73ad38ca3fe1fcf46362158ac1c088cda52f0cf0 | [
"BSD-3-Clause"
] | 329 | 2015-01-02T16:38:20.000Z | 2019-01-05T15:00:28.000Z | os/sdl/gl_config.h | tj90241/cen64 | 73ad38ca3fe1fcf46362158ac1c088cda52f0cf0 | [
"BSD-3-Clause"
] | 79 | 2015-06-12T07:21:05.000Z | 2018-12-16T18:58:12.000Z | os/sdl/gl_config.h | tj90241/cen64 | 73ad38ca3fe1fcf46362158ac1c088cda52f0cf0 | [
"BSD-3-Clause"
] | 62 | 2015-01-08T00:03:40.000Z | 2019-01-02T17:43:07.000Z | //
// os/x11/gl_config.h: X11/OpenGL framebuffer configuration.
//
// CEN64: Cycle-Accurate Nintendo 64 Emulator.
// Copyright (C) 2015, Tyler J. Stachecki.
//
// This file is subject to the terms and conditions defined in
// 'LICENSE', which is part of this source code package.
//
#ifndef CEN64_OS_SDL_GL_CONFIG
#define CEN64_OS_SDL_GL_CONFIG
#include "gl_common.h"
#include "gl_display.h"
#include "gl_hints.h"
#include "gl_screen.h"
#include <stddef.h>
#include <SDL.h>
#define CEN64_GL_CONFIG_BAD (NULL)
/* Opaque per-configuration state.  The SDL backend exposes GL attributes
 * through global queries rather than per-config objects, so no fields are
 * needed; the type exists only so cen64_gl_config* is a distinct pointer
 * type shared with the other backends.
 * NOTE(review): an empty struct is a GNU extension, not ISO C - confirm
 * all supported compilers accept this. */
typedef struct {
}cen64_gl_config;
//
// Creates a matching cen64_gl_config from a cen64_gl_hints struct.
//
// On error, CEN64_GL_CONFIG_BAD is returned. On success, something
// other than CEN64_GL_CONFIG_BAD is returned, and matching is set
// to indicate the number of matches present in the returned array.
//
cen64_gl_config *cen64_gl_config_create(cen64_gl_display display,
cen64_gl_screen screen, const cen64_gl_hints *hints, int *matching);
// Releases resources allocated by cen64_gl_config_create.
static inline void cen64_gl_config_destroy(cen64_gl_config *config)
{
  /* Nothing to release: the SDL backend's config carries no resources. */
}
//
// Fetches an attribute from the cen64_gl_config object.
//
// Used by the cen64_gl_config_get_* accessors.
//
int cen64_gl_config_fetch_attribute(cen64_gl_display display,
cen64_gl_config *config, int what);
// Wrappers for querying for features/types.
// The SDL backend always reports RGBA (true-color) contexts, so the
// context type is a constant rather than a queried attribute.
static inline enum cen64_gl_context_type cen64_gl_config_get_context_type(
  cen64_gl_display display, cen64_gl_config *config)
{
  return CEN64_GL_CONTEXT_TYPE_RGBA;
}
// SDL renders into windows only, so the drawable type is a constant.
static inline enum cen64_gl_drawable_type cen64_gl_config_get_drawable_type(
  cen64_gl_display display, cen64_gl_config *config)
{
  return CEN64_GL_DRAWABLE_TYPE_WINDOW;
}
// Overlay/underlay layers are not distinguished by SDL; always the
// default layer.
static inline enum cen64_gl_layer_type cen64_gl_config_get_layer_type(
  cen64_gl_display display, cen64_gl_config *config)
{
  return CEN64_GL_LAYER_TYPE_DEFAULT;
}
// Returns nonzero when the configuration describes a double-buffered
// framebuffer (SDL reports the attribute as 0 or 1).
static inline int cen64_gl_config_is_double_buffered(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int attribute =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_DOUBLEBUFFER);
  return attribute == 1;
}
// SDL only hands back renderable configurations, so this is trivially
// true for this backend.
static inline int cen64_gl_config_is_renderable(
  cen64_gl_display display, cen64_gl_config *config)
{
  return true;
}
// Returns nonzero when stereo (left/right) buffers are available.
static inline int cen64_gl_config_is_stereoscopic(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int attribute =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_STEREO);
  return attribute == 1;
}
// Wrappers for querying for color depths.
// Total color buffer depth, in bits.
static inline int cen64_gl_config_get_color_depth(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_BUFFER_SIZE);
  return bits;
}
// Depth of the red channel, in bits.
static inline int cen64_gl_config_get_red_color_depth(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_RED_SIZE);
  return bits;
}
// Depth of the green channel, in bits.
static inline int cen64_gl_config_get_green_color_depth(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_GREEN_SIZE);
  return bits;
}
// Depth of the blue channel, in bits.
static inline int cen64_gl_config_get_blue_color_depth(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_BLUE_SIZE);
  return bits;
}
// Depth of the alpha channel, in bits.
static inline int cen64_gl_config_get_alpha_color_depth(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_ALPHA_SIZE);
  return bits;
}
// Wrappers for querying for buffer sizes, counts.
// Size of the depth (z) buffer, in bits.
static inline int cen64_gl_config_get_depth_buffer_count(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_DEPTH_SIZE);
  return bits;
}
// Auxiliary buffers are reported as zero for the SDL backend; the
// query was deliberately disabled (see the commented-out line below),
// presumably because SDL does not expose an aux-buffer attribute -
// TODO(review): confirm against the SDL version in use.
static inline int cen64_gl_config_get_num_auxiliary_buffers(
  cen64_gl_display display, cen64_gl_config *config)
{
  return 0;
  //cen64_gl_config_fetch_attribute(display, config, SDL_GL_AUX_BUFFERS);
}
// Size of the stencil buffer, in bits.
static inline int cen64_gl_config_get_stencil_buffer_size(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_STENCIL_SIZE);
  return bits;
}
// Wrappers for querying for accumulation buffer bits.
// Accumulation buffer red component size, in bits.
static inline int cen64_gl_config_get_red_accum_buffer_bits(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_ACCUM_RED_SIZE);
  return bits;
}
// Accumulation buffer blue component size, in bits.
static inline int cen64_gl_config_get_blue_accum_buffer_bits(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_ACCUM_BLUE_SIZE);
  return bits;
}
// Accumulation buffer green component size, in bits.
static inline int cen64_gl_config_get_green_accum_buffer_bits(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_ACCUM_GREEN_SIZE);
  return bits;
}
// Accumulation buffer alpha component size, in bits.
static inline int cen64_gl_config_get_alpha_accum_buffer_bits(
  cen64_gl_display display, cen64_gl_config *config)
{
  const int bits =
    cen64_gl_config_fetch_attribute(display, config, SDL_GL_ACCUM_ALPHA_SIZE);
  return bits;
}
#endif
| 31.335404 | 108 | 0.798216 | [
"object"
] |
d4d74d55eacc9ffa01189ea8137fa93364c3877d | 3,786 | h | C | usr/src/uts/common/sys/port.h | AsahiOS/gate | 283d47da4e17a5871d9d575e7ffb81e8f6c52e51 | [
"MIT"
] | null | null | null | usr/src/uts/common/sys/port.h | AsahiOS/gate | 283d47da4e17a5871d9d575e7ffb81e8f6c52e51 | [
"MIT"
] | null | null | null | usr/src/uts/common/sys/port.h | AsahiOS/gate | 283d47da4e17a5871d9d575e7ffb81e8f6c52e51 | [
"MIT"
] | 1 | 2020-12-30T00:04:16.000Z | 2020-12-30T00:04:16.000Z | /*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
*/
#ifndef _SYS_PORT_H
#define _SYS_PORT_H
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/types.h>
/* port sources */
#define PORT_SOURCE_AIO 1
#define PORT_SOURCE_TIMER 2
#define PORT_SOURCE_USER 3
#define PORT_SOURCE_FD 4
#define PORT_SOURCE_ALERT 5
#define PORT_SOURCE_MQ 6
#define PORT_SOURCE_FILE 7
/* Event record retrieved from an event port by port_get()/port_getn(). */
typedef struct port_event {
    int portev_events; /* event data is source specific */
    ushort_t portev_source; /* event source */
    ushort_t portev_pad; /* port internal use */
    uintptr_t portev_object; /* source specific object */
    void *portev_user; /* user cookie */
} port_event_t;
/* Notification registration: binds an asynchronous request to a port. */
typedef struct port_notify {
    int portnfy_port; /* bind request(s) to port */
    void *portnfy_user; /* user defined */
} port_notify_t;
/* Object watched by PORT_SOURCE_FILE; the cached timestamps let the
 * kernel detect changes relative to the watcher's last view. */
typedef struct file_obj {
    timestruc_t fo_atime; /* Access time from stat(2) */
    timestruc_t fo_mtime; /* Modification time from stat(2) */
    timestruc_t fo_ctime; /* Change time from stat(2) */
    uintptr_t fo_pad[3]; /* For future expansion */
    char *fo_name; /* Null terminated file name */
} file_obj_t;
#if defined(_SYSCALL32)
/* 32-bit layout of file_obj, used under _SYSCALL32 when a 64-bit
 * kernel services 32-bit callers (caddr32_t instead of pointers). */
typedef struct file_obj32 {
    timestruc32_t fo_atime; /* Access time got from stat(2) */
    timestruc32_t fo_mtime; /* Modification time from stat(2) */
    timestruc32_t fo_ctime; /* Change time from stat(2) */
    caddr32_t fo_pad[3]; /* For future expansion */
    caddr32_t fo_name; /* Null terminated file name */
} file_obj32_t;
/* 32-bit layout of port_event (see file_obj32 above). */
typedef struct port_event32 {
    int portev_events; /* events detected */
    ushort_t portev_source; /* user, timer, aio, etc */
    ushort_t portev_pad; /* reserved */
    caddr32_t portev_object; /* fd, timerid, ... */
    caddr32_t portev_user; /* user cookie */
} port_event32_t;
/* 32-bit layout of port_notify (see file_obj32 above). */
typedef struct port_notify32 {
    int portnfy_port; /* bind request(s) to port */
    caddr32_t portnfy_user; /* user defined */
} port_notify32_t;
#endif /* _SYSCALL32 */
/* port_alert() flags */
#define PORT_ALERT_SET 0x01
#define PORT_ALERT_UPDATE 0x02
#define PORT_ALERT_INVALID (PORT_ALERT_SET | PORT_ALERT_UPDATE)
/*
* PORT_SOURCE_FILE - events
*/
/*
* User watchable file events
*/
#define FILE_ACCESS 0x00000001
#define FILE_MODIFIED 0x00000002
#define FILE_ATTRIB 0x00000004
#define FILE_TRUNC 0x00100000
#define FILE_NOFOLLOW 0x10000000
/*
* exception file events
*/
/*
* The watched file..
*/
#define FILE_DELETE 0x00000010
#define FILE_RENAME_TO 0x00000020
#define FILE_RENAME_FROM 0x00000040
/*
* The filesystem on which the watched file resides got
* unmounted.
*/
#define UNMOUNTED 0x20000000
/*
* Some other file/filesystem got mounted over the
* watched file/directory.
*/
#define MOUNTEDOVER 0x40000000
/*
* Helper type
*/
#define FILE_EXCEPTION (UNMOUNTED|FILE_DELETE|FILE_RENAME_TO \
|FILE_RENAME_FROM|MOUNTEDOVER)
#ifdef __cplusplus
}
#endif
#endif /* _SYS_PORT_H */
| 25.931507 | 70 | 0.735869 | [
"object"
] |
d4df7a14fbb6c1243547037383952475fcf7aaa5 | 3,254 | h | C | include/tvm/function/abstract/Function.h | mcx/tvm | fab0eb3740be7e9156ca1018f7448ec07ac2a125 | [
"BSD-3-Clause"
] | 19 | 2018-12-20T06:44:53.000Z | 2022-02-01T20:16:45.000Z | include/tvm/function/abstract/Function.h | mcx/tvm | fab0eb3740be7e9156ca1018f7448ec07ac2a125 | [
"BSD-3-Clause"
] | 20 | 2019-12-05T13:41:35.000Z | 2022-03-14T16:44:54.000Z | include/tvm/function/abstract/Function.h | mcx/tvm | fab0eb3740be7e9156ca1018f7448ec07ac2a125 | [
"BSD-3-Clause"
] | 10 | 2019-07-22T09:54:15.000Z | 2022-02-05T02:27:05.000Z | /** Copyright 2017-2020 CNRS-AIST JRL and CNRS-UM LIRMM */
#pragma once
#include <tvm/internal/FirstOrderProvider.h>
#include <tvm/utils/internal/map.h>
#include <Eigen/Core>
#include <map>
namespace tvm
{
namespace function
{
namespace abstract
{
/** Base class defining the classical outputs for a function
*
* \dot
* digraph "update graph" {
* rankdir="LR";
* {
* rank = same; node [shape=hexagon];
* Value; Jacobian; Velocity;
* NormalAcceleration; JDot;
* }
* {
* rank = same; node [style=invis, label=""];
* outValue; outJacobian; outVelocity;
* outNormalAcceleration; outJDot;
* }
* Value -> outValue [label="value()"];
* Jacobian -> outJacobian [label="jacobian(x_i)"];
* Velocity -> outVelocity [label="velocity()"];
* NormalAcceleration -> outNormalAcceleration [label="normalAcceleration()"];
* JDot -> outJDot [label="JDot(x_i)"];
* }
* \enddot
*/
class TVM_DLLAPI Function : public tvm::internal::FirstOrderProvider
{
public:
SET_OUTPUTS(Function, Velocity, NormalAcceleration, JDot)
/** Note: by default, these methods return the cached value.
* However, they are virtual in case the user might want to bypass the cache.
* This would be typically the case if he/she wants to directly return the
* output of another method, e.g. return the jacobian of an other Function.
*/
virtual const Eigen::VectorXd & velocity() const;
virtual const Eigen::VectorXd & normalAcceleration() const;
virtual MatrixConstRef JDot(const Variable & x) const;
protected:
/** Accessor policy for JDot_: maps a variable's Range onto the
* corresponding block of columns of the cached matrix (middleCols). */
struct slice_jdot
{
using Type = MatrixRef;
using ConstType = MatrixConstRef;
static Type get(Eigen::MatrixXd & M, const Range & r) { return M.middleCols(r.start, r.dim); }
static ConstType get(const Eigen::MatrixXd & M, const Range & r) { return M.middleCols(r.start, r.dim); }
};
/** Constructor for a function with value in \f$ \mathbb{R}^m \f$.
*
* \param m the size of the function/constraint image space, i.e. the row
* size of the jacobians (or equivalently in this case the size of the
* output value).
*/
Function(int m = 0);
/** Constructor for a function with value in a specified space.
*
* \param image Description of the image space
*/
Function(Space image);
/** Resize all cache members corresponding to active output*/
void resizeCache() override;
void resizeVelocityCache();
void resizeNormalAccelerationCache();
void resizeJDotCache();
void addVariable_(VariablePtr v) override;
void removeVariable_(VariablePtr v) override;
// cache for the Velocity, NormalAcceleration and JDot outputs
Eigen::VectorXd velocity_;
Eigen::VectorXd normalAcceleration_;
utils::internal::MapWithVariableAsKey<Eigen::MatrixXd, slice_jdot> JDot_;
private:
// we retain the variables' derivatives shared_ptr to ensure the reference is never lost
std::vector<VariablePtr> variablesDot_;
};
/** Default implementations of the cached accessors: they simply expose the
 * corresponding cache member kept up to date by the update graph. */
inline const Eigen::VectorXd & Function::velocity() const
{
  return velocity_;
}

inline const Eigen::VectorXd & Function::normalAcceleration() const
{
  return normalAcceleration_;
}

inline MatrixConstRef Function::JDot(const Variable & x) const { return JDot_.at(&x, tvm::utils::internal::with_sub{}); }
} // namespace abstract
} // namespace function
} // namespace tvm
| 28.79646 | 109 | 0.700369 | [
"shape",
"vector"
] |
d4e31438c476f9b770da386d8e078091de8cbba4 | 1,790 | h | C | helper/quic-helper.h | jianwel/quic-ns3-module | f9d7b7f339803170d75f119d5dcf6d0d96c0b022 | [
"MIT"
] | 5 | 2018-07-12T06:46:46.000Z | 2021-11-01T00:53:42.000Z | helper/quic-helper.h | sarsanaee/quic-ns3-module | ee505c16ce6d6cdeeccf9d3c2820d49659f4d578 | [
"MIT"
] | null | null | null | helper/quic-helper.h | sarsanaee/quic-ns3-module | ee505c16ce6d6cdeeccf9d3c2820d49659f4d578 | [
"MIT"
] | 2 | 2018-02-27T17:33:38.000Z | 2019-01-17T12:59:06.000Z | #ifndef QUIC_HELPER_H
#define QUIC_HELPER_H
#include "ns3/node-container.h"
#include "ns3/packet.h"
#include "ns3/ptr.h"
#include "ns3/object-factory.h"
namespace ns3 {
class Node;
/**
* \ingroup quic
*
* \brief aggregate QUIC functionality to existing Nodes.
*
* This class aggregates an instance of a QUIC object based on the QUIC factory provided, by default, to each node.
* This class assumes the node already has an InternetStack installed.
*/
class QuicHelper
{
public:
  /**
   * Create a new QuicHelper.
   */
  QuicHelper(void);
  /**
   * Destroy the QuicHelper
   */
  virtual ~QuicHelper(void);
  /**
   * Aggregate implementations of the ns3::Quic classes onto the provided node.
   *
   * \param nodeName The name of the node on which to install the stack.
   */
  void Install (std::string nodeName) const;
  /**
   * Aggregate implementations of the ns3::Quic classes onto the provided node.
   *
   * \param node The node on which to install the stack.
   */
  void Install (Ptr<Node> node) const;
  /**
   * For each node in the input container, aggregate implementations of the
   * ns3::Quic classes.
   *
   * \param c NodeContainer that holds the set of nodes on which to install the
   * new stacks.
   */
  void Install (NodeContainer c) const;
  /**
   * Aggregate QUIC stack to all nodes in the simulation
   */
  void InstallAll (void) const;
  /**
   * \brief set the QUIC stack which will not need any other parameter.
   *
   * This function sets up the quic stack to the given TypeId.
   *
   * \param tid the type id, typically it is set to "ns3::QuicL4Protocol"
   */
  void SetQuic (std::string tid);
private:
  /**
   * \brief QUIC objects factory used by Install() to create the stack object.
   */
  ObjectFactory m_quicFactory;
};
} // namespace ns3
#endif /* QUIC_HELPER_H */
| 22.375 | 115 | 0.677654 | [
"object"
] |
d4eaa43ae30d239b808fb6844ee63d5105c9b893 | 16,471 | c | C | source/minsk/CodeAnalysis/Binding/Binder.c | Phytolizer/gc-c-minsk | b94f24b2c22143fd69537bbd19edd3b7d48c2949 | [
"MIT"
] | null | null | null | source/minsk/CodeAnalysis/Binding/Binder.c | Phytolizer/gc-c-minsk | b94f24b2c22143fd69537bbd19edd3b7d48c2949 | [
"MIT"
] | null | null | null | source/minsk/CodeAnalysis/Binding/Binder.c | Phytolizer/gc-c-minsk | b94f24b2c22143fd69537bbd19edd3b7d48c2949 | [
"MIT"
] | null | null | null | #include "minsk-private/CodeAnalysis/Binding/Binder.h"
#include <assert.h>
#include <stdio.h>
#include <IncludeMe.h>
#include <common/Object.h>
#include <minsk-private/CodeAnalysis/Binding/BoundAssignmentExpression.h>
#include <minsk-private/CodeAnalysis/Binding/BoundBinaryExpression.h>
#include <minsk-private/CodeAnalysis/Binding/BoundBinaryOperator.h>
#include <minsk-private/CodeAnalysis/Binding/BoundBlockStatement.h>
#include <minsk-private/CodeAnalysis/Binding/BoundExpression.h>
#include <minsk-private/CodeAnalysis/Binding/BoundExpressionStatement.h>
#include <minsk-private/CodeAnalysis/Binding/BoundForStatement.h>
#include <minsk-private/CodeAnalysis/Binding/BoundGlobalScope.h>
#include <minsk-private/CodeAnalysis/Binding/BoundIfStatement.h>
#include <minsk-private/CodeAnalysis/Binding/BoundLiteralExpression.h>
#include <minsk-private/CodeAnalysis/Binding/BoundScope.h>
#include <minsk-private/CodeAnalysis/Binding/BoundStatement.h>
#include <minsk-private/CodeAnalysis/Binding/BoundUnaryExpression.h>
#include <minsk-private/CodeAnalysis/Binding/BoundUnaryOperator.h>
#include <minsk-private/CodeAnalysis/Binding/BoundVariableDeclaration.h>
#include <minsk-private/CodeAnalysis/Binding/BoundVariableExpression.h>
#include <minsk-private/CodeAnalysis/Binding/BoundWhileStatement.h>
#include <minsk/CodeAnalysis/DiagnosticBag.h>
#include <minsk/CodeAnalysis/Syntax/AssignmentExpressionSyntax.h>
#include <minsk/CodeAnalysis/Syntax/BinaryExpressionSyntax.h>
#include <minsk/CodeAnalysis/Syntax/BlockStatementSyntax.h>
#include <minsk/CodeAnalysis/Syntax/ExpressionStatementSyntax.h>
#include <minsk/CodeAnalysis/Syntax/ForStatementSyntax.h>
#include <minsk/CodeAnalysis/Syntax/IfStatementSyntax.h>
#include <minsk/CodeAnalysis/Syntax/LiteralExpressionSyntax.h>
#include <minsk/CodeAnalysis/Syntax/NameExpressionSyntax.h>
#include <minsk/CodeAnalysis/Syntax/ParenthesizedExpressionSyntax.h>
#include <minsk/CodeAnalysis/Syntax/StatementSyntax.h>
#include <minsk/CodeAnalysis/Syntax/SyntaxKind.h>
#include <minsk/CodeAnalysis/Syntax/UnaryExpressionSyntax.h>
#include <minsk/CodeAnalysis/Syntax/VariableDeclarationSyntax.h>
#include <minsk/CodeAnalysis/Syntax/WhileStatementSyntax.h>
#include <minsk/CodeAnalysis/VariableStore.h>
static struct BoundStatement* bind_statement(struct Binder* binder, struct StatementSyntax* syntax);
static struct BoundStatement* bind_block_statement(struct Binder* binder, struct BlockStatementSyntax* syntax);
static struct BoundStatement* bind_expression_statement(
struct Binder* binder, struct ExpressionStatementSyntax* syntax);
static struct BoundStatement* bind_for_statement(struct Binder* binder, struct ForStatementSyntax* syntax);
static struct BoundStatement* bind_if_statement(struct Binder* binder, struct IfStatementSyntax* syntax);
static struct BoundStatement* bind_variable_declaration(
struct Binder* binder, struct VariableDeclarationSyntax* syntax);
static struct BoundStatement* bind_while_statement(struct Binder* binder, struct WhileStatementSyntax* syntax);
static struct BoundExpression* bind_expression(struct Binder* binder, struct ExpressionSyntax* syntax);
static struct BoundExpression* bind_expression_with_type(
struct Binder* binder, struct ExpressionSyntax* syntax, enum ObjectKind target_type);
static struct BoundExpression* bind_literal_expression(struct Binder* binder, struct LiteralExpressionSyntax* syntax);
static struct BoundExpression* bind_binary_expression(struct Binder* binder, struct BinaryExpressionSyntax* syntax);
static struct BoundExpression* bind_parenthesized_expression(
struct Binder* binder, struct ParenthesizedExpressionSyntax* syntax);
static struct BoundExpression* bind_unary_expression(struct Binder* binder, struct UnaryExpressionSyntax* syntax);
static struct BoundExpression* bind_name_expression(struct Binder* binder, struct NameExpressionSyntax* syntax);
static struct BoundExpression* bind_assignment_expression(
struct Binder* binder, struct AssignmentExpressionSyntax* syntax);
static struct BoundScope* create_parent_scopes(struct BoundGlobalScope* previous);
/*
 * Binds a full compilation unit, producing a global scope that carries the
 * bound statement, the declared variables and the accumulated diagnostics
 * (those inherited from `previous` submissions first, then the new ones).
 *
 * `previous` may be NULL for the first submission.
 */
struct BoundGlobalScope* bind_global_scope(struct BoundGlobalScope* previous, struct CompilationUnitSyntax* syntax)
{
  struct BoundScope* parent_scope = create_parent_scopes(previous);
  struct Binder* binder = binder_new(parent_scope);
  struct BoundStatement* statement = bind_statement(binder, syntax->statement);
  struct VariableSymbolList* variables = bound_scope_get_declared_variables(binder->scope);
  struct DiagnosticList* diagnostics = mc_malloc(sizeof(struct DiagnosticList));
  /* FIX: initialize the list before pushing into it, as every other
   * allocation site in this file does (see bind_block_statement and
   * create_parent_scopes). Relying on mc_malloc returning zeroed memory
   * is implicit at best. */
  LIST_INIT(diagnostics);
  if (previous)
  {
    /* Carry over diagnostics from earlier submissions first, so they
     * appear in chronological order. */
    for (long i = 0; i < previous->diagnostics->length; ++i)
    {
      LIST_PUSH(diagnostics, previous->diagnostics->data[i]);
    }
  }
  for (long i = 0; i < binder->diagnostics->diagnostics->length; ++i)
  {
    LIST_PUSH(diagnostics, binder->diagnostics->diagnostics->data[i]);
  }
  return bound_global_scope_new(NULL, diagnostics, variables, statement);
}
/* Allocates a fresh Binder whose scope chains up to `parent`
 * (which may be NULL for the outermost scope). */
struct Binder* binder_new(struct BoundScope* parent)
{
  struct Binder* self = mc_malloc(sizeof(struct Binder));
  self->scope = bound_scope_new(parent);
  self->diagnostics = diagnostic_bag_new();
  return self;
}
/* Dispatches on the concrete statement syntax kind and delegates to the
 * matching bind_* helper. All cases return; reaching the trailing assert
 * means an unknown/unhandled kind was added to the enum. */
static struct BoundStatement* bind_statement(struct Binder* binder, struct StatementSyntax* syntax)
{
switch (syntax->kind)
{
case STATEMENT_SYNTAX_KIND_BLOCK_STATEMENT_SYNTAX:
return bind_block_statement(binder, (struct BlockStatementSyntax*)syntax);
case STATEMENT_SYNTAX_KIND_EXPRESSION_STATEMENT_SYNTAX:
return bind_expression_statement(binder, (struct ExpressionStatementSyntax*)syntax);
case STATEMENT_SYNTAX_KIND_FOR_STATEMENT_SYNTAX:
return bind_for_statement(binder, (struct ForStatementSyntax*)syntax);
case STATEMENT_SYNTAX_KIND_IF_STATEMENT_SYNTAX:
return bind_if_statement(binder, (struct IfStatementSyntax*)syntax);
case STATEMENT_SYNTAX_KIND_VARIABLE_DECLARATION_SYNTAX:
return bind_variable_declaration(binder, (struct VariableDeclarationSyntax*)syntax);
case STATEMENT_SYNTAX_KIND_WHILE_STATEMENT_SYNTAX:
return bind_while_statement(binder, (struct WhileStatementSyntax*)syntax);
}
assert(false && "Unexpected statement syntax");
}
/* Binds `{ ... }`: every child statement is bound inside a freshly pushed
 * child scope, which is popped again before returning. */
static struct BoundStatement* bind_block_statement(struct Binder* binder, struct BlockStatementSyntax* syntax)
{
  struct BoundStatementList* bound = mc_malloc(sizeof(struct BoundStatementList));
  LIST_INIT(bound);
  /* Enter a nested scope for the duration of the block. */
  binder->scope = bound_scope_new(binder->scope);
  for (long idx = 0; idx < syntax->statements->length; ++idx)
  {
    LIST_PUSH(bound, bind_statement(binder, syntax->statements->data[idx]));
  }
  /* Leave the block scope. */
  binder->scope = binder->scope->parent;
  return (struct BoundStatement*)bound_block_statement_new(bound);
}
/* An expression used in statement position; its value is discarded. */
static struct BoundStatement* bind_expression_statement(struct Binder* binder, struct ExpressionStatementSyntax* syntax)
{
  struct BoundExpression* bound = bind_expression(binder, syntax->expression);
  return (struct BoundStatement*)bound_expression_statement_new(bound);
}
/* Binds `for <id> = <lower> to <upper> { body }`.
 * Bounds are bound in the enclosing scope; the loop variable (read-only,
 * integer) is declared in a fresh child scope so it shadows outer names and
 * disappears after the loop. */
static struct BoundStatement* bind_for_statement(struct Binder* binder, struct ForStatementSyntax* syntax)
{
struct BoundExpression* lower_bound = bind_expression_with_type(binder, syntax->lower_bound, OBJECT_KIND_INTEGER);
struct BoundExpression* upper_bound = bind_expression_with_type(binder, syntax->upper_bound, OBJECT_KIND_INTEGER);
binder->scope = bound_scope_new(binder->scope);
sds name = syntax->identifier_token->text;
struct VariableSymbol* variable = variable_symbol_new(name, true, OBJECT_KIND_INTEGER);
if (!bound_scope_try_declare(binder->scope, variable))
{
diagnostic_bag_report_variable_already_declared(
binder->diagnostics, syntax_token_get_span(syntax->identifier_token), name);
}
struct BoundStatement* body = bind_statement(binder, syntax->body);
binder->scope = binder->scope->parent;
return (struct BoundStatement*)bound_for_statement_new(variable, lower_bound, upper_bound, body);
}
/* Binds `if <cond> <then> [else <else>]`; the condition must be boolean.
 * The else branch is NULL when no else clause is present. */
static struct BoundStatement* bind_if_statement(struct Binder* binder, struct IfStatementSyntax* syntax)
{
  struct BoundExpression* cond = bind_expression_with_type(binder, syntax->condition, OBJECT_KIND_BOOLEAN);
  struct BoundStatement* then_branch = bind_statement(binder, syntax->then_statement);
  struct BoundStatement* else_branch = NULL;
  if (syntax->else_clause)
  {
    else_branch = bind_statement(binder, syntax->else_clause->else_statement);
  }
  return (struct BoundStatement*)bound_if_statement_new(cond, then_branch, else_branch);
}
/* Binds `let|var <id> = <initializer>`. `let` produces a read-only symbol.
 * The variable's type is inferred from the bound initializer. Redeclaration
 * in the current scope is reported but binding still proceeds. */
static struct BoundStatement* bind_variable_declaration(struct Binder* binder, struct VariableDeclarationSyntax* syntax)
{
sds name = syntax->identifier_token->text;
bool is_read_only = syntax->keyword_token->kind == SYNTAX_KIND_LET_KEYWORD;
struct BoundExpression* initializer = bind_expression(binder, syntax->initializer);
struct VariableSymbol* variable = variable_symbol_new(name, is_read_only, bound_expression_get_type(initializer));
if (!bound_scope_try_declare(binder->scope, variable))
{
diagnostic_bag_report_variable_already_declared(
binder->diagnostics, syntax_token_get_span(syntax->identifier_token), name);
}
return (struct BoundStatement*)bound_variable_declaration_new(variable, initializer);
}
/* Binds `while <cond> { body }`; the condition must be boolean. */
static struct BoundStatement* bind_while_statement(struct Binder* binder, struct WhileStatementSyntax* syntax)
{
  /* Bind the condition before the body so diagnostics follow source order. */
  struct BoundExpression* cond = bind_expression_with_type(binder, syntax->condition, OBJECT_KIND_BOOLEAN);
  struct BoundStatement* loop_body = bind_statement(binder, syntax->body);
  return (struct BoundStatement*)bound_while_statement_new(cond, loop_body);
}
/* Dispatches on the concrete expression syntax kind and delegates to the
 * matching bind_* helper. All cases return; the trailing assert fires only
 * for an unknown kind. */
static struct BoundExpression* bind_expression(struct Binder* binder, struct ExpressionSyntax* syntax)
{
switch (syntax->kind)
{
case EXPRESSION_SYNTAX_KIND_LITERAL_EXPRESSION_SYNTAX:
return bind_literal_expression(binder, (struct LiteralExpressionSyntax*)syntax);
case EXPRESSION_SYNTAX_KIND_BINARY_EXPRESSION_SYNTAX:
return bind_binary_expression(binder, (struct BinaryExpressionSyntax*)syntax);
case EXPRESSION_SYNTAX_KIND_PARENTHESIZED_EXPRESSION_SYNTAX:
return bind_parenthesized_expression(binder, (struct ParenthesizedExpressionSyntax*)syntax);
case EXPRESSION_SYNTAX_KIND_UNARY_EXPRESSION_SYNTAX:
return bind_unary_expression(binder, (struct UnaryExpressionSyntax*)syntax);
case EXPRESSION_SYNTAX_KIND_NAME_EXPRESSION_SYNTAX:
return bind_name_expression(binder, (struct NameExpressionSyntax*)syntax);
case EXPRESSION_SYNTAX_KIND_ASSIGNMENT_EXPRESSION_SYNTAX:
return bind_assignment_expression(binder, (struct AssignmentExpressionSyntax*)syntax);
}
assert(false && "Unexpected expression syntax");
}
static struct BoundExpression* bind_expression_with_type(
struct Binder* binder, struct ExpressionSyntax* syntax, enum ObjectKind target_type)
{
struct BoundExpression* result = bind_expression(binder, syntax);
if (bound_expression_get_type(result) != target_type)
{
diagnostic_bag_report_cannot_convert(
binder->diagnostics,
syntax_node_get_span((struct SyntaxNode*)syntax),
bound_expression_get_type(result),
target_type);
}
return result;
}
/* Binds a literal token. A missing literal value (NULL object) defaults to
 * integer zero so later stages never see a null literal. */
static struct BoundExpression* bind_literal_expression(struct Binder* binder, struct LiteralExpressionSyntax* syntax)
{
  (void)binder; /* literals need no scope information */
  struct Object* value = syntax->value;
  struct Object* effective = (value->kind == OBJECT_KIND_NULL) ? OBJECT_INTEGER(0) : value;
  return (struct BoundExpression*)bound_literal_expression_new(effective);
}
/* Binds `<left> <op> <right>`. If no binary operator is defined for the
 * operand types, a diagnostic is reported and the bound left operand is
 * returned as a recovery value so binding can continue. */
static struct BoundExpression* bind_binary_expression(struct Binder* binder, struct BinaryExpressionSyntax* syntax)
{
struct BoundExpression* bound_left = bind_expression(binder, syntax->left);
struct BoundExpression* bound_right = bind_expression(binder, syntax->right);
struct BoundBinaryOperator* bound_operator = bind_binary_operator(
syntax->operator_token->kind, bound_expression_get_type(bound_left), bound_expression_get_type(bound_right));
if (!bound_operator)
{
diagnostic_bag_report_undefined_binary_operator(
binder->diagnostics,
syntax_token_get_span(syntax->operator_token),
syntax->operator_token->text,
bound_expression_get_type(bound_left),
bound_expression_get_type(bound_right));
return bound_left;
}
return (struct BoundExpression*)bound_binary_expression_new(bound_left, bound_operator, bound_right);
}
/* Parentheses only affect parsing; binding simply unwraps the inner
 * expression — no dedicated bound node is created. */
static struct BoundExpression* bind_parenthesized_expression(
struct Binder* binder, struct ParenthesizedExpressionSyntax* syntax)
{
return bind_expression(binder, syntax->expression);
}
/* Binds `<op> <operand>`. If no unary operator is defined for the operand
 * type, a diagnostic is reported and the bound operand is returned as a
 * recovery value so binding can continue. */
static struct BoundExpression* bind_unary_expression(struct Binder* binder, struct UnaryExpressionSyntax* syntax)
{
struct BoundExpression* bound_operand = bind_expression(binder, syntax->operand);
struct BoundUnaryOperator* bound_operator =
bind_unary_operator(syntax->operator_token->kind, bound_expression_get_type(bound_operand));
if (!bound_operator)
{
diagnostic_bag_report_undefined_unary_operator(
binder->diagnostics,
syntax_token_get_span(syntax->operator_token),
syntax->operator_token->text,
bound_expression_get_type(bound_operand));
return bound_operand;
}
return (struct BoundExpression*)bound_unary_expression_new(bound_operator, bound_operand);
}
/* Binds an identifier reference. An empty name (inserted token from parser
 * error recovery — the parser already reported it) silently becomes the
 * literal 0; an undefined name is reported and also becomes literal 0. */
static struct BoundExpression* bind_name_expression(struct Binder* binder, struct NameExpressionSyntax* syntax)
{
sds name = syntax->identifier_token->text;
if (sdslen(name) == 0)
{
return (struct BoundExpression*)bound_literal_expression_new(OBJECT_INTEGER(0));
}
struct VariableSymbol** variable = bound_scope_try_lookup(binder->scope, name);
if (!variable)
{
diagnostic_bag_report_undefined_name(
binder->diagnostics, syntax_token_get_span(syntax->identifier_token), name);
return (struct BoundExpression*)bound_literal_expression_new(OBJECT_INTEGER(0));
}
return (struct BoundExpression*)bound_variable_expression_new(*variable);
}
/* Binds `name = expr`, reporting undefined names, assignment to read-only
 * variables, and type mismatches. On an undefined name the bound right-hand
 * side is returned as-is as a recovery value. */
static struct BoundExpression* bind_assignment_expression(
    struct Binder* binder, struct AssignmentExpressionSyntax* syntax)
{
  sds name = syntax->identifier_token->text;
  struct BoundExpression* rhs = bind_expression(binder, syntax->expression);
  struct VariableSymbol** lookup = bound_scope_try_lookup(binder->scope, name);
  if (!lookup)
  {
    diagnostic_bag_report_undefined_name(
        binder->diagnostics, syntax_token_get_span(syntax->identifier_token), name);
    return rhs;
  }
  struct VariableSymbol* variable = *lookup;
  if (variable->is_read_only)
  {
    diagnostic_bag_report_cannot_assign(binder->diagnostics, syntax_token_get_span(syntax->equals_token), name);
  }
  if (bound_expression_get_type(rhs) != variable->type)
  {
    diagnostic_bag_report_cannot_convert(
        binder->diagnostics,
        syntax_node_get_span((struct SyntaxNode*)syntax->expression),
        bound_expression_get_type(rhs),
        variable->type);
  }
  return (struct BoundExpression*)bound_assignment_expression_new(variable, rhs);
}
/* Rebuilds the scope chain from the submission history: global scopes are
 * pushed onto a stack (newest first) and then popped so the OLDEST
 * submission becomes the outermost scope, each one re-declaring its
 * variables. Returns NULL when there is no previous submission. */
static struct BoundScope* create_parent_scopes(struct BoundGlobalScope* previous)
{
struct BoundGlobalScopeList* stack = mc_malloc(sizeof(struct BoundGlobalScopeList));
LIST_INIT(stack);
while (previous != NULL)
{
LIST_PUSH(stack, previous);
previous = previous->previous;
}
struct BoundScope* parent = NULL;
while (stack->length > 0)
{
previous = LIST_POP(stack);
struct BoundScope* scope = bound_scope_new(parent);
for (long i = 0; i < previous->variables->length; ++i)
{
bound_scope_try_declare(scope, previous->variables->data[i]);
}
parent = scope;
}
return parent;
}
| 46.00838 | 120 | 0.772631 | [
"object"
] |
d4eae96f9bd337e19800ef85d21cf2cb5e28d8d3 | 4,566 | h | C | Modules/Classification/CLCore/include/mitkAbstractClassifier.h | ZP-Hust/MITK | ca11353183c5ed4bc30f938eae8bde43a0689bf6 | [
"BSD-3-Clause"
] | null | null | null | Modules/Classification/CLCore/include/mitkAbstractClassifier.h | ZP-Hust/MITK | ca11353183c5ed4bc30f938eae8bde43a0689bf6 | [
"BSD-3-Clause"
] | null | null | null | Modules/Classification/CLCore/include/mitkAbstractClassifier.h | ZP-Hust/MITK | ca11353183c5ed4bc30f938eae8bde43a0689bf6 | [
"BSD-3-Clause"
] | 1 | 2019-01-09T08:20:18.000Z | 2019-01-09T08:20:18.000Z | /*===================================================================
The Medical Imaging Interaction Toolkit (MITK)
Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/
#ifndef mitkAbstractClassifier_h
#define mitkAbstractClassifier_h
#include <MitkCLCoreExports.h>
#include <mitkBaseData.h>
// Eigen
#include <Eigen/Dense>
// STD Includes
// MITK includes
#include <mitkConfigurationHolder.h>
namespace mitk
{
class MITKCLCORE_EXPORT AbstractClassifier : public BaseData
{
public:
mitkClassMacro(AbstractClassifier,BaseData)
///
/// @brief Train the classifier on the training set (X, Y).
/// @param X The training input samples. Matrix of shape = [n_samples, n_features]
/// @param Y The target values (class labels in classification, real numbers in regression). Matrix of shape = [n_samples, 1]
///
virtual void Train(const Eigen::MatrixXd &X, const Eigen::MatrixXi &Y) = 0;
///
/// @brief Predict class for X.
/// @param X The input samples.
/// @return The predicted classes. Y matrix of shape = [n_samples, 1]
///
virtual Eigen::MatrixXi Predict(const Eigen::MatrixXd &X) = 0;
///
/// @brief GetLabels
/// @return reference to the cached label matrix of shape = [n_samples , 1]
///
Eigen::MatrixXi & GetLabels()
{
return m_OutLabel;
}
protected:
// Cached labels produced by the last prediction.
Eigen::MatrixXi m_OutLabel;
public:
// * --------------- *
// PointWiseWeight
// * --------------- *
///
/// @brief SupportsPointWiseWeight
/// @return True if the classifier supports pointwise weighting else false
///
virtual bool SupportsPointWiseWeight() = 0;
///
/// @brief GetPointWiseWeight
/// @return reference to the current pointwise weight matrix W
///
virtual Eigen::MatrixXd & GetPointWiseWeight()
{
return m_PointWiseWeight;
}
///
/// @brief SetPointWiseWeight
/// @param W The pointwise weights. W matrix of shape = [n_samples, 1]
///
virtual void SetPointWiseWeight(const Eigen::MatrixXd& W)
{
this->m_PointWiseWeight = W;
}
///
/// @brief UsePointWiseWeight
/// @param value toggle weighting on/off
///
virtual void UsePointWiseWeight(bool value)
{
this->m_IsUsingPointWiseWeight = value;
}
///
/// @brief IsUsingPointWiseWeight
/// @return true if pointwise weighting is enabled.
///
virtual bool IsUsingPointWiseWeight()
{
return this->m_IsUsingPointWiseWeight;
}
protected:
// Pointwise sample weights and the flag controlling their use.
Eigen::MatrixXd m_PointWiseWeight;
bool m_IsUsingPointWiseWeight;
// * --------------- *
// PointWiseProbabilities
// * --------------- *
public:
///
/// @brief SupportsPointWiseProbability
/// @return True if the classifier supports pointwise class probability calculation else false
///
virtual bool SupportsPointWiseProbability() = 0;
///
/// @brief GetPointWiseProbabilities
/// @return reference to the cached per-sample class probability matrix
///
virtual Eigen::MatrixXd & GetPointWiseProbabilities()
{
return m_OutProbability;
}
///
/// \brief UsePointWiseProbability
/// \param value toggle probability computation on/off
///
virtual void UsePointWiseProbability(bool value)
{
m_IsUsingPointWiseProbability = value;
}
///
/// \brief IsUsingPointWiseProbability
/// \return true if pointwise probability computation is enabled.
///
virtual bool IsUsingPointWiseProbability()
{
return m_IsUsingPointWiseProbability;
}
protected:
// Cached per-sample probabilities and the flag controlling their use.
Eigen::MatrixXd m_OutProbability;
bool m_IsUsingPointWiseProbability;
private:
void MethodForBuild();
public:
void SetNthItems(const char *val, unsigned int idx);
std::string GetNthItems(unsigned int idx) const;
void SetItemList(std::vector<std::string>);
std::vector<std::string> GetItemList() const;
#ifndef DOXYGEN_SKIP
// Minimal BaseData region interface: this class carries no image region.
virtual void SetRequestedRegionToLargestPossibleRegion(){}
virtual bool RequestedRegionIsOutsideOfTheBufferedRegion(){return true;}
virtual bool VerifyRequestedRegion(){return false;}
virtual void SetRequestedRegion(const itk::DataObject* /*data*/){}
// Override: empty when uninitialized or when no time geometry is available.
virtual bool IsEmpty() const override
{
if(IsInitialized() == false)
return true;
const TimeGeometry* timeGeometry = const_cast<AbstractClassifier*>(this)->GetUpdatedTimeGeometry();
if(timeGeometry == nullptr)
return true;
return false;
}
#endif // Skip Doxygen
};
}
#endif //mitkAbstractClassifier_h
| 22.83 | 128 | 0.675646 | [
"shape",
"vector"
] |
d4f0e4df87619890ad6222b2990e45b46b38fd08 | 515 | h | C | projects/boules/Headers/Equation2.h | Kigs-framework/publicKigsProjects | 88506fdeca34aab2b5602e494ad602beb49d4ba0 | [
"MIT"
] | null | null | null | projects/boules/Headers/Equation2.h | Kigs-framework/publicKigsProjects | 88506fdeca34aab2b5602e494ad602beb49d4ba0 | [
"MIT"
] | null | null | null | projects/boules/Headers/Equation2.h | Kigs-framework/publicKigsProjects | 88506fdeca34aab2b5602e494ad602beb49d4ba0 | [
"MIT"
] | null | null | null | #pragma once
#include <vector>
// manage second degree equation
// Manages a second-degree (quadratic) equation a*x^2 + b*x + c.
class Equation2
{
protected:
// the three coefficients a, b, c
double mA;
double mB;
double mC;
// compute the discriminant of a*x^2 + b*x + c = Y (defined out of line)
double delta(double Y);
public:
// initialize the equation with its coefficients
Equation2(double a, double b, double c) : mA(a), mB(b), mC(c)
{
}
// replace all three coefficients
void Set(double a, double b, double c)
{
mA = a;
mB = b;
mC = c;
}
// solve a*x^2 + b*x + c = forY; returns 0, 1 or 2 real solutions
std::vector<double> Solve(double forY = 0.0);
};
}; | 15.147059 | 62 | 0.656311 | [
"vector"
] |
d4f4cc8f8fb045e8cd92065fe356b5fdd46541d9 | 13,002 | c | C | money.c | jpauli/money | c33d513962fcab714601d078a404ea7cf5412ebe | [
"MIT"
] | 2 | 2015-07-26T13:00:29.000Z | 2015-09-22T10:03:42.000Z | money.c | jpauli/money | c33d513962fcab714601d078a404ea7cf5412ebe | [
"MIT"
] | null | null | null | money.c | jpauli/money | c33d513962fcab714601d078a404ea7cf5412ebe | [
"MIT"
] | null | null | null | /*
+----------------------------------------------------------------------+
| PHP Version 5 |
+----------------------------------------------------------------------+
| Copyright (c) 1997-2014 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
| Author: Julien Pauli <jpauli@php.net> |
+----------------------------------------------------------------------+
*/
/* $Id$ */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "php.h"
#include "php_ini.h"
#include "ext/standard/info.h"
#include "ext/spl/spl_exceptions.h"
#include "ext/standard/php_math.h"
#include "Zend/zend_exceptions.h"
#include "php_money.h"
#if ZEND_MODULE_API_NO < 20131226
#error "Please compile for PHP>=5.6"
#endif
/* Class entries and object handlers registered at MINIT time. */
static zend_class_entry *money_ce, *currency_ce, *CurrencyMismatchException_ce;
static zend_object_handlers money_object_handlers;
/* Method table for the Money class. */
static const zend_function_entry money_functions[] = {
PHP_ME(Money, __construct, arginfo_money__construct, ZEND_ACC_PUBLIC)
PHP_ME(Money, getAmount, 0, ZEND_ACC_PUBLIC)
PHP_ME(Money, getCurrency, 0, ZEND_ACC_PUBLIC)
PHP_ME(Money, add, arginfo_money_add, ZEND_ACC_PUBLIC)
PHP_ME(Money, substract, arginfo_money_add, ZEND_ACC_PUBLIC)
PHP_ME(Money, negate, 0, ZEND_ACC_PUBLIC)
PHP_ME(Money, multiply, arginfo_money_multiply, ZEND_ACC_PUBLIC)
PHP_ME(Money, compareTo, arginfo_money_add, ZEND_ACC_PUBLIC)
PHP_ME(Money, equals, arginfo_money_add, ZEND_ACC_PUBLIC)
PHP_ME(Money, greaterThan, arginfo_money_add, ZEND_ACC_PUBLIC)
PHP_ME(Money, greaterThanOrEqual, arginfo_money_add, ZEND_ACC_PUBLIC)
PHP_ME(Money, lessThan, arginfo_money_add, ZEND_ACC_PUBLIC)
PHP_ME(Money, lessThanOrEqual, arginfo_money_add, ZEND_ACC_PUBLIC)
PHP_ME(Money, extractPercentage, arginfo_money_extractPercentage, ZEND_ACC_PUBLIC)
PHP_FE_END
};
/* Method table for the Currency class. */
static const zend_function_entry currency_functions[] = {
PHP_ME(Currency, __construct, arginfo_currency__construct, ZEND_ACC_PUBLIC)
PHP_FE_END
};
PHP_METHOD(Money, extractPercentage)
{
long percentage;
double result;
zval *new_money_percentage, *amount, *new_money_subtotal;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &percentage) == FAILURE) {
return;
}
ALLOC_INIT_ZVAL(new_money_percentage);ALLOC_INIT_ZVAL(new_money_subtotal);
amount = zend_read_property(money_ce, getThis(), MONEY_PROP_AMOUNT_WS, 0);
result = Z_LVAL_P(amount) / (100 + percentage) * percentage;
CREATE_NEW_MONEY_OBJ(new_money_percentage, zend_dval_to_lval(result), zend_read_property(money_ce, getThis(), MONEY_PROP_CURRENCY_WS, 0))
array_init(return_value);
add_assoc_zval(return_value, "percentage", new_money_percentage);
money_handler_do_operation(ZEND_SUB, new_money_subtotal, getThis(), new_money_percentage);
add_assoc_zval(return_value, "subtotal", new_money_subtotal);
}
/* Money::lessThan(Money $other): bool */
PHP_METHOD(Money, lessThan)
{
CHECK_MONEY_PARAMS
RETURN_BOOL(money_handler_compare_objects(getThis(), other_money) == -1);
}
/* Money::lessThanOrEqual(Money $other): bool */
PHP_METHOD(Money, lessThanOrEqual)
{
int result;
CHECK_MONEY_PARAMS
result = money_handler_compare_objects(getThis(), other_money);
RETURN_BOOL(result == -1 || result == 0);
}
/* Money::greaterThan(Money $other): bool */
PHP_METHOD(Money, greaterThan)
{
CHECK_MONEY_PARAMS
RETURN_BOOL(money_handler_compare_objects(getThis(), other_money) == 1);
}
/* Money::greaterThanOrEqual(Money $other): bool */
PHP_METHOD(Money, greaterThanOrEqual)
{
int result;
CHECK_MONEY_PARAMS
result = money_handler_compare_objects(getThis(), other_money);
RETURN_BOOL(result == 1 || result == 0);
}
/* Money::equals(Money $other): bool */
PHP_METHOD(Money, equals)
{
CHECK_MONEY_PARAMS
RETURN_BOOL(money_handler_compare_objects(getThis(), other_money) == 0);
}
/* Money::compareTo(Money $other): int — returns -1, 0 or 1 */
PHP_METHOD(Money, compareTo)
{
CHECK_MONEY_PARAMS
RETURN_LONG(money_handler_compare_objects(getThis(), other_money));
}
/* proto Money Money::negate()
 * Returns a new Money holding -amount, implemented as (0 - $this) through
 * the ZEND_SUB path of money_handler_do_operation (which special-cases a
 * long 0 as the left operand). */
PHP_METHOD(Money, negate)
{
    zval sub_zval;
    CHECK_NO_PARAMS
    /* stack zval holding the literal 0 used as the minuend */
    ZVAL_LONG(&sub_zval, 0);
    object_init_ex(return_value, money_ce);
    money_handler_do_operation(ZEND_SUB, return_value, &sub_zval, getThis());
}
/* proto Money Money::add(Money $other)
 * Returns a new Money holding $this + $other; the handler throws
 * CurrencyMismatchException on differing currencies and OverflowException
 * on integer overflow. */
PHP_METHOD(Money, add)
{
    CHECK_MONEY_PARAMS
    object_init_ex(return_value, money_ce);
    money_handler_do_operation(ZEND_ADD, return_value, getThis(), other_money);
}
/* proto Money Money::substract(Money $other)   [sic: published name]
 * Returns a new Money holding $this - $other; same exception behavior as
 * add(). */
PHP_METHOD(Money, substract)
{
    CHECK_MONEY_PARAMS
    object_init_ex(return_value, money_ce);
    money_handler_do_operation(ZEND_SUB, return_value, getThis(), other_money);
}
/* proto int Money::getAmount()
 * Returns the raw (smallest-unit) integer amount stored on the object. */
PHP_METHOD(Money, getAmount)
{
    CHECK_NO_PARAMS
    RETURN_ZVAL_FAST(zend_read_property(Z_OBJCE_P(getThis()), getThis(), MONEY_PROP_AMOUNT_WS, 0));
}
/* proto Currency Money::getCurrency()
 * Returns the Currency object stored on the "currency" property. */
PHP_METHOD(Money, getCurrency)
{
    CHECK_NO_PARAMS
    RETURN_ZVAL_FAST(zend_read_property(Z_OBJCE_P(getThis()), getThis(), MONEY_PROP_CURRENCY_WS, 0));
}
/* proto Money Money::multiply(float $factor [, int $roundingMode])
 * Returns a new Money holding round(amount * factor) using one of PHP's
 * PHP_ROUND_* modes (default PHP_ROUND_HALF_UP). */
PHP_METHOD(Money, multiply)
{
    double factor;
    /* volatile: force the rounded value out of any extended-precision
     * register before converting to long */
    volatile double dresult;
    long rounding_mode = PHP_ROUND_HALF_UP, lresult;
    if (zend_parse_parameters(ZEND_NUM_ARGS(), "d|l", &factor, &rounding_mode) == FAILURE) {
        return;
    }
    if (UNEXPECTED(rounding_mode < 0 || rounding_mode > PHP_ROUND_HALF_ODD)) {
        zend_throw_exception(spl_ce_InvalidArgumentException, "$roundingMode must be a valid rounding mode (PHP_ROUND_*)", 0);
        return;
    }
    dresult = _php_math_round(factor * Z_LVAL_P(zend_read_property(money_ce, getThis(), MONEY_PROP_AMOUNT_WS, 0)), 0, rounding_mode);
    lresult = zend_dval_to_lval(dresult);
    /* NOTE(review): this rejects any result with the sign bit set, so a
     * legitimately negative product (e.g. positive amount * negative factor)
     * also throws "Integer overflow" — confirm whether negative factors are
     * meant to be supported. */
    if (UNEXPECTED(lresult & LONG_SIGN_MASK)) {
        zend_throw_exception(spl_ce_OverflowException, "Integer overflow", 0);
        return;
    }
    CREATE_NEW_MONEY_OBJ(return_value, lresult, zend_read_property(money_ce, getThis(), MONEY_PROP_CURRENCY_WS, 0));
}
/* proto Money::__construct(int $amount, Currency|string $currency)
 * Stores the integer amount and a Currency object; a string currency code
 * is promoted to a fresh Currency instance. */
PHP_METHOD(Money, __construct)
{
    long amount;
    zval *currency, *currency_obj;
    if (zend_parse_parameters(ZEND_NUM_ARGS(), "lz", &amount, &currency) == FAILURE) {
        return;
    }
    switch(Z_TYPE_P(currency)) {
        case IS_OBJECT:
            /* BUG FIX: arguments were reversed.  instanceof_function() takes
             * (instance_ce, class_ce) — the same ordering used in
             * money_handler_do_operation(). */
            if (!instanceof_function(Z_OBJCE_P(currency), currency_ce)) {
                zend_throw_exception(spl_ce_InvalidArgumentException, "Invalid currency object", 0);
                return;
            }
            break;
        case IS_STRING:
            /* promote the code string to a Currency object */
            ALLOC_INIT_ZVAL(currency_obj);
            object_init_ex(currency_obj, currency_ce);
            zend_update_property_stringl(currency_ce, currency_obj, CURRENCY_PROP_CURRENCYCODE_WS, Z_STRVAL_P(currency), Z_STRLEN_P(currency));
            currency = currency_obj;
            /* drop our creation ref; zend_update_property() below addrefs */
            Z_DELREF_P(currency);
            break;
        default:
            zend_throw_exception(spl_ce_InvalidArgumentException, "Invalid currency value", 0);
            return;
    }
    zend_update_property_long(Z_OBJCE_P(getThis()), getThis(), MONEY_PROP_AMOUNT_WS, amount);
    zend_update_property(Z_OBJCE_P(getThis()), getThis(), MONEY_PROP_CURRENCY_WS, currency);
}
/* proto Currency::__construct(string $currencyCode)
 * Stores the given code on the private "currencyCode" property.
 * (Also repairs a text-encoding artifact: "&currency_code" had been mangled
 * into the HTML entity for the currency sign.) */
PHP_METHOD(Currency, __construct)
{
    char *currency_code;
    int currency_code_len;
    if (zend_parse_parameters(ZEND_NUM_ARGS(), "s", &currency_code, &currency_code_len) == FAILURE) {
        return;
    }
    zend_update_property_stringl(Z_OBJCE_P(getThis()), getThis(), CURRENCY_PROP_CURRENCYCODE_WS, currency_code, currency_code_len);
}
/* compare_objects handler for Money: returns -1/0/1 ordering by amount.
 * Currencies must match; on mismatch a CurrencyMismatchException is thrown
 * and the (nonzero) currency comparison result is still returned, so the
 * engine's boolean result is computed before the exception surfaces. */
static int money_handler_compare_objects(zval *object1, zval *object2)
{
    zval *amount1, *amount2, *currency1, *currency2;
    long compare_result;
    currency1 = zend_read_property(Z_OBJCE_P(object1), object1, MONEY_PROP_CURRENCY_WS, 0);
    currency2 = zend_read_property(Z_OBJCE_P(object2), object2, MONEY_PROP_CURRENCY_WS, 0);
    /* delegate currency equality to the Currency objects' own handler */
    if ((compare_result = Z_OBJ_HANDLER_P(currency1, compare_objects)(currency1, currency2)) != 0) {
        zend_throw_exception(CurrencyMismatchException_ce, "Currencies don't match", 0);
        return compare_result;
    }
    amount1 = zend_read_property(Z_OBJCE_P(object1), object1, MONEY_PROP_AMOUNT_WS, 0);
    amount2 = zend_read_property(Z_OBJCE_P(object2), object2, MONEY_PROP_AMOUNT_WS, 0);
    if (Z_LVAL_P(amount1) == Z_LVAL_P(amount2)) {
        return 0;
    } else if(Z_LVAL_P(amount1) < Z_LVAL_P(amount2)) {
        return -1;
    }
    return 1;
}
/* do_operation handler for Money: implements '+' and '-' between two Money
 * objects, plus the special "0 - Money" form used by Money::negate().
 * Returns SUCCESS when the pair/opcode was handled (including the mismatch
 * case, where *result is set to NULL and an exception is thrown), FAILURE
 * to let the engine fall back to its default behavior. */
static int money_handler_do_operation(zend_uchar opcode, zval *result, zval *op1, zval *op2)
{
    zval *currency1 = NULL, *currency2 = NULL, *currency_result = NULL;
    long amount1, amount2, amount_result;
    switch (TYPE_PAIR(Z_TYPE_P(op1), Z_TYPE_P(op2))) {
        case TYPE_PAIR(IS_OBJECT, IS_OBJECT):
            if (!instanceof_function(Z_OBJCE_P(op1), money_ce) || !instanceof_function(Z_OBJCE_P(op2), money_ce)) {
                return FAILURE;
            }
            currency1 = zend_read_property(Z_OBJCE_P(op1), op1, MONEY_PROP_CURRENCY_WS, 0);
            currency2 = zend_read_property(Z_OBJCE_P(op2), op2, MONEY_PROP_CURRENCY_WS, 0);
            if (Z_OBJ_HANDLER_P(currency1, compare_objects)(currency1, currency2) != 0) {
                zend_throw_exception(CurrencyMismatchException_ce, "Currencies don't match", 0);
                ZVAL_NULL(result);
                return SUCCESS;
            }
            amount1 = Z_LVAL_P(zend_read_property(Z_OBJCE_P(op1), op1, MONEY_PROP_AMOUNT_WS, 0));
            amount2 = Z_LVAL_P(zend_read_property(Z_OBJCE_P(op2), op2, MONEY_PROP_AMOUNT_WS, 0));
            currency_result = currency1;
            break;
        case TYPE_PAIR(IS_LONG, IS_OBJECT): /* negate */
            if (!instanceof_function(Z_OBJCE_P(op2), money_ce)) {
                return FAILURE;
            }
            if (Z_LVAL_P(op1) != 0) {
                return FAILURE; /* I said negate */
            }
            amount1 = 0;
            amount2 = Z_LVAL_P(zend_read_property(Z_OBJCE_P(op2), op2, MONEY_PROP_AMOUNT_WS, 0));
            currency_result = zend_read_property(Z_OBJCE_P(op2), op2, MONEY_PROP_CURRENCY_WS, 0);
            break;
        default :
            return FAILURE;
    }
    INIT_ZVAL(*result);
    switch (opcode) {
        case ZEND_ADD:
            /* overflow iff operands share a sign and the sum's sign differs */
            if (UNEXPECTED((amount1 & LONG_SIGN_MASK) == (amount2 & LONG_SIGN_MASK)
                  && (amount1 & LONG_SIGN_MASK) != ((amount1 + amount2) & LONG_SIGN_MASK))) {
                zend_throw_exception(spl_ce_OverflowException, "Integer overflow", 0);
                return FAILURE;
            }
            amount_result = amount1 + amount2;
            goto success;
            break;
        case ZEND_SUB:
        {
            amount_result = amount1 - amount2;
            /* BUG FIX: subtraction overflows iff the operands have different
             * signs and the result's sign differs from the minuend's
             * (mirrors the ZEND_ADD check).  The old `== LONG_MIN` test
             * missed most overflows and rejected the legitimate value
             * LONG_MIN. */
            if (UNEXPECTED(((amount1 ^ amount2) & LONG_SIGN_MASK)
                  && ((amount1 ^ amount_result) & LONG_SIGN_MASK))) {
                zend_throw_exception(spl_ce_OverflowException, "Integer negative overflow", 0);
                return FAILURE;
            }
            goto success;
        }
            break;
        default:
            return FAILURE;
            break;
    }
success:
    CREATE_NEW_MONEY_OBJ(result, amount_result, currency_result);
    return SUCCESS;
}
/* create_object hook for Money: allocates a plain zend_object but attaches
 * the custom handler table so compare/arithmetic operators route through
 * money_handler_*. */
static zend_object_value money_create_object(zend_class_entry *ce)
{
    zend_object_value retval;
    zend_object *obj;
    obj = emalloc(sizeof(zend_object));
    zend_object_std_init(obj, ce);
    object_properties_init(obj, ce);
    /* default dtor/free; no extension-specific storage to release */
    retval.handle = zend_objects_store_put(obj, (zend_objects_store_dtor_t) zend_objects_destroy_object, (zend_objects_free_object_storage_t)zend_objects_free_object_storage, NULL);
    retval.handlers = &money_object_handlers;
    return retval;
}
/* Module init: registers Money (with custom object handlers), the final
 * Currency class, and CurrencyMismatchException (extends
 * InvalidArgumentException).
 * (Also repairs a text-encoding artifact: "&currency_tmp" had been mangled
 * into the HTML entity for the currency sign.)
 * NOTE(review): the "currency" property is declared with a long default of
 * 0 although it always holds a Currency object at runtime — confirm this is
 * intentional. */
PHP_MINIT_FUNCTION(money)
{
    zend_class_entry money_tmp, currency_tmp, CurrencyMismatchException_tmp;
    INIT_CLASS_ENTRY(money_tmp, "Money", money_functions);
    money_ce = zend_register_internal_class(&money_tmp);
    money_ce->create_object = money_create_object;
    zend_declare_property_long(money_ce, MONEY_PROP_AMOUNT, sizeof(MONEY_PROP_AMOUNT) -1, 0, ZEND_ACC_PRIVATE);
    zend_declare_property_long(money_ce, MONEY_PROP_CURRENCY, sizeof(MONEY_PROP_CURRENCY) -1, 0, ZEND_ACC_PRIVATE);
    /* start from the standard handlers, override operators/comparison */
    memcpy(&money_object_handlers, &std_object_handlers, sizeof(std_object_handlers));
    money_object_handlers.do_operation = money_handler_do_operation;
    money_object_handlers.compare_objects = money_handler_compare_objects;
    INIT_CLASS_ENTRY(currency_tmp, "Currency", currency_functions);
    currency_ce = zend_register_internal_class(&currency_tmp);
    currency_ce->ce_flags |= ZEND_ACC_FINAL_CLASS;
    zend_declare_property_stringl(currency_ce, CURRENCY_PROP_CURRENCYCODE_WS, ZEND_STRL(""), ZEND_ACC_PRIVATE);
    INIT_CLASS_ENTRY(CurrencyMismatchException_tmp, "CurrencyMismatchException", NULL);
    CurrencyMismatchException_ce = zend_register_internal_class_ex(&CurrencyMismatchException_tmp, spl_ce_InvalidArgumentException, NULL);
    return SUCCESS;
}
/* phpinfo() section for the extension. */
PHP_MINFO_FUNCTION(money)
{
    php_info_print_table_start();
    php_info_print_table_header(2, "money support", "enabled");
    php_info_print_table_colspan_header(2, "Based on https://github.com/sebastianbergmann/money");
    php_info_print_table_end();
}
/* Module descriptor: only MINIT and MINFO are provided; no request
 * startup/shutdown or globals are needed. */
zend_module_entry money_module_entry = {
    STANDARD_MODULE_HEADER,
    "money",
    NULL,
    PHP_MINIT(money),
    NULL,
    NULL,
    NULL,
    PHP_MINFO(money),
    PHP_MONEY_VERSION,
    STANDARD_MODULE_PROPERTIES
};
#ifdef COMPILE_DL_MONEY
ZEND_GET_MODULE(money)
#endif
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: noet sw=4 ts=4 fdm=marker
* vim<600: noet sw=4 ts=4
*/
| 30.88361 | 180 | 0.728657 | [
"object"
] |
d4f9302ff679a7e6d936d3a4227c94394eccf1b8 | 12,867 | c | C | external/graphviz/cmd/lefty/str.c | andraantariksa/cit205-nfa-to-dfa | 6eb407309f4efad852863d534166e71a0be1d1ef | [
"MIT"
] | null | null | null | external/graphviz/cmd/lefty/str.c | andraantariksa/cit205-nfa-to-dfa | 6eb407309f4efad852863d534166e71a0be1d1ef | [
"MIT"
] | 1 | 2020-02-10T19:20:08.000Z | 2020-02-21T01:52:17.000Z | external/graphviz/cmd/lefty/str.c | andraantariksa/cit205-nfa-to-dfa | 6eb407309f4efad852863d534166e71a0be1d1ef | [
"MIT"
] | null | null | null | /* $Id$ $Revision$ */
/* vim:set shiftwidth=4 ts=8: */
/*************************************************************************
* Copyright (c) 2011 AT&T Intellectual Property
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors: See CVS logs. Details at http://www.graphviz.org/
*************************************************************************/
/* Lefteris Koutsofios - AT&T Labs Research */
#include "common.h"
#include "mem.h"
#include "code.h"
#include "tbl.h"
#include "str.h"
#include "internal.h"
/* Index of the code node to flag with " >> " while printing (set by Scfull
 * to mark the current execution point); -1 means none. */
static int highci;
/* Current pretty-printing indentation, in spaces. */
static int indent;
#define INDINC(i) (indent += (i))
#define INDDEC(i) (indent -= (i))
/* Shared, growable output buffer used by every S* formatting entry point;
 * each entry point resets it and hands back a private copy via copysbuf(). */
static char *sbufp;
/* sbufi: next write offset into sbufp; sbufn: current capacity in chars. */
static int sbufi, sbufn;
#define SBUFINCR 1000
#define SBUFSIZE sizeof (char)
static void scalarstr (Tobj);
static void codestr (Tobj, int);
static void appends (char *);
static void appendi (long);
static void appendd (double);
static void appendnl (void);
static void growsbuf (int);
static char *copysbuf (void);
/* Allocate the shared string buffer and reset all formatter state.
   Must be called before any other S* function; panics on OOM. */
void Sinit (void) {
    sbufp = malloc (SBUFINCR * SBUFSIZE);
    if (!sbufp)
        panic1 (POS, "Sinit", "sbuf malloc failed");
    sbufn = SBUFINCR;
    sbufi = 0;
    highci = -1;
    indent = 0;
}
/* Release the shared string buffer and clear all formatter state. */
void Sterm (void) {
    free (sbufp);
    sbufp = NULL;
    sbufi = 0;
    sbufn = 0;
    indent = 0;
}
/* Format "<path><key>" (path may be NULL) and return a malloc'd copy. */
char *Spath (char *path, Tobj ko) {
    sbufi = 0;
    sbufp[0] = '\000';
    if (path)
        appends (path);
    else
        appends ("");
    scalarstr (ko);
    return copysbuf ();
}
/* Format "<key> = <path>;" for a value already printed elsewhere. */
char *Sseen (Tobj ko, char *path) {
    sbufi = 0;
    sbufp[0] = '\000';
    scalarstr (ko);
    appends (" = ");
    appends (path);
    appends (";");
    return copysbuf ();
}
/* Format "<key> = <abbreviated value>;": scalars are printed in full,
   while functions and tables are elided to placeholders. */
char *Sabstract (Tobj ko, Tobj vo) {
    int vt;

    sbufi = 0;
    sbufp[0] = '\000';
    scalarstr (ko);
    appends (" = ");
    vt = Tgettype (vo);
    if (vt == T_STRING || vt == T_INTEGER || vt == T_REAL)
        scalarstr (vo);
    else if (vt == T_CODE)
        appends ("function (...) { ... }");
    else if (vt == T_TABLE)
        appends ("[ ... ]");
    appends (";");
    return copysbuf ();
}
/* Format the opening line "<key> = [" of a table dump. */
char *Stfull (Tobj ko) {
    sbufi = 0;
    sbufp[0] = '\000';
    scalarstr (ko);
    appends (" = [");
    return copysbuf ();
}
/* Format "[<key> = ]<scalar-or-code value>;" (key may be NULL). */
char *Ssfull (Tobj ko, Tobj vo) {
    int vt;

    sbufi = 0;
    sbufp[0] = '\000';
    if (ko) {
        scalarstr (ko);
        appends (" = ");
    }
    vt = Tgettype (vo);
    if (vt == T_STRING || vt == T_INTEGER || vt == T_REAL || vt == T_CODE)
        scalarstr (vo);
    appends (";");
    return copysbuf ();
}
/* Render code object co starting at node ci, marking node mci with " >> "
   (the current execution point) via the file-scope highci flag. */
char *Scfull (Tobj co, int ci, int mci) {
    sbufi = 0;
    sbufp[0] = '\000';
    highci = mci;
    codestr (co, ci);
    highci = -1;
    return copysbuf ();
}
/* Append the printable form of a scalar object: integers and reals as
   numbers, strings quoted, code via the full code printer. */
static void scalarstr (Tobj to) {
    int t;

    t = Tgettype (to);
    if (t == T_INTEGER)
        appendi (Tgetinteger (to));
    else if (t == T_REAL)
        appendd (Tgetreal (to));
    else if (t == T_STRING) {
        appends ("\"");
        appends (Tgetstring (to));
        appends ("\"");
    } else if (t == T_CODE)
        codestr (to, 0);
}
/* Recursively render code-tree node ci of code object co as lefty source
 * text into the shared buffer.  Prefixes the node whose index equals the
 * file-scope highci with " >> " (used by Scfull to mark the current
 * execution point).  Statement lists grow/shrink `indent` by 2 around their
 * bodies; single-statement bodies of if/while/for are printed on their own
 * indented line, multi-statement bodies inside "{ }". */
static void codestr (Tobj co, int ci) {
    int ct, ct1;
    int ci1, ci2;
    if (highci == ci)
        appends (" >> ");
    switch ((ct = TCgettype (co, ci))) {
    case C_ASSIGN:
        codestr (co, (ci1 = TCgetfp (co, ci)));
        appends (" = ");
        codestr (co, TCgetnext (co, ci1));
        break;
    /* binary operators: left operand, operator text, right operand */
    case C_OR:
    case C_AND:
    case C_EQ:
    case C_NE:
    case C_LT:
    case C_LE:
    case C_GT:
    case C_GE:
    case C_PLUS:
    case C_MINUS:
    case C_MUL:
    case C_DIV:
    case C_MOD:
        codestr (co, (ci1 = TCgetfp (co, ci)));
        switch (ct) {
        case C_OR: appends (" | "); break;
        case C_AND: appends (" & "); break;
        case C_EQ: appends (" == "); break;
        case C_NE: appends (" ~= "); break;
        case C_LT: appends (" < "); break;
        case C_LE: appends (" <= "); break;
        case C_GT: appends (" > "); break;
        case C_GE: appends (" >= "); break;
        case C_PLUS: appends (" + "); break;
        case C_MINUS: appends (" - "); break;
        case C_MUL: appends (" * "); break;
        case C_DIV: appends (" / "); break;
        case C_MOD: appends (" % "); break;
        }
        codestr (co, TCgetnext (co, ci1));
        break;
    case C_NOT:
        appends ("~");
        codestr (co, TCgetfp (co, ci));
        break;
    case C_UMINUS:
        appends ("-");
        codestr (co, TCgetfp (co, ci));
        break;
    case C_PEXPR:
        appends ("(");
        codestr (co, TCgetfp (co, ci));
        appends (")");
        break;
    case C_FCALL:
        codestr (co, (ci1 = TCgetfp (co, ci)));
        appends (" (");
        codestr (co, TCgetnext (co, ci1));
        appends (")");
        break;
    case C_INTEGER:
        appendi (TCgetinteger (co, ci));
        break;
    case C_REAL:
        appendd (TCgetreal (co, ci));
        break;
    case C_STRING:
        appends ("\""), appends (TCgetstring (co, ci)), appends ("\"");
        break;
    /* variable reference: base name followed by .field / [index] parts */
    case C_GVAR:
    case C_LVAR:
        ci1 = TCgetfp (co, ci);
        appends (TCgetstring (co, ci1));
        if (ct == C_LVAR)
            ci1 = TCgetnext (co, ci1);
        for (
            ci1 = TCgetnext (co, ci1); ci1 != C_NULL;
            ci1 = TCgetnext (co, ci1)
        ) {
            switch (TCgettype (co, ci1)) {
            case C_STRING:
                appends ("."), appends (TCgetstring (co, ci1));
                break;
            case C_INTEGER:
                appends ("[");
                appendi (TCgetinteger (co, ci1));
                appends ("]");
                break;
            case C_REAL:
                appends ("[");
                appendd (TCgetreal (co, ci1));
                appends ("]");
                break;
            default:
                appends ("[");
                codestr (co, ci1);
                appends ("]");
            }
        }
        break;
    case C_PVAR:
        appends ("<var>");
        break;
    /* function literal: arg list, then either "internal <name>" or body */
    case C_FUNCTION:
        ci1 = TCgetnext (co, TCgetnext (co, TCgetfp (co, ci)));
        appends ("function (");
        codestr (co, ci1);
        ci1 = TCgetnext (co, ci1);
        if (TCgettype (co, ci1) == C_INTERNAL) {
            appends (") internal \"");
            appends (Ifuncs[TCgetinteger (co, TCgetfp (co, ci1))].name);
            appends ("\"");
        } else {
            appends (") {");
            INDINC (2);
            for (; ci1 != C_NULL; ci1 = TCgetnext (co, ci1)) {
                appendnl ();
                if (TCgettype (co, ci1) == C_DECL)
                    appends ("local "), codestr (co, ci1), appends (";");
                else
                    codestr (co, ci1);
            }
            INDDEC (2);
            appendnl ();
            appends ("}");
        }
        break;
    /* table constructor: "[ key = value; ... ]" with key/value pairs
     * alternating along the sibling chain */
    case C_TCONS:
        appends ("[");
        INDINC (2);
        ci1 = TCgetfp (co, ci);
        while (ci1 != C_NULL) {
            appendnl ();
            codestr (co, ci1);
            appends (" = ");
            ci1 = TCgetnext (co, ci1);
            codestr (co, ci1);
            appends (";");
            ci1 = TCgetnext (co, ci1);
        }
        INDDEC (2);
        appendnl ();
        appends ("]");
        break;
    case C_DECL:
        ci1 = TCgetfp (co, ci);
        while (ci1 != C_NULL) {
            appends (TCgetstring (co, ci1));
            ci1 = TCgetnext (co, ci1);
            if (ci1 != C_NULL)
                appends (", ");
        }
        break;
    /* statement (list): empty -> ";", single non-statement child gets a
     * trailing ";", multiple children are wrapped in "{ }" */
    case C_STMT:
        ci1 = TCgetfp (co, ci);
        if (ci1 == C_NULL) {
            appends (";");
            break;
        }
        if (TCgetnext (co, ci1) == C_NULL) {
            codestr (co, ci1);
            ct1 = TCgettype (co, ci1);
            if (!C_ISSTMT (ct1))
                appends (";");
        } else {
            appends (" {");
            INDINC (2);
            for (; ci1 != C_NULL; ci1 = TCgetnext (co, ci1)) {
                appendnl ();
                codestr (co, ci1);
            }
            INDDEC (2);
            appendnl ();
            appends ("}");
        }
        break;
    /* if: ci2 probes the branch's statement list to decide between an
     * indented single statement and an inline "{ }" block */
    case C_IF:
        ci1 = TCgetfp (co, ci);
        appends ("if (");
        codestr (co, ci1);
        appends (")");
        ci1 = TCgetnext (co, ci1);
        ci2 = TCgetfp (co, ci1);
        if (ci2 == C_NULL || TCgetnext (co, ci2) == C_NULL) {
            INDINC (2);
            appendnl ();
            codestr (co, ci1);
            INDDEC (2);
        } else {
            codestr (co, ci1);
        }
        ci1 = TCgetnext (co, ci1);
        if (ci1 == C_NULL)
            break;
        if (ci2 == C_NULL || TCgetnext (co, ci2) == C_NULL) {
            appendnl ();
            appends ("else");
        } else {
            appends (" else");
        }
        ci2 = TCgetfp (co, ci1);
        if (ci2 == C_NULL || TCgetnext (co, ci2) == C_NULL) {
            INDINC (2);
            appendnl ();
            codestr (co, ci1);
            INDDEC (2);
        } else {
            codestr (co, ci1);
        }
        break;
    case C_WHILE:
        ci1 = TCgetfp (co, ci);
        appends ("while (");
        codestr (co, ci1);
        ci1 = TCgetnext (co, ci1);
        ci2 = TCgetfp (co, ci1);
        if (ci2 == C_NULL || TCgetnext (co, ci2) == C_NULL) {
            appends (")");
            INDINC (2);
            appendnl ();
            codestr (co, ci1);
            INDDEC (2);
        } else {
            appends (")");
            codestr (co, ci1);
        }
        break;
    case C_FOR:
        ci1 = TCgetfp (co, ci);
        appends ("for (");
        codestr (co, ci1);
        appends ("; ");
        ci1 = TCgetnext (co, ci1);
        codestr (co, ci1);
        appends ("; ");
        ci1 = TCgetnext (co, ci1);
        codestr (co, ci1);
        ci1 = TCgetnext (co, ci1);
        ci2 = TCgetfp (co, ci1);
        if (ci2 == C_NULL || TCgetnext (co, ci2) == C_NULL) {
            appends (")");
            INDINC (2);
            appendnl ();
            codestr (co, ci1);
            INDDEC (2);
        } else {
            appends (")");
            codestr (co, ci1);
        }
        break;
    case C_FORIN:
        ci1 = TCgetfp (co, ci);
        appends ("for (");
        codestr (co, ci1);
        appends (" in ");
        ci1 = TCgetnext (co, ci1);
        codestr (co, ci1);
        ci1 = TCgetnext (co, ci1);
        ci2 = TCgetfp (co, ci1);
        if (ci2 == C_NULL || TCgetnext (co, ci2) == C_NULL) {
            appends (")");
            INDINC (2);
            appendnl ();
            codestr (co, ci1);
            INDDEC (2);
        } else {
            appends (")");
            codestr (co, ci1);
        }
        break;
    case C_BREAK:
        appends ("break;");
        break;
    case C_CONTINUE:
        appends ("continue;");
        break;
    case C_RETURN:
        ci1 = TCgetfp (co, ci);
        appends ("return");
        if (ci1 != C_NULL) {
            appends (" ");
            codestr (co, ci1);
        }
        appends (";");
        break;
    case C_ARGS:
        ci1 = TCgetfp (co, ci);
        while (ci1 != C_NULL) {
            codestr (co, ci1);
            ci1 = TCgetnext (co, ci1);
            if (ci1 != C_NULL)
                appends (", ");
        }
        break;
    default:
        panic1 (POS, "codestr", "bad object type: %d", ct);
    }
}
/* Append the NUL-terminated string s to the shared buffer, growing it as
   needed.  sbufi is left at the end of the text (before the NUL). */
static void appends (char *s) {
    int len;

    len = strlen (s);
    if (sbufi + len + 1 > sbufn)
        growsbuf (len + 1);
    strcpy (&sbufp[sbufi], s);
    sbufi += len;
}
static void appendi (long i) {
char buf[40];
int n;
sprintf (buf, "%ld", i);
n = strlen (buf) + 1;
if (sbufi + n > sbufn)
growsbuf (n);
strcpy (&sbufp[sbufi], buf);
sbufi += (n - 1);
}
/* Append the "%lf" rendering of a double to the shared buffer.
 * BUG FIX: the old 40-byte buffer overflowed for large magnitudes — "%lf"
 * prints every integral digit, so e.g. 1e300 emits 300+ characters.  The
 * buffer is now sized for DBL_MAX (~309 digits + '.' + 6 decimals + sign)
 * and snprintf clamps as a belt-and-braces guard. */
static void appendd (double d) {
    char buf[360];
    int n;

    snprintf (buf, sizeof (buf), "%lf", d);
    n = strlen (buf) + 1;
    if (sbufi + n > sbufn)
        growsbuf (n);
    strcpy (&sbufp[sbufi], buf);
    sbufi += (n - 1);
}
/* Append a newline followed by `indent` spaces (the current nesting). */
static void appendnl (void) {
    int need, j;

    need = indent + 1;
    if (sbufi + need > sbufn)
        growsbuf (need);
    sbufp[sbufi] = '\n';
    sbufi++;
    for (j = 0; j < indent; j++)
        sbufp[sbufi++] = ' ';
}
/* Grow the shared buffer so at least ssize more chars fit, rounding the
   new capacity up to a multiple of SBUFINCR.  Panics on OOM. */
static void growsbuf (int ssize) {
    int newn;

    newn = ((sbufn + ssize) / SBUFINCR + 1) * SBUFINCR;
    sbufp = realloc (sbufp, newn * SBUFSIZE);
    if (!sbufp)
        panic1 (POS, "growsbuf", "sbuf realloc failed");
    sbufn = newn;
}
static char *copysbuf (void) {
char *newsbufp;
sbufp[sbufi++] = '\000';
if (!(newsbufp = malloc (sbufi * sizeof (char))))
panic1 (POS, "copysbuf", "newsbuf malloc failed");
strcpy (newsbufp, sbufp);
return newsbufp;
}
| 25.682635 | 75 | 0.451387 | [
"object"
] |
d4fa3d77c7494dd75ccf97499fedbd2eb73f349b | 2,783 | h | C | syzygy/reorder/reorder_app.h | dandv/syzygy | 2444520c8e6e0b45b2f45b680d878d60b9636f45 | [
"Apache-2.0"
] | null | null | null | syzygy/reorder/reorder_app.h | dandv/syzygy | 2444520c8e6e0b45b2f45b680d878d60b9636f45 | [
"Apache-2.0"
] | null | null | null | syzygy/reorder/reorder_app.h | dandv/syzygy | 2444520c8e6e0b45b2f45b680d878d60b9636f45 | [
"Apache-2.0"
] | null | null | null | // Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef SYZYGY_REORDER_REORDER_APP_H_
#define SYZYGY_REORDER_REORDER_APP_H_
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_piece.h"
#include "base/time/time.h"
#include "syzygy/common/application.h"
#include "syzygy/pe/image_layout.h"
#include "syzygy/pe/pe_file.h"
#include "syzygy/reorder/reorderer.h"
namespace reorder {
// This class implements the command-line reorder utility.
class ReorderApp : public common::AppImplBase {
 public:
  ReorderApp();

  // @name Implementation of the AppImplBase interface.
  // @{
  bool ParseCommandLine(const CommandLine* command_line);
  bool SetUp();
  int Run();
  // @}

 protected:
  typedef std::vector<base::FilePath> FilePathVector;

  // Operating mode, selected by the command line during ParseCommandLine.
  enum Mode {
    kInvalidMode,
    kLinearOrderMode,
    kRandomOrderMode,
    kDeadCodeFinderMode
  };

  // @name Utility members.
  // @{
  // Prints |message| followed by usage instructions; always returns false
  // so callers can `return Usage(...)` on parse errors.
  bool Usage(const CommandLine* command_line,
             const base::StringPiece& message) const;
  bool OptimizeBasicBlocks(const pe::PEFile::Signature& signature,
                           const pe::ImageLayout& image_layout,
                           Reorderer::Order* order);
  // @}

  // The mode selected on the command line and the generator implementing it.
  Mode mode_;
  scoped_ptr<Reorderer::OrderGenerator> order_generator_;

  // @name Command-line parameters.
  // @{
  base::FilePath instrumented_image_path_;
  base::FilePath input_image_path_;
  base::FilePath output_file_path_;
  base::FilePath bb_entry_count_file_path_;
  FilePathVector trace_file_paths_;
  uint32 seed_;          // seed for kRandomOrderMode
  bool pretty_print_;    // pretty-print the JSON order file
  Reorderer::Flags flags_;
  // @}

  // Command-line parameter names. Exposed as protected for unit-testing.
  // @{
  static const char kInstrumentedImage[];
  static const char kOutputFile[];
  static const char kInputImage[];
  static const char kBasicBlockEntryCounts[];
  static const char kSeed[];
  static const char kListDeadCode[];
  static const char kPrettyPrint[];
  static const char kReordererFlags[];
  // Deprecated aliases for kInstrumentedImage / kInputImage.
  static const char kInstrumentedDll[];
  static const char kInputDll[];
  // @}

 private:
  DISALLOW_COPY_AND_ASSIGN(ReorderApp);
};
} // namespace reorder
#endif // SYZYGY_REORDER_REORDER_APP_H_
| 29.294737 | 75 | 0.727273 | [
"vector"
] |
be124ecc8dea99b6e1beb2aeac0247db4c8daeea | 16,284 | c | C | lk/kernel/port.c | chrisdearman/mips-lk2 | 67b845aec1eb3609db4266551609a8243e7ab826 | [
"Apache-2.0"
] | 1 | 2019-04-19T10:04:43.000Z | 2019-04-19T10:04:43.000Z | lk/kernel/port.c | chrisdearman/mips-lk2 | 67b845aec1eb3609db4266551609a8243e7ab826 | [
"Apache-2.0"
] | 1 | 2018-03-21T22:32:38.000Z | 2018-03-21T22:59:49.000Z | lk/kernel/port.c | chrisdearman/mips-lk2 | 67b845aec1eb3609db4266551609a8243e7ab826 | [
"Apache-2.0"
] | null | null | null | /*
* Copyright (c) 2015 Carlos Pizano-Uribe cpu@chromium.org
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files
* (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* @file
* @brief Port object functions
* @defgroup event Events
*
*/
#include <debug.h>
#include <list.h>
#include <malloc.h>
#include <string.h>
#include <pow2.h>
#include <err.h>
#include <kernel/thread.h>
#include <kernel/port.h>
// write ports can be in two states, open and closed, which have a
// different magic number.
#define WRITEPORT_MAGIC_W (0x70727477) // 'prtw'
#define WRITEPORT_MAGIC_X (0x70727478) // 'prtx'
#define READPORT_MAGIC (0x70727472) // 'prtr'
#define PORTGROUP_MAGIC (0x70727467) // 'prtg'
#define PORT_BUFF_SIZE 8
#define PORT_BUFF_SIZE_BIG 64
#define RESCHEDULE_POLICY 1
#define MAX_PORT_GROUP_COUNT 256
/* Power-of-two circular packet buffer; sized at allocation via make_buf. */
typedef struct {
    uint log2;              // capacity == 1 << log2
    uint avail;             // free slots remaining
    uint head;              // next read index
    uint tail;              // next write index
    port_packet_t packet[1];  // flexible tail: `capacity` packets follow
} port_buf_t;
/* Named write endpoint; lives on write_port_list. */
typedef struct {
    int magic;              // WRITEPORT_MAGIC_W (open) or _X (closed)
    struct list_node node;  // membership in write_port_list
    port_buf_t *buf;        // held only until the first reader attaches
    struct list_node rp_list;  // attached read ports (read_port_t.w_node)
    port_mode_t mode;
    char name[PORT_NAME_LEN];
} write_port_t;
/* Aggregates read ports so one blocking read can span all of them. */
typedef struct {
    int magic;              // PORTGROUP_MAGIC
    wait_queue_t wait;      // readers blocked on the whole group
    struct list_node rp_list;  // member read ports (read_port_t.g_node)
} port_group_t;
/* Read endpoint attached to one write port, optionally one group. */
typedef struct {
    int magic;              // READPORT_MAGIC
    struct list_node w_node;   // membership in wport->rp_list
    struct list_node g_node;   // membership in gport->rp_list
    port_buf_t *buf;        // per-reader circular buffer
    void *ctx;              // opaque cookie returned in port_result_t
    wait_queue_t wait;      // readers blocked on this port alone
    write_port_t *wport;    // owning write port
    port_group_t *gport;    // containing group, or NULL
} read_port_t;
// Global registry of all write ports; guarded by the thread lock.
static struct list_node write_port_list;
/* Allocate a circular buffer holding pk_count packets (assumed to be a
 * power of two, since indices are wrapped with modpow2). NULL on OOM. */
static port_buf_t *make_buf(uint pk_count)
{
    port_buf_t *buf;
    uint bytes;

    // port_buf_t already embeds one packet, hence pk_count - 1 extra.
    bytes = sizeof(port_buf_t) + ((pk_count - 1) * sizeof(port_packet_t));
    buf = (port_buf_t *) malloc(bytes);
    if (buf == NULL)
        return NULL;

    buf->log2 = log2_uint(pk_count);
    buf->avail = pk_count;
    buf->head = 0;
    buf->tail = 0;
    return buf;
}
/* A buffer is empty when every slot is available. */
static inline bool buf_is_empty(port_buf_t *buf)
{
    return valpow2(buf->log2) == buf->avail;
}
/* Copy `count` packets into the circular buffer; all-or-nothing.
 * Returns ERR_NOT_ENOUGH_BUFFER when fewer than `count` slots are free. */
static status_t buf_write(port_buf_t *buf, const port_packet_t *packets, size_t count)
{
    if (buf->avail < count)
        return ERR_NOT_ENOUGH_BUFFER;

    for (size_t ix = 0; ix != count; ix++) {
        buf->packet[buf->tail] = packets[ix];
        /* BUG FIX: the old `buf->tail = modpow2(++buf->tail, ...)` modified
         * tail twice between sequence points — undefined behavior in C. */
        buf->tail = modpow2(buf->tail + 1, buf->log2);
    }
    buf->avail -= count;
    return NO_ERROR;
}
/* Pop the oldest packet into pr->packet; ERR_NO_MSG when empty. */
static status_t buf_read(port_buf_t *buf, port_result_t *pr)
{
    if (buf_is_empty(buf))
        return ERR_NO_MSG;

    pr->packet = buf->packet[buf->head];
    /* BUG FIX: the old `buf->head = modpow2(++buf->head, ...)` modified
     * head twice between sequence points — undefined behavior in C. */
    buf->head = modpow2(buf->head + 1, buf->log2);
    ++buf->avail;
    return NO_ERROR;
}
// must be called before any use of ports.
// Initializes the global registry of write ports.
void port_init(void)
{
    list_initialize(&write_port_list);
}
/* Create (or look up) the named write port.  Returns the existing handle
 * with ERR_ALREADY_EXISTS when the name is taken and open, ERR_BUSY when a
 * closed port still holds the name.  The circular buffer created here is
 * handed to the first reader in port_open(). */
status_t port_create(const char *name, port_mode_t mode, port_t *port)
{
    if (!name || !port)
        return ERR_INVALID_ARGS;
    // only unicast ports can have a large buffer.
    if (mode & PORT_MODE_BROADCAST) {
        if (mode & PORT_MODE_BIG_BUFFER)
            return ERR_INVALID_ARGS;
    }
    if (strlen(name) >= PORT_NAME_LEN)
        return ERR_INVALID_ARGS;
    // lookup for existing port, return that if found.
    write_port_t *wp = NULL;
    THREAD_LOCK(state1);
    list_for_every_entry(&write_port_list, wp, write_port_t, node) {
        if (strcmp(wp->name, name) == 0) {
            // can't return closed ports.
            if (wp->magic == WRITEPORT_MAGIC_X)
                wp = NULL;
            THREAD_UNLOCK(state1);
            if (wp) {
                *port = (void *) wp;
                return ERR_ALREADY_EXISTS;
            } else {
                return ERR_BUSY;
            }
        }
    }
    THREAD_UNLOCK(state1);
    // not found, create the write port and the circular buffer.
    wp = calloc(1, sizeof(write_port_t));
    if (!wp)
        return ERR_NO_MEMORY;
    wp->magic = WRITEPORT_MAGIC_W;
    wp->mode = mode;
    strlcpy(wp->name, name, sizeof(wp->name));
    list_initialize(&wp->rp_list);
    uint size = (mode & PORT_MODE_BIG_BUFFER) ? PORT_BUFF_SIZE_BIG : PORT_BUFF_SIZE;
    wp->buf = make_buf(size);
    if (!wp->buf) {
        free(wp);
        return ERR_NO_MEMORY;
    }
    // todo: race condition! a port with the same name could have been created
    // by another thread at this point (the lock was dropped above).
    THREAD_LOCK(state2);
    list_add_tail(&write_port_list, &wp->node);
    THREAD_UNLOCK(state2);
    *port = (void *)wp;
    return NO_ERROR;
}
/* Open a read handle on the named write port.  The first reader inherits
 * the write port's buffer (so packets written before any open are seen);
 * later readers (broadcast mode only) get a fresh small buffer.  |ctx| is
 * an opaque cookie returned with every packet read from this port. */
status_t port_open(const char *name, void *ctx, port_t *port)
{
    if (!name || !port)
        return ERR_INVALID_ARGS;
    // assume success; create the read port and buffer now.
    read_port_t *rp = calloc(1, sizeof(read_port_t));
    if (!rp)
        return ERR_NO_MEMORY;
    rp->magic = READPORT_MAGIC;
    wait_queue_init(&rp->wait);
    rp->ctx = ctx;
    // |buf| might not be needed, but we always allocate outside the lock.
    // this buffer is only needed for broadcast ports, but we don't know
    // that here.
    port_buf_t *buf = make_buf(PORT_BUFF_SIZE);
    if (!buf) {
        free(rp);
        return ERR_NO_MEMORY;
    }
    // find the named write port and associate it with read port.
    status_t rc = ERR_NOT_FOUND;
    THREAD_LOCK(state);
    write_port_t *wp = NULL;
    list_for_every_entry(&write_port_list, wp, write_port_t, node) {
        if (strcmp(wp->name, name) == 0) {
            // found; add read port to write port list.
            rp->wport = wp;
            if (wp->buf) {
                // this is the first read port; transfer the circular buffer.
                list_add_tail(&wp->rp_list, &rp->w_node);
                rp->buf = wp->buf;
                wp->buf = NULL;
                rc = NO_ERROR;
            } else if (buf) {
                // not first read port.
                if (wp->mode & PORT_MODE_UNICAST) {
                    // cannot add a second listener.
                    rc = ERR_NOT_ALLOWED;
                    break;
                }
                // use the new (small) circular buffer.
                list_add_tail(&wp->rp_list, &rp->w_node);
                rp->buf = buf;
                buf = NULL;
                rc = NO_ERROR;
            } else {
                // |buf| allocation failed and the buffer was needed.
                // (unreachable in practice: a NULL |buf| already returned
                // ERR_NO_MEMORY above.)
                rc = ERR_NO_MEMORY;
            }
            break;
        }
    }
    THREAD_UNLOCK(state);
    // free the spare buffer if it was not consumed above.
    if (buf)
        free(buf);
    if (rc == NO_ERROR) {
        *port = (void *)rp;
    } else {
        free(rp);
    }
    return rc;
}
/* Bundle |count| read ports (possibly zero) into a new port group so one
 * port_read() can wait on all of them.  Each port may belong to at most
 * one group. */
status_t port_group(port_t *ports, size_t count, port_t *group)
{
    if (count > MAX_PORT_GROUP_COUNT)
        return ERR_TOO_BIG;
    // Allow empty port groups.
    if (count && !ports)
        return ERR_INVALID_ARGS;
    if (!group)
        return ERR_INVALID_ARGS;
    // assume success; create port group now.
    port_group_t *pg = calloc(1, sizeof(port_group_t));
    if (!pg)
        return ERR_NO_MEMORY;
    pg->magic = PORTGROUP_MAGIC;
    wait_queue_init(&pg->wait);
    list_initialize(&pg->rp_list);
    status_t rc = NO_ERROR;
    THREAD_LOCK(state);
    for (size_t ix = 0; ix != count; ix++) {
        read_port_t *rp = (read_port_t *)ports[ix];
        if ((rp->magic != READPORT_MAGIC) || rp->gport) {
            // wrong type of port, or port already part of a group,
            // in any case, undo the changes to the previous read ports.
            // NOTE(review): only gport is reset here; the g_node links of
            // already-added ports still reference the soon-to-be-freed pg
            // until a later port_group_add relinks them — confirm nothing
            // walks g_node in between.
            for (size_t jx = 0; jx != ix; jx++) {
                ((read_port_t *)ports[jx])->gport = NULL;
            }
            rc = ERR_BAD_HANDLE;
            break;
        }
        // link port group and read port.
        rp->gport = pg;
        list_add_tail(&pg->rp_list, &rp->g_node);
    }
    THREAD_UNLOCK(state);
    if (rc == NO_ERROR) {
        *group = (port_t *)pg;
    } else {
        free(pg);
    }
    return rc;
}
/* Add a single read port to an existing group.  Fails with ERR_BAD_HANDLE
 * when |port| is not a read port or is already in some group, ERR_TOO_BIG
 * when the group is at MAX_PORT_GROUP_COUNT. */
status_t port_group_add(port_t group, port_t port)
{
    if (!port || !group)
        return ERR_INVALID_ARGS;
    // Make sure the user has actually passed in a port group and a read-port.
    port_group_t *pg = (port_group_t *)group;
    if (pg->magic != PORTGROUP_MAGIC)
        return ERR_INVALID_ARGS;
    read_port_t *rp = (read_port_t *)port;
    if (rp->magic != READPORT_MAGIC || rp->gport)
        return ERR_BAD_HANDLE;
    status_t rc = NO_ERROR;
    THREAD_LOCK(state);
    if (list_length(&pg->rp_list) == MAX_PORT_GROUP_COUNT) {
        rc = ERR_TOO_BIG;
    } else {
        rp->gport = pg;
        list_add_tail(&pg->rp_list, &rp->g_node);
        // If the new read port being added has messages available, try to wake
        // any readers that might be present.
        if (!buf_is_empty(rp->buf)) {
            wait_queue_wake_one(&pg->wait, false, NO_ERROR);
        }
    }
    THREAD_UNLOCK(state);
    return rc;
}
/* Remove a read port from a group it belongs to.
 * BUG FIXES vs. the previous version:
 *  - the not-found path returned while still holding THREAD_LOCK, leaking
 *    the lock and deadlocking the next port call;
 *  - rp->gport was left pointing at the group after removal, which made a
 *    later port_group_add() reject the port (non-NULL gport) and made
 *    port_write() keep waking the old group's wait queue. */
status_t port_group_remove(port_t group, port_t port)
{
    if (!port || !group)
        return ERR_INVALID_ARGS;
    // Make sure the user has actually passed in a port group and a read-port.
    port_group_t *pg = (port_group_t *)group;
    if (pg->magic != PORTGROUP_MAGIC)
        return ERR_INVALID_ARGS;
    read_port_t *rp = (read_port_t *)port;
    if (rp->magic != READPORT_MAGIC || rp->gport != pg)
        return ERR_BAD_HANDLE;
    status_t rc = NO_ERROR;
    THREAD_LOCK(state);
    // Verify membership before unlinking.
    bool found = false;
    read_port_t *current_rp;
    list_for_every_entry(&pg->rp_list, current_rp, read_port_t, g_node) {
        if (current_rp == rp) {
            found = true;
        }
    }
    if (found) {
        list_delete(&rp->g_node);
        rp->gport = NULL;
    } else {
        rc = ERR_BAD_HANDLE;
    }
    THREAD_UNLOCK(state);
    return rc;
}
/* Write |count| packets to an open write port.  With no readers attached
 * the packets land in the port's own buffer; otherwise each reader's buffer
 * gets a copy and one waiter (group first, then the port itself) is woken.
 * Returns ERR_PARTIAL_WRITE when at least one reader's buffer was full. */
status_t port_write(port_t port, const port_packet_t *pk, size_t count)
{
    if (!port || !pk)
        return ERR_INVALID_ARGS;
    write_port_t *wp = (write_port_t *)port;
    THREAD_LOCK(state);
    if (wp->magic != WRITEPORT_MAGIC_W) {
        // wrong port type.
        THREAD_UNLOCK(state);
        return ERR_BAD_HANDLE;
    }
    status_t status = NO_ERROR;
    int awake_count = 0;
    if (wp->buf) {
        // there are no read ports, just write to the buffer.
        status = buf_write(wp->buf, pk, count);
    } else {
        // there are read ports. for each, write and attempt to wake a thread
        // from the port group or from the read port itself.
        read_port_t *rp;
        list_for_every_entry(&wp->rp_list, rp, read_port_t, w_node) {
            if (buf_write(rp->buf, pk, count) < 0) {
                // buffer full.
                status = ERR_PARTIAL_WRITE;
                continue;
            }
            int awaken = 0;
            if (rp->gport) {
                awaken = wait_queue_wake_one(&rp->gport->wait, false, NO_ERROR);
            }
            if (!awaken) {
                awaken = wait_queue_wake_one(&rp->wait, false, NO_ERROR);
            }
            awake_count += awaken;
        }
    }
    THREAD_UNLOCK(state);
#if RESCHEDULE_POLICY
    // let freshly-woken readers run right away.
    if (awake_count)
        thread_yield();
#endif
    return status;
}
/*
 * Pop one packet from a read port's buffer, blocking for up to
 * |timeout| when it is empty.  Must be called with THREAD_LOCK held
 * (hence the name).  A timeout of 0 polls without blocking.
 *
 * Equivalent to the previous tail-recursive formulation, written as an
 * explicit retry loop: spurious wakeups simply re-poll the buffer.
 * NOTE(review): |timeout| is not reduced across wakeups, so repeated
 * spurious wakeups can extend the total wait -- preserved as-is.
 */
static inline status_t read_no_lock(read_port_t *rp, lk_time_t timeout, port_result_t *result)
{
    for (;;) {
        status_t status = buf_read(rp->buf, result);
        result->ctx = rp->ctx;

        if (status != ERR_NO_MSG)
            return status;

        // Empty buffer: a zero timeout means poll-only.  This early
        // return also lets the compiler elide the blocking path in the
        // group-read case.
        if (!timeout)
            return ERR_TIMED_OUT;

        status_t wr = wait_queue_block(&rp->wait, timeout);
        if (wr != NO_ERROR)
            return wr;
        // Woken with NO_ERROR: retry the read.
    }
}
/*
 * Read one packet from a read port or from any member of a port group,
 * blocking for up to |timeout|.
 *
 * port:    a read port (READPORT_MAGIC) or a port group (PORTGROUP_MAGIC).
 * result:  receives the packet and the originating port's ctx.
 *
 * Returns NO_ERROR on success, ERR_TIMED_OUT on timeout,
 * ERR_BAD_HANDLE for any other handle type, or the wait-queue error
 * (e.g. ERR_OBJECT_DESTROYED when the port is closed underneath us).
 */
status_t port_read(port_t port, lk_time_t timeout, port_result_t *result)
{
    if (!port || !result)
        return ERR_INVALID_ARGS;

    status_t rc = ERR_GENERIC;
    read_port_t *rp = (read_port_t *)port;

    THREAD_LOCK(state);

    if (rp->magic == READPORT_MAGIC) {
        // dealing with a single port.
        rc = read_no_lock(rp, timeout, result);
    } else if (rp->magic == PORTGROUP_MAGIC) {
        // dealing with a port group.
        port_group_t *pg = (port_group_t *)port;

        do {
            // Poll each member port (zero timeout => non-blocking read).
            // todo: this order is fixed, probably a bad thing: earlier
            // members are always serviced first.
            list_for_every_entry(&pg->rp_list, rp, read_port_t, g_node) {
                rc = read_no_lock(rp, 0, result);
                if (rc != ERR_TIMED_OUT)
                    goto read_exit;   // got data or a hard error.
            }
            // no data on any member, block on the group waitqueue;
            // port_write() wakes it when a member receives a packet.
            rc = wait_queue_block(&pg->wait, timeout);
        } while (rc == NO_ERROR);
    } else {
        // wrong port type.
        rc = ERR_BAD_HANDLE;
    }

read_exit:
    THREAD_UNLOCK(state);
    return rc;
}
/*
 * Destroy a previously closed write port.
 *
 * port must be a write port already transitioned to WRITEPORT_MAGIC_X
 * by port_close(); an open (WRITEPORT_MAGIC_W) or non-write port is
 * rejected with ERR_BAD_HANDLE.
 *
 * All blocked readers (per-port and group waiters) are woken with
 * ERR_CANCELLED and detached (rp->wport = NULL); the readers keep
 * their own buffers and remain readable until individually closed.
 */
status_t port_destroy(port_t port)
{
    if (!port)
        return ERR_INVALID_ARGS;

    write_port_t *wp = (write_port_t *) port;
    port_buf_t *buf = NULL;   // freed after the lock is dropped.

    THREAD_LOCK(state);

    if (wp->magic != WRITEPORT_MAGIC_X) {
        // wrong port type.
        THREAD_UNLOCK(state);
        return ERR_BAD_HANDLE;
    }

    // remove self from global named ports list.
    list_delete(&wp->node);

    if (wp->buf) {
        // we have no readers; the write port owns the only buffer.
        buf = wp->buf;
    } else {
        // for each reader:
        read_port_t *rp;
        list_for_every_entry(&wp->rp_list, rp, read_port_t, w_node) {
            // wake the read and group ports.
            wait_queue_wake_all(&rp->wait, false, ERR_CANCELLED);
            if (rp->gport) {
                wait_queue_wake_all(&rp->gport->wait, false, ERR_CANCELLED);
            }
            // remove self from reader ports.
            rp->wport = NULL;
        }
    }

    wp->magic = 0;   // poison the magic so stale handles are rejected.
    THREAD_UNLOCK(state);

    free(buf);
    free(wp);
    return NO_ERROR;
}
/*
 * Close a port handle of any type.
 *
 *  - Read port: detaches from its write port and group, wakes waiters
 *    with ERR_OBJECT_DESTROYED, and frees the handle.  The buffer is
 *    handed back to the write port if this was the last reader,
 *    otherwise freed.
 *  - Port group: wakes waiters, detaches all member ports, frees.
 *  - Write port: only marks it closed (WRITEPORT_MAGIC_X); it can
 *    still be read from, and must later be freed via port_destroy().
 *
 * Fix: previously, closing a read port whose write port had already
 * been destroyed (rp->wport == NULL, see port_destroy()) leaked the
 * read port's buffer; it is now freed.
 */
status_t port_close(port_t port)
{
    if (!port)
        return ERR_INVALID_ARGS;

    read_port_t *rp = (read_port_t *) port;
    port_buf_t *buf = NULL;   // freed after the lock is dropped.

    THREAD_LOCK(state);

    if (rp->magic == READPORT_MAGIC) {
        // dealing with a read port.
        if (rp->wport) {
            // remove self from write port list and reassign the buffer if last.
            list_delete(&rp->w_node);
            if (list_is_empty(&rp->wport->rp_list)) {
                rp->wport->buf = rp->buf;
                rp->buf = NULL;
            } else {
                buf = rp->buf;
            }
        } else {
            // the write port is already destroyed, so this read port is
            // the sole owner of its buffer; free it below.
            buf = rp->buf;
        }
        if (rp->gport) {
            // remove self from port group list.
            list_delete(&rp->g_node);
        }
        // wake up waiters, the return code is ERR_OBJECT_DESTROYED.
        wait_queue_destroy(&rp->wait, true);
        rp->magic = 0;
    } else if (rp->magic == PORTGROUP_MAGIC) {
        // dealing with a port group.
        port_group_t *pg = (port_group_t *) port;
        // wake up waiters.
        wait_queue_destroy(&pg->wait, true);
        // remove self from reader ports.
        rp = NULL;
        list_for_every_entry(&pg->rp_list, rp, read_port_t, g_node) {
            rp->gport = NULL;
        }
        pg->magic = 0;
    } else if (rp->magic == WRITEPORT_MAGIC_W) {
        // dealing with a write port.
        write_port_t *wp = (write_port_t *) port;
        // mark it as closed. Now it can be read but not written to.
        wp->magic = WRITEPORT_MAGIC_X;
        THREAD_UNLOCK(state);
        return NO_ERROR;
    } else {
        THREAD_UNLOCK(state);
        return ERR_BAD_HANDLE;
    }

    THREAD_UNLOCK(state);

    free(buf);
    free(port);
    return NO_ERROR;
}
| 27.230769 | 94 | 0.592545 | [
"object"
] |
be12cb420322acc9ef55a80350fdae92ad9e0934 | 1,412 | h | C | include/CRForestEstimator.h | ndretn/CV_project | 51deb8e9d084dd0de873524b132460cca0d42de2 | [
"BSD-3-Clause"
] | null | null | null | include/CRForestEstimator.h | ndretn/CV_project | 51deb8e9d084dd0de873524b132460cca0d42de2 | [
"BSD-3-Clause"
] | null | null | null | include/CRForestEstimator.h | ndretn/CV_project | 51deb8e9d084dd0de873524b132460cca0d42de2 | [
"BSD-3-Clause"
] | null | null | null | #pragma once
#include "CRForest.h"
#include <fstream>
#include "opencv2/core/core.hpp"
// A single vote cast by a regression-forest leaf.  |vote| is presumably a
// head center + orientation (x,y,z,pitch,yaw,roll) given POSE_SIZE usage
// elsewhere -- TODO confirm against the forest implementation.
struct Vote {
    cv::Vec<float,POSE_SIZE> vote;
    const float* trace;   // points to the leaf's trace value (not owned)
    const float* conf;    // points to the leaf's confidence value (not owned)
    // NOTE(review): orders by the *address* stored in trace, not the float it
    // points to.  If value ordering was intended this should be
    // *trace < *a.trace -- confirm before changing, callers may rely on it.
    bool operator<(const Vote& a) const { return trace<a.trace; }
};
// Estimator that runs a conditional regression forest (CRForest) over a 3D
// image and clusters the resulting votes into pose hypotheses.
class CRForestEstimator {

public:

    // Starts with no forest loaded; call loadForest() before estimate().
    CRForestEstimator(){ crForest = 0; };

    // NOTE(review): owns crForest via raw pointer but declares no copy
    // ctor/assignment (Rule of Three) -- copying an instance would
    // double-delete the forest.  Consider disabling copies.
    ~CRForestEstimator(){ if(crForest) delete crForest; };

    // Load up to ntrees trees from treespath (0 presumably means "all" --
    // TODO confirm in the implementation).  Returns false on failure.
    bool loadForest(const char* treespath, int ntrees = 0);

    void estimate( const cv::Mat & im3D, //input: 3d image (x,y,z coordinates for each pixel)
                   std::vector< cv::Vec<float,POSE_SIZE> >& means, //output: heads' centers and orientations (x,y,z,pitch,yaw,roll)
                   std::vector< std::vector< Vote > >& clusters, //all clusters
                   std::vector< Vote >& votes, //all votes
                   int stride = 5, //stride
                   float max_variance = 1000, //max leaf variance
                   float prob_th = 1.0, //threshold on the leaf's probability of belonging to a head
                   float larger_radius_ratio = 1.0, //for clustering heads
                   float smaller_radius_ratio = 6.0, //for mean shift
                   bool verbose = false, //print out more info
                   int threshold = 400 //head threshold
                 );

private:

    // Axis-aligned bounding box of the valid (non-zero) region of im3D.
    cv::Rect getBoundingBox(const cv::Mat& im3D);

    // Owned forest instance; 0 until loadForest() succeeds.
    CRForest* crForest;

};
| 28.816327 | 132 | 0.590652 | [
"vector",
"3d"
] |
be153462fdd84e80ac554c94e0e38d0833c86355 | 3,558 | h | C | src/ThirdParty/cppassist/cppassist/source/cppassist/include/cppassist/cmdline/ArgumentParser.h | Sasha7b9/U-Cube | 442927ff1391bfe78cdf520ad303c7dc29086b46 | [
"MIT"
] | 23 | 2016-04-18T19:02:20.000Z | 2022-01-31T00:54:36.000Z | src/ThirdParty/cppassist/cppassist/source/cppassist/include/cppassist/cmdline/ArgumentParser.h | Sasha7b9/U-Cube | 442927ff1391bfe78cdf520ad303c7dc29086b46 | [
"MIT"
] | 33 | 2016-04-28T10:02:39.000Z | 2018-10-23T07:06:43.000Z | src/ThirdParty/cppassist/cppassist/source/cppassist/include/cppassist/cmdline/ArgumentParser.h | Sasha7b9/U-Cube | 442927ff1391bfe78cdf520ad303c7dc29086b46 | [
"MIT"
] | 9 | 2016-08-10T13:06:23.000Z | 2020-03-15T23:28:45.000Z |
#pragma once
#include <string>
#include <vector>
#include <map>
#include <cppassist/cppassist_api.h>
namespace cppassist
{
/**
*  @brief
*    Parser for command line arguments
*
*  Understands argument lists of the form
*
*    `<executable> [--<option> <value>]* [-<option>]* [<param>]*`
*
*  where the first argument is taken to be the executable name,
*  double-dash options (`--opt`) consume the following argument as
*  their value, single-dash options (`-opt`) are boolean flags whose
*  value becomes `true` when present, and every remaining argument is
*  collected into the parameter list.
*/
class CPPASSIST_API ArgumentParser
{
public:
    /**
    *  @brief
    *    Constructor
    */
    ArgumentParser();

    /**
    *  @brief
    *    Destructor
    */
    ~ArgumentParser();

    /**
    *  @brief
    *    Parse the given command line (argc/argv as passed to main)
    */
    void parse(int argc, char * argv[]);

    /**
    *  @brief
    *    Get all parsed options as a key->value map
    *
    *  @remark
    *    Flag options (single "-") carry the value `true` when set.
    */
    const std::map<std::string, std::string> & options() const;

    /**
    *  @brief
    *    Check whether the named option (e.g., "-v") was given
    */
    bool isSet(const std::string & option) const;

    /**
    *  @brief
    *    Get the value of an option, or defaultValue if it was not given
    */
    std::string value(const std::string & option, const std::string & defaultValue = "") const;

    /**
    *  @brief
    *    Get the value of an option by reference, or defaultValue if it
    *    was not given
    */
    const std::string & value(const std::string & option, std::string & defaultValue) const;

    /**
    *  @brief
    *    Get the value of an option converted to T, or defaultValue if
    *    it was not given
    *
    *  @remark
    *    Conversion is performed via string::fromString().
    */
    template <typename T>
    T value(const std::string & option, const T & defaultValue = T()) const;

    /**
    *  @brief
    *    Get the list of additional (non-option) parameters
    */
    const std::vector<std::string> & params() const;

    /**
    *  @brief
    *    Print the parsed command line to the console
    */
    void print() const;

private:
    std::map<std::string, std::string> m_options; ///< Parsed options (key->value)
    std::vector<std::string>           m_params;  ///< Additional parameters (e.g., file or object list)
};
} // namespace cppassist
#include <cppassist/cmdline/ArgumentParser.inl>
| 22.2375 | 104 | 0.565486 | [
"object",
"vector"
] |
be154dde096dcc94fbfcf0784c590f56ddc26c52 | 8,730 | c | C | src/bwish/main.c | dservin/brlcad | 34b72d3efd24ac2c84abbccf9452323231751cd1 | [
"BSD-4-Clause",
"BSD-3-Clause"
] | null | null | null | src/bwish/main.c | dservin/brlcad | 34b72d3efd24ac2c84abbccf9452323231751cd1 | [
"BSD-4-Clause",
"BSD-3-Clause"
] | null | null | null | src/bwish/main.c | dservin/brlcad | 34b72d3efd24ac2c84abbccf9452323231751cd1 | [
"BSD-4-Clause",
"BSD-3-Clause"
] | null | null | null | /* M A I N . C
* BRL-CAD
*
* Copyright (c) 1998-2022 United States Government as represented by
* the U.S. Army Research Laboratory.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* version 2.1 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this file; see the file named COPYING for more
* information.
*
*/
/** @file bwish/main.c
*
* This file provides the main() function for both BWISH and BTCLSH.
* While initializing Tcl, Itcl and various BRL-CAD libraries it sets
* things up to provide command history and command line editing.
*
*/
#include "common.h"
#include <ctype.h>
#include "tcl.h"
#ifdef BWISH
#include "tk.h"
#endif
#include <locale.h>
#include "bio.h"
#include "libtermio.h"
#ifdef HAVE_SYS_SELECT_H
# include <sys/select.h> /* for select */
#endif
#include "bu/app.h"
#include "vmath.h"
#include "tclcad.h"
extern int cmdInit(Tcl_Interp *interp);
#ifdef BWISH
Tk_Window tkwin;
#endif
#ifndef HAVE_WINDOWS_H
Tcl_Interp *INTERP;
#endif
#if defined(HAVE_WINDOWS_H) && defined(BWISH)
static BOOL consoleRequired = TRUE;
#endif
/* We need to be careful about tty resetting - xcodebuild
* and resetTty were locking up. Add a tty check
* along the lines of
* http://stackoverflow.com/questions/1594251/how-to-check-if-stdin-is-still-opened-without-blocking
*/
#ifdef HAVE_SYS_SELECT_H
/*
 * Return 1 when fd has data ready for reading (i.e. a select() with a
 * ~zero timeout reports it readable), 0 otherwise.  Used to probe
 * whether stdin is still usable without blocking.
 */
int tty_usable(int fd) {
    fd_set readfds;
    struct timeval tv;

    FD_ZERO(&readfds);
    FD_SET(fd, &readfds);

    /* effectively a poll: wait at most one microsecond. */
    tv.tv_sec = 0;
    tv.tv_usec = 1;

    if (select(fd + 1, &readfds, NULL, NULL, &tv) == 1)
        return 1;
    return 0;
}
#endif
/* defined in input.c */
extern void initInput(void);
#ifdef BWISH
# if defined(HAVE_WINDOWS_H)
# define CAD_RCFILENAME "~/.bwishrc.tcl"
# else
# define CAD_RCFILENAME "~/.bwishrc"
# endif
#else
# if defined(HAVE_WINDOWS_H)
# define CAD_RCFILENAME "~/.btclshrc.tcl"
# else
# define CAD_RCFILENAME "~/.btclshrc"
# endif
#endif
#if defined(BWISH) && defined(HAVE_WINDOWS_H)
/*
* BwishPanic --
*
* Display a message and exit.
*
* Results: None.
* Side effects: Exits the program.
*/
void
BwishPanic(const char *format, ...)
{
    va_list argList;
    char buf[1024];

    va_start(argList, format);
    vsnprintf(buf, sizeof(buf), format, argList);
    /* va_end was missing: C requires every va_start to be paired with
     * va_end before the function returns. */
    va_end(argList);

    MessageBeep(MB_ICONEXCLAMATION);
    MessageBox(NULL, (LPCSTR)buf, (LPCSTR)"Fatal Error in bwish",
	       MB_ICONSTOP | MB_OK | MB_TASKMODAL | MB_SETFOREGROUND);
#ifdef _MSC_VER
    DebugBreak();
#endif
    ExitProcess(1);
}
#endif
#ifdef HAVE_WINDOWS_H
/*
 * Windows Tcl/Tk application-init hook passed to Tcl_Main/Tk_Main.
 *
 * Initializes the BRL-CAD Tcl bindings (tclcad_init; with Tk when
 * built as BWISH), optionally attaches the Tk console window, and sets
 * tcl_rcFileName so the per-user startup script is sourced.  On init
 * failure bwish pops up a message box and exits the process; btclsh
 * merely logs the error and continues.
 */
int
Tcl_WinInit(Tcl_Interp *interp)
{
    struct bu_vls tlog = BU_VLS_INIT_ZERO;   /* collects init diagnostics */
    int status = TCL_OK;

#ifdef BWISH
    status = tclcad_init(interp, 1, &tlog);
    if (status != TCL_ERROR) {
	/* attach stdio to the Tk console widget if one was requested. */
	if (consoleRequired) status = Tk_CreateConsoleWindow(interp);
    }
#else
    status = tclcad_init(interp, 0, &tlog);
#endif

    if (status == TCL_ERROR) {
#ifdef BWISH
	struct bu_vls errstr = BU_VLS_INIT_ZERO;
	bu_vls_sprintf(&errstr, "Error in bwish:\n%s\n", bu_vls_addr(&tlog));
	MessageBeep(MB_ICONEXCLAMATION);
	MessageBox(NULL, (LPCSTR)Tcl_GetStringResult(interp), (LPCSTR)bu_vls_addr(&errstr),
		   MB_ICONSTOP | MB_OK | MB_TASKMODAL | MB_SETFOREGROUND);
	bu_vls_free(&errstr);
	bu_vls_free(&tlog);
	ExitProcess(1);
#else
	bu_log("tclcad_init failure:\n%s\n", bu_vls_addr(&tlog));
#endif
    }

    Tcl_SetVar(interp, "tcl_rcFileName", CAD_RCFILENAME, TCL_GLOBAL_ONLY);
    bu_vls_free(&tlog);
    return TCL_OK;
}
#endif /* HAVE_WINDOWS_H */
/*
 * Exit the application via Tcl_Exit, first restoring the terminal
 * settings saved earlier (see save_Tty in main).  On platforms with
 * select() the tty is only reset when stdin is actually usable --
 * works around lockups observed under xcodebuild (see tty_usable
 * comment above).
 */
void
Cad_Exit(int status)
{
#ifndef HAVE_WINDOWS_H
#ifdef HAVE_SYS_SELECT_H
    if (tty_usable(fileno(stdin))) {
	reset_Tty(fileno(stdin));
    }
#else
    reset_Tty(fileno(stdin));
#endif
#endif
    Tcl_Exit(status);
}
#ifdef BWISH
/*
 * Write "<title>: <msg>\n" to the Tcl standard error channel.
 * Silently does nothing when no stderr channel is available.
 */
static void
displayWarning(
    const char *msg,	/* Message to be displayed. */
    const char *title)	/* Title of warning. */
{
    Tcl_Channel errChannel = Tcl_GetStdChannel(TCL_STDERR);

    if (!errChannel)
	return;

    Tcl_WriteChars(errChannel, title, -1);
    Tcl_WriteChars(errChannel, ": ", 2);
    Tcl_WriteChars(errChannel, msg, -1);
    Tcl_WriteChars(errChannel, "\n", 1);
}
#endif
/*
 * Program entry point.  On Windows GUI builds (BWISH) the entry point
 * is WinMain and argc/argv are recovered from the C runtime; on all
 * other configurations it is a conventional main().
 *
 * Windows builds delegate everything to Tcl_Main/Tk_Main with
 * Tcl_WinInit as the init hook.  Non-Windows builds create the
 * interpreter themselves, expose argc/argv/argv0 to Tcl, initialize
 * the BRL-CAD bindings, then either evaluate a startup script given as
 * the first argument or enter interactive mode with line editing, and
 * finally spin the Tcl event loop.
 */
#if defined(BWISH) && defined(HAVE_WINDOWS_H)
int APIENTRY
WinMain(HINSTANCE hInstance,
	HINSTANCE hPrevInstance,
	LPSTR lpszCmdLine,
	int nCmdShow)
{
    char **argv;
    int argc;
#else
int
main(int argc, char **argv)
{
#endif
#if defined(BWISH) && defined(HAVE_WINDOWS_H)
    Tcl_SetPanicProc(BwishPanic);

    /* Create the console channels and install them as the standard channels.
     * All I/O will be discarded until Tk_CreateConsoleWindow is called to
     * attach the console to a text widget. */
    consoleRequired = TRUE;
#endif
#if defined(HAVE_WINDOWS_H)
    setlocale(LC_ALL, "C");
#endif
#if defined(BWISH) && defined(HAVE_WINDOWS_H)
    /* Get our args from the c-runtime. Ignore lpszCmdLine. */
    argc = __argc;
    argv = __argv;
#endif
#if !defined(HAVE_WINDOWS_H)
    struct bu_vls tlog = BU_VLS_INIT_ZERO;   /* init diagnostics */
    int status = TCL_OK;
    char *filename = NULL;                   /* optional startup script */
    char *args = NULL;
    char buf[TCL_INTEGER_SPACE] = {0};
    Tcl_DString argString;

    /* Create the interpreter */
    INTERP = Tcl_CreateInterp();
    Tcl_FindExecutable(argv[0]);
#endif
#if defined(HAVE_WINDOWS_H)
    {
	/* Forward slashes substituted for backslashes. */
	char *p;
	for (p = argv[0]; *p != '\0'; p++) {
	    if (*p == '\\') {
		*p = '/';
	    }
	}
    }
#endif

    bu_setprogname(argv[0]);

#if defined(HAVE_WINDOWS_H)
# ifdef BWISH
    Tk_Main(argc, argv, Tcl_WinInit);
# else
    Tcl_Main(argc, argv, Tcl_WinInit);
# endif
#else
    /* A leading non-option argument is treated as a startup script and
     * consumed before the remaining args are handed to Tcl. */
    if ((argc > 1) && (argv[1][0] != '-')) {
	filename = argv[1];
	argc--;
	argv++;
    }

    /*
     * Make command-line arguments available in the Tcl variables "argc"
     * and "argv".
     */
    args = Tcl_Merge(argc-1, (const char * const *)argv+1);
    Tcl_ExternalToUtfDString(NULL, args, -1, &argString);
    Tcl_SetVar(INTERP, "argv", Tcl_DStringValue(&argString), TCL_GLOBAL_ONLY);
    Tcl_DStringFree(&argString);
    ckfree(args);

    if (filename == NULL) {
	(void)Tcl_ExternalToUtfDString(NULL, argv[0], -1, &argString);
    } else {
	filename = Tcl_ExternalToUtfDString(NULL, filename, -1, &argString);
    }

    sprintf(buf, "%ld", (long)(argc-1));
    Tcl_SetVar(INTERP, "argc", buf, TCL_GLOBAL_ONLY);
    Tcl_SetVar(INTERP, "argv0", Tcl_DStringValue(&argString), TCL_GLOBAL_ONLY);

    /* Second argument selects Tk support (1 = bwish, 0 = btclsh). */
#ifdef BWISH
    status = tclcad_init(INTERP, 1, &tlog);
#else
    status = tclcad_init(INTERP, 0, &tlog);
#endif

    if (status == TCL_ERROR) {
#ifdef BWISH
	struct bu_vls errstr = BU_VLS_INIT_ZERO;
	bu_vls_sprintf(&errstr, "Application initialization failed:\n%s\n", bu_vls_addr(&tlog));
	displayWarning(bu_vls_addr(&errstr), "ERROR");
	bu_vls_free(&errstr);
#else
	bu_log("tclcad_init failure:\n%s\n", bu_vls_addr(&tlog));
#endif
    } else {
	/* register bwish/btclsh commands */
	cmdInit(INTERP);
    }
    bu_vls_free(&tlog);

    if (filename != NULL) {
	int fstatus;

	/* ??? need to arrange for a bu_log handler and or handlers
	 * for stdout/stderr?
	 */
	/* save terminal state so Cad_Exit can restore it afterwards. */
#ifdef HAVE_SYS_SELECT_H
	if (tty_usable(fileno(stdin))) {
	    save_Tty(fileno(stdin));
	}
#else
	save_Tty(fileno(stdin));
#endif
	Tcl_ResetResult(INTERP);
	fstatus = Tcl_EvalFile(INTERP, filename);
	if (fstatus != TCL_OK) {
	    Tcl_AddErrorInfo(INTERP, "");
#ifdef BWISH
	    displayWarning(Tcl_GetVar(INTERP, "errorInfo",
				      TCL_GLOBAL_ONLY), "Error in startup script");
#else
	    bu_log("Error in startup script: %s\n", Tcl_GetVar(INTERP, "errorInfo", TCL_GLOBAL_ONLY));
#endif
	}
	/* btclsh exits after running the script; bwish falls through to
	 * the Tk event loop below. */
#ifndef BWISH
	Cad_Exit(fstatus);
#endif
    } else {
	/* We're running interactively. */
	/* Set up to handle commands from user as well as
	   provide a command line editing capability. */
	initInput();

	/* Set the name of the startup file. */
	Tcl_SetVar(INTERP, "tcl_rcFileName", CAD_RCFILENAME, TCL_GLOBAL_ONLY);

	/* Source the startup file if it exists. */
	Tcl_SourceRCFile(INTERP);
    }

    Tcl_DStringFree(&argString);

    /* Event loop: bwish runs while Tk windows remain; btclsh forever
     * (exit happens via the Tcl "exit" command / Cad_Exit). */
#ifdef BWISH
    while (Tk_GetNumMainWindows() > 0) {
#else
    while (1) {
#endif
	Tcl_DoOneEvent(0);
    }

    Cad_Exit(TCL_OK);
#endif /* HAVE_WINDOWS_H */

    return 0;
}
/*
* Local Variables:
* mode: C
* tab-width: 8
* indent-tabs-mode: t
* c-file-style: "stroustrup"
* End:
* ex: shiftwidth=4 tabstop=8
*/
| 23.722826 | 100 | 0.67858 | [
"cad"
] |
be204da57766f0add51e92899c767854f759ac15 | 13,966 | h | C | aws-cpp-sdk-sagemaker/include/aws/sagemaker/model/ScheduleConfig.h | lintonv/aws-sdk-cpp | 15e19c265ffce19d2046b18aa1b7307fc5377e58 | [
"Apache-2.0"
] | 1 | 2021-12-06T20:36:35.000Z | 2021-12-06T20:36:35.000Z | aws-cpp-sdk-sagemaker/include/aws/sagemaker/model/ScheduleConfig.h | lintonv/aws-sdk-cpp | 15e19c265ffce19d2046b18aa1b7307fc5377e58 | [
"Apache-2.0"
] | 1 | 2022-01-03T23:59:37.000Z | 2022-01-03T23:59:37.000Z | aws-cpp-sdk-sagemaker/include/aws/sagemaker/model/ScheduleConfig.h | ravindra-wagh/aws-sdk-cpp | 7d5ff01b3c3b872f31ca98fb4ce868cd01e97696 | [
"Apache-2.0"
] | 1 | 2022-03-23T15:17:18.000Z | 2022-03-23T15:17:18.000Z | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/sagemaker/SageMaker_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
class JsonView;
} // namespace Json
} // namespace Utils
namespace SageMaker
{
namespace Model
{
/**
* <p>Configuration details about the monitoring schedule.</p><p><h3>See Also:</h3>
* <a
* href="http://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ScheduleConfig">AWS
* API Reference</a></p>
*/
class AWS_SAGEMAKER_API ScheduleConfig
{
public:
ScheduleConfig();
ScheduleConfig(Aws::Utils::Json::JsonView jsonValue);
ScheduleConfig& operator=(Aws::Utils::Json::JsonView jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>A cron expression that describes details about the monitoring schedule.</p>
* <p>Currently the only supported cron expressions are:</p> <ul> <li> <p>If you
* want to set the job to start every hour, please use the following:</p> <p>
* <code>Hourly: cron(0 * ? * * *)</code> </p> </li> <li> <p>If you want to start
* the job daily:</p> <p> <code>cron(0 [00-23] ? * * *)</code> </p> </li> </ul>
* <p>For example, the following are valid cron expressions:</p> <ul> <li> <p>Daily
* at noon UTC: <code>cron(0 12 ? * * *)</code> </p> </li> <li> <p>Daily at
* midnight UTC: <code>cron(0 0 ? * * *)</code> </p> </li> </ul> <p>To support
* running every 6, 12 hours, the following are also supported:</p> <p>
* <code>cron(0 [00-23]/[01-24] ? * * *)</code> </p> <p>For example, the following
* are valid cron expressions:</p> <ul> <li> <p>Every 12 hours, starting at 5pm
* UTC: <code>cron(0 17/12 ? * * *)</code> </p> </li> <li> <p>Every two hours
* starting at midnight: <code>cron(0 0/2 ? * * *)</code> </p> </li> </ul>
* <ul> <li> <p>Even though the cron expression is set to start at 5PM UTC, note
* that there could be a delay of 0-20 minutes from the actual requested time to
* run the execution. </p> </li> <li> <p>We recommend that if you would like a
* daily schedule, you do not provide this parameter. Amazon SageMaker will pick a
* time for running every day.</p> </li> </ul>
*/
inline const Aws::String& GetScheduleExpression() const{ return m_scheduleExpression; }
/**
* <p>A cron expression that describes details about the monitoring schedule.</p>
* <p>Currently the only supported cron expressions are:</p> <ul> <li> <p>If you
* want to set the job to start every hour, please use the following:</p> <p>
* <code>Hourly: cron(0 * ? * * *)</code> </p> </li> <li> <p>If you want to start
* the job daily:</p> <p> <code>cron(0 [00-23] ? * * *)</code> </p> </li> </ul>
* <p>For example, the following are valid cron expressions:</p> <ul> <li> <p>Daily
* at noon UTC: <code>cron(0 12 ? * * *)</code> </p> </li> <li> <p>Daily at
* midnight UTC: <code>cron(0 0 ? * * *)</code> </p> </li> </ul> <p>To support
* running every 6, 12 hours, the following are also supported:</p> <p>
* <code>cron(0 [00-23]/[01-24] ? * * *)</code> </p> <p>For example, the following
* are valid cron expressions:</p> <ul> <li> <p>Every 12 hours, starting at 5pm
* UTC: <code>cron(0 17/12 ? * * *)</code> </p> </li> <li> <p>Every two hours
* starting at midnight: <code>cron(0 0/2 ? * * *)</code> </p> </li> </ul>
* <ul> <li> <p>Even though the cron expression is set to start at 5PM UTC, note
* that there could be a delay of 0-20 minutes from the actual requested time to
* run the execution. </p> </li> <li> <p>We recommend that if you would like a
* daily schedule, you do not provide this parameter. Amazon SageMaker will pick a
* time for running every day.</p> </li> </ul>
*/
inline bool ScheduleExpressionHasBeenSet() const { return m_scheduleExpressionHasBeenSet; }
/**
* <p>A cron expression that describes details about the monitoring schedule.</p>
* <p>Currently the only supported cron expressions are:</p> <ul> <li> <p>If you
* want to set the job to start every hour, please use the following:</p> <p>
* <code>Hourly: cron(0 * ? * * *)</code> </p> </li> <li> <p>If you want to start
* the job daily:</p> <p> <code>cron(0 [00-23] ? * * *)</code> </p> </li> </ul>
* <p>For example, the following are valid cron expressions:</p> <ul> <li> <p>Daily
* at noon UTC: <code>cron(0 12 ? * * *)</code> </p> </li> <li> <p>Daily at
* midnight UTC: <code>cron(0 0 ? * * *)</code> </p> </li> </ul> <p>To support
* running every 6, 12 hours, the following are also supported:</p> <p>
* <code>cron(0 [00-23]/[01-24] ? * * *)</code> </p> <p>For example, the following
* are valid cron expressions:</p> <ul> <li> <p>Every 12 hours, starting at 5pm
* UTC: <code>cron(0 17/12 ? * * *)</code> </p> </li> <li> <p>Every two hours
* starting at midnight: <code>cron(0 0/2 ? * * *)</code> </p> </li> </ul>
* <ul> <li> <p>Even though the cron expression is set to start at 5PM UTC, note
* that there could be a delay of 0-20 minutes from the actual requested time to
* run the execution. </p> </li> <li> <p>We recommend that if you would like a
* daily schedule, you do not provide this parameter. Amazon SageMaker will pick a
* time for running every day.</p> </li> </ul>
*/
inline void SetScheduleExpression(const Aws::String& value) { m_scheduleExpressionHasBeenSet = true; m_scheduleExpression = value; }
/**
* <p>A cron expression that describes details about the monitoring schedule.</p>
* <p>Currently the only supported cron expressions are:</p> <ul> <li> <p>If you
* want to set the job to start every hour, please use the following:</p> <p>
* <code>Hourly: cron(0 * ? * * *)</code> </p> </li> <li> <p>If you want to start
* the job daily:</p> <p> <code>cron(0 [00-23] ? * * *)</code> </p> </li> </ul>
* <p>For example, the following are valid cron expressions:</p> <ul> <li> <p>Daily
* at noon UTC: <code>cron(0 12 ? * * *)</code> </p> </li> <li> <p>Daily at
* midnight UTC: <code>cron(0 0 ? * * *)</code> </p> </li> </ul> <p>To support
* running every 6, 12 hours, the following are also supported:</p> <p>
* <code>cron(0 [00-23]/[01-24] ? * * *)</code> </p> <p>For example, the following
* are valid cron expressions:</p> <ul> <li> <p>Every 12 hours, starting at 5pm
* UTC: <code>cron(0 17/12 ? * * *)</code> </p> </li> <li> <p>Every two hours
* starting at midnight: <code>cron(0 0/2 ? * * *)</code> </p> </li> </ul>
* <ul> <li> <p>Even though the cron expression is set to start at 5PM UTC, note
* that there could be a delay of 0-20 minutes from the actual requested time to
* run the execution. </p> </li> <li> <p>We recommend that if you would like a
* daily schedule, you do not provide this parameter. Amazon SageMaker will pick a
* time for running every day.</p> </li> </ul>
*/
inline void SetScheduleExpression(Aws::String&& value) { m_scheduleExpressionHasBeenSet = true; m_scheduleExpression = std::move(value); }
/**
* <p>A cron expression that describes details about the monitoring schedule.</p>
* <p>Currently the only supported cron expressions are:</p> <ul> <li> <p>If you
* want to set the job to start every hour, please use the following:</p> <p>
* <code>Hourly: cron(0 * ? * * *)</code> </p> </li> <li> <p>If you want to start
* the job daily:</p> <p> <code>cron(0 [00-23] ? * * *)</code> </p> </li> </ul>
* <p>For example, the following are valid cron expressions:</p> <ul> <li> <p>Daily
* at noon UTC: <code>cron(0 12 ? * * *)</code> </p> </li> <li> <p>Daily at
* midnight UTC: <code>cron(0 0 ? * * *)</code> </p> </li> </ul> <p>To support
* running every 6, 12 hours, the following are also supported:</p> <p>
* <code>cron(0 [00-23]/[01-24] ? * * *)</code> </p> <p>For example, the following
* are valid cron expressions:</p> <ul> <li> <p>Every 12 hours, starting at 5pm
* UTC: <code>cron(0 17/12 ? * * *)</code> </p> </li> <li> <p>Every two hours
* starting at midnight: <code>cron(0 0/2 ? * * *)</code> </p> </li> </ul>
* <ul> <li> <p>Even though the cron expression is set to start at 5PM UTC, note
* that there could be a delay of 0-20 minutes from the actual requested time to
* run the execution. </p> </li> <li> <p>We recommend that if you would like a
* daily schedule, you do not provide this parameter. Amazon SageMaker will pick a
* time for running every day.</p> </li> </ul>
*/
inline void SetScheduleExpression(const char* value) { m_scheduleExpressionHasBeenSet = true; m_scheduleExpression.assign(value); }
/**
* <p>A cron expression that describes details about the monitoring schedule.</p>
* <p>Currently the only supported cron expressions are:</p> <ul> <li> <p>If you
* want to set the job to start every hour, please use the following:</p> <p>
* <code>Hourly: cron(0 * ? * * *)</code> </p> </li> <li> <p>If you want to start
* the job daily:</p> <p> <code>cron(0 [00-23] ? * * *)</code> </p> </li> </ul>
* <p>For example, the following are valid cron expressions:</p> <ul> <li> <p>Daily
* at noon UTC: <code>cron(0 12 ? * * *)</code> </p> </li> <li> <p>Daily at
* midnight UTC: <code>cron(0 0 ? * * *)</code> </p> </li> </ul> <p>To support
* running every 6, 12 hours, the following are also supported:</p> <p>
* <code>cron(0 [00-23]/[01-24] ? * * *)</code> </p> <p>For example, the following
* are valid cron expressions:</p> <ul> <li> <p>Every 12 hours, starting at 5pm
* UTC: <code>cron(0 17/12 ? * * *)</code> </p> </li> <li> <p>Every two hours
* starting at midnight: <code>cron(0 0/2 ? * * *)</code> </p> </li> </ul>
* <ul> <li> <p>Even though the cron expression is set to start at 5PM UTC, note
* that there could be a delay of 0-20 minutes from the actual requested time to
* run the execution. </p> </li> <li> <p>We recommend that if you would like a
* daily schedule, you do not provide this parameter. Amazon SageMaker will pick a
* time for running every day.</p> </li> </ul>
*/
inline ScheduleConfig& WithScheduleExpression(const Aws::String& value) { SetScheduleExpression(value); return *this;}
/**
* <p>A cron expression that describes details about the monitoring schedule.</p>
* <p>Currently the only supported cron expressions are:</p> <ul> <li> <p>If you
* want to set the job to start every hour, please use the following:</p> <p>
* <code>Hourly: cron(0 * ? * * *)</code> </p> </li> <li> <p>If you want to start
* the job daily:</p> <p> <code>cron(0 [00-23] ? * * *)</code> </p> </li> </ul>
* <p>For example, the following are valid cron expressions:</p> <ul> <li> <p>Daily
* at noon UTC: <code>cron(0 12 ? * * *)</code> </p> </li> <li> <p>Daily at
* midnight UTC: <code>cron(0 0 ? * * *)</code> </p> </li> </ul> <p>To support
* running every 6, 12 hours, the following are also supported:</p> <p>
* <code>cron(0 [00-23]/[01-24] ? * * *)</code> </p> <p>For example, the following
* are valid cron expressions:</p> <ul> <li> <p>Every 12 hours, starting at 5pm
* UTC: <code>cron(0 17/12 ? * * *)</code> </p> </li> <li> <p>Every two hours
* starting at midnight: <code>cron(0 0/2 ? * * *)</code> </p> </li> </ul>
* <ul> <li> <p>Even though the cron expression is set to start at 5PM UTC, note
* that there could be a delay of 0-20 minutes from the actual requested time to
* run the execution. </p> </li> <li> <p>We recommend that if you would like a
* daily schedule, you do not provide this parameter. Amazon SageMaker will pick a
* time for running every day.</p> </li> </ul>
*/
inline ScheduleConfig& WithScheduleExpression(Aws::String&& value) { SetScheduleExpression(std::move(value)); return *this;}
/**
* <p>A cron expression that describes details about the monitoring schedule.</p>
* <p>Currently the only supported cron expressions are:</p> <ul> <li> <p>If you
* want to set the job to start every hour, please use the following:</p> <p>
* <code>Hourly: cron(0 * ? * * *)</code> </p> </li> <li> <p>If you want to start
* the job daily:</p> <p> <code>cron(0 [00-23] ? * * *)</code> </p> </li> </ul>
* <p>For example, the following are valid cron expressions:</p> <ul> <li> <p>Daily
* at noon UTC: <code>cron(0 12 ? * * *)</code> </p> </li> <li> <p>Daily at
* midnight UTC: <code>cron(0 0 ? * * *)</code> </p> </li> </ul> <p>To support
* running every 6, 12 hours, the following are also supported:</p> <p>
* <code>cron(0 [00-23]/[01-24] ? * * *)</code> </p> <p>For example, the following
* are valid cron expressions:</p> <ul> <li> <p>Every 12 hours, starting at 5pm
* UTC: <code>cron(0 17/12 ? * * *)</code> </p> </li> <li> <p>Every two hours
* starting at midnight: <code>cron(0 0/2 ? * * *)</code> </p> </li> </ul>
* <ul> <li> <p>Even though the cron expression is set to start at 5PM UTC, note
* that there could be a delay of 0-20 minutes from the actual requested time to
* run the execution. </p> </li> <li> <p>We recommend that if you would like a
* daily schedule, you do not provide this parameter. Amazon SageMaker will pick a
* time for running every day.</p> </li> </ul>
*/
inline ScheduleConfig& WithScheduleExpression(const char* value) { SetScheduleExpression(value); return *this;}
private:
Aws::String m_scheduleExpression;
bool m_scheduleExpressionHasBeenSet;
};
} // namespace Model
} // namespace SageMaker
} // namespace Aws
| 61.79646 | 142 | 0.606258 | [
"model"
] |
0e6c761d96f6cb355c1810f9383ba4f8a981aa19 | 7,655 | c | C | gui/drv.c | charlesdaniels/libagar | 099ce716e2ca01a7904b23f22610bf589295f5b5 | [
"BSD-2-Clause"
] | null | null | null | gui/drv.c | charlesdaniels/libagar | 099ce716e2ca01a7904b23f22610bf589295f5b5 | [
"BSD-2-Clause"
] | null | null | null | gui/drv.c | charlesdaniels/libagar | 099ce716e2ca01a7904b23f22610bf589295f5b5 | [
"BSD-2-Clause"
] | null | null | null | /*
* Copyright (c) 2009-2019 Julien Nadeau Carriere <vedge@csoft.net>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Implementation of base AG_Driver object.
*/
#include <agar/config/have_sdl.h>
#include <agar/config/have_opengl.h>
#include <agar/config/have_glx.h>
#include <agar/config/have_wgl.h>
#include <agar/config/have_cocoa.h>
#include <agar/core/core.h>
#include <agar/core/config.h>
#include <agar/gui/window.h>
#include <agar/gui/text.h>
#if defined(HAVE_GLX)
extern AG_DriverClass agDriverGLX;
#endif
#if defined(HAVE_SDL)
extern AG_DriverClass agDriverSDLFB;
#endif
#if defined(HAVE_SDL) && defined(HAVE_OPENGL)
extern AG_DriverClass agDriverSDLGL;
#endif
#if defined(HAVE_WGL)
extern AG_DriverClass agDriverWGL;
#endif
#if defined(HAVE_COCOA)
extern AG_DriverClass agDriverCocoa;
#endif
AG_Object agDrivers;			/* Drivers VFS (root of attached driver instances) */
AG_DriverClass *agDriverOps = NULL;	/* Current (active) driver class */

/*
 * NULL-terminated table of available driver classes, in order of
 * preference. Which entries are present depends on the build-time
 * configuration (GLX, WGL, Cocoa, SDL+GL, SDL framebuffer).
 */
AG_DriverClass *agDriverList[] = {
#if defined(HAVE_GLX)
	&agDriverGLX,
#endif
#if defined(HAVE_WGL)
	&agDriverWGL,
#endif
#if defined(HAVE_COCOA)
	&agDriverCocoa,
#endif
#if defined(HAVE_SDL) && defined(HAVE_OPENGL)
	&agDriverSDLGL,
#endif
#if defined(HAVE_SDL)
	&agDriverSDLFB,
#endif
	NULL
};
/* Human-readable names indexed by driver type (framebuffer vs. vector). */
const char *agDriverTypeNames[] = {
	"Framebuffer",
	"Vector"
};
/* Human-readable names indexed by window-manager type. */
const char *agDriverWmTypeNames[] = {
	"Single-window",
	"Multi-window"
};
/*
 * Write a space-separated list of the available driver names into buf
 * (of size buf_len). The output is truncated to buf_len as needed.
 */
void
AG_ListDriverNames(char *buf, AG_Size buf_len)
{
	AG_DriverClass **pd;
	size_t len;

	if (buf_len == 0) {
		return;
	}
	for (pd = &agDriverList[0], buf[0] = '\0';
	     *pd != NULL;
	     pd++) {
		Strlcat(buf, (*pd)->name, buf_len);
		Strlcat(buf, " ", buf_len);
	}
	/*
	 * Trim the trailing separator. Guard against an empty driver
	 * list: the original code indexed buf[strlen(buf)-1], which is
	 * buf[-1] (out-of-bounds write) when no drivers are compiled in.
	 */
	len = strlen(buf);
	if (len > 0)
		buf[len - 1] = '\0';
}
/*
 * Create a new driver instance of the given class.
 *
 * Returns the new AG_Driver on success, or NULL if object creation or
 * the class's open() operation fails. On success, the driver receives
 * the lowest numerical ID not currently in use (starting at 1), is
 * renamed to "<class-name><id>", and is attached to the agDrivers VFS.
 */
AG_Driver *
AG_DriverOpen(AG_DriverClass *dc)
{
	AG_Driver *drv;

	if ((drv = AG_ObjectNew(NULL, dc->name, AGCLASS(dc))) == NULL) {
		return (NULL);
	}
	if (dc->open(drv, NULL) == -1) {
		AG_ObjectDestroy(drv);
		return (NULL);
	}
	/* Pick the first unused driver ID. */
	for (drv->id = 1; ; drv->id++) {
		if (AG_GetDriverByID(drv->id) == NULL)
			break;
	}
	AG_ObjectSetName(drv, "%s%u", dc->name, drv->id);
	AG_ObjectAttach(&agDrivers, drv);
	return (drv);
}
/*
 * Close and destroy a driver instance previously created with
 * AG_DriverOpen(): detach it from the agDrivers VFS, invoke the
 * class's close() operation, then destroy the object.
 */
void
AG_DriverClose(AG_Driver *drv)
{
	AG_ObjectDetach(drv);
	AGDRIVER_CLASS(drv)->close(drv);
	AG_ObjectDestroy(drv);
}
/*
 * Look up a driver instance by its numerical ID.
 * Returns NULL if no driver with that ID exists.
 * The agDrivers VFS must be locked by the caller.
 */
AG_Driver *
AG_GetDriverByID(Uint id)
{
	AG_Driver *drv;

	AGOBJECT_FOREACH_CHILD(drv, &agDrivers, ag_driver) {
		if (drv->id == id)
			return (drv);
	}
	return (NULL);
}
/*
 * Enter the GUI rendering context: raise the global agRenderingContext
 * flag and invoke the driver's beginRendering() operation. When the
 * renderer-aware time ops are active, wake up threads blocked on the
 * begin-render condition variable first.
 */
void
AG_BeginRendering(void *drv)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(HAVE_PTHREADS)
	if (agTimeOps == &agTimeOps_renderer)	/* Renderer-aware ops */
		AG_CondBroadcast(&agCondBeginRender);
#endif
	agRenderingContext = 1;
	AGDRIVER_CLASS(drv)->beginRendering(drv);
}
/*
 * Leave the GUI rendering context: invoke the driver's endRendering()
 * operation, clear the global agRenderingContext flag, and (with
 * renderer-aware time ops) wake threads blocked on the end-render
 * condition variable.
 */
void
AG_EndRendering(void *drv)
{
	AGDRIVER_CLASS(drv)->endRendering(drv);
	agRenderingContext = 0;
#if defined(HAVE_CLOCK_GETTIME) && defined(HAVE_PTHREADS)
	if (agTimeOps == &agTimeOps_renderer)	/* Renderer-aware ops */
		AG_CondBroadcast(&agCondEndRender);
#endif
}
#ifdef AG_SERIALIZATION
/*
 * Dump the display surface(s) to a jpeg in ~/.appname/screenshot/.
 * It is customary to assign a AG_GlobalKeys(3) shortcut for this function.
 * Only implemented for single-window (software framebuffer) drivers.
 */
void
AG_ViewCapture(void)
{
	AG_Surface *s;
	AG_Config *cfg;
	char *pname;
	char dir[AG_PATHNAME_MAX];
	char file[AG_PATHNAME_MAX+8];
	Uint seq;

	if (agDriverSw == NULL) {
		Verbose("AG_ViewCapture() is not implemented under "
		        "multiple-window drivers\n");
		return;
	}
	AG_LockVFS(&agDrivers);

	/*
	 * Grab the framebuffer contents. videoCapture() returns NULL on
	 * failure; the original test was inverted (it reported failure on
	 * success and would then dereference a NULL surface on failure).
	 */
	if ((s = AGDRIVER_SW_CLASS(agDriverSw)->videoCapture(agDriverSw)) == NULL) {
		Verbose("Capture failed: %s\n", AG_GetError());
		goto out;
	}
	/* Save to a new file under <save-path>/screenshot/. */
	cfg = AG_ConfigObject();
	AG_GetString(cfg, "save-path", dir, sizeof(dir));
	Strlcat(dir, AG_PATHSEP, sizeof(dir));
	Strlcat(dir, "screenshot", sizeof(dir));
	if (!AG_FileExists(dir) && AG_MkPath(dir) == -1) {
		Verbose("Capture failed: %s\n", AG_GetError());
		AG_SurfaceFree(s);		/* don't leak the capture */
		goto out;
	}
	pname = (agProgName != NULL) ? agProgName : "agarapp";
	/*
	 * Find the first unused sequence number. The original code
	 * incremented seq both in the loop header and in the Snprintf()
	 * argument, skipping every other number.
	 */
	for (seq = 0; ; seq++) {
		Snprintf(file, sizeof(file), "%s%c%s%u.jpg",
		    dir, AG_PATHSEPCHAR, pname, seq);
		if (!AG_FileExists(file))
			break;			/* XXX race condition */
	}
	if (AG_SurfaceExportJPEG(s, file, 100, 0) == 0) {
		Verbose("Saved capture to: %s\n", file);
	} else {
		Verbose("Capture failed: %s\n", AG_GetError());
	}
	AG_SurfaceFree(s);
out:
	AG_UnlockVFS(&agDrivers);
}
#endif /* AG_SERIALIZATION */
/* Return whether Agar is using OpenGL. */
int
AG_UsingGL(void *drv)
{
if (drv != NULL) {
return (AGDRIVER_CLASS(drv)->flags & AG_DRIVER_OPENGL);
} else {
return (agDriverOps->flags & AG_DRIVER_OPENGL);
}
}
/* Return whether Agar is using SDL. */
int
AG_UsingSDL(void *drv)
{
AG_DriverClass *dc = (drv != NULL) ? AGDRIVER_CLASS(drv) : agDriverOps;
return (dc->flags & AG_DRIVER_SDL);
}
/*
 * Return the resolution (px) of the parent display device into *w / *h.
 * For single-window drivers, the software display dimensions are used;
 * for multi-window drivers, the query is delegated to the driver class.
 * Returns 0 on success, -1 on failure or unknown WM type.
 */
int
AG_GetDisplaySize(void *drv, Uint *w, Uint *h)
{
	AG_DriverClass *dc = (drv != NULL) ? AGDRIVER_CLASS(drv) : agDriverOps;
	AG_DriverSw *dsw = (drv != NULL) ? (AG_DriverSw *)drv : agDriverSw;

	switch (dc->wm) {
	case AG_WM_SINGLE:
		*w = dsw->w;
		*h = dsw->h;
		return (0);
	case AG_WM_MULTIPLE:
		return dc->getDisplaySize(w, h);
	}
	return (-1);
}
/*
 * Object initialization for the base AG_Driver class. Allocates the
 * 1x1 32-bit RGBA reference surface (sRef) with byte-order-dependent
 * channel masks, and initializes the glyph cache and cursor list.
 * Fatal error if the reference surface cannot be allocated.
 */
static void
Init(void *_Nonnull obj)
{
	AG_Driver *drv = obj;

	drv->id = 0;
	drv->flags = 0;
	drv->sRef = AG_SurfaceRGBA(1,1, 32, 0,
#if AG_BYTEORDER == AG_BIG_ENDIAN
	    0xff000000, 0x00ff0000, 0x0000ff00, 0x000000ff
#else
	    0x000000ff, 0x0000ff00, 0x00ff0000, 0xff000000
#endif
	);
	if (drv->sRef == NULL) {
		AG_FatalError(NULL);
	}
	drv->videoFmt = NULL;
	drv->kbd = NULL;
	drv->mouse = NULL;
	drv->activeCursor = NULL;
	drv->gl = NULL;
	AG_TextInitGlyphCache(drv);
	TAILQ_INIT(&drv->cursors);
	drv->nCursors = 0;
}
/*
 * Object finalization for the base AG_Driver class: release the
 * reference surface, the video pixel-format structure (contents via
 * AG_PixelFormatFree(), then the structure itself) and the glyph cache.
 */
static void
Destroy(void *_Nonnull obj)
{
	AG_Driver *drv = obj;

	if (drv->sRef != NULL) {
		AG_SurfaceFree(drv->sRef);
	}
	if (drv->videoFmt != NULL) {
		AG_PixelFormatFree(drv->videoFmt);
		free(drv->videoFmt);
	}
	AG_TextDestroyGlyphCache(drv);
}
/* Class description for the base AG_Driver object class (version 1.6). */
AG_ObjectClass agDriverClass = {
	"AG_Driver",
	sizeof(AG_Driver),
	{ 1,6 },
	Init,
	NULL,		/* reset */
	Destroy,
	NULL,		/* load */
	NULL,		/* save */
	NULL		/* edit */
};
| 23.69969 | 80 | 0.695624 | [
"object",
"vector"
] |
0e7efb08374d346c8d198903f37793d7d4ce5032 | 8,061 | h | C | src/ui.h | dorodnic/playground | 1c64aa0ce4a5697a33cac19896705ec7de430952 | [
"Unlicense"
] | null | null | null | src/ui.h | dorodnic/playground | 1c64aa0ce4a5697a33cac19896705ec7de430952 | [
"Unlicense"
] | null | null | null | src/ui.h | dorodnic/playground | 1c64aa0ce4a5697a33cac19896705ec7de430952 | [
"Unlicense"
] | null | null | null | #pragma once
#include <memory>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <chrono>
#include <map>
#include "../easyloggingpp/easylogging++.h"
#include "types.h"
#include "bind.h"
#include "render.h"
class Font;
// Abstract interface for every UI element. Combines layout (arrange /
// size queries), rendering, mouse input, focus/visibility state, and
// data-binding support (data context, font, property-change
// notifications inherited from INotifyPropertyChanged).
class IVisualElement : public INotifyPropertyChanged
{
public:
    // --- Layout ---
    // Compute and store this element's rectangle within `origin`,
    // returning the area actually occupied.
    virtual Rect arrange(const Rect& origin) = 0;
    // Mark the current layout as stale so it is recomputed.
    virtual void invalidate_layout() = 0;
    // --- Rendering ---
    virtual void render(const Rect& origin) = 0;
    virtual Size2 get_size() const = 0;
    // Size requested by the element itself, before layout constraints.
    virtual Size2 get_intrinsic_size() const = 0;
    // --- Mouse input ---
    virtual void update_mouse_position(Int2 cursor) = 0;
    virtual void update_mouse_state(MouseButton button, MouseState state) = 0;
    virtual void update_mouse_scroll(Int2 scroll) = 0;
    // --- Focus / identity / state ---
    virtual void set_focused(bool on) = 0;
    virtual bool is_focused() const = 0;
    virtual const std::string& get_name() const = 0;
    virtual std::string to_string() const = 0;
    virtual const char* get_type() const = 0;
    virtual Alignment get_align() const = 0;
    virtual void set_enabled(bool on) = 0;
    virtual bool is_enabled() const = 0;
    virtual void set_visible(bool on) = 0;
    virtual bool is_visible() const = 0;
    // Look up an element by name in this subtree (nullptr if absent).
    virtual IVisualElement* find_element(const std::string& name) = 0;
    // --- Event handlers ---
    virtual void set_on_click(std::function<void()> on_click,
                              MouseButton button = MouseButton::left) = 0;
    virtual void set_on_double_click(std::function<void()> on_click) = 0;
    // --- Data binding / styling ---
    virtual void set_data_context(std::shared_ptr<INotifyPropertyChanged> dc) = 0;
    virtual std::shared_ptr<INotifyPropertyChanged> get_data_context() const = 0;
    virtual void set_render_context(const RenderContext& context) = 0;
    virtual void set_font(std::shared_ptr<INotifyPropertyChanged> font) = 0;
    virtual const std::shared_ptr<INotifyPropertyChanged>& get_font() const = 0;
    virtual ~IVisualElement() {}
};
typedef std::chrono::time_point<std::chrono::high_resolution_clock> TimePoint;
// Base implementation of IVisualElement. Stores position/size/state,
// forwards property-change notification to an aggregated
// BindableObjectBase, tracks per-button mouse state for click /
// double-click detection, and resolves fonts up the parent chain.
class ControlBase : public IVisualElement
{
public:
    ControlBase(std::string name,
                const Size2& position,
                const Size2& size,
                Alignment alignment);
    // Tracks button transitions and fires click / double-click
    // callbacks (defined out-of-line).
    void update_mouse_state(MouseButton button, MouseState state) override;
    // Scroll input intentionally unhandled in the base class.
    void update_mouse_scroll(Int2 scroll) override { // TODO
    };
    // Register a click callback for the given button.
    void set_on_click(std::function<void()> on_click,
                      MouseButton button = MouseButton::left) override
    {
        _on_click[button] = on_click;
    }
    void set_on_double_click(std::function<void()> on_click) override {
        _on_double_click = on_click;
    }
    // True if the given button is currently held down over this control.
    bool is_pressed(MouseButton button) const
    {
        auto it = _state.find(button);
        if (it != _state.end())
            return it->second == MouseState::down;
        else
            return false;
    }
    Size2 get_position() const { return _position; }
    void set_position(const Size2& val)
    {
        _position = val;
        fire_property_change("position");
    }
    Rect arrange(const Rect& origin) override;
    // Bubbles layout invalidation to the root; the root logs it.
    void invalidate_layout() override
    {
        if (get_parent()) get_parent()->invalidate_layout();
        else LOG(INFO) << "UI Layout has invalidated! " << get_name();
    }
    void update_mouse_position(Int2 cursor) override {}
    Size2 get_size() const override;
    void set_size(const Size2& val)
    {
        _size = val;
        fire_property_change("size");
    }
    Size2 get_intrinsic_size() const override { return _size; };
    void set_focused(bool on) override
    {
        _focused = on;
        fire_property_change("focused");
    }
    bool is_focused() const override { return _focused; }
    const std::string& get_name() const override { return _name; }
    void set_name(const std::string& val)
    {
        _name = val;
        fire_property_change("name");
    }
    Alignment get_align() const override { return _align; }
    void set_align(Alignment align)
    {
        _align = align;
        fire_property_change("alignment");
    }
    void set_enabled(bool on) override
    {
        _enabled = on;
        fire_property_change("enabled");
    }
    bool is_enabled() const override { return _enabled; }
    // Visibility changes also invalidate layout, unlike other setters.
    void set_visible(bool on) override
    {
        _visible = on;
        invalidate_layout();
        fire_property_change("visible");
    }
    bool is_visible() const override { return _visible; }
    // Leaf lookup: matches only this element by name.
    IVisualElement* find_element(const std::string& name) override
    {
        if (get_name() == name) return this;
        else return nullptr;
    }
    IVisualElement* get_parent() { return _parent; }
    const IVisualElement* get_parent() const { return _parent; }
    void update_parent(IVisualElement* new_parent);
    std::string to_string() const override { return get_name() + "(" + get_type() + ")"; }
    // Takes ownership of a data binding tied to this control's lifetime.
    void add_binding(std::unique_ptr<Binding> binding)
    {
        _bindings.push_back(std::move(binding));
    }
    void set_data_context(std::shared_ptr<INotifyPropertyChanged> dc) override
    {
        _dc = dc;
        fire_property_change("data_context");
    }
    std::shared_ptr<INotifyPropertyChanged> get_data_context() const override
    {
        return _dc;
    }
    // INotifyPropertyChanged is delegated to the aggregated _base.
    void fire_property_change(const char* property_name) override
    {
        _base.fire_property_change(property_name);
    }
    void subscribe_on_change(void* owner,
                             OnFieldChangeCallback on_change) override
    {
        _base.subscribe_on_change(owner, on_change);
    }
    void unsubscribe_on_change(void* owner) override
    {
        _base.unsubscribe_on_change(owner);
    }
    void set_render_context(const RenderContext& context) override
    {
        _render_context = context;
    }
    void set_font(std::shared_ptr<INotifyPropertyChanged> font) override
    {
        _font = font;
        fire_property_change("font");
    }
    // Resolve the font: this control's own font if set, otherwise walk
    // up the parent chain; falls back to the (null) member reference.
    // NOTE(review): overrides IVisualElement::get_font; the `override`
    // keyword is omitted here — confirm signatures stay in sync.
    const std::shared_ptr<INotifyPropertyChanged>& get_font() const
    {
        if (_font.get())
        {
            return _font;
        }
        if (_parent)
        {
            return _parent->get_font();
        }
        return _font;
    }
    ~ControlBase();
protected:
    ControlBase();
    const RenderContext& get_render_context() const
    {
        return _render_context;
    }
private:
    Size2 _position = {0,0};
    Size2 _size = {0,0};
    bool _focused = false;
    std::string _name = "";
    Alignment _align = Alignment::left;
    bool _enabled = true;
    bool _visible = true;
    IVisualElement* _parent = nullptr;      // non-owning back-pointer
    std::vector<std::unique_ptr<Binding>> _bindings;
    std::shared_ptr<INotifyPropertyChanged> _dc = nullptr;
    BindableObjectBase _base;               // property-change machinery
    // Per-button input bookkeeping for click / double-click detection.
    std::map<MouseButton, MouseState> _state;
    std::map<MouseButton, TimePoint> _last_update;
    std::map<MouseButton, TimePoint> _last_click;
    std::map<MouseButton, std::function<void()>> _on_click;
    std::function<void()> _on_double_click;
    const int CLICK_TIME_MS = 200;          // double-click window (ms)
    RenderContext _render_context = { nullptr, nullptr };
    std::shared_ptr<INotifyPropertyChanged> _font = nullptr;
};
// Reflection metadata for ControlBase: registers its bindable
// properties/fields with the type system used by the binding engine.
// NOTE(review): make() has no explicit return statement — presumably
// the DefineClass macro expands to one; verify against bind.h.
template<>
struct TypeDefinition<ControlBase>
{
    static std::shared_ptr<ITypeDefinition> make()
    {
        DefineClass(ControlBase)
            ->AddProperty(get_position, set_position)
            ->AddProperty(get_size, set_size)
            ->AddField(get_intrinsic_size)
            ->AddProperty(is_focused, set_focused)
            ->AddField(to_string)
            ->AddProperty(get_name, set_name)
            ->AddProperty(get_align, set_align)
            ->AddProperty(is_visible, set_visible)
            ->AddProperty(is_enabled, set_enabled)
            ->AddProperty(get_data_context, set_data_context)
            ->AddProperty(get_font, set_font);
    }
};
| 28.996403 | 90 | 0.625729 | [
"render",
"vector"
] |
0e7f8e99f12ebfc0f4f1a21f5ca36b86252e4797 | 247 | h | C | include/apsis/geometry/point.h | wilkie/apsis | 9e6a37ad9dfc8931b25b9429d7e4a770b4e760bf | [
"WTFPL"
] | 2 | 2015-11-05T03:47:29.000Z | 2020-01-24T18:48:09.000Z | include/apsis/geometry/point.h | wilkie/apsis | 9e6a37ad9dfc8931b25b9429d7e4a770b4e760bf | [
"WTFPL"
] | null | null | null | include/apsis/geometry/point.h | wilkie/apsis | 9e6a37ad9dfc8931b25b9429d7e4a770b4e760bf | [
"WTFPL"
] | null | null | null | #ifndef APSIS_GEOMETRY_POINT_H
#define APSIS_GEOMETRY_POINT_H
namespace Apsis {
  namespace Geometry {
    // A 2D point in world coordinates.
    struct Point {
      // The x and y coordinates of the object's footprint in the world.
      float x;
      float y;
    };
  }
}
#endif
| 16.466667 | 72 | 0.668016 | [
"geometry",
"object"
] |
0e83502b9b30590d172c8de2476a105be14737fa | 1,712 | h | C | minesweeper_v1/game_model.h | sangpham2710/CS161-Project | 7051cc17bc64bdcb128884ef02ec70e1552c982e | [
"MIT"
] | 6 | 2021-12-28T08:07:16.000Z | 2022-03-13T06:17:45.000Z | minesweeper_v1/game_model.h | sangpham2710/CS161-Project | 7051cc17bc64bdcb128884ef02ec70e1552c982e | [
"MIT"
] | null | null | null | minesweeper_v1/game_model.h | sangpham2710/CS161-Project | 7051cc17bc64bdcb128884ef02ec70e1552c982e | [
"MIT"
] | 1 | 2021-12-24T07:19:16.000Z | 2021-12-24T07:19:16.000Z | #ifndef GAME_LOGIC_H_INCLUDED
#define GAME_LOGIC_H_INCLUDED
#include <algorithm>
#include <string>
#include <vector>
#include "global.h"
// End-of-game outcomes; indexes into boardStatusOptions below.
enum boardStatusTypes { WIN, LOSE };
// Messages displayed for WIN / LOSE, in that order.
const std::vector<std::string> boardStatusOptions = {
    "Congratulation! You won!", "Umm... Quite a big explosion, right?"};
// Build a fresh board for the given difficulty level.
void constructBoard(GameBoard &gameBoard, const int &currentLevel);
// Deep-copy src into dest.
void copyBoard(GameBoard &dest, const GameBoard &src);
// Reveal one cell; the bool return presumably reports whether the move
// was safe (no mine) — TODO confirm against the implementation.
bool revealACell(GameBoard &gameBoard, const int &row, const int &col);
// Chord-reveal the neighbors of an already-revealed cell.
bool revealNeighboringCells(GameBoard &gameBoard, const int &row,
                            const int &col);
// Show every mine at game end; `won` selects win/lose presentation.
void revealAllMines(GameBoard &gameBoard, const bool &won);
// Return the board to its pre-game state.
void resetBoard(GameBoard &gameBoard);
// Bounds check for (row, col) on a width x height board.
bool isValidCell(const int &width, const int &height, const int &row,
                 const int &col);
// Randomly place `mines` mines on the board.
void generateMineBoard(GameBoard &gameBoard, int mines);
// Relocate the mine at (row, col) elsewhere (first-click safety).
void replaceMine(GameBoard &gameBoard, const int &row, const int &col);
// All in-bounds neighbor coordinates of (row, col).
std::vector<std::pair<int, int>> getNeighborsPositions(const int &width,
                                                       const int &height,
                                                       const int &row,
                                                       const int &col);
// Count neighbors of (row, col) whose state equals cellState.
int countNeighboringCellStates(const int &width, const int &height,
                               int board[][MAX_BOARD_SIZE], const int &row,
                               const int &col, const int &cellState);
// Flood-fill reveal starting from (row, col).
void uncoverBoard(GameBoard &gameBoard, const int &row, const int &col);
// Persistence of an in-progress game.
void saveBoard(GameBoard &gameBoard);
bool loadSavedGameBoardDataFile();
void updateGameBoardDataFile();
void loadSavedGameBoardData(GameBoard &gameBoard);
#endif // GAME_MODEL_H_INCLUDED
| 41.756098 | 75 | 0.650117 | [
"vector"
] |
0e98836ee0901cc7830246f0ecdc8f6473807ba1 | 4,119 | h | C | src/torrent_protocol/bencoding/BenDictionary.h | LeFroid/BitTorrentProject | 960b8b72c5afe9d45291af2af3b8ded81c8ac884 | [
"BSL-1.0"
] | 3 | 2019-12-16T02:40:49.000Z | 2021-11-24T10:43:39.000Z | src/torrent_protocol/bencoding/BenDictionary.h | LeFroid/BitTorrentProject | 960b8b72c5afe9d45291af2af3b8ded81c8ac884 | [
"BSL-1.0"
] | null | null | null | src/torrent_protocol/bencoding/BenDictionary.h | LeFroid/BitTorrentProject | 960b8b72c5afe9d45291af2af3b8ded81c8ac884 | [
"BSL-1.0"
] | null | null | null | /*
Copyright (c) 2017, Timothy Vaccarelli
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <memory>
#include <unordered_map>
#include "BenObject.h"
namespace bencoding
{
    /**
     * @class BenDictionary
     * @brief Object used to store key-value pairs of strings to objects of any bencoded type
     *
     * Thin wrapper around an unordered_map from string keys to
     * shared_ptr<BenObjectBase> values, exposing the standard map
     * interface and participating in the BenObject visitor protocol.
     */
    class BenDictionary : public BenObject< std::unordered_map< std::string, std::shared_ptr<BenObjectBase> > >
    {
    public:
        /// Default constructor
        BenDictionary();
        /// Implements the BenObject's accept method
        virtual void accept(BenObjectVisitor &visitor) override;
    /// Wrapper methods and type definitions for the map
    public:
        typedef std::unordered_map< std::string, std::shared_ptr<BenObjectBase> > DataType;
        typedef DataType::iterator iterator;
        typedef DataType::const_iterator const_iterator;
        typedef DataType::size_type size_type;
        /// Returns an iterator pointing to the first element in the BenDictionary
        iterator begin();
        /// Returns a const_iterator pointing to the first element in the BenDictionary
        const_iterator cbegin();
        /// Returns a const_iterator pointing just past the end of the BenDictionary
        const_iterator cend();
        /// Returns an iterator pointing just past end of the BenDictionary
        iterator end();
        /// Returns true if the dictionary is empty, false if else
        bool empty() const;
        /// Returns the number of elements in the list
        size_type size() const;
        /**
         * Finds an element with key equivalent to key
         * @return Iterator to element with associated key, or
         * a past-the-end iterator if the key was not found.
         */
        iterator find(const std::string &key);
        /**
         * Finds an element with key equivalent to key
         * @return Iterator to element with associated key, or
         * a past-the-end iterator if the key was not found.
         */
        const_iterator find(const std::string &key) const;
        /**
         * @brief Inserts the key-value pair into the map
         * @return A pair whose first element is an iterator pointing to either the
         * newly inserted element, or the element whose key is equivalent, and
         * a boolean value indicating whether or not the element was successfully
         * inserted.
         */
        std::pair<iterator, bool> insert(const std::pair< const std::string, std::shared_ptr<BenObjectBase> > &val);
        /// Bracket operator to access a pointer to the value associated with the given key, or
        /// it will return a reference to the newly inserted item if the key had not been found
        /// in the map
        std::shared_ptr<BenObjectBase> &operator [](std::string key);
    };
}
| 40.782178 | 116 | 0.692401 | [
"object"
] |
0ea1c1e5ea3adac4d512e0d1aa98e76798c07fe9 | 1,795 | h | C | include/cito_control.h | lyltc1/cito | da5ff4d42d01a6f0d0aef2556b2458a31f44e717 | [
"BSD-3-Clause"
] | 1 | 2020-06-23T17:36:15.000Z | 2020-06-23T17:36:15.000Z | include/cito_control.h | lyltc1/cito | da5ff4d42d01a6f0d0aef2556b2458a31f44e717 | [
"BSD-3-Clause"
] | null | null | null | include/cito_control.h | lyltc1/cito | da5ff4d42d01a6f0d0aef2556b2458a31f44e717 | [
"BSD-3-Clause"
] | null | null | null | /*! Control and Contact Model */
/**
* \brief CitoControl class consists of functions for control and contact model
*
* This class defines functions for calculating and setting control- and contact-
* related variables, i.e., joint torques and external forces on the free bodies.
*
* \author Aykut Onol
*/
#ifndef CITO_CONTROL_H
#define CITO_CONTROL_H
#include "cito_params.h"
#include "mj_savelog.h"
class CitoControl
{
public:
    /// Constructor: takes a (non-owned) MuJoCo model.
    CitoControl(const mjModel* model);
    /// Destructor
    ~CitoControl();
    /// This function takes a full control step given a control input
    void takeStep(mjData*d, const eigVd u, bool save, double compensateBias);
    /// This function sets generalized forces on joints and free bodies
    void setControl(mjData* d, const eigVd u, double compensateBias);
    /** This function converts free joints' quaternions to Euler angles so that
     * the dimensionality of the state vector is 2*nv instead of nq+nv */
    eigVm getState(const mjData* d);
    /// This function gets bounds on joint positions, actuator forces from the model
    void getBounds();
    /// position & torque limits (allocated/owned per getBounds — TODO confirm)
    double *qposLB, *qposUB, *tauLB, *tauUB;
    /// per-joint / per-actuator "is free" flags
    int *isJFree, *isAFree;
private:
    /// This function returns contact wrench given current state and control input
    eigMd contactModel(const mjData* d, const eigVd u);
    /// MuJoCo model (non-owning)
    const mjModel* m;
    /// Contact wrench
    eigMd h, hCon;
    /// Contact model variables
    double phiE, phiN, zeta, phiC, gamma, alpha, phiR;
    Eigen::Matrix<double, 3, 1> pSR, pSE, pBF, nCS, vRE, vEF, lambda;
    /// getState variables
    eigVm x;
    Eigen::Matrix<mjtNum, 4, 1> jFreeQuat;
    /// Objects: parameter store and state logger
    CitoParams cp;
    MjSaveLog sl;
};
#endif //CITO_CONTROL_H
| 32.053571 | 84 | 0.690808 | [
"vector",
"model"
] |
0ea7b3fe0e655ad92a4a612938c3c87f38d44c2f | 3,560 | h | C | src/CMGDB/_cmgdb/include/database/PointerGrid.h | marciogameiro/CMGDB_temp | 282b8a4489ff1e611d202ced4108b81129dc50fc | [
"MIT"
] | 1 | 2022-01-11T21:07:58.000Z | 2022-01-11T21:07:58.000Z | src/CMGDB/_cmgdb/include/database/PointerGrid.h | marciogameiro/CMGDB_temp | 282b8a4489ff1e611d202ced4108b81129dc50fc | [
"MIT"
] | null | null | null | src/CMGDB/_cmgdb/include/database/PointerGrid.h | marciogameiro/CMGDB_temp | 282b8a4489ff1e611d202ced4108b81129dc50fc | [
"MIT"
] | 2 | 2021-03-25T21:13:26.000Z | 2021-05-04T18:47:00.000Z | // PointerGrid.h
// Shaun Harker
// 9/16/11
#ifndef CMDP_POINTERGRID_H
#define CMDP_POINTERGRID_H
#include <vector>
#include <stack>
#include <deque>
#include <exception>
#include <boost/unordered_set.hpp>
#include "TreeGrid.h"
#include "Tree.h"
#include "PointerTree.h"
#include <memory>
#include "boost/serialization/serialization.hpp"
#include "boost/serialization/vector.hpp"
#include "boost/serialization/export.hpp"
#include "boost/serialization/shared_ptr.hpp"
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
/// class PointerGrid
/// TreeGrid implementation backed by an in-memory PointerTree, with
/// precomputed tables converting between grid iterators (leaf indices)
/// and tree iterators.
class PointerGrid : public TreeGrid {
public:
  PointerGrid ( void );
  virtual ~PointerGrid ( void );
  /// Convert a grid (leaf) iterator to the corresponding tree iterator.
  virtual Tree::iterator GridToTree ( Grid::iterator it ) const;
  /// Convert a tree iterator to the corresponding grid (leaf) iterator.
  virtual Grid::iterator TreeToGrid ( Tree::iterator it ) const;
  virtual const PointerTree & tree ( void ) const;
  virtual PointerTree & tree ( void );
  /// Allocate a fresh, empty PointerGrid (caller owns the result).
  virtual PointerGrid * spawn ( void ) const;
  virtual void rebuild ( std::shared_ptr<const CompressedTreeGrid> compressed );
  /// Recompute the grid<->tree iterator tables from the current tree.
  void rebuildFromTree ( void );
private:
  std::shared_ptr<PointerTree> tree_;
  // Indexed by tree node: leaf index for leaves, tree().size() sentinel
  // for interior nodes.
  std::vector < Grid::iterator > grid_iterators_;
  // Indexed by leaf (grid) index: the corresponding tree iterator.
  std::vector < Tree::iterator > tree_iterators_;
public:
  /// Approximate total memory footprint in bytes.
  virtual uint64_t memory ( void ) const {
    return sizeof ( PointerGrid ) +
    tree_ -> memory () +
    sizeof ( Grid::iterator ) * grid_iterators_ . size () +
    sizeof ( Tree::iterator ) * tree_iterators_ . size ();
  }
  friend class boost::serialization::access;
  // Serializes base + tree; lookup tables are rebuilt rather than
  // stored. NOTE(review): rebuildFromTree() also runs on save —
  // harmless, but presumably only needed on load.
  template<typename Archive>
  void serialize(Archive & ar, const unsigned int file_version) {
    ar & boost::serialization::base_object<TreeGrid>(*this);
    ar & tree_;
    rebuildFromTree();
  }
  // file operations
  /// Serialize this grid to a text archive at `filename`.
  void save ( const char * filename ) const {
    std::ofstream ofs(filename);
    assert(ofs.good());
    boost::archive::text_oarchive oa(ofs);
    oa << * this;
  }
  /// Load this grid from a text archive; exits the process on failure.
  void load ( const char * filename ) {
    std::ifstream ifs(filename);
    if ( not ifs . good () ) {
      std::cout << "Could not load " << filename << "\n";
      exit ( 1 );
    }
    boost::archive::text_iarchive ia(ifs);
    ia >> * this;
  }
};
BOOST_CLASS_EXPORT_KEY(PointerGrid);
// Default constructor: start with an empty PointerTree and build the
// (initially trivial) iterator conversion tables.
inline
PointerGrid::PointerGrid ( void ) : tree_ ( new PointerTree ) {
  rebuildFromTree ();
}

// Destructor: tree_ is released by shared_ptr.
inline
PointerGrid::~PointerGrid ( void ) {
}
// Tree node -> leaf index, via the precomputed table. Only meaningful
// for leaf nodes (interior entries hold the tree().size() sentinel).
inline Grid::iterator
PointerGrid::TreeToGrid ( Tree::iterator tree_it ) const {
  return grid_iterators_ [ * tree_it ];
}

// Leaf index -> tree node, via the precomputed table.
inline Tree::iterator
PointerGrid::GridToTree ( Grid::iterator grid_it ) const {
  return tree_iterators_ [ * grid_it ];
}
// Const access to the underlying tree.
inline const PointerTree &
PointerGrid::tree ( void ) const {
  return * tree_ . get ();
}

// Mutable access to the underlying tree.
inline PointerTree &
PointerGrid::tree ( void ) {
  return * tree_ . get ();
}

// Factory: heap-allocate an empty PointerGrid; caller takes ownership.
inline PointerGrid *
PointerGrid::spawn ( void ) const {
  return new PointerGrid;
}

// Rebuild the lookup tables; the compressed representation argument is
// unused by this implementation.
inline void
PointerGrid::rebuild ( std::shared_ptr<const CompressedTreeGrid> compressed ) {
  rebuildFromTree ();
}
// Rebuild the grid<->tree iterator conversion tables by walking the
// tree once: leaves are numbered consecutively; interior nodes get a
// tree().size() sentinel in grid_iterators_. Also updates size_ (the
// leaf count).
inline void
PointerGrid::rebuildFromTree ( void ) {
  // Now we rebuild the GridIterator to TreeIterator conversions
  grid_iterators_ . clear ();
  tree_iterators_ . clear ();
  uint64_t leaf_count = 0;
  Tree::iterator end = tree () . end ();
  for ( Tree::iterator it = tree () . begin (); it != end; ++ it ) {
    if ( tree () . isLeaf ( it ) ) {
      grid_iterators_ . push_back ( Grid::iterator (leaf_count ++ ) );
      tree_iterators_ . push_back ( it );
    } else {
      // Sentinel marking "not a leaf".
      grid_iterators_ . push_back ( tree () . size () );
    }
  }
  size_ = leaf_count;
}
#endif
| 25.985401 | 80 | 0.671348 | [
"vector"
] |
0ea85c7f6cfb0a7354f8d13bc5e60a072f0de191 | 341 | h | C | afp-eplex/ellen/runEllenGP.h | lacava/regression-benchmark | f20ef3dd1083b273f5777975f2e1f366060ec2fd | [
"MIT"
] | null | null | null | afp-eplex/ellen/runEllenGP.h | lacava/regression-benchmark | f20ef3dd1083b273f5777975f2e1f366060ec2fd | [
"MIT"
] | null | null | null | afp-eplex/ellen/runEllenGP.h | lacava/regression-benchmark | f20ef3dd1083b273f5777975f2e1f366060ec2fd | [
"MIT"
] | null | null | null | #pragma once
#ifndef RUNELLENGP_H
#define RUNELLENGP_H
// extern "C" {
void runEllenGP(dict& param_dict, float** features, float* target, string pname,string dname);
// }
// void runEllenGP(std::string paramfile, std::string datafile,bool trials,int trialnum);
//void shuffle_data(Data& d, params& p, vector<Randclass>& r,state& s);
#endif
| 31 | 94 | 0.739003 | [
"vector"
] |
0eae72137d8cf26eb1d3a7e10026b0d91fedf2a9 | 3,451 | c | C | src/zir/types/reference.c | lbrugnara/zenit | 72cf091e8c392c895adaa7ac623320af92a8b452 | [
"MIT"
] | 4 | 2019-04-27T21:09:56.000Z | 2022-02-03T21:22:14.000Z | src/zir/types/reference.c | lbrugnara/zenit | 72cf091e8c392c895adaa7ac623320af92a8b452 | [
"MIT"
] | null | null | null | src/zir/types/reference.c | lbrugnara/zenit | 72cf091e8c392c895adaa7ac623320af92a8b452 | [
"MIT"
] | null | null | null |
#include <stdlib.h>
#include <string.h>
#include <fllib/Mem.h>
#include <fllib/Cstring.h>
#include "reference.h"
/*
 * Allocate a new reference type object wrapping the provided element
 * type. The returned object must be released with
 * zir_reference_type_free(), which also frees the element.
 */
ZirReferenceType* zir_reference_type_new(ZirType *element)
{
    ZirReferenceType *ref_type = fl_malloc(sizeof(ZirReferenceType));
    ref_type->element = element;
    ref_type->base.typekind = ZIR_TYPE_REFERENCE;
    return ref_type;
}
/*
 * Compute a hash for a reference type by formatting a key string
 * "[ref][e:<element-hash>]" and hashing it with the djb2 algorithm.
 * Changes: removed the unused `FlByte c` local and hoisted strlen()
 * out of the loop condition (it was re-evaluated every iteration).
 */
unsigned long zir_reference_type_hash(ZirReferenceType *type)
{
    static const char *format = "[ref][e:%lu]";
    char type_key[256] = { 0 };
    snprintf(type_key, 256, format, zir_type_hash(type->element));

    unsigned long hash = 5381;
    size_t length = strlen(type_key);

    for (size_t i = 0; i < length; i++)
        hash = ((hash << 5) + hash) + type_key[i];

    return hash;
}
/*
 * Return the string representation ("&<element>") of a reference type.
 * The string is cached inside the type object along with the type hash
 * at the time it was built; it is recomputed only when the hash no
 * longer matches (i.e. the type changed). Returns NULL for a NULL
 * type. The returned buffer is owned by the type object — callers must
 * not free it.
 */
char* zir_reference_type_to_string(ZirReferenceType *type)
{
    if (type == NULL)
        return NULL;
    unsigned long type_hash = zir_reference_type_hash(type);
    if (type->base.to_string.value == NULL)
    {
        // First call, initialize the value
        type->base.to_string.value = fl_cstring_new(0);
    }
    else if (type_hash == type->base.to_string.version)
    {
        // If the type information didn't change, just return the
        // current value
        return type->base.to_string.value;
    }
    // We allocate memory for the string representation of this object
    char *string_value = fl_cstring_vdup("&%s", zir_type_to_string(type->element));
    // Update the string representation (and record the hash it was
    // built from, so future calls can detect staleness)
    type->base.to_string.version = type_hash;
    type->base.to_string.value = fl_cstring_replace_realloc(type->base.to_string.value, type->base.to_string.value, string_value);
    fl_cstring_free(string_value);
    return type->base.to_string.value;
}
/*
 * Structural equality between a reference type and an arbitrary type.
 * If either argument is NULL, they are equal only if both are NULL.
 * Otherwise type_b must also be a reference type and the element types
 * must be equal.
 */
bool zir_reference_type_equals(ZirReferenceType *type_a, ZirType *type_b)
{
    if (type_a == NULL || type_b == NULL)
        return (ZirType*) type_a == type_b;
    if (type_b->typekind != ZIR_TYPE_REFERENCE)
        return false;
    ZirReferenceType *type_b_ref = (ZirReferenceType*) type_b;
    return zir_type_equals(type_a->element, type_b_ref->element);
}
/*
 * A value of type from_type can be assigned to target_type only when
 * from_type is also a reference whose element type is assignable to
 * the target's element type.
 */
bool zir_reference_type_is_assignable_from(ZirReferenceType *target_type, ZirType *from_type)
{
    if (target_type == NULL || from_type == NULL || from_type->typekind != ZIR_TYPE_REFERENCE)
        return false;

    ZirType *from_element = ((ZirReferenceType*) from_type)->element;
    return zir_type_is_assignable_from(target_type->element, from_element);
}
/*
 * Check whether a reference type can be cast to target_type:
 *  - identical reference types are castable,
 *  - a reference is castable to another reference if the source
 *    element is assignable to the target element,
 *  - any reference is castable to an unsigned integer.
 *
 * Fix: the original NULL guard tested `target_type` twice and never
 * checked `reference`, so a NULL reference was dereferenced below.
 */
bool zir_reference_type_is_castable_to(ZirReferenceType *reference, ZirType *target_type)
{
    if (reference == NULL || target_type == NULL)
        return false;

    // Cast between the same types are valid
    if (zir_reference_type_equals(reference, target_type))
        return true;

    if (target_type->typekind == ZIR_TYPE_REFERENCE)
        return zir_type_is_assignable_from(reference->element, ((ZirReferenceType*) target_type)->element);

    // We can cast a reference to an unsigned integer
    if (target_type->typekind == ZIR_TYPE_UINT)
        return true;

    return false;
}
/*
** Storage size of a reference value: the platform reference size,
** or 0 for a NULL type.
*/
size_t zir_reference_type_size(ZirReferenceType *type, size_t ref_size)
{
    return type != NULL ? ref_size : 0;
}
/*
** Releases a reference type object: its cached string representation,
** its element type (recursively), and the object itself.  Safe on NULL.
*/
void zir_reference_type_free(ZirReferenceType *type)
{
    if (!type)
        return;

    if (type->base.to_string.value != NULL)
        fl_cstring_free(type->base.to_string.value);

    if (type->element)
        zir_type_free(type->element);

    fl_free(type);
}
| 26.546154 | 130 | 0.690524 | [
"object"
] |
0eb18ddc766cd796e77798c7abfd9bd9a9963c00 | 1,668 | c | C | src/samples/me/basic/main.c | rofl0r/pspsdk | cc887d731f635e06fba85118eb5c7ee87746cbc8 | [
"BSD-3-Clause"
] | 545 | 2015-01-11T22:09:12.000Z | 2022-03-21T16:37:24.000Z | src/samples/me/basic/main.c | rofl0r/pspsdk | cc887d731f635e06fba85118eb5c7ee87746cbc8 | [
"BSD-3-Clause"
] | 43 | 2015-02-14T18:50:10.000Z | 2022-03-28T05:42:01.000Z | src/samples/me/basic/main.c | rofl0r/pspsdk | cc887d731f635e06fba85118eb5c7ee87746cbc8 | [
"BSD-3-Clause"
] | 222 | 2015-01-09T21:06:52.000Z | 2022-03-31T20:18:55.000Z | /*
* PSP Software Development Kit - https://github.com/pspdev
* -----------------------------------------------------------------------
* Licensed under the BSD license, see LICENSE in PSPSDK root for details.
*
* main.c - A basic example of getting code onto the ME. This was based on
* the sample code written by crazyc on the pspdev forums.
*
* Copyright (c) 2005 James Forshaw <tyranid@gmail.com>
*
*/
#include <pspkernel.h>
#include <pspdebug.h>
#include <pspctrl.h>
#include <pspdisplay.h>
#include <stdlib.h>
#include <string.h>
/* Define the module info section */
PSP_MODULE_INFO("mebasic", 0x1000, 1, 1);
/* Define the main thread's attribute value (optional) */
PSP_MAIN_THREAD_ATTR(0);
/* Define printf, just to make typing easier */
#define printf pspDebugScreenPrintf
void me_run(void);
void me_end(void);
/*
 * Entry point: copies a small program (me_run..me_end) into the Media
 * Engine's reset vector, resets/starts the ME, then loops displaying a
 * counter word that the ME-side code updates, until Home is pressed.
 */
int main(int argc, char *argv[])
{
	SceCtrlData ctl;

	pspDebugScreenInit();

	sceCtrlSetSamplingCycle(0);
	sceCtrlSetSamplingMode(PSP_CTRL_MODE_DIGITAL);

	/* Copy our small program into the ME reset vector */
	memcpy((void *)0xbfc00040, me_run, (int)(me_end - me_run));
	/* Flush the data cache so the ME sees the copied code in memory */
	sceKernelDcacheWritebackInvalidateAll();
	/* Hold the ME in reset, enable its bus clock, then release it */
	sceSysregMeResetEnable();
	sceSysregMeBusClockEnable();
	sceSysregMeResetDisable();
	sceSysregVmeResetDisable();

	while(1)
	{
		/* 0xBFC00060: word updated by the ME-side program (see me_run);
		   volatile because it changes outside this CPU's control */
		volatile u32 *count = (u32*) 0xBFC00060;
		pspDebugScreenSetXY(0, 0);
		pspDebugScreenPrintf("ME Basic Example, press Home to exit\n");
		/* Invalidate caches so we read the ME's latest counter value */
		sceKernelDcacheWritebackInvalidateAll();
		pspDebugScreenPrintf("ME Counter: %08x\n", *count);
		sceCtrlReadBufferPositive(&ctl, 1);
		if(ctl.Buttons & PSP_CTRL_HOME)
		{
			sceKernelExitGame();
		}
		sceDisplayWaitVblankStart();
	}

	return 0;
}
| 24.529412 | 74 | 0.699041 | [
"vector"
] |
0eb655861df24d4a2083f543d0f712280302f3ed | 711 | h | C | src/body.h | ozzzzz/nbody | d24ef07a6ae6d2803c3d2cfb94d723d711309684 | [
"BSD-3-Clause"
] | null | null | null | src/body.h | ozzzzz/nbody | d24ef07a6ae6d2803c3d2cfb94d723d711309684 | [
"BSD-3-Clause"
] | null | null | null | src/body.h | ozzzzz/nbody | d24ef07a6ae6d2803c3d2cfb94d723d711309684 | [
"BSD-3-Clause"
] | null | null | null | //
// Created by Bogdan Neterebskii on 21/07/2018.
//
#ifndef NBODY_BODY_H
#define NBODY_BODY_H
#include <stdio.h>
#include "vector.h"
class Body {
public:
Body(v3 position, v3 velocity, float mass) {
this->position = position;
this->velocity = velocity;
this->mass = mass;
}
v3 position;
v3 velocity;
float mass;
void move(float dt) {
v3 new_position = add(this->position, mul( v3(dt), this->velocity) );
this->position = new_position;
}
void accelerate(float dt, v3 acceleration) {
this->velocity = add(this->velocity, mul(v3(dt), acceleration));
}
};
void show_positions(Body *bodies, int n);
#endif //NBODY_BODY_H
| 17.775 | 77 | 0.625879 | [
"vector"
] |
0eb7f04b9bebf6acbe7e49180eeadb37295527e2 | 2,047 | h | C | monodrive/core/src/SharedMemory.h | tomd-tc/monodrive-client | 95b6f2e02e081dcd2d66cc66dbf68c2f39da8ed6 | [
"MIT"
] | null | null | null | monodrive/core/src/SharedMemory.h | tomd-tc/monodrive-client | 95b6f2e02e081dcd2d66cc66dbf68c2f39da8ed6 | [
"MIT"
] | null | null | null | monodrive/core/src/SharedMemory.h | tomd-tc/monodrive-client | 95b6f2e02e081dcd2d66cc66dbf68c2f39da8ed6 | [
"MIT"
] | 1 | 2021-08-19T16:42:04.000Z | 2021-08-19T16:42:04.000Z | // Copyright (C) 2017-2020, monoDrive, LLC. All Rights Reserved.
//#include <boost/interprocess/shared_memory_object.hpp>
#pragma push_macro("TEXT")
#undef TEXT
#pragma warning( push )
#pragma warning( disable: 4668 4191)
#undef check
#pragma push_macro("check")
#include <boost/interprocess/managed_shared_memory.hpp>
//#include <boost/interprocess/mapped_region.hpp>
#include <boost/interprocess/containers/vector.hpp>
#include <boost/interprocess/allocators/allocator.hpp>
#pragma warning( pop)
#pragma pop_macro("TEXT")
#pragma pop_macro("check")
#include <cstring>
#include <cstdlib>
#include <string>
#include <iostream>
// Fixed-size raw payload for a camera sample shared between processes.
typedef struct
{
    int8_t payload[100];
} camera;
// One timestamped sensor sample: wall time, game time, payload length
// and the typed payload itself.
template <typename T>
struct sensor_frame
{
    int32_t time;      // capture time
    int32_t gametime;  // simulation time
    int32_t length;    // payload length in bytes
    T frame;           // sensor payload
};
template <typename T>
class SharedMemory
{
typedef boost::interprocess::allocator<T, boost::interprocess::managed_shared_memory::segment_manager> memory_alloc;
typedef boost::interprocess::vector<T, memory_alloc> memory_vec;
public:
SharedMemory(const char* in_name):name(in_name){
}
~SharedMemory(){
boost::interprocess::shared_memory_object::remove(name);
}
memory_vec* create(const char* segmentName, int size)
{
segment = boost::interprocess::managed_shared_memory(boost::interprocess::create_only, name, size);
const memory_alloc alloc_inst(segment.get_segment_manager());
return segment.construct<memory_vec>(segmentName)(alloc_inst);
}
void open()
{
segment = boost::interprocess::managed_shared_memory(boost::interprocess::open_only, name);
}
memory_vec* find(const char* segmentName)
{
memory_vec *image_frame = segment.find<memory_vec>(segmentName).first;
return image_frame;
}
private:
boost::interprocess::managed_shared_memory segment;
const char *name;
}; | 29.242857 | 120 | 0.673669 | [
"vector"
] |
1d8c99922ec25ad34cac90d12f3486d4d2f64ff0 | 614 | h | C | command_line_parser01/command_line_parser.h | syohex/cpp-study | 598c2afbe77c6d4ba88f549d3e99f97b5bf81acb | [
"MIT"
] | null | null | null | command_line_parser01/command_line_parser.h | syohex/cpp-study | 598c2afbe77c6d4ba88f549d3e99f97b5bf81acb | [
"MIT"
] | null | null | null | command_line_parser01/command_line_parser.h | syohex/cpp-study | 598c2afbe77c6d4ba88f549d3e99f97b5bf81acb | [
"MIT"
] | null | null | null | #pragma once
#include <vector>
#include <map>
#include <string>
#include <exception>
// Exception thrown for command-line parsing failures; carries a message
// returned via what().
class CommandLineError : public std::exception {
  public:
    explicit CommandLineError(std::string message);
    const char *what() const noexcept override;

  private:
    std::string message_;  // stored so what() can return a stable pointer
};
// Polymorphic base for option entries; the virtual destructor allows
// deleting derived options through this type.
class OptionBase {
  public:
    virtual ~OptionBase() = default;
};
// Registry of command-line options, keyed by long name, remembering the
// order in which options were added.
class CommandLineParser {
  public:
    ~CommandLineParser();

    // Register an option under its long name, short alias and help text.
    void Add(const std::string &long_name, char short_name, const std::string &description);

  private:
    std::map<std::string, OptionBase *> options_;  // long name -> option (presumably owned; freed in dtor — verify in .cpp)
    std::vector<std::string> option_order_;        // long names in registration order
};
"vector"
] |
1d97891f047434ac0cfffa889b77288779c42eab | 3,484 | h | C | BSEDemo/TestUtils.h | crisbia/bse | d549deda2761d301a67aa838743ec731c82e3c07 | [
"MIT"
] | null | null | null | BSEDemo/TestUtils.h | crisbia/bse | d549deda2761d301a67aa838743ec731c82e3c07 | [
"MIT"
] | 3 | 2021-12-02T12:52:35.000Z | 2021-12-19T20:37:35.000Z | BSEDemo/TestUtils.h | crisbia/bse | d549deda2761d301a67aa838743ec731c82e3c07 | [
"MIT"
] | null | null | null | #ifndef _H_INCLUDED_TEST_UTILS
#define _H_INCLUDED_TEST_UTILS
#include "bsePhysics.h"
#include "TestFramework.h"
namespace BSEDemo
{
//---------------------------------------------------------------------------------------------------------------------
class ContactPointInfo
{
public:
ContactPointInfo(const ContactPointInfo& source) { position = source.position; normal = source.normal; }
ContactPointInfo() { position.x = position.y = 0; normal.x = normal.y; }
ContactPointInfo(const bse::Vec2& pos, const bse::Vec2 &norm) { position = pos; normal = norm; }
bse::Vec2 position;
bse::Vec2 normal;
};
//---------------------------------------------------------------------------------------------------------------------
// Adds a (possibly tilted) static ground box to the scene.
extern void createGround(bse::phx::Scene* scene, bse::Real angle = 0, const bse::Vec2& groundDims = bse::Vec2(15.0f, 0.5f));
//---------------------------------------------------------------------------------------------------------------------
// Builds a rectangular room (four walls) centred at (x, y).
extern void createRoom(bse::phx::Scene* scene, const bse::Real x, const bse::Real y, const bse::Real sizeX, const bse::Real sizeY);
//---------------------------------------------------------------------------------------------------------------------
// Same as createRoom but built from polygon shapes.
extern void createPolygonalRoom(bse::phx::Scene* scene, const bse::Real x, const bse::Real y, const bse::Real sizeX, const bse::Real sizeY);
//---------------------------------------------------------------------------------------------------------------------
// Fills polyDesc with a regular polygon of numEdges vertices on a circle of extRadius.
extern void createPolygon(bse::phx::PolygonDesc& polyDesc, const bse::Real extRadius, const int numEdges);
//---------------------------------------------------------------------------------------------------------------------
  // Snapshot of a shape's kinematic state (taken via save) so it can be
  // inspected or restored later.
  class ObjectState
  {
  public:
    bse::phx::Shape* shape;       // shape the snapshot was taken from
    void save(bse::phx::Shape* shape);

    bse::Vec2 position;
    float orientation;
    bse::Vec2 linearVelocity;
    float angularVelocity;
  };
  //---------------------------------------------------------------------------------------------------------------------
  // Demo tool: picks a shape under the mouse and applies a spring-like
  // force of configurable strength to drag it around.
  class ObjectMouseForce : public TestTool
  {
  public:
    ObjectMouseForce();
    virtual void update(float physicsDt, float aiDt);
    virtual void render(float physicsDt, float aiDt);
    // Returns the shape under the current mouse position, if any.
    bse::phx::Shape* pick();
    void setStrength(float strength) { m_strength = strength; }
    // Drops the current object and clears the saved state.
    void reset() { m_currentObject = 0; m_objectState.save(0); };
    bse::phx::Shape* getCurrentObject() const { return m_currentObject; }
  protected:
    virtual void doEnable() {}
    virtual void doDisable() { m_currentObject = 0; }

    bse::Vec2 m_offset;                  // grab offset within the shape
    bse::Vec2 m_position;                // current mouse position
    bse::phx::Scene* m_scene;
    bse::phx::Shape* m_currentObject;    // shape being dragged (0 = none)
    float m_strength;                    // force strength
    ObjectState m_objectState;           // state saved at pick time
  };
  //---------------------------------------------------------------------------------------------------------------------
  // Demo tool: selects the body under the mouse so other tools/UI can
  // operate on it.
  class ObjectSelector : public TestTool
  {
  public:
    ObjectSelector();
    virtual void update(float physicsDt, float aiDt);
    virtual void render(float physicsDt, float aiDt);
    // Returns the body under the current mouse position, if any.
    bse::phx::Body* pick();
    void reset() { m_currentObject = 0; };
    bse::phx::Body* getCurrentObject() const { return m_currentObject; }
  protected:
    virtual void doEnable() {}
    virtual void doDisable() { m_currentObject = 0; }

    bse::phx::Scene* m_scene;
    bse::phx::Body* m_currentObject;  // selected body (0 = none)
    bse::Vec2 m_position;             // current mouse position
  };
} // namespace BSEDemo
#endif // _H_INCLUDED_TEST_UTILS
| 35.917526 | 141 | 0.498565 | [
"render",
"shape"
] |
1da7353060c82159882b0e745cd636117c355aaf | 4,883 | h | C | dev/Gems/LmbrCentral/Code/Source/Shape/SphereShapeComponent.h | stickyparticles/lumberyard | dc523dd780f3cd1874251181b7cf6848b8db9959 | [
"AML"
] | 2 | 2019-11-29T09:04:54.000Z | 2021-03-18T02:34:44.000Z | dev/Gems/LmbrCentral/Code/Source/Shape/SphereShapeComponent.h | JulianoCristian/Lumberyard-3 | dc523dd780f3cd1874251181b7cf6848b8db9959 | [
"AML"
] | null | null | null | dev/Gems/LmbrCentral/Code/Source/Shape/SphereShapeComponent.h | JulianoCristian/Lumberyard-3 | dc523dd780f3cd1874251181b7cf6848b8db9959 | [
"AML"
] | 3 | 2019-05-13T09:41:33.000Z | 2021-04-09T12:12:38.000Z | /*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#pragma once
#include <AzCore/Math/Transform.h>
#include <AzCore/Component/Component.h>
#include <AzCore/Component/TransformBus.h>
#include <LmbrCentral/Shape/ShapeComponentBus.h>
#include <LmbrCentral/Shape/SphereShapeComponentBus.h>
namespace LmbrCentral
{
    // Runtime component exposing a sphere shape: answers shape queries
    // (AABB, containment, distance) and sphere-specific requests, and
    // tracks the entity's transform to keep cached data current.
    class SphereShapeComponent
        : public AZ::Component
        , private ShapeComponentRequestsBus::Handler
        , private SphereShapeComponentRequestsBus::Handler
        , private AZ::TransformNotificationBus::Handler
    {
    public:

        friend class EditorSphereShapeComponent;

        AZ_COMPONENT(SphereShapeComponent, "{E24CBFF0-2531-4F8D-A8AB-47AF4D54BCD2}");

        //////////////////////////////////////////////////////////////////////////
        // AZ::Component interface implementation
        void Activate() override;
        void Deactivate() override;
        //////////////////////////////////////////////////////////////////////////

        //////////////////////////////////////////////////////////////////////////
        // ShapeComponent::Handler implementation
        AZ::Crc32 GetShapeType() override
        {
            return AZ::Crc32("Sphere");
        }

        // World-space axis-aligned box enclosing the sphere.
        AZ::Aabb GetEncompassingAabb() override;
        // True when the world-space point lies inside the sphere.
        bool IsPointInside(const AZ::Vector3& point) override;
        // Squared distance from the point to the sphere's surface.
        float DistanceSquaredFromPoint(const AZ::Vector3& point) override;
        //////////////////////////////////////////////////////////////////////////

        //////////////////////////////////////////////////////////////////////////
        // SphereShapeComponentRequestsBus::Handler implementation
        SphereShapeConfiguration GetSphereConfiguration() override
        {
            return m_configuration;
        }

        void SetRadius(float newRadius) override;
        //////////////////////////////////////////////////////////////////////////

        //////////////////////////////////////////////////////////////////////////////////
        // Transform notification bus listener

        /// Called when the local transform of the entity has changed. Local transform update always implies world transform change too.
        void OnTransformChanged(const AZ::Transform& /*local*/, const AZ::Transform& /*world*/) override;

        //////////////////////////////////////////////////////////////////////////////////

    protected:

        static void GetProvidedServices(AZ::ComponentDescriptor::DependencyArrayType& provided)
        {
            provided.push_back(AZ_CRC("ShapeService"));
            provided.push_back(AZ_CRC("SphereShapeService"));
        }

        static void GetIncompatibleServices(AZ::ComponentDescriptor::DependencyArrayType& incompatible)
        {
            // Only one shape component may live on an entity.
            incompatible.push_back(AZ_CRC("ShapeService"));
            incompatible.push_back(AZ_CRC("SphereShapeService"));
        }

        static void GetRequiredServices(AZ::ComponentDescriptor::DependencyArrayType& required)
        {
            required.push_back(AZ_CRC("TransformService"));
        }

        static void Reflect(AZ::ReflectContext* context);

    private:

        //! Stores configuration of a sphere for this component
        SphereShapeConfiguration m_configuration;

        //////////////////////////////////////////////////////////////////////////
        // Runtime data
        // Precomputed values (squared radius, world center) refreshed when
        // the transform or configuration changes.
        class SphereIntersectionDataCache : public IntersectionTestDataCache<SphereShapeConfiguration>
        {
        public:
            void UpdateIntersectionParams(const AZ::Transform& currentTransform,
                                          const SphereShapeConfiguration& configuration) override;

            AZ_INLINE float GetRadiusSquared() const
            {
                return m_radiusSquared;
            }

            AZ_INLINE const AZ::Vector3& GetCenterPosition() const
            {
                return m_currentCenterPosition;
            }

        private:
            // Radius of the sphere squared
            float m_radiusSquared;

            // Position of the center of the sphere
            AZ::Vector3 m_currentCenterPosition;
        };

        // Caches transient intersection data
        SphereIntersectionDataCache m_intersectionDataCache;

        //! Caches the current transform for the entity on which this component lives
        AZ::Transform m_currentTransform;
    };
} // namespace LmbrCentral
| 38.148438 | 136 | 0.567069 | [
"shape",
"transform"
] |
1dab27a896c088e071fc4cc51987eeb16f03998e | 3,526 | h | C | src/AIShell.h | dpocheng/CPP-ConnectK-AI | 2624fdbeef0768d2f1fc4c25a192cfc63f8a8386 | [
"Apache-2.0"
] | null | null | null | src/AIShell.h | dpocheng/CPP-ConnectK-AI | 2624fdbeef0768d2f1fc4c25a192cfc63f8a8386 | [
"Apache-2.0"
] | null | null | null | src/AIShell.h | dpocheng/CPP-ConnectK-AI | 2624fdbeef0768d2f1fc4c25a192cfc63f8a8386 | [
"Apache-2.0"
] | null | null | null | #ifndef AISHELL_H
#define AISHELL_H
//#pragma once
#include <algorithm>
#include <ctime>
#include <limits>
#include <math.h>
//#include <random>
#include <string>
#include <vector>
//#include "AlphaBeta.h"
//#include "Evaluation.h"
//#include "GameStateChecker.h"
//#include "IDSClock.h"
//#include "MinMax.h"
#include "Move.h"
//#include "Validation.h"
// A new AIShell will be created for every move request.
// ConnectK AI driver: holds the board state for one move request and
// exposes search (minimax / alpha-beta / IDS), evaluation and validation
// helpers used to pick the next move.
class AIShell{
public:
	//these represent the values for each piece type.
	static const int AI_PIECE=1;
	static const int HUMAN_PIECE = -1;
	static const int NO_PIECE=0;

private:
	//Do not alter the values of numRows or numcols.
	//they are used for deallocating the gameState variable.
	std::string version;
	int numRows; //the total number of rows in the game state.
	int numCols; //the total number of columns in the game state.
	int **gameState; //a pointer to a two-dimensional array representing the game state.
	bool gravityOn; //this will be true if gravity is turned on. It will be false if gravity is turned off.
	bool isFirstPlayer; //true if the turn is first player; false if the turn is second
	Move lastMove; //this is the move made last by your opponent. If your opponent has not made a move yet (you move first) then this move will hold the value (-1, -1) instead.
	Move efficientMove; // best move found so far by the search

public:
	int deadline; //this is how many milliseconds the AI has to make move.
	int k; // k is the number of pieces a player must get in a row/column/diagonal to win the game. IE in connect 4, this variable would be 4

	AIShell(int numCols, int numRows, bool gravityOn, int** gameState, Move lastMove);
	~AIShell();
	// Entry point: returns the chosen move for the current position.
	Move makeMove();

	//Evaluation
	// Static evaluation of a position from the current player's view.
	int evalFunc(int numCols, int numRows, int k, int** gameState, bool isFirstPlayer);
	void boardCheck(int numCols, int numRows, int k, int** listMax, int** listMin, int** gameState);

	//GameStateChecker
	// Directional line counters starting at (col, row); findAll combines them.
	int findAll(int col, int numCols, int row, int numRows, int k, int** gameState, bool isFirstPlayer);
	int northWest(int col, int numCols, int row, int numRows, int k, int** gameState, bool isFirstPlayer);
	int north(int col, int numCols, int row, int numRows, int k, int** gameState, bool isFirstPlayer);
	int northEast(int col, int numCols, int row, int numRows, int k, int** gameState, bool isFirstPlayer);
	int west(int col, int numCols, int row, int numRows, int k, int** gameState, bool isFirstPlayer);
	int east(int col, int numCols, int row, int numRows, int k, int** gameState, bool isFirstPlayer);
	int southWest(int col, int numCols, int row, int numRows, int k, int** gameState, bool isFirstPlayer);
	int south(int col, int numCols, int row, int numRows, int k, int** gameState, bool isFirstPlayer);
	int southEast(int col, int numCols, int row, int numRows, int k, int** gameState, bool isFirstPlayer);

	//Minimax
	int findMaximum(int maxValue, int minValue, int dp, int** gameState);
	int findMinimum(int maxValue, int minValue, int dp, int** gameState);
	int retrieveNumCols();
	int retrieveNumRows();
	int retrieveK();

	//Validation
	// Legal moves for the current board (ordered when isInOrder is true).
	std::vector<Move> checkValidMoves(int** gameState, bool isInOrder);
	MoveInfo buildValidTree(int** gameState, bool isPlayerTurn, Move tempMove);

	//Alpha Beta
	Move alphaBetaPruning(int maxValue, int minValue, int dp, MoveInfo storedMove);

	//IDS
	// Iterative-deepening search bounded by the clock started at ct.
	Move IDS(int maxValue, int minValue, bool isPlayerTurn, MoveInfo storedMove, std::clock_t ct);
	Move retrieveEfficientMove();
};
#endif //AISHELL_H
| 40.528736 | 174 | 0.717527 | [
"vector"
] |
1dad92cec5e99c3e37b7f684eeb47d8c7ffa618d | 35,092 | c | C | src/lgc.c | zenkj/luarc-5.1 | 3dda3b1aacdf9bbe6bd7a880299191b4553e3880 | [
"MIT"
] | 6 | 2015-05-17T09:24:24.000Z | 2020-04-30T22:36:47.000Z | src/lgc.c | zenkj/luarc-5.1 | 3dda3b1aacdf9bbe6bd7a880299191b4553e3880 | [
"MIT"
] | 1 | 2015-04-28T18:29:19.000Z | 2016-05-29T03:20:57.000Z | src/lgc.c | zenkj/luarc-5.1 | 3dda3b1aacdf9bbe6bd7a880299191b4553e3880 | [
"MIT"
] | 2 | 2016-01-22T01:25:19.000Z | 2020-10-06T15:49:41.000Z | /*
** $Id: lgc.c,v 2.38.1.1 2007/12/27 13:02:25 roberto Exp $
** Garbage Collector
** See Copyright Notice in lua.h
*/
#include <string.h>
#define lgc_c
#define LUA_CORE
#include "lua.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#define GCSTEPSIZE 1024u
#define GCSWEEPMAX 40
#define GCSWEEPCOST 10
#define GCFINALIZECOST 100
#define maskmarks cast_byte(~(bitmask(BLACKBIT)|WHITEBITS))
#define makewhite(g,x) \
((x)->gch.marked = cast_byte(((x)->gch.marked & maskmarks) | luaC_white(g)))
#define white2gray(x) reset2bits((x)->gch.marked, WHITE0BIT, WHITE1BIT)
#define black2gray(x) resetbit((x)->gch.marked, BLACKBIT)
#define stringmark(s) reset2bits((s)->tsv.marked, WHITE0BIT, WHITE1BIT)
#define isfinalized(u) testbit((u)->marked, FINALIZEDBIT)
#define markfinalized(u) l_setbit((u)->marked, FINALIZEDBIT)
#define KEYWEAK bitmask(KEYWEAKBIT)
#define VALUEWEAK bitmask(VALUEWEAKBIT)
#define markvalue(g,o) { checkconsistency(o); \
if (iscollectable(o) && iswhite(gcvalue(o))) reallymarkobject(g,gcvalue(o)); }
#define markobject(g,t) { if (iswhite(obj2gco(t))) \
reallymarkobject(g, obj2gco(t)); }
#define setthreshold(g) (g->GCthreshold = (g->estimate/100) * g->gcpause)
/*
** Marks the key of an empty table entry as dead so the slot can be
** reused while keeping the hash chain intact.  In the refcount build the
** key's reference is released first (unless it is already dead).
*/
#if LUA_REFCOUNT
static void removeentry (lua_State *L, Node *n) {
#else
static void removeentry (Node *n) {
#endif
  lua_assert(ttisnil(gval(n)));
  if (iscollectable(gkey(n))) {
#if LUA_REFCOUNT
    if (ttype(gkey(n)) != LUA_TDEADKEY)
      luarc_subref(L, key2tval(n));
#endif
    setttype(gkey(n), LUA_TDEADKEY);  /* dead key; remove it */
  }
}
/*
** Marks a white object: turns it gray and, depending on its type, either
** blackens it immediately (strings, userdata, closed upvalues) or links
** it into the global gray list for later traversal.  The refcount build
** uses a doubly-linked gray list embedded in the common header (tgch).
*/
static void reallymarkobject (global_State *g, GCObject *o) {
  lua_assert(iswhite(o) && !isdead(g, o));
  white2gray(o);
  switch (o->gch.tt) {
    case LUA_TSTRING: {
      /* strings have no references; gray is enough to keep them alive */
      return;
    }
    case LUA_TUSERDATA: { /* udata has no gc ptr, can't add to gc list */
      Table *mt = gco2u(o)->metatable;
      gray2black(o);  /* udata are never gray */
      if (mt) markobject(g, mt);
      markobject(g, gco2u(o)->env);
      return;
    }
    case LUA_TUPVAL: {
      /* open upval is in g->uvhead list, closed in rootgc list */
      UpVal *uv = gco2uv(o);
      markvalue(g, uv->v);
      if (uv->v == &uv->u.value)  /* closed? */
        gray2black(o);  /* open upvalues are never black */
      return;
    }
#if LUA_REFCOUNT
    case LUA_TFUNCTION:
    case LUA_TTABLE:
    case LUA_TTHREAD:
    case LUA_TPROTO: {
      /* push onto the doubly-linked gray list */
      o->tgch.gcnext = g->gray;
      o->tgch.gcprev = NULL;
      if (g->gray) g->gray->tgch.gcprev = o;
      g->gray = o;
      break;
    }
#else
    case LUA_TFUNCTION: {
      gco2cl(o)->c.gclist = g->gray;
      g->gray = o;
      break;
    }
    case LUA_TTABLE: {
      gco2h(o)->gclist = g->gray;
      g->gray = o;
      break;
    }
    case LUA_TTHREAD: {
      gco2th(o)->gclist = g->gray;
      g->gray = o;
      break;
    }
    case LUA_TPROTO: {
      gco2p(o)->gclist = g->gray;
      g->gray = o;
      break;
    }
#endif
    default: lua_assert(0);
  }
}
/*
** Marks all userdata queued for finalization (the circular tmudata list)
** so they and everything they reference survive until their __gc runs.
*/
static void marktmu (global_State *g) {
  GCObject *u = g->tmudata;
  if (u) {
    do {
      u = u->gch.next;
      makewhite(g, u);  /* may be marked, if left from previous GC */
      reallymarkobject(g, u);
    } while (u != g->tmudata);
  }
}
/* move `dead' udata that need finalization to list `tmudata' */
/*
** Walks the userdata list (which starts after the main thread): udata
** that are dead (or <all>) and have a __gc metamethod are unlinked and
** appended to the circular tmudata list; the rest are only flagged as
** finalized.  Returns the total size of the udata moved.
*/
size_t luaC_separateudata (lua_State *L, int all) {
  global_State *g = G(L);
  size_t deadmem = 0;
  GCObject **p = &g->mainthread->next;
  GCObject *curr;
  while ((curr = *p) != NULL) {
    if (!(iswhite(curr) || all) || isfinalized(gco2u(curr)))
      p = &curr->gch.next;  /* don't bother with them */
    else if (fasttm(L, gco2u(curr)->metatable, TM_GC) == NULL) {
      markfinalized(gco2u(curr));  /* don't need finalization */
      p = &curr->gch.next;
    }
    else {  /* must call its gc method */
      deadmem += sizeudata(gco2u(curr));
      markfinalized(gco2u(curr));
#if LUA_REFCOUNT
      if (curr->tgch.next)
        curr->tgch.next->tgch.prev = curr->tgch.prev;
      /* udata with FINALIZEDBIT set will not be freed by refcount, so
      ** no need to assign curr->tgch.prev here.
      */
#endif
      *p = curr->gch.next;
      /* link `curr' at the end of `tmudata' list */
      if (g->tmudata == NULL)  /* list is empty? */
        g->tmudata = curr->gch.next = curr;  /* creates a circular list */
      else {
        curr->gch.next = g->tmudata->gch.next;
        g->tmudata->gch.next = curr;
        g->tmudata = curr;
      }
    }
  }
  return deadmem;
}
/*
** Traverses a table during marking: marks its metatable, then its array
** and hash parts, skipping weak keys/values per the __mode string.
** Weak tables are linked into g->weak for post-mark clearing.  Returns
** nonzero when the table is weak in at least one direction.
*/
static int traversetable (global_State *g, Table *h) {
  int i;
  int weakkey = 0;
  int weakvalue = 0;
  const TValue *mode;
  if (h->metatable)
    markobject(g, h->metatable);
  mode = gfasttm(g, h->metatable, TM_MODE);
  if (mode && ttisstring(mode)) {  /* is there a weak mode? */
    weakkey = (strchr(svalue(mode), 'k') != NULL);
    weakvalue = (strchr(svalue(mode), 'v') != NULL);
    if (weakkey || weakvalue) {  /* is really weak? */
      h->marked &= ~(KEYWEAK | VALUEWEAK);  /* clear bits */
      h->marked |= cast_byte((weakkey << KEYWEAKBIT) |
                             (weakvalue << VALUEWEAKBIT));
#if LUA_REFCOUNT
      h->gcnext = g->weak;
      lua_assert(h->gcprev == NULL); /* h is former gray list head */
      if (g->weak) g->weak->tgch.gcprev = obj2gco(h);
#else
      h->gclist = g->weak;  /* must be cleared after GC, ... */
#endif
      g->weak = obj2gco(h);  /* ... so put in the appropriate list */
    }
  }
  if (weakkey && weakvalue) return 1;  /* fully weak: nothing to mark */
  if (!weakvalue) {
    i = h->sizearray;
    while (i--)
      markvalue(g, &h->array[i]);
  }
  i = sizenode(h);
  while (i--) {
    Node *n = gnode(h, i);
    lua_assert(ttype(gkey(n)) != LUA_TDEADKEY || ttisnil(gval(n)));
    if (ttisnil(gval(n))) {
#if LUA_REFCOUNT
      removeentry(g->mainthread, n);  /* remove empty entries */
#else
      removeentry(n);  /* remove empty entries */
#endif
    }
    else {
      lua_assert(!ttisnil(gkey(n)));
      if (!weakkey) markvalue(g, gkey(n));
      if (!weakvalue) markvalue(g, gval(n));
    }
  }
  return weakkey || weakvalue;
}
/*
** All marks are conditional because a GC may happen while the
** prototype is still being created
*/
/* Marks everything a function prototype references: source name,
** constants, upvalue names, nested prototypes and local-variable names. */
static void traverseproto (global_State *g, Proto *f) {
  int i;
  if (f->source) stringmark(f->source);
  for (i=0; i<f->sizek; i++)  /* mark literals */
    markvalue(g, &f->k[i]);
  for (i=0; i<f->sizeupvalues; i++) {  /* mark upvalue names */
    if (f->upvalues[i])
      stringmark(f->upvalues[i]);
  }
  for (i=0; i<f->sizep; i++) {  /* mark nested protos */
    if (f->p[i])
      markobject(g, f->p[i]);
  }
  for (i=0; i<f->sizelocvars; i++) {  /* mark local-variable names */
    if (f->locvars[i].varname)
      stringmark(f->locvars[i].varname);
  }
}
/*
** Marks a closure's environment and upvalues.  C closures store upvalues
** as plain TValues; Lua closures reference UpVal objects and their
** prototype.
*/
static void traverseclosure (global_State *g, Closure *cl) {
  markobject(g, cl->c.env);
  if (cl->c.isC) {
    int i;
    for (i=0; i<cl->c.nupvalues; i++)  /* mark its upvalues */
      markvalue(g, &cl->c.upvalue[i]);
  }
  else {
    int i;
    lua_assert(cl->l.nupvalues == cl->l.p->nups);
    markobject(g, cl->l.p);
    for (i=0; i<cl->l.nupvalues; i++)  /* mark its upvalues */
      markobject(g, cl->l.upvals[i]);
  }
}
/*
** Shrinks a thread's CallInfo array and value stack when they are using
** less than a quarter of their capacity (and stay above the minimums).
** Skipped while a stack-overflow error is being handled.
*/
static void checkstacksizes (lua_State *L, StkId max) {
  int ci_used = cast_int(L->ci - L->base_ci);  /* number of `ci' in use */
  int s_used = cast_int(max - L->stack);  /* part of stack in use */
  if (L->size_ci > LUAI_MAXCALLS)  /* handling overflow? */
    return;  /* do not touch the stacks */
  if (4*ci_used < L->size_ci && 2*BASIC_CI_SIZE < L->size_ci)
    luaD_reallocCI(L, L->size_ci/2);  /* still big enough... */
  condhardstacktests(luaD_reallocCI(L, ci_used + 1));
  if (4*s_used < L->stacksize &&
      2*(BASIC_STACK_SIZE+EXTRA_STACK) < L->stacksize)
    luaD_reallocstack(L, L->stacksize/2);  /* still big enough... */
  condhardstacktests(luaD_reallocstack(L, s_used));
}
/*
** Marks a thread's stack: its globals table, every live stack slot, and
** clears the unused slots up to the highest CallInfo top (in the
** refcount build this also releases references held by stale values).
** Finally tries to shrink the stacks.
*/
static void traversestack (global_State *g, lua_State *l) {
  StkId o, lim;
  CallInfo *ci;
  markvalue(g, gt(l));
  lim = l->top;
  for (ci = l->base_ci; ci <= l->ci; ci++) {  /* find highest active top */
    lua_assert(ci->top <= l->stack_last);
    if (lim < ci->top) lim = ci->top;
  }
  for (o = l->stack; o < l->top; o++)
    markvalue(g, o);
#if LUA_REFCOUNT
  /* Here it's very important to free temporary values */
  for (; o <= lim; o++)
    setnilvalue(l, o);
#else /* !LUA_REFCOUNT */
  for (; o <= lim; o++)
    setnilvalue(o);
#endif
  checkstacksizes(l, lim);
}
/*
** traverse one gray object, turning it to black.
** Returns `quantity' traversed.
*/
/* Pops the head of the gray list, traverses it by type, and returns an
** estimate of the memory it covers (used to pace incremental GC steps).
** Weak tables are re-grayed; threads are re-queued on grayagain so their
** stacks get a final atomic traversal. */
static l_mem propagatemark (global_State *g) {
  GCObject *o = g->gray;
  lua_assert(isgray(o));
  gray2black(o);
  switch (o->gch.tt) {
    case LUA_TTABLE: {
      Table *h = gco2h(o);
#if LUA_REFCOUNT
      if (h->gcnext) h->gcnext->tgch.gcprev = NULL;  /* unlink from gray */
      g->gray = h->gcnext;
#else
      g->gray = h->gclist;
#endif
      if (traversetable(g, h))  /* table is weak? */
        black2gray(o);  /* keep it gray */
      return sizeof(Table) + sizeof(TValue) * h->sizearray +
             sizeof(Node) * sizenode(h);
    }
    case LUA_TFUNCTION: {
      Closure *cl = gco2cl(o);
#if LUA_REFCOUNT
      if (cl->c.gcnext) cl->c.gcnext->tgch.gcprev = NULL;
      g->gray = cl->c.gcnext;
#else
      g->gray = cl->c.gclist;
#endif
      traverseclosure(g, cl);
      return (cl->c.isC) ? sizeCclosure(cl->c.nupvalues) :
                           sizeLclosure(cl->l.nupvalues);
    }
    case LUA_TTHREAD: {
      lua_State *th = gco2th(o);
#if LUA_REFCOUNT
      if (th->gcnext) th->gcnext->tgch.gcprev = NULL;
      g->gray = th->gcnext;
      if (g->grayagain) g->grayagain->tgch.gcprev = o;
      th->gcnext = g->grayagain;
      g->grayagain = o;
#else
      g->gray = th->gclist;
      th->gclist = g->grayagain;
      g->grayagain = o;
#endif
      black2gray(o);  /* stacks are mutable: revisit in atomic phase */
      traversestack(g, th);
      return sizeof(lua_State) + sizeof(TValue) * th->stacksize +
             sizeof(CallInfo) * th->size_ci;
    }
    case LUA_TPROTO: {
      Proto *p = gco2p(o);
#if LUA_REFCOUNT
      if (p->gcnext) p->gcnext->tgch.gcprev = NULL;
      g->gray = p->gcnext;
#else
      g->gray = p->gclist;
#endif
      traverseproto(g, p);
      return sizeof(Proto) + sizeof(Instruction) * p->sizecode +
             sizeof(Proto *) * p->sizep +
             sizeof(TValue) * p->sizek +
             sizeof(int) * p->sizelineinfo +
             sizeof(LocVar) * p->sizelocvars +
             sizeof(TString *) * p->sizeupvalues;
    }
    default: lua_assert(0); return 0;
  }
}
/* Drains the gray list completely, returning the total amount of
** memory traversed. */
static size_t propagateall (global_State *g) {
  size_t total = 0;
  while (g->gray != NULL)
    total += propagatemark(g);
  return total;
}
/*
** The next function tells whether a key or value can be cleared from
** a weak table. Non-collectable objects are never removed from weak
** tables. Strings behave as `values', so are never removed too. For
** other objects: if really collected, cannot keep them; for userdata
** being finalized, keep them in keys, but not in values
*/
static int iscleared (const TValue *o, int iskey) {
  if (!iscollectable(o)) return 0;
  if (ttisstring(o)) {
    stringmark(rawtsvalue(o));  /* strings are `values', so are never weak */
    return 0;
  }
  return iswhite(gcvalue(o)) ||
    (ttisuserdata(o) && (!iskey && isfinalized(uvalue(o))));
}
/*
** clear collected entries from weaktables
*/
/* Walks the weak-table list: nils out weak values whose contents were
** collected, and removes whole entries whose weak key or value is gone.
** The refcount build takes L so releasing references is possible. */
#if LUA_REFCOUNT
static void cleartable (lua_State *L, GCObject *l) {
#else
static void cleartable (GCObject *l) {
#endif
  while (l) {
    Table *h = gco2h(l);
    int i = h->sizearray;
    lua_assert(testbit(h->marked, VALUEWEAKBIT) ||
               testbit(h->marked, KEYWEAKBIT));
    if (testbit(h->marked, VALUEWEAKBIT)) {
      while (i--) {
        TValue *o = &h->array[i];
        if (iscleared(o, 0)) {  /* value was collected? */
#if LUA_REFCOUNT
          setnilvalue(L, o);  /* remove value */
#else /* !LUA_REFCOUNT */
          setnilvalue(o);  /* remove value */
#endif
        }
      }
    }
    i = sizenode(h);
    while (i--) {
      Node *n = gnode(h, i);
      if (!ttisnil(gval(n)) &&  /* non-empty entry? */
          (iscleared(key2tval(n), 1) || iscleared(gval(n), 0))) {
#if LUA_REFCOUNT
        setnilvalue(L, gval(n));  /* remove value ... */
        removeentry(L, n);  /* remove entry from table */
#else
        setnilvalue(gval(n));  /* remove value ... */
        removeentry(n);  /* remove entry from table */
#endif
      }
    }
#if LUA_REFCOUNT
    l = h->gcnext;
#else
    l = h->gclist;
#endif
  }
}
/*
** Releases a collectable object by type.  Strings and userdata also
** update the string-table count and (in profile builds) the allocation
** statistics.
*/
static void freeobj (lua_State *L, GCObject *o) {
  switch (o->gch.tt) {
    case LUA_TPROTO: luaF_freeproto(L, gco2p(o)); break;
    case LUA_TFUNCTION: luaF_freeclosure(L, gco2cl(o)); break;
    case LUA_TUPVAL: luaF_freeupval(L, gco2uv(o)); break;
    case LUA_TTABLE: luaH_free(L, gco2h(o)); break;
    case LUA_TTHREAD: {
      /* the running thread and the main thread are never collected */
      lua_assert(gco2th(o) != L && gco2th(o) != G(L)->mainthread);
      luaE_freethread(L, gco2th(o));
      break;
    }
    case LUA_TSTRING: {
#if LUA_PROFILE
      G(L)->stringcount--;
      G(L)->stringbytes -= sizestring(gco2ts(o));
#endif
      G(L)->strt.nuse--;
      luaM_freemem(L, o, sizestring(gco2ts(o)));
      break;
    }
    case LUA_TUSERDATA: {
#if LUA_PROFILE
      G(L)->udatacount--;
      G(L)->udatabytes -= sizeudata(gco2u(o));
#endif
      luaM_freemem(L, o, sizeudata(gco2u(o)));
      break;
    }
    default: lua_assert(0);
  }
}
#define sweepwholelist(L,p)	sweeplist(L,p,MAX_LUMEM)

/*
** Sweeps up to <count> objects from the list at *p: objects of the
** "other" white are dead and freed (refcount build also unlinks them
** from the doubly-linked all-objects list); survivors are repainted the
** current white.  Threads additionally get their open-upvalue lists
** swept.  Returns the position where sweeping stopped.
*/
static GCObject **sweeplist (lua_State *L, GCObject **p, lu_mem count) {
  GCObject *curr;
  global_State *g = G(L);
  int deadmask = otherwhite(g);
  while ((curr = *p) != NULL && count-- > 0) {
    if (curr->gch.tt == LUA_TTHREAD)  /* sweep open upvalues of each thread */
      sweepwholelist(L, &gco2th(curr)->openupval);
    if ((curr->gch.marked ^ WHITEBITS) & deadmask) {  /* not dead? */
      lua_assert(!isdead(g, curr) || testbit(curr->gch.marked, FIXEDBIT));
      makewhite(g, curr);  /* make it white (for next cycle) */
      p = &curr->gch.next;
    }
    else {  /* must erase `curr' */
      lua_assert(isdead(g, curr) || deadmask == bitmask(SFIXEDBIT));
#if LUA_REFCOUNT
      if (curr->tgch.next)
        curr->tgch.next->tgch.prev = curr->tgch.prev;
#endif
      *p = curr->gch.next;
      if (curr == g->rootgc)  /* is the first element of the list? */
        g->rootgc = curr->gch.next;  /* adjust first */
      freeobj(L, curr);
    }
  }
  return p;
}
/*
** Shrink auxiliary structures that have grown past their useful size:
** halve the string hash table when it is mostly empty, and halve the
** shared scanner buffer when it exceeds twice the minimum size.
*/
static void checkSizes (lua_State *L) {
  global_State *g = G(L);
  int hashsize = g->strt.size;
  size_t buffsize = luaZ_sizebuffer(&g->buff);
  /* string table above minimum size but less than 1/4 full? */
  if (hashsize > MINSTRTABSIZE*2 &&
      g->strt.nuse < cast(lu_int32, hashsize/4))
    luaS_resize(L, hashsize/2);  /* table is too big: halve it */
  /* scanner buffer larger than twice the minimum? */
  if (buffsize > LUA_MINBUFFER*2)
    luaZ_resizebuffer(L, &g->buff, buffsize/2);  /* buffer too big: halve it */
}
/*
** Finalize one userdata: pop the first element of the `tmudata'
** circular list, move it back to the root list (made white so a later
** cycle can collect it), and run its __gc metamethod with debug hooks
** and GC steps disabled.
*/
static void GCTM (lua_State *L) {
  global_State *g = G(L);
  GCObject *o = g->tmudata->gch.next;  /* get first element */
  Udata *udata = rawgco2u(o);
  const TValue *tm;
  /* remove udata from `tmudata' */
  if (o == g->tmudata)  /* last element? */
    g->tmudata = NULL;
  else
    g->tmudata->gch.next = udata->uv.next;
  udata->uv.next = g->mainthread->next;  /* return it to `root' list */
#if LUA_REFCOUNT
  /* maintain the doubly-linked root list */
  udata->uv.prev = obj2gco(g->mainthread);
  if (g->mainthread->next)
    g->mainthread->next->gch.prev = o;
#endif
  g->mainthread->next = o;
  makewhite(g, o);
  tm = fasttm(L, udata->uv.metatable, TM_GC);
  if (tm != NULL) {
    lu_byte oldah = L->allowhook;
    lu_mem oldt = g->GCthreshold;
    L->allowhook = 0;  /* stop debug hooks during GC tag method */
    g->GCthreshold = 2*g->totalbytes;  /* avoid GC steps */
    L->top += 2;  /* slots for the metamethod and the userdata */
    setobj2s(L, L->top-2, tm);
    setuvalue(L, L->top-1, udata);
    luaD_call(L, L->top - 2, 0);
    L->allowhook = oldah;  /* restore hooks */
    g->GCthreshold = oldt;  /* restore threshold */
  }
}
/*
** Call all pending GC tag methods (drains the `tmudata' list).
*/
void luaC_callGCTM (lua_State *L) {
  global_State *g = G(L);
  while (g->tmudata != NULL)
    GCTM(L);  /* finalize one userdata per call */
}
/*
** Free every collectable object (used when closing a Lua state).
** Setting currentwhite to both white bits plus SFIXEDBIT makes every
** object — including fixed ones — look dead to the sweep.
*/
void luaC_freeall (lua_State *L) {
  global_State *g = G(L);
  int i;
  g->currentwhite = WHITEBITS | bitmask(SFIXEDBIT);  /* mask to collect all elements */
  sweepwholelist(L, &g->rootgc);
  for (i = 0; i < g->strt.size; i++)  /* free all string lists */
    sweepwholelist(L, &g->strt.hash[i]);
}
/* mark the metatables of the basic types (string, number, etc.) */
static void markmt (global_State *g) {
  int tag;
  for (tag = 0; tag < NUM_TAGS; tag++) {
    if (g->mt[tag] != NULL)
      markobject(g, g->mt[tag]);
  }
}
#if LUA_PROFILE
/*
** Periodically (at most every 10 seconds, per lua_nanosecond) dump heap
** statistics to the log at level 2: total object count/bytes, GC and
** non-GC period stats, and per-type counts and byte totals.
*/
static void logmemstat (lua_State *L) {
  lua_Number now;
  global_State *g = G(L);
  now = lua_nanosecond(L);
  if (now - g->prevtime > 10.0*1000*1000*1000) {  /* 10s in nanoseconds */
    long long n, s;
    g->prevtime = now;
    n = (long long)(g->tablecount + g->protocount + g->lclosurecount
        + g->cclosurecount + g->threadcount + g->openupvalcount
        + g->closeupvalcount + g->udatacount + g->stringcount);
    s = (long long)g->totalbytes;
    lualog(L, 2, "heapinfo: count %lld, bytes %lld", n, s);
    lualog(L, 2, "gcperiod: max %.0lf, min %.0lf, avg %.1lf, cnt %lld",
        g->gcperiod.max, g->gcperiod.min,
        g->gcperiod.avg, (long long)g->gcperiod.cnt);
    lualog(L, 2, "nogcperiod: max %.0lf, min %.0lf, avg %.1lf, cnt %lld",
        g->nogcperiod.max, g->nogcperiod.min,
        g->nogcperiod.avg, (long long)g->nogcperiod.cnt);
    lualog(L, 2, "tableinfo: count %lld, bytes %lld",
        (long long)g->tablecount, (long long)g->tablebytes);
    lualog(L, 2, "protoinfo: count %lld, bytes %lld",
        (long long)g->protocount, (long long)g->protobytes);
    lualog(L, 2, "closureinfo: count %lld, bytes %lld",
        (long long)(g->cclosurecount + g->lclosurecount),
        (long long)(g->cclosurebytes + g->lclosurebytes));
    lualog(L, 2, "threadinfo: count %lld, bytes %lld",
        (long long)g->threadcount, (long long)g->threadbytes);
    lualog(L, 2, "udatainfo: count %lld, bytes %lld",
        (long long)g->udatacount, (long long)g->udatabytes);
    lualog(L, 2, "stringinfo: count %lld, bytes %lld",
        (long long)g->stringcount, (long long)g->stringbytes);
  }
}
#endif
/* mark root set: start a new collection cycle */
static void markroot (lua_State *L) {
  global_State *g = G(L);
  /* start with empty gray lists */
  g->gray = NULL;
  g->grayagain = NULL;
  g->weak = NULL;
  markobject(g, g->mainthread);
  /* make global table be traversed before main stack */
  markvalue(g, gt(g->mainthread));
  markvalue(g, registry(L));
  markmt(g);  /* metatables of basic types are roots too */
  g->gcstate = GCSpropagate;  /* enter the mark-propagation phase */
#if LUA_PROFILE
  if (g->loglevel >= 2)
    logmemstat(L);
#endif
}
/*
** Re-mark the values of all open upvalues still gray; such an upvalue
** may belong to a (possibly dead) thread, so its value must be kept
** alive explicitly.
*/
static void remarkupvals (global_State *g) {
  UpVal *uv = g->uvhead.u.l.next;
  for (; uv != &g->uvhead; uv = uv->u.l.next) {
    /* the doubly-linked list of open upvalues must be consistent */
    lua_assert(uv->u.l.next->u.l.prev == uv && uv->u.l.prev->u.l.next == uv);
    if (isgray(obj2gco(uv)))
      markvalue(g, uv->v);
  }
}
/*
** Atomic (non-interruptible) end of the mark phase: re-mark anything
** that may have changed while marking was interleaved with the mutator,
** separate userdata needing finalization, clear weak tables, flip the
** current white, and set up the sweep phase.
*/
static void atomic (lua_State *L) {
  global_State *g = G(L);
  size_t udsize;  /* total size of userdata to be finalized */
  logperiodbegin(L, 2);
  /* remark occasional upvalues of (maybe) dead threads */
  remarkupvals(g);
  /* traverse objects caught by write barrier and by 'remarkupvals' */
  propagateall(g);
  /* remark weak tables */
  g->gray = g->weak;
  g->weak = NULL;
  lua_assert(!iswhite(obj2gco(g->mainthread)));
  markobject(g, L);  /* mark running thread */
  markmt(g);  /* mark basic metatables (again) */
  propagateall(g);
  /* remark gray again */
  g->gray = g->grayagain;
  g->grayagain = NULL;
  propagateall(g);
  udsize = luaC_separateudata(L, 0);  /* separate userdata to be finalized */
  marktmu(g);  /* mark `preserved' userdata */
  udsize += propagateall(g);  /* remark, to propagate `preserveness' */
#if LUA_REFCOUNT
  cleartable(L, g->weak);  /* remove collected objects from weak tables */
#else
  cleartable(g->weak);  /* remove collected objects from weak tables */
#endif
  /* flip current white */
  g->currentwhite = cast_byte(otherwhite(g));
  g->sweepstrgc = 0;
  g->sweepgc = &g->rootgc;
  g->gcstate = GCSsweepstring;  /* sweep begins with the string table */
  g->estimate = g->totalbytes - udsize;  /* first estimate */
  logperiod(L, 2, "gc atomic period");
}
/*
** Perform one basic GC step and return its "cost" (an amount of work,
** used by luaC_step to decide how long to keep stepping).  Drives the
** collector state machine:
**   GCSpause -> GCSpropagate -> GCSsweepstring -> GCSsweep -> GCSfinalize
*/
static l_mem singlestep (lua_State *L) {
  global_State *g = G(L);
#if LUA_PROFILE
  statacc(&g->gcsteps);
#endif
  /*lua_checkmemory(L);*/
  switch (g->gcstate) {
    case GCSpause: {
#if LUA_PROFILE
      lua_Number t = lua_nanosecond(L);
      statloop1(&g->nogcperiod, t);
      statacc1(&g->gcperiod, t);
      lualog(L, 3, "gc begin");
#endif
      markroot(L);  /* start a new collection */
      return 0;
    }
    case GCSpropagate: {
#if LUA_PROFILE
      statacc(&g->marksteps);
#endif
      if (g->gray)
        return propagatemark(g);  /* traverse one gray object */
      else {  /* no more `gray' objects */
        atomic(L);  /* finish mark phase */
#if LUA_PROFILE
        lualog(L, 3, "gc sweepstring begin");
        statloop(&g->marksteps);
#endif
        return 0;
      }
    }
    case GCSsweepstring: {
      lu_mem old = g->totalbytes;
#if LUA_PROFILE
      statacc(&g->sweepstringsteps);
#endif
      sweepwholelist(L, &g->strt.hash[g->sweepstrgc++]);  /* one chain per step */
      if (g->sweepstrgc >= g->strt.size) {  /* nothing more to sweep? */
        g->gcstate = GCSsweep;  /* end sweep-string phase */
#if LUA_PROFILE
        lualog(L, 3, "gc sweep begin");
        statloop(&g->sweepstringsteps);
#endif
      }
      lua_assert(old >= g->totalbytes);
      g->estimate -= old - g->totalbytes;  /* subtract memory just freed */
      return GCSWEEPCOST;
    }
    case GCSsweep: {
      lu_mem old = g->totalbytes;
#if LUA_PROFILE
      statacc(&g->sweepsteps);
#endif
      g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX);
      if (*g->sweepgc == NULL) {  /* nothing more to sweep? */
        checkSizes(L);
        g->gcstate = GCSfinalize;  /* end sweep phase */
#if LUA_PROFILE
        lualog(L, 3, "gc finalize begin");
        statloop(&g->sweepsteps);
#endif
      }
      lua_assert(old >= g->totalbytes);
      g->estimate -= old - g->totalbytes;  /* subtract memory just freed */
      return GCSWEEPMAX*GCSWEEPCOST;
    }
    case GCSfinalize: {
#if LUA_PROFILE
      statacc(&g->finalizesteps);
#endif
      if (g->tmudata) {
        GCTM(L);  /* finalize one userdata */
        if (g->estimate > GCFINALIZECOST)
          g->estimate -= GCFINALIZECOST;
        return GCFINALIZECOST;
      }
      else {
#if LUA_PROFILE
        lua_Number t = lua_nanosecond(L);
        statacc1(&g->nogcperiod, t);
        statloop1(&g->gcperiod, t);
        statloop(&g->gcsteps);
        statloop(&g->finalizesteps);
        lualog(L, 3, "gc end");
#endif
        g->gcstate = GCSpause;  /* end collection */
        g->gcdept = 0;
        return 0;
      }
    }
    default: lua_assert(0); return 0;
  }
}
/*
** Perform one incremental GC "step" of work proportional to gcstepmul.
** `gcdept' accumulates how far allocation has outrun the collector so
** the next threshold can be tightened when the GC is falling behind.
*/
void luaC_step (lua_State *L) {
  global_State *g = G(L);
  l_mem lim = (GCSTEPSIZE/100) * g->gcstepmul;  /* work budget for this call */
  if (lim == 0)
    lim = (MAX_LUMEM-1)/2;  /* no limit */
  g->gcdept += g->totalbytes - g->GCthreshold;
  do {
    lim -= singlestep(L);
    if (g->gcstate == GCSpause)
      break;  /* a full cycle just finished */
  } while (lim > 0);
  if (g->gcstate != GCSpause) {
    if (g->gcdept < GCSTEPSIZE)
      g->GCthreshold = g->totalbytes + GCSTEPSIZE;  /* - lim/g->gcstepmul;*/
    else {
      g->gcdept -= GCSTEPSIZE;
      g->GCthreshold = g->totalbytes;  /* GC is behind: step again ASAP */
    }
  }
  else {
    lua_assert(g->totalbytes >= g->estimate);
    setthreshold(g);
  }
}
/*
** Run a full (non-incremental) collection cycle.  If a mark phase was
** in progress its partial results are discarded: sweeping everything
** back to white first, then running a complete mark+sweep+finalize.
*/
void luaC_fullgc (lua_State *L) {
  global_State *g = G(L);
  if (g->gcstate <= GCSpropagate) {
    /* reset sweep marks to sweep all elements (returning them to white) */
    g->sweepstrgc = 0;
    g->sweepgc = &g->rootgc;
    /* reset other collector lists */
    g->gray = NULL;
    g->grayagain = NULL;
    g->weak = NULL;
    g->gcstate = GCSsweepstring;
  }
  lua_assert(g->gcstate != GCSpause && g->gcstate != GCSpropagate);
  /* finish any pending sweep phase */
  while (g->gcstate != GCSfinalize) {
    lua_assert(g->gcstate == GCSsweepstring || g->gcstate == GCSsweep);
    singlestep(L);
  }
  markroot(L);  /* start a fresh cycle ... */
  while (g->gcstate != GCSpause) {
    singlestep(L);  /* ... and run it to completion */
  }
  setthreshold(g);
}
/*
** Forward write barrier (for non-table objects): a white object `v' was
** stored into a black object `o'.  During propagation the invariant
** "black never points to white" is restored by marking `v'; in other
** phases it suffices to repaint `o' white so no further barriers fire.
*/
void luaC_barrierf (lua_State *L, GCObject *o, GCObject *v) {
  global_State *g = G(L);
  lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
  lua_assert(g->gcstate != GCSfinalize && g->gcstate != GCSpause);
  lua_assert(ttype(&o->gch) != LUA_TTABLE);  /* tables use the backward barrier */
  /* must keep invariant? */
  if (g->gcstate == GCSpropagate)
    reallymarkobject(g, v);  /* restore invariant */
  else  /* don't mind */
    makewhite(g, o);  /* mark as white just to avoid other barriers */
}
/*
** Backward write barrier for tables: instead of marking the stored
** value, repaint the table gray and queue it on `grayagain' so it is
** re-traversed during the atomic phase.
*/
void luaC_barrierback (lua_State *L, Table *t) {
  global_State *g = G(L);
  GCObject *o = obj2gco(t);
  lua_assert(isblack(o) && !isdead(g, o));
  lua_assert(g->gcstate != GCSfinalize && g->gcstate != GCSpause);
  lualog(L, 3, "barrier back");
  black2gray(o);  /* make table gray (again) */
#if LUA_REFCOUNT
  /* refcount build keeps grayagain doubly linked via gcnext/gcprev */
  t->gcnext = g->grayagain;
  t->gcprev = NULL;
  if (g->grayagain)
    g->grayagain->tgch.gcprev = o;
  g->grayagain = o;
#else
  t->gclist = g->grayagain;
  g->grayagain = o;
#endif
}
/*
** Link a newly created collectable object at the head of the `rootgc'
** list (doubly linked under LUA_REFCOUNT), painting it with the current
** white and stamping its type tag.
*/
void luaC_link (lua_State *L, GCObject *o, lu_byte tt) {
  global_State *g = G(L);
#if LUA_REFCOUNT
  o->gch.next = g->rootgc;
  o->gch.prev = NULL;
  if (g->rootgc)
    g->rootgc->gch.prev = o;
  g->rootgc = o;
#else
  o->gch.next = g->rootgc;
  g->rootgc = o;
#endif
  o->gch.marked = luaC_white(g);
  o->gch.tt = tt;
}
/*
** Link a closed upvalue into the `rootgc' list.  If the upvalue is gray
** (it was open and reachable when closed), either blacken it and fire a
** barrier on its value (mark phase) or repaint it white (sweep phase).
*/
void luaC_linkupval (lua_State *L, UpVal *uv) {
  global_State *g = G(L);
  GCObject *o = obj2gco(uv);
#if LUA_REFCOUNT
  o->gch.next = g->rootgc;
  o->gch.prev = NULL;
  if (g->rootgc)
    g->rootgc->gch.prev = o;
  g->rootgc = o;
#else
  o->gch.next = g->rootgc;  /* link upvalue into `rootgc' list */
  g->rootgc = o;
#endif
  if (isgray(o)) {
    if (g->gcstate == GCSpropagate) {
      gray2black(o);  /* closed upvalues need barrier */
      luaC_barrier(L, uv, uv->v);
    }
    else {  /* sweep phase: sweep it (turning it into white) */
      makewhite(g, o);
      lua_assert(g->gcstate != GCSfinalize && g->gcstate != GCSpause);
    }
  }
}
#if LUA_REFCOUNT
/*
** Refcount destructor for strings: unlink the string from its chain in
** the global string table (a string with no `prev' must be the head of
** its hash bucket) and release its memory.
*/
static void luarc_freestring (lua_State *L, TString *s) {
  GCObject *obj = cast(GCObject *, s);
  stringtable *st = &G(L)->strt;
  if (obj->gch.prev != NULL)
    obj->gch.prev->gch.next = obj->gch.next;
  else {
    /* head of its bucket: the bucket slot must point at us */
    lua_assert(st->hash[lmod(s->tsv.hash, st->size)] == obj);
    st->hash[lmod(s->tsv.hash, st->size)] = obj->gch.next;
  }
  if (obj->gch.next) obj->gch.next->gch.prev = obj->gch.prev;
  st->nuse --;  /* one fewer entry in the string table */
  luaM_freemem(L, obj, sizestring(gco2ts(obj)));
}
/*
** Refcount destructor for full userdata: unlink it from `rootgc', run
** its __gc metamethod (via a temporary tmudata entry and GCTM), fix up
** the incremental sweep cursor if it pointed at this object, drop the
** references it holds, and free its memory.
*/
static void luarc_freeudata (lua_State *L, Udata *ud) {
  GCObject *obj = cast(GCObject *, ud);
  if (isfinalized(gco2u(obj))) return;  /* do not free finalized udata */
  /* FINALIZEDBIT is not set, udata must be in rootgc list */
  markfinalized(gco2u(obj));
  /* Unlink userdata from rootgc */
  if (obj->gch.next) obj->gch.next->gch.prev = obj->gch.prev;
  lua_assert(obj->gch.prev);
  obj->gch.prev->gch.next = obj->gch.next;
  /* Execute GC tagmethod */
  if (fasttm(L, ud->uv.metatable, TM_GC) != NULL) {
    /* put userdata to the head(not tail) of g->tmudata */
    if (G(L)->tmudata == NULL)  /* list is empty? */
      G(L)->tmudata = obj->gch.next = obj;  /* singleton circular list */
    else {
      obj->gch.next = G(L)->tmudata->gch.next;
      G(L)->tmudata->gch.next = obj;
      /* do not change g->tmudata to keep obj as the list head */
    }
    GCTM(L);  /* execute the gc tagmethod */
    /* obj is put back to rootgc by GCTM, unlink it */
    if (obj->gch.next) obj->gch.next->gch.prev = obj->gch.prev;
    lua_assert(obj->gch.prev == obj2gco(G(L)->mainthread));
    obj->gch.prev->gch.next = obj->gch.next;
  }
  /* Adjust g->sweepgc in sweep state */
  if (G(L)->gcstate == GCSsweep && G(L)->sweepgc == &ud->uv.next) {
    if (ud->uv.prev) G(L)->sweepgc = &ud->uv.prev->gch.next;
    else G(L)->sweepgc = &G(L)->rootgc;
  }
  /* Reduce refcount of metatable and environment */
  if (ud->uv.metatable)
    luarc_subtableref(L, ud->uv.metatable);
  luarc_subtableref(L, ud->uv.env);
  luaM_freemem(L, obj, sizeudata(&ud->uv));
}
/*
** Refcount destructor for function prototypes: unlink from `rootgc'
** (and from the gray list during the mark phase), fix the incremental
** sweep cursor, drop references to constants, upvalue names, nested
** prototypes and local-variable names, then free the prototype.
*/
static void luarc_freeproto (lua_State *L, Proto *f) {
  int i;
  GCObject *obj = cast(GCObject *, f);
  /* Proto is in rootgc list, possibly in gray list */
  /* Unlink from rootgc list */
  if (f->next != NULL) f->next->gch.prev = f->prev;
  if (f->prev != NULL) f->prev->gch.next = f->next;
  else {
    lua_assert(G(L)->rootgc == obj);
    G(L)->rootgc = f->next;
  }
  /* Adjust g->gray list in mark state */
  if (G(L)->gcstate == GCSpropagate && isgray(obj)) {
    if (f->gcnext) f->gcnext->tgch.gcprev = f->gcprev;
    if (f->gcprev) f->gcprev->tgch.gcnext = f->gcnext;
    else {
      lua_assert(G(L)->gray == obj);
      G(L)->gray = f->gcnext;
    }
  }
  /* Adjust g->sweepgc in GC sweep state */
  if (G(L)->gcstate == GCSsweep && G(L)->sweepgc == &f->next) {
    if (f->prev != NULL) G(L)->sweepgc = &f->prev->gch.next;
    else G(L)->sweepgc = &G(L)->rootgc;
  }
  /* Reduce refcount of members */
  if (f->source) luarc_substringref(L, f->source);
  for (i=0; i<f->sizek; i++)  /* constants */
    luarc_subref(L, &f->k[i]);
  for (i=0; i<f->sizeupvalues; i++) {  /* upvalue names */
    if (f->upvalues[i])
      luarc_substringref(L, f->upvalues[i]);
  }
  for (i=0; i<f->sizep; i++) {  /* nested prototypes */
    if (f->p[i])
      luarc_subprotoref(L, f->p[i]);
  }
  for (i=0; i<f->sizelocvars; i++) {  /* local variable names */
    if (f->locvars[i].varname)
      luarc_substringref(L, f->locvars[i].varname);
  }
  luaF_freeproto(L, f);
}
/*
** Refcount destructor for closures: unlink from `rootgc' (and from the
** gray list during the mark phase), fix the incremental sweep cursor,
** drop references to the environment and the upvalues (C closures) or
** the prototype and UpVal objects (Lua closures), then free it.
*/
static void luarc_freeclosure (lua_State *L, Closure *cl) {
  int i;
  GCObject *obj = cast(GCObject *, cl);
  /* Closure is in rootgc list, possibly in gray list */
  /* Unlink from rootgc list */
  if (cl->c.next != NULL) cl->c.next->gch.prev = cl->c.prev;
  if (cl->c.prev != NULL) cl->c.prev->gch.next = cl->c.next;
  else {
    lua_assert(G(L)->rootgc == obj);
    G(L)->rootgc = cl->c.next;
  }
  /* Unlink from g->gray list in mark state */
  if (G(L)->gcstate == GCSpropagate && isgray(obj)) {
    if (cl->c.gcnext) cl->c.gcnext->tgch.gcprev = cl->c.gcprev;
    if (cl->c.gcprev) cl->c.gcprev->tgch.gcnext = cl->c.gcnext;
    else {
      lua_assert(G(L)->gray == obj);
      G(L)->gray = cl->c.gcnext;
    }
  }
  /* Adjust g->sweepgc in GC sweep state */
  if (G(L)->gcstate == GCSsweep && G(L)->sweepgc == &cl->c.next) {
    if (cl->c.prev != NULL) G(L)->sweepgc = &cl->c.prev->gch.next;
    else G(L)->sweepgc = &G(L)->rootgc;
  }
  /* Reduce refcount of members */
  luarc_subtableref(L, cl->c.env);
  if (cl->c.isC) {  /* C closure: plain TValue upvalues */
    for (i=0; i<cl->c.nupvalues; i++)
      luarc_subref(L, &cl->c.upvalue[i]);
  }
  else {  /* Lua closure: prototype plus UpVal objects */
    luarc_subprotoref(L, cl->l.p);
    for (i=0; i<cl->l.nupvalues; i++)
      luarc_subupvalref(L, cl->l.upvals[i]);
  }
  luaF_freeclosure(L, cl);
}
/*
** Refcount destructor for upvalues.  Only closed upvalues (whose value
** pointer aims at their own storage) are freed here; open ones live in
** L->openupval and may still be revived, so they are left alone.
*/
static void luarc_freeupval (lua_State *L, UpVal *uv) {
  /* Open upval is in L->openupval list, closed upval is in g->rootgc list
  ** open upval may be used immediately, even when its refcount is 0.
  ** So only closed upval is freed.
  */
  if (uv->v != &uv->u.value) return;  /* Do not free open upval */
  /* Closed upval is in the rootgc list only */
  /* Unlink uv from rootgc */
  if (uv->next) uv->next->gch.prev = uv->prev;
  if (uv->prev) uv->prev->gch.next = uv->next;
  else {
    lua_assert(G(L)->rootgc == obj2gco(uv));
    G(L)->rootgc = uv->next;
  }
  /* Adjust g->sweepgc in GC sweep state */
  if (G(L)->gcstate == GCSsweep && G(L)->sweepgc == &uv->next) {
    if (uv->prev != NULL) G(L)->sweepgc = &uv->prev->gch.next;
    else G(L)->sweepgc = &G(L)->rootgc;
  }
  /* Reduce refcount of member */
  luarc_subref(L, uv->v);
  luaF_freeupval(L, uv);
}
/*
** Refcount destructor for tables: unlink from `rootgc' and from
** whichever of the gray/grayagain/weak lists holds it during the mark
** phase, fix the incremental sweep cursor, drop references held by the
** metatable and by every key/value (weak tables included), then free.
*/
static void luarc_freetable (lua_State *L, Table *t) {
  int i;
  GCObject *obj = cast(GCObject *, t);
  /* Table is in g->rootgc list, and possibly in
  ** g->gray/g->grayagain/g->weak list
  */
  /* Unlink t from rootgc list */
  if (t->next) t->next->gch.prev = t->prev;
  if (t->prev) t->prev->gch.next = t->next;
  else {
    lua_assert(G(L)->rootgc == obj);
    G(L)->rootgc = t->next;
  }
  /* Unlink t from gray/grayagain/weak list in mark state */
  if (G(L)->gcstate == GCSpropagate && isgray(obj)) {
    if (t->gcnext) t->gcnext->tgch.gcprev = t->gcprev;
    if (t->gcprev) t->gcprev->tgch.gcnext = t->gcnext;
    else {
      /* head of one of the three gray lists: find which */
      if (G(L)->gray == obj) G(L)->gray = t->gcnext;
      else if (G(L)->grayagain == obj) G(L)->grayagain = t->gcnext;
      else if (G(L)->weak == obj) G(L)->weak = t->gcnext;
      else lua_assert(0);
    }
  }
  /* Adjust g->sweepgc in GC sweep state */
  if (G(L)->gcstate == GCSsweep && G(L)->sweepgc == &t->next) {
    if (t->prev) G(L)->sweepgc = &t->prev->gch.next;
    else G(L)->sweepgc = &G(L)->rootgc;
  }
  /* Reduce refcount of keys/values, even this is a weak table */
  if (t->metatable) luarc_subtableref(L, t->metatable);
  i = t->sizearray;
  while (i--) luarc_subref(L, &t->array[i]);
  i = sizenode(t);
  while (i--) {
    Node *n = gnode(t, i);
    lua_assert(ttype(gkey(n)) != LUA_TDEADKEY || ttisnil(gval(n)));
    if (ttisnil(gval(n))) removeentry(L, n);  /* empty entry: just detach key */
    else {
      lua_assert(!ttisnil(gkey(n)));
      luarc_subref(L, key2tval(n));
      luarc_subref(L, gval(n));
    }
  }
  luaH_free(L, t);
}
/*
** Refcount destructor for threads (coroutines): unlink from `rootgc'
** and from the gray/grayagain list during the mark phase, fix the
** incremental sweep cursor, drop references held by the thread's
** globals table and its whole stack, then free the thread.
*/
static void luarc_freethread (lua_State *L, lua_State *th) {
  StkId o, lim;
  CallInfo *ci;
  GCObject *obj = cast(GCObject *, th);
  /* Thread is in rootgc list, and possibly gray/grayagain list */
  /* Unlink th from rootgc list */
  if (th->next) th->next->gch.prev = th->prev;
  if (th->prev) th->prev->gch.next = th->next;
  else {
    lua_assert(G(L)->rootgc == obj);
    G(L)->rootgc = th->next;
  }
  /* Unlink th from gray/grayagain list in mark state */
  if (G(L)->gcstate == GCSpropagate && isgray(obj)) {
    if (th->gcnext) th->gcnext->tgch.gcprev = th->gcprev;
    if (th->gcprev) th->gcprev->tgch.gcnext = th->gcnext;
    else {
      if (G(L)->gray == obj) G(L)->gray = th->gcnext;
      else if (G(L)->grayagain == obj) G(L)->grayagain = th->gcnext;
      else lua_assert(0);
    }
  }
  /* Adjust g->sweepgc in GC sweep state */
  if (G(L)->gcstate == GCSsweep && G(L)->sweepgc == &th->next) {
    if (th->prev) G(L)->sweepgc = &th->prev->gch.next;
    else G(L)->sweepgc = &G(L)->rootgc;
  }
  /* Reduce refcount of members */
  luarc_subvalref(L, gt(th));  /* thread's globals table */
  lim = th->top;
  /* extend the scanned region to the highest `top' of any call frame */
  for (ci = th->base_ci; ci <= th->ci; ci++) {
    lua_assert(ci->top <= th->stack_last);
    if (lim < ci->top) lim = ci->top;
  }
  for (o = th->stack; o <= lim; o++)  /* release every stack slot */
    luarc_subref(L, o);
  lua_assert(th != L && th != G(L)->mainthread);
  luaE_freethread(L, th);
}
/*
** Refcount destructor dispatcher: release an object whose reference
** count dropped to zero, delegating to the type-specific luarc_free*
** routine (each of which also unlinks the object from the GC lists).
*/
void luaC_freeobj (lua_State *L, GCObject *obj) {
  switch (obj->gch.tt) {
    case LUA_TSTRING:   luarc_freestring(L, rawgco2ts(obj)); break;
    case LUA_TUSERDATA: luarc_freeudata(L, rawgco2u(obj)); break;
    case LUA_TPROTO:    luarc_freeproto(L, gco2p(obj)); break;
    case LUA_TFUNCTION: luarc_freeclosure(L, gco2cl(obj)); break;
    case LUA_TUPVAL:    luarc_freeupval(L, gco2uv(obj)); break;
    case LUA_TTABLE:    luarc_freetable(L, gco2h(obj)); break;
    case LUA_TTHREAD:   luarc_freethread(L, gco2th(obj)); break;
    default: lua_assert(0);
  }
}
#endif /* LUA_REFCOUNT */
| 27.895072 | 87 | 0.590191 | [
"object"
] |
1db5faa1a7105dc70eb77f216824b3d4ea99caf7 | 1,253 | h | C | src/scene.h | jdillenkofer/raytracer | e52655c6a1808d9279613cb1cac66c05abe25138 | [
"MIT"
] | 4 | 2019-06-10T15:31:00.000Z | 2021-03-21T19:29:38.000Z | src/scene.h | jdillenkofer/raytracer | e52655c6a1808d9279613cb1cac66c05abe25138 | [
"MIT"
] | null | null | null | src/scene.h | jdillenkofer/raytracer | e52655c6a1808d9279613cb1cac66c05abe25138 | [
"MIT"
] | 1 | 2019-12-17T16:45:30.000Z | 2019-12-17T16:45:30.000Z | #ifndef RAYTRACER_SCENE_H
#define RAYTRACER_SCENE_H
#include <stdint.h>
#include "pointlight.h"
#include "material.h"
#include "plane.h"
#include "sphere.h"
#include "triangle.h"
#include "camera.h"
#include "object.h"
typedef struct {
Camera* camera;
uint32_t materialCapacity;
uint32_t materialCount;
Material *materials;
uint32_t planeCapacity;
uint32_t planeCount;
Plane *planes;
uint32_t sphereCapacity;
uint32_t sphereCount;
Sphere *spheres;
uint32_t triangleCapacity;
uint32_t triangleCount;
Triangle *triangles;
uint32_t pointLightCapacity;
uint32_t pointLightCount;
PointLight* pointLights;
} Scene;
Scene* scene_create(void);
Scene* scene_init(uint32_t width, uint32_t height);
// adds the material to the scene and returns the materialId
uint32_t scene_addMaterial(Scene* scene, Material material);
void scene_addPlane(Scene* scene, Plane plane);
void scene_addSphere(Scene* scene, Sphere sphere);
void scene_addTriangle(Scene* scene, Triangle triangle);
void scene_addObject(Scene* scene, Object object);
void scene_addPointLight(Scene* scene, PointLight pointLight);
void scene_shrinkToFit(Scene *scene);
void scene_destroy(Scene* scene);
#endif //RAYTRACER_SCENE_H
| 24.568627 | 62 | 0.762171 | [
"object"
] |
1dc384a4e610f932bc8d69c97d198b13bb1a3575 | 968 | h | C | Minuit/Minuit/MnMatrix.h | cdeil/iminuit | 8880106e8ba0df18c529c48cd193ba4411ef58b9 | [
"MIT"
] | null | null | null | Minuit/Minuit/MnMatrix.h | cdeil/iminuit | 8880106e8ba0df18c529c48cd193ba4411ef58b9 | [
"MIT"
] | null | null | null | Minuit/Minuit/MnMatrix.h | cdeil/iminuit | 8880106e8ba0df18c529c48cd193ba4411ef58b9 | [
"MIT"
] | null | null | null | #ifndef MN_MnMatrix_H_
#define MN_MnMatrix_H_
//add MnConfig file to define before everything compiler
// dependent macros
#include "Minuit/MnConfig.h"
// Removing this the following include will cause the library to fail
// to compile with gcc 4.0.0 under Red Hat Enterprise Linux 3. That
// is, FumiliBuiilder.cpp will fail with message about ambigous enum.
// Putting an include <vector> before other includes in that file will
// fix it, but then another file class will fail with the same
// message. I don't understand it, but putting the include <vector>
// in this one spot, fixes the problem and does not require any other
// changes to the source code.
//
// Paul_Kunz@slac.stanford.edu 3 June 2005
//
#include <vector>
#include "Minuit/LASymMatrix.h"
#include "Minuit/LAVector.h"
#include "Minuit/LaInverse.h"
#include "Minuit/LaOuterProduct.h"
typedef LASymMatrix MnAlgebraicSymMatrix;
typedef LAVector MnAlgebraicVector;
#endif //MN_MnMatrix_H_
| 31.225806 | 70 | 0.769628 | [
"vector"
] |
1dd36766d95b2cea3a1140572d017eb94dd56f17 | 3,502 | h | C | src/hss/ogr_hss.h | nls-jajuko/gdal-hss-plugin | d756e3a5a363591831461f642df1e8673a7f5f7d | [
"Apache-2.0"
] | null | null | null | src/hss/ogr_hss.h | nls-jajuko/gdal-hss-plugin | d756e3a5a363591831461f642df1e8673a7f5f7d | [
"Apache-2.0"
] | 5 | 2021-09-17T20:25:50.000Z | 2021-09-17T20:31:12.000Z | src/hss/ogr_hss.h | nls-jajuko/gdal-hss-plugin | d756e3a5a363591831461f642df1e8673a7f5f7d | [
"Apache-2.0"
] | null | null | null |
#ifndef OGR_HSS_H_INCLUDED
#define OGR_HSS_H_INCLUDED
#include "ogrsf_frmts.h"
#include <vector>
#include <string>
class OGRHSSDataSource;
/************************************************************************/
/* OGRHSSLayer */
/************************************************************************/
typedef struct
{
vsi_l_offset offset;
int line;
} OffsetAndLine;
class OGRHSSLayer final: public OGRLayer
{
OGRFeatureDefn* poFeatureDefn;
OGRHSSDataSource* poDS;
int bWriter;
bool eof;
bool failed;
int curLine;
int nNextFID;
VSILFILE* fpHSS;
int nFeatures;
std::vector<OffsetAndLine> offsetAndLineFeaturesTable;
void WriteFeatureAttributes(VSILFILE* fp, OGRFeature *poFeature);
public:
OGRHSSLayer(const char *pszFilename,
const char* layerName,
OGRwkbGeometryType eLayerGeomType,
int bWriter,
OGRHSSDataSource* poDS);
~OGRHSSLayer();
void SetFeatureIndexTable(std::vector<OffsetAndLine>&& offsetAndLineFeaturesTable);
void ResetReading() override;
OGRFeature * GetNextFeature() override;
OGRErr ICreateFeature( OGRFeature *poFeature ) override;
OGRErr CreateField( OGRFieldDefn *poField, int bApproxOK ) override;
OGRFeatureDefn * GetLayerDefn() override { return poFeatureDefn; }
OGRFeature * GetFeature( GIntBig nFID ) override;
int TestCapability( const char * ) override;
};
/************************************************************************/
/* OGRHSSDataSource */
/************************************************************************/
class OGRHSSDataSource final: public OGRDataSource
{
char* pszName;
OGRHSSLayer** papoLayers;
int nLayers;
/* Export related */
VSILFILE *fpOutput; /* Virtual file API */
int coordinatePrecision;
char* pszCoordinateSeparator;
public:
OGRHSSDataSource();
~OGRHSSDataSource();
VSILFILE *GetOutputFP() { return fpOutput; }
int GetCoordinatePrecision() { return coordinatePrecision; }
int Open( const char * pszFilename );
int Create( const char *pszFilename,
char **papszOptions );
const char* GetName() override { return pszName; }
int GetLayerCount() override { return nLayers; }
OGRLayer* GetLayer( int ) override;
OGRLayer * ICreateLayer( const char * pszLayerName,
OGRSpatialReference *poSRS,
OGRwkbGeometryType eType,
char ** papszOptions ) override;
int TestCapability( const char * ) override;
OGRErr exportFeature(OGRFeature *poFeature);
void PrintLine(const char *fmt, ...) CPL_PRINT_FUNC_FORMAT (2, 3);
};
#endif | 32.728972 | 102 | 0.469446 | [
"vector"
] |
1dd6df136c347de26ef0677d0759a31dd1cebd1e | 1,769 | h | C | src/plugins/robots/generic/control_interface/ci_range_and_bearing_sensor.h | hoelzl/argos3 | 05e2b8a0a2a94139a0753ebfac4d4c51cdea8e1c | [
"MIT"
] | null | null | null | src/plugins/robots/generic/control_interface/ci_range_and_bearing_sensor.h | hoelzl/argos3 | 05e2b8a0a2a94139a0753ebfac4d4c51cdea8e1c | [
"MIT"
] | null | null | null | src/plugins/robots/generic/control_interface/ci_range_and_bearing_sensor.h | hoelzl/argos3 | 05e2b8a0a2a94139a0753ebfac4d4c51cdea8e1c | [
"MIT"
] | null | null | null | /**
* @file <argos3/plugins/robots/generic/control_interface/ci_range_and_bearing_sensor.h>
*
* @author Carlo Pinciroli <ilpincy@gmail.com>
*/
#ifndef CI_RANGE_AND_BEARING_SENSOR_H
#define CI_RANGE_AND_BEARING_SENSOR_H
namespace argos {
class CCI_RangeAndBearingSensor;
}
#include <argos3/core/control_interface/ci_sensor.h>
#include <argos3/core/utility/datatypes/datatypes.h>
#include <argos3/core/utility/datatypes/byte_array.h>
#include <argos3/core/utility/math/quaternion.h>
namespace argos {
/****************************************/
/****************************************/
class CCI_RangeAndBearingSensor : public CCI_Sensor {
public:
struct SPacket {
Real Range;
CRadians HorizontalBearing;
/**
* The vertical bearing is defined as the angle between the local
* robot XY plane and the message source position, i.e., the elevation
* in math jargon. This is different from the inclination, which is the
* angle between the azimuth vector (robot local Z axis) and
* the vector to the message source. Elevation = 90 degrees - Inclination.
*/
CRadians VerticalBearing;
CByteArray Data;
SPacket();
};
typedef std::vector<SPacket> TReadings;
public:
virtual ~CCI_RangeAndBearingSensor() {}
inline const TReadings& GetReadings() const {
return m_tReadings;
}
#ifdef ARGOS_WITH_LUA
virtual void CreateLuaState(lua_State* pt_lua_state);
virtual void ReadingsToLuaState(lua_State* pt_lua_state);
#endif
protected:
TReadings m_tReadings;
};
/****************************************/
/****************************************/
}
#endif
| 24.569444 | 88 | 0.616167 | [
"vector"
] |
1ddcba3c9c4b574221e7d6cb2328c6a769d4d2ad | 7,672 | h | C | Library/Sources/Stroika/Frameworks/Led/SpellCheckEngine.h | SophistSolutions/Stroika | f4e5d84767903a054fba0a6b9c7c4bd1aaefd105 | [
"MIT"
] | 28 | 2015-09-22T21:43:32.000Z | 2022-02-28T01:35:01.000Z | Library/Sources/Stroika/Frameworks/Led/SpellCheckEngine.h | SophistSolutions/Stroika | f4e5d84767903a054fba0a6b9c7c4bd1aaefd105 | [
"MIT"
] | 98 | 2015-01-22T03:21:27.000Z | 2022-03-02T01:47:00.000Z | Library/Sources/Stroika/Frameworks/Led/SpellCheckEngine.h | SophistSolutions/Stroika | f4e5d84767903a054fba0a6b9c7c4bd1aaefd105 | [
"MIT"
] | 4 | 2019-02-21T16:45:25.000Z | 2022-02-18T13:40:04.000Z | /*
* Copyright(c) Sophist Solutions, Inc. 1990-2021. All rights reserved
*/
#ifndef _Stroika_Frameworks_Led_SpellCheckEngine_h_
#define _Stroika_Frameworks_Led_SpellCheckEngine_h_ 1
#include "../StroikaPreComp.h"
/*
@MODULE: SpellCheckEngine
@DESCRIPTION: <p></p>
*/
#include "Support.h"
#include "TextBreaks.h"
namespace Stroika::Frameworks::Led {
/*
@CLASS: SpellCheckEngine
@DESCRIPTION: <p>Abstract spellchecker low-level API. This just defines the basic functionality used for looking for misspelled
words, and for finding guesses.
</p>
*/
class SpellCheckEngine {
protected:
SpellCheckEngine () = default;
public:
virtual ~SpellCheckEngine () = default;
public:
/*
@METHOD: SpellCheckEngine::ScanForUndefinedWord
@DESCRIPTION: <p>Look in the given buffer input buffer, starting at 'cursor' for the next undefined
word Set wordStartResult/wordEndResult according to what is found. Note that if
'*cursor' is nullptr, then it is treated as being 'startBuf' instead. Return true if something is
found (setting only in this case wordStartResult/wordEndResult) and return false if no undefined
words are found. In either case - set 'cursor' on output to reflect how far we scanned ahead.
It is intended that this function be used iteratively and that you repeatedly pass IN
the same cursor that was passed out from the last call. On the first call - set the cursor value
to nullptr. Please don't confuse the cursor value with the POINTER TO the cursor value which is
what is passed in.
</p>
<p>
<code>
const int nChars = 100;
Led_tChar textToSearch [nChars];
// fill in textToSearch from someplace
const Led_tChar* cursor = nullptr;
const Led_tChar* wordStartResult = nullptr;
const Led_tChar* wordEndResult = nullptr;
while (ScanForUndefinedWord (textToSearch, textToSearch + nChars, &cursor,
&wordStartResult, &wordEndResult
)
) {
// we found a possible undefined word.
Led_tString word = Led_tString (wordStartResult, wordEndResult);
}
// no more undefined words.
</code>
</p>
<p>Note that in the case where you haven't examined an entire buffer, but just bits at a time
(chunking) - you may find apparant undefined words at the edges of the buffer. It is up to you to
overlap your calls to check for undefined words in such a way that you verify any found undefined
words truely are undefined, by examining a larger surrounding region.
</p>
*/
virtual bool ScanForUndefinedWord (const Led_tChar* startBuf, const Led_tChar* endBuf, const Led_tChar** cursor,
const Led_tChar** wordStartResult, const Led_tChar** wordEndResult) = 0;
public:
nonvirtual bool LookupWord (const Led_tString& checkWord, Led_tString* matchedWordResult = nullptr);
protected:
/*
@METHOD: SpellCheckEngine::LookupWord_
@ACCESS: protected
@DESCRIPTION: <p>pure virtual method of @'SpellCheckEngine' - called internally by @'SpellCheckEngine::LookupWord'.
This method is overriden in subclasses to to the actual word lookup - returning true (and setting matchedWordResult) if the
given word is found (considered legitimate).</p>
<p>This function may do much more than just lookikng the word up in a dictionary. It may perform
all sorts of lingustic manipulations (stripping trailing 's' from nouns etc) to look for a match.</p>
*/
virtual bool LookupWord_ (const Led_tString& checkWord, Led_tString* matchedWordResult) = 0;
public:
/*
@METHOD: SpellCheckEngine::GenerateSuggestions
@DESCRIPTION: <p>This generates a list of words which are close to the given misspelled word. (ORDER
ALPHA OR MOST LIKELY FIRST????)</p>
*/
virtual vector<Led_tString> GenerateSuggestions (const Led_tString& misspelledWord) = 0;
public:
/*
@METHOD: SpellCheckEngine::PeekAtTextBreaksUsed
@DESCRIPTION: <p>This method can return nullptr. If it returns non-nullptr, you can use this @'TextBreaks' object
to break the source text at word boundaries in much the same manner as the spellcheck engine does.</p>
<p>This can be useful if you provide a UI which breaks the input into chunks - but wants
to make sure the chunks correspond at the edges to word boundaries.</p>
                <p>Note - the lifetime of the PeekAt call is short. It is at least until the next
            call to the spellcheck engine (should I guarantee longer?).</p>
*/
virtual TextBreaks* PeekAtTextBreaksUsed () = 0;
public:
class UDInterface;
/*
@METHOD: SpellCheckEngine::GetUDInterface
@DESCRIPTION: <p>This method can return nullptr if there is no UDInterface supported.</p>
*/
virtual UDInterface* GetUDInterface () = 0;
public:
/*
@METHOD: SpellCheckEngine::Invariant
    @DESCRIPTION:   <p>if @'qDebug' is on, this calls the virtual @'SpellCheckEngine::Invariant' () method
                (in subclasses) to check the state. It can be called freely if @'qDebug' is off - it will have no effect. If
                it is on, however, the time used by this function could be significant.</p>
*/
nonvirtual void Invariant () const;
#if qDebug
protected:
virtual void Invariant_ () const;
#endif
};
    /*
    @CLASS:         SpellCheckEngine::UDInterface
    @DESCRIPTION:   <p>Abstract interface through which a @'SpellCheckEngine' exposes optional
                user-dictionary (UD) support. Engines that support user dictionaries return a
                non-nullptr UDInterface from @'SpellCheckEngine::GetUDInterface'.
            </p>
    */
    class SpellCheckEngine::UDInterface {
    protected:
        // Constructible only from subclasses - this type is a pure interface.
        UDInterface () = default;
    public:
        virtual ~UDInterface () = default;
    public:
        /*
        @METHOD:        SpellCheckEngine::UDInterface::AddWordToUserDictionarySupported
        @DESCRIPTION:   <p>This method allows for the UDInterface interface to be supported by a spellcheck engine, but still to dynamically
                turn on/off UD support (say if the UD is loaded or not).</p>
        */
        virtual bool AddWordToUserDictionarySupported () const = 0;
    public:
        /*
        @METHOD:        SpellCheckEngine::UDInterface::AddWordToUserDictionary
        @DESCRIPTION:   <p>Add the given word to the current open (or primary) user dictionary. This is typically called from
                the 'add word to dictionary' button in the spellcheck dialog.</p>
        */
        virtual void AddWordToUserDictionary (const Led_tString& word) = 0;
    };
}
/*
********************************************************************************
***************************** Implementation Details ***************************
********************************************************************************
*/
#include "SpellCheckEngine.inl"
#endif /*_Stroika_Frameworks_Led_SpellCheckEngine_h_*/
| 45.129412 | 143 | 0.59098 | [
"object",
"vector"
] |
1ddffdb61e4e06f1e61b673504c0b858cec1477b | 2,066 | h | C | es-core/src/audio/alsa/AlsaController.h | AdoPi/custom-es-fork | 49d23b57173612531fdf0f1c996592fb161df779 | [
"MIT"
] | null | null | null | es-core/src/audio/alsa/AlsaController.h | AdoPi/custom-es-fork | 49d23b57173612531fdf0f1c996592fb161df779 | [
"MIT"
] | null | null | null | es-core/src/audio/alsa/AlsaController.h | AdoPi/custom-es-fork | 49d23b57173612531fdf0f1c996592fb161df779 | [
"MIT"
] | null | null | null | //
// Created by bkg2k on 13/08/2020.
//
#pragma once
#include <utils/cplusplus/StaticLifeCycleControler.h>
#include <audio/IAudioController.h>
#include "AlsaCard.h"
//! ALSA-backed implementation of IAudioController.
//! Enumerates the available sound cards/devices once at construction (Initialize())
//! and exposes them to callers through opaque integer identifiers.
//! Lifetime is managed as a singleton via StaticLifeCycleControler<AlsaController>.
class AlsaController: public IAudioController, public StaticLifeCycleControler<AlsaController>
{
  private:
    //! Playback-capable ALSA cards discovered by Initialize()
    std::vector<AlsaCard> mPlaybacks;
    /*!
     * @brief Initialize ALSA and fetch all cards/devices
     */
    void Initialize();
    /*!
     * @brief Lookup card and device from an opaque identifier
     * @param identifier Opaque identifier
     * @param cardIndex Output card index if found
     * @param deviceIndex Output device index if found
     * @return True if the card & device have been found, false otherwise
     */
    bool LookupCardDevice(int identifier, int& cardIndex, int& deviceIndex);
  public:
    //! Default output name
    static constexpr const char* sDefaultOutput = "Default output";
    /*!
     * @brief Default constructor - scans ALSA for cards/devices immediately
     */
    AlsaController()
      : StaticLifeCycleControler("AlsaController")
    {
      Initialize();
    }
    /*!
     * @brief Destructor
     */
    ~AlsaController() override = default;
    /*
     * IAudioController implementation
     */
    /*!
     * @brief Get playback list
     * @return Map opaque identifier : playback name
     */
    HashMap<int, std::string> GetPlaybackList() const final;
    /*!
     * @brief Set the default card/device
     * @param identifier opaque identifier from GetPlaybackList()
     */
    void SetDefaultPlayback(int identifier) final;
    /*!
     * @brief Set the default card/device
     * @param playbackName playback name from GetPlaybackList()
     * @return playbackName or default value if playbackName is invalid
     */
    std::string SetDefaultPlayback(const std::string& playbackName) final;
    /*!
     * @brief Get volume from the given playback
     * @return Volume percent
     */
    int GetVolume() final;
    /*!
     * @brief Set volume to the given playback
     * @param volume Volume percent
     */
    void SetVolume(int volume) final;
};
| 25.195122 | 94 | 0.657309 | [
"vector"
] |
1deadc4d7e52bf886fd0af204dc4658987147d36 | 617 | h | C | aws-cpp-sdk-apigateway/include/aws/apigateway/model/Op.h | Neusoft-Technology-Solutions/aws-sdk-cpp | 88c041828b0dbee18a297c3cfe98c5ecd0706d0b | [
"Apache-2.0"
] | 1 | 2022-02-12T08:09:30.000Z | 2022-02-12T08:09:30.000Z | aws-cpp-sdk-apigateway/include/aws/apigateway/model/Op.h | Neusoft-Technology-Solutions/aws-sdk-cpp | 88c041828b0dbee18a297c3cfe98c5ecd0706d0b | [
"Apache-2.0"
] | 1 | 2021-10-14T16:57:00.000Z | 2021-10-18T10:47:24.000Z | aws-cpp-sdk-apigateway/include/aws/apigateway/model/Op.h | ravindra-wagh/aws-sdk-cpp | 7d5ff01b3c3b872f31ca98fb4ce868cd01e97696 | [
"Apache-2.0"
] | 1 | 2021-12-30T04:25:33.000Z | 2021-12-30T04:25:33.000Z | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
#include <aws/apigateway/APIGateway_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
namespace Aws
{
namespace APIGateway
{
namespace Model
{
  // Patch operation verbs applied to API Gateway resources. The set matches the
  // JSON-Patch (RFC 6902) operations; NOT_SET marks an uninitialized value.
  enum class Op
  {
    NOT_SET,
    add,
    remove,
    replace,
    move,
    copy,
    test
  };

namespace OpMapper
{
// Parse a wire-format operation name (e.g. "add") into an Op.
// NOTE(review): presumably unknown names map to NOT_SET - confirm in the mapper's .cpp.
AWS_APIGATEWAY_API Op GetOpForName(const Aws::String& name);

// Serialize an Op back to its wire-format name.
AWS_APIGATEWAY_API Aws::String GetNameForOp(Op value);
} // namespace OpMapper
} // namespace Model
} // namespace APIGateway
} // namespace Aws
| 17.138889 | 69 | 0.705024 | [
"model"
] |
1dec2180587618f84020494c6b7ba2f65518aa19 | 2,772 | h | C | src/svm/svm_exec.h | zuevmaxim/hogwildpp | 40a035e5342df58b338c16bdcd94b9f20e38174e | [
"Apache-2.0"
] | 36 | 2016-10-10T23:56:25.000Z | 2021-04-20T16:24:05.000Z | src/svm/svm_exec.h | zuevmaxim/hogwildpp | 40a035e5342df58b338c16bdcd94b9f20e38174e | [
"Apache-2.0"
] | null | null | null | src/svm/svm_exec.h | zuevmaxim/hogwildpp | 40a035e5342df58b338c16bdcd94b9f20e38174e | [
"Apache-2.0"
] | 8 | 2017-01-12T21:15:51.000Z | 2022-02-14T07:04:47.000Z | // Copyright 2012 Victor Bittorf, Chris Re
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Hogwild!, part of the Hazy Project
// Author : Victor Bittorf (bittorf [at] cs.wisc.edu)
// Original Hogwild! Author: Chris Re (chrisre [at] cs.wisc.edu)
#ifndef HAZY_HOGWILD_INSTANCES_SVM_SVM_EXEC_H
#define HAZY_HOGWILD_INSTANCES_SVM_SVM_EXEC_H
#include <cmath>
#include "hazy/hogwild/hogwild_task.h"
#include "svmmodel.h"
namespace hazy {
namespace hogwild {
namespace svm {
//! Changes the model using the given example
void inline ModelUpdate(const SVMExample &examp, const SVMParams ¶ms,
SVMModel *model, size_t &updates, size_t &count);
//! Returns the loss for the given example and model
fp_type inline ComputeLoss(const SVMExample &e, const SVMModel& model);
//! Container for methods to train and test an SVM
//! Container for static methods to train and test an SVM model in parallel.
//! Each method partitions the task's examples among `total` worker threads
//! using the caller's thread id `tid`.
class SVMExec {
  public:
    /// Performs updates to the model
    /*! Updates by scanning over examples, uses the thread id and total
     * number of threads to determine which chunk of examples to work on.
     * \param task container of model, params, and examples
     * \param tid the thread ID; 0 <= tid < total
     * \param total the total number of threads working on updating
     */
    static double UpdateModel(SVMTask &task, unsigned tid, unsigned total);
    /// Compute error of the task's model and the task's examples
    /*! Computes the error of each example given the model. Uses the
     * number of threads to determine which chunk of examples to work on.
     * TODO XXX Needs to aggregate the RMSE
     * \param task container of model, params, and examples
     * \param tid the thread ID; 0 <= tid < total
     * \param total the total number of threads working on updating
     */
    static double TestModel(SVMTask &task, unsigned tid, unsigned total);
    //! Invoked after each training epoch, causes the stepsize to decay
    static void PostUpdate(SVMModel &model, SVMParams &params);
    //! Post-epoch hook; intentionally a no-op for SVM (stepsize decay is
    //! already handled by PostUpdate).
    static void PostEpoch(SVMModel &model, SVMParams &params) {
    }
    //! Compute the model objective over the task's examples; tid/total
    //! partition the work as in UpdateModel (implementation in svm_exec.hxx).
    static double ModelObj(SVMTask &task, unsigned tid, unsigned total);
    //! Compute classification accuracy over the task's examples; tid/total
    //! partition the work as in UpdateModel (implementation in svm_exec.hxx).
    static double ModelAccuracy(SVMTask &task, unsigned tid, unsigned total);
};
} // namespace svm
} // namespace hogwild
} // namespace hazy
#include "svm_exec.hxx"
#endif
| 36.473684 | 77 | 0.731241 | [
"model"
] |
d9c827516b4b674f12f9ea37df403dac595b44c2 | 945 | h | C | Platform/StringHelper.h | wahidtanner/Platform | 32974fd0da88ba31b6ee9c99ee82f9ea0ccce97f | [
"MIT"
] | null | null | null | Platform/StringHelper.h | wahidtanner/Platform | 32974fd0da88ba31b6ee9c99ee82f9ea0ccce97f | [
"MIT"
] | null | null | null | Platform/StringHelper.h | wahidtanner/Platform | 32974fd0da88ba31b6ee9c99ee82f9ea0ccce97f | [
"MIT"
] | null | null | null | //
// StringHelper.h
// Platform
//
// Created by Wahid Tanner on 6/14/13.
//
#ifndef Platform_StringHelper_h
#define Platform_StringHelper_h
#include <locale>
#include <string>
namespace MuddledManaged
{
namespace Platform
{
class StringHelper
{
public:
static std::string narrow (const std::wstring & wideString)
{
std::locale loc;
std::vector<char> narrowCharBuf(wideString.size());
std::use_facet<std::ctype<wchar_t>>(loc).narrow(
wideString.data(),
wideString.data() + wideString.size(),
'?',
narrowCharBuf.data());
std::string narrowString = narrowCharBuf.data();
return narrowString;
}
};
} // namespace Platform
} // namespace MuddledManaged
#endif // Platform_StringHelper_h
| 23.625 | 71 | 0.541799 | [
"vector"
] |
d9c8aaef2954894971f47e65b8b96ffc36341805 | 36,598 | h | C | indra/newview/pipeline.h | bloomsirenix/Firestorm-manikineko | 67e1bb03b2d05ab16ab98097870094a8cc9de2e7 | [
"Unlicense"
] | 1 | 2022-03-26T15:03:34.000Z | 2022-03-26T15:03:34.000Z | indra/newview/pipeline.h | bloomsirenix/Firestorm-manikineko | 67e1bb03b2d05ab16ab98097870094a8cc9de2e7 | [
"Unlicense"
] | null | null | null | indra/newview/pipeline.h | bloomsirenix/Firestorm-manikineko | 67e1bb03b2d05ab16ab98097870094a8cc9de2e7 | [
"Unlicense"
] | null | null | null | /**
* @file pipeline.h
* @brief Rendering pipeline definitions
*
* $LicenseInfo:firstyear=2001&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2010, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#ifndef LL_PIPELINE_H
#define LL_PIPELINE_H
#include "llcamera.h"
#include "llerror.h"
#include "lldrawpool.h"
#include "llspatialpartition.h"
#include "m4math.h"
#include "llpointer.h"
#include "lldrawpoolalpha.h"
#include "lldrawpoolmaterials.h"
#include "llgl.h"
#include "lldrawable.h"
#include "llrendertarget.h"
#include <stack>
class LLViewerTexture;
class LLFace;
class LLViewerObject;
class LLTextureEntry;
class LLCullResult;
class LLVOAvatar;
class LLVOPartGroup;
class LLGLSLShader;
class LLDrawPoolAlpha;
// Selects how avatar mesh skinning is performed: on the CPU (software) or on
// the GPU via a vertex program/shader.
typedef enum e_avatar_skinning_method
{
	SKIN_METHOD_SOFTWARE,
	SKIN_METHOD_VERTEX_PROGRAM
} EAvatarSkinningMethod;
bool compute_min_max(LLMatrix4& box, LLVector2& min, LLVector2& max); // Shouldn't be defined here!
bool LLRayAABB(const LLVector3 ¢er, const LLVector3 &size, const LLVector3& origin, const LLVector3& dir, LLVector3 &coord, F32 epsilon = 0);
bool setup_hud_matrices(); // use whole screen to render hud
bool setup_hud_matrices(const LLRect& screen_region); // specify portion of screen (in pixels) to render hud attachments from (for picking)
extern LLTrace::BlockTimerStatHandle FTM_RENDER_GEOMETRY;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_GRASS;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_INVISIBLE;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_OCCLUSION;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_SHINY;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_SIMPLE;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_TERRAIN;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_TREES;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_UI;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_WATER;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_WL_SKY;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_ALPHA;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_CHARACTERS;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_BUMP;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_MATERIALS;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_FULLBRIGHT;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_GLOW;
extern LLTrace::BlockTimerStatHandle FTM_STATESORT;
extern LLTrace::BlockTimerStatHandle FTM_PIPELINE;
extern LLTrace::BlockTimerStatHandle FTM_CLIENT_COPY;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_UI_HUD;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_UI_3D;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_UI_2D;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_UI_DEBUG_TEXT;
extern LLTrace::BlockTimerStatHandle FTM_RENDER_UI_SCENE_MON;
class LLPipeline
{
public:
LLPipeline();
~LLPipeline();
void destroyGL();
void restoreGL();
void resetVertexBuffers();
void doResetVertexBuffers(bool forced = false);
void requestResizeScreenTexture(); // set flag only, no work, safer for callbacks...
void requestResizeShadowTexture(); // set flag only, no work, safer for callbacks...
void resizeScreenTexture();
void resizeShadowTexture();
void releaseGLBuffers();
void releaseLUTBuffers();
void releaseScreenBuffers();
void releaseShadowBuffers();
void createGLBuffers();
void createLUTBuffers();
//allocate the largest screen buffer possible up to resX, resY
//returns true if full size buffer allocated, false if some other size is allocated
bool allocateScreenBuffer(U32 resX, U32 resY);
typedef enum {
FBO_SUCCESS_FULLRES = 0,
FBO_SUCCESS_LOWRES,
FBO_FAILURE
} eFBOStatus;
private:
//implementation of above, wrapped for easy error handling
eFBOStatus doAllocateScreenBuffer(U32 resX, U32 resY);
public:
//attempt to allocate screen buffers at resX, resY
//returns true if allocation successful, false otherwise
bool allocateScreenBuffer(U32 resX, U32 resY, U32 samples);
bool allocateShadowBuffer(U32 resX, U32 resY);
void allocatePhysicsBuffer();
void resetVertexBuffers(LLDrawable* drawable);
void generateImpostor(LLVOAvatar* avatar);
void bindScreenToTexture();
void renderFinalize();
void init();
void cleanup();
bool isInit() { return mInitialized; };
/// @brief Get a draw pool from pool type (POOL_SIMPLE, POOL_MEDIA) and texture.
/// @return Draw pool, or NULL if not found.
LLDrawPool *findPool(const U32 pool_type, LLViewerTexture *tex0 = NULL);
/// @brief Get a draw pool for faces of the appropriate type and texture. Create if necessary.
/// @return Always returns a draw pool.
LLDrawPool *getPool(const U32 pool_type, LLViewerTexture *tex0 = NULL);
/// @brief Figures out draw pool type from texture entry. Creates pool if necessary.
static LLDrawPool* getPoolFromTE(const LLTextureEntry* te, LLViewerTexture* te_image);
static U32 getPoolTypeFromTE(const LLTextureEntry* te, LLViewerTexture* imagep);
void addPool(LLDrawPool *poolp); // Only to be used by LLDrawPool classes for splitting pools!
void removePool( LLDrawPool* poolp );
void allocDrawable(LLViewerObject *obj);
void unlinkDrawable(LLDrawable*);
static void removeMutedAVsLights(LLVOAvatar*);
// Object related methods
void markVisible(LLDrawable *drawablep, LLCamera& camera);
void markOccluder(LLSpatialGroup* group);
//downsample source to dest, taking the maximum depth value per pixel in source and writing to dest
// if source's depth buffer cannot be bound for reading, a scratch space depth buffer must be provided
void downsampleDepthBuffer(LLRenderTarget& source, LLRenderTarget& dest, LLRenderTarget* scratch_space = NULL);
void doOcclusion(LLCamera& camera, LLRenderTarget& source, LLRenderTarget& dest, LLRenderTarget* scratch_space = NULL);
void doOcclusion(LLCamera& camera);
void markNotCulled(LLSpatialGroup* group, LLCamera &camera);
void markMoved(LLDrawable *drawablep, bool damped_motion = false);
void markShift(LLDrawable *drawablep);
void markTextured(LLDrawable *drawablep);
void markGLRebuild(LLGLUpdate* glu);
void markRebuild(LLSpatialGroup* group, bool priority = false);
void markRebuild(LLDrawable *drawablep, LLDrawable::EDrawableFlags flag = LLDrawable::REBUILD_ALL, bool priority = false);
void markPartitionMove(LLDrawable* drawablep);
void markMeshDirty(LLSpatialGroup* group);
//get the object between start and end that's closest to start.
LLViewerObject* lineSegmentIntersectInWorld(const LLVector4a& start, const LLVector4a& end,
bool pick_transparent,
bool pick_rigged,
S32* face_hit, // return the face hit
LLVector4a* intersection = NULL, // return the intersection point
LLVector2* tex_coord = NULL, // return the texture coordinates of the intersection point
LLVector4a* normal = NULL, // return the surface normal at the intersection point
LLVector4a* tangent = NULL // return the surface tangent at the intersection point
);
//get the closest particle to start between start and end, returns the LLVOPartGroup and particle index
LLVOPartGroup* lineSegmentIntersectParticle(const LLVector4a& start, const LLVector4a& end, LLVector4a* intersection,
S32* face_hit);
LLViewerObject* lineSegmentIntersectInHUD(const LLVector4a& start, const LLVector4a& end,
bool pick_transparent,
S32* face_hit, // return the face hit
LLVector4a* intersection = NULL, // return the intersection point
LLVector2* tex_coord = NULL, // return the texture coordinates of the intersection point
LLVector4a* normal = NULL, // return the surface normal at the intersection point
LLVector4a* tangent = NULL // return the surface tangent at the intersection point
);
// Something about these textures has changed. Dirty them.
void dirtyPoolObjectTextures(const std::set<LLViewerFetchedTexture*>& textures);
void resetDrawOrders();
U32 addObject(LLViewerObject *obj);
void enableShadows(const bool enable_shadows);
void releaseShadowTargets();
void releaseShadowTarget(U32 index);
// void setLocalLighting(const bool local_lighting);
// bool isLocalLightingEnabled() const;
S32 setLightingDetail(S32 level);
S32 getLightingDetail() const { return mLightingDetail; }
S32 getMaxLightingDetail() const;
void setUseVertexShaders(bool use_shaders);
bool getUseVertexShaders() const { return mVertexShadersEnabled; }
bool canUseVertexShaders();
bool canUseWindLightShaders() const;
bool canUseWindLightShadersOnObjects() const;
bool canUseAntiAliasing() const;
// phases
void resetFrameStats();
void updateMoveDampedAsync(LLDrawable* drawablep);
void updateMoveNormalAsync(LLDrawable* drawablep);
void updateMovedList(LLDrawable::drawable_vector_t& move_list);
void updateMove();
bool visibleObjectsInFrustum(LLCamera& camera);
bool getVisibleExtents(LLCamera& camera, LLVector3 &min, LLVector3& max);
bool getVisiblePointCloud(LLCamera& camera, LLVector3 &min, LLVector3& max, std::vector<LLVector3>& fp, LLVector3 light_dir = LLVector3(0,0,0));
void updateCull(LLCamera& camera, LLCullResult& result, S32 water_clip = 0, LLPlane* plane = NULL, bool hud_attachments = false); //if water_clip is 0, ignore water plane, 1, cull to above plane, -1, cull to below plane
void createObjects(F32 max_dtime);
void createObject(LLViewerObject* vobj);
void processPartitionQ();
void updateGeom(F32 max_dtime);
void updateGL();
void rebuildPriorityGroups();
void rebuildGroups();
void clearRebuildGroups();
void clearRebuildDrawables();
//calculate pixel area of given box from vantage point of given camera
static F32 calcPixelArea(LLVector3 center, LLVector3 size, LLCamera& camera);
static F32 calcPixelArea(const LLVector4a& center, const LLVector4a& size, LLCamera &camera);
void stateSort(LLCamera& camera, LLCullResult& result);
void stateSort(LLSpatialGroup* group, LLCamera& camera);
void stateSort(LLSpatialBridge* bridge, LLCamera& camera, BOOL fov_changed = FALSE);
void stateSort(LLDrawable* drawablep, LLCamera& camera);
void postSort(LLCamera& camera);
void forAllVisibleDrawables(void (*func)(LLDrawable*));
void renderObjects(U32 type, U32 mask, bool texture = true, bool batch_texture = false);
void renderMaskedObjects(U32 type, U32 mask, bool texture = true, bool batch_texture = false);
void renderFullbrightMaskedObjects(U32 type, U32 mask, bool texture = true, bool batch_texture = false);
void renderGroups(LLRenderPass* pass, U32 type, U32 mask, bool texture);
void grabReferences(LLCullResult& result);
void clearReferences();
//check references will assert that there are no references in sCullResult to the provided data
void checkReferences(LLFace* face);
void checkReferences(LLDrawable* drawable);
void checkReferences(LLDrawInfo* draw_info);
void checkReferences(LLSpatialGroup* group);
void renderGeom(LLCamera& camera, bool forceVBOUpdate = false);
void renderGeomDeferred(LLCamera& camera);
void renderGeomPostDeferred(LLCamera& camera, bool do_occlusion=true);
void renderGeomShadow(LLCamera& camera);
void bindDeferredShader(LLGLSLShader& shader, LLRenderTarget* light_target = nullptr);
void setupSpotLight(LLGLSLShader& shader, LLDrawable* drawablep);
void unbindDeferredShader(LLGLSLShader& shader);
void renderDeferredLighting(LLRenderTarget* light_target);
void postDeferredGammaCorrect(LLRenderTarget* screen_target);
void generateWaterReflection(LLCamera& camera);
void generateSunShadow(LLCamera& camera);
LLRenderTarget* getShadowTarget(U32 i);
void generateHighlight(LLCamera& camera);
void renderHighlight(const LLViewerObject* obj, F32 fade);
void setHighlightObject(LLDrawable* obj) { mHighlightObject = obj; }
void renderShadow(glh::matrix4f& view, glh::matrix4f& proj, LLCamera& camera, LLCullResult& result, bool use_shader, bool use_occlusion, U32 target_width);
void renderHighlights();
void renderDebug();
void renderPhysicsDisplay();
void rebuildPools(); // Rebuild pools
void findReferences(LLDrawable *drawablep); // Find the lists which have references to this object
bool verify(); // Verify that all data in the pipeline is "correct"
S32 getLightCount() const { return mLights.size(); }
void calcNearbyLights(LLCamera& camera);
void setupHWLights(LLDrawPool* pool);
void setupAvatarLights(bool for_edit = false);
void enableLights(U32 mask);
void enableLightsStatic();
void enableLightsDynamic();
void enableLightsAvatar();
void enableLightsPreview();
void enableLightsAvatarEdit(const LLColor4& color);
void enableLightsFullbright();
void disableLights();
void shiftObjects(const LLVector3 &offset);
void setLight(LLDrawable *drawablep, bool is_light);
bool hasRenderBatches(const U32 type) const;
LLCullResult::drawinfo_iterator beginRenderMap(U32 type);
LLCullResult::drawinfo_iterator endRenderMap(U32 type);
LLCullResult::sg_iterator beginAlphaGroups();
LLCullResult::sg_iterator endAlphaGroups();
void addTrianglesDrawn(S32 index_count, U32 render_type = LLRender::TRIANGLES);
bool hasRenderDebugFeatureMask(const U32 mask) const { return bool(mRenderDebugFeatureMask & mask); }
bool hasRenderDebugMask(const U64 mask) const { return bool(mRenderDebugMask & mask); }
void setAllRenderDebugFeatures() { mRenderDebugFeatureMask = 0xffffffff; }
void clearAllRenderDebugFeatures() { mRenderDebugFeatureMask = 0x0; }
void setAllRenderDebugDisplays() { mRenderDebugMask = 0xffffffffffffffff; }
void clearAllRenderDebugDisplays() { mRenderDebugMask = 0x0; }
bool hasRenderType(const U32 type) const;
bool hasAnyRenderType(const U32 type, ...) const;
void setRenderTypeMask(U32 type, ...);
// This is equivalent to 'setRenderTypeMask'
//void orRenderTypeMask(U32 type, ...);
void andRenderTypeMask(U32 type, ...);
void clearRenderTypeMask(U32 type, ...);
void setAllRenderTypes();
void clearAllRenderTypes();
void pushRenderTypeMask();
void popRenderTypeMask();
void pushRenderDebugFeatureMask();
void popRenderDebugFeatureMask();
static void toggleRenderType(U32 type);
// For UI control of render features
static bool hasRenderTypeControl(U32 data);
static void toggleRenderDebug(U64 data);
static void toggleRenderDebugFeature(U32 data);
static void toggleRenderTypeControl(U32 data);
static bool toggleRenderTypeControlNegated(S32 data);
static bool toggleRenderDebugControl(U64 data);
static bool toggleRenderDebugFeatureControl(U32 data);
static void setRenderDebugFeatureControl(U32 bit, bool value);
static void setRenderParticleBeacons(bool val);
static void toggleRenderParticleBeacons();
static bool getRenderParticleBeacons();
static void setRenderSoundBeacons(bool val);
static void toggleRenderSoundBeacons();
static bool getRenderSoundBeacons();
static void setRenderMOAPBeacons(bool val);
static void toggleRenderMOAPBeacons();
static bool getRenderMOAPBeacons();
static void setRenderPhysicalBeacons(bool val);
static void toggleRenderPhysicalBeacons();
static bool getRenderPhysicalBeacons();
static void setRenderScriptedBeacons(bool val);
static void toggleRenderScriptedBeacons();
static bool getRenderScriptedBeacons();
static void setRenderScriptedTouchBeacons(bool val);
static void toggleRenderScriptedTouchBeacons();
static bool getRenderScriptedTouchBeacons();
static void setRenderBeacons(bool val);
static void toggleRenderBeacons();
static bool getRenderBeacons();
static void setRenderHighlights(bool val);
static void toggleRenderHighlights();
static bool getRenderHighlights();
static void setRenderHighlightTextureChannel(LLRender::eTexIndex channel); // sets which UV setup to display in highlight overlay
static void updateRenderTransparentWater();
static void updateRenderBump();
static void updateRenderDeferred();
static void refreshCachedSettings();
void addDebugBlip(const LLVector3& position, const LLColor4& color);
void hidePermanentObjects( std::vector<U32>& restoreList );
void restorePermanentObjects( const std::vector<U32>& restoreList );
void skipRenderingOfTerrain( bool flag );
void hideObject( const LLUUID& id );
void restoreHiddenObject( const LLUUID& id );
private:
void unloadShaders();
void addToQuickLookup( LLDrawPool* new_poolp );
void removeFromQuickLookup( LLDrawPool* poolp );
bool updateDrawableGeom(LLDrawable* drawable, bool priority);
void assertInitializedDoError();
bool assertInitialized() { const bool is_init = isInit(); if (!is_init) assertInitializedDoError(); return is_init; };
void connectRefreshCachedSettingsSafe(const std::string name);
void hideDrawable( LLDrawable *pDrawable );
void unhideDrawable( LLDrawable *pDrawable );
// <FS:Ansariel> Reset VB during TP
void initDeferredVB();
// <FS:Ansariel> FIRE-16829: Visual Artifacts with ALM enabled on AMD graphics
void initAuxiliaryVB();
void drawAuxiliaryVB(U32 mask = 0);
void drawAuxiliaryVB(const LLVector2& tc1, const LLVector2& tc2, U32 mask = 0);
void drawAuxiliaryVB(const LLVector2& tc1, const LLVector2& tc2, const LLColor4& color);
public:
enum {GPU_CLASS_MAX = 3 };
enum LLRenderTypeMask
{
// Following are pool types (some are also object types)
RENDER_TYPE_SKY = LLDrawPool::POOL_SKY,
RENDER_TYPE_WL_SKY = LLDrawPool::POOL_WL_SKY,
RENDER_TYPE_GROUND = LLDrawPool::POOL_GROUND,
RENDER_TYPE_TERRAIN = LLDrawPool::POOL_TERRAIN,
RENDER_TYPE_SIMPLE = LLDrawPool::POOL_SIMPLE,
RENDER_TYPE_GRASS = LLDrawPool::POOL_GRASS,
RENDER_TYPE_ALPHA_MASK = LLDrawPool::POOL_ALPHA_MASK,
RENDER_TYPE_FULLBRIGHT_ALPHA_MASK = LLDrawPool::POOL_FULLBRIGHT_ALPHA_MASK,
RENDER_TYPE_FULLBRIGHT = LLDrawPool::POOL_FULLBRIGHT,
RENDER_TYPE_BUMP = LLDrawPool::POOL_BUMP,
RENDER_TYPE_MATERIALS = LLDrawPool::POOL_MATERIALS,
RENDER_TYPE_AVATAR = LLDrawPool::POOL_AVATAR,
RENDER_TYPE_CONTROL_AV = LLDrawPool::POOL_CONTROL_AV, // Animesh
RENDER_TYPE_TREE = LLDrawPool::POOL_TREE,
RENDER_TYPE_INVISIBLE = LLDrawPool::POOL_INVISIBLE,
RENDER_TYPE_VOIDWATER = LLDrawPool::POOL_VOIDWATER,
RENDER_TYPE_WATER = LLDrawPool::POOL_WATER,
RENDER_TYPE_ALPHA = LLDrawPool::POOL_ALPHA,
RENDER_TYPE_GLOW = LLDrawPool::POOL_GLOW,
RENDER_TYPE_PASS_SIMPLE = LLRenderPass::PASS_SIMPLE,
RENDER_TYPE_PASS_GRASS = LLRenderPass::PASS_GRASS,
RENDER_TYPE_PASS_FULLBRIGHT = LLRenderPass::PASS_FULLBRIGHT,
RENDER_TYPE_PASS_INVISIBLE = LLRenderPass::PASS_INVISIBLE,
RENDER_TYPE_PASS_INVISI_SHINY = LLRenderPass::PASS_INVISI_SHINY,
RENDER_TYPE_PASS_FULLBRIGHT_SHINY = LLRenderPass::PASS_FULLBRIGHT_SHINY,
RENDER_TYPE_PASS_SHINY = LLRenderPass::PASS_SHINY,
RENDER_TYPE_PASS_BUMP = LLRenderPass::PASS_BUMP,
RENDER_TYPE_PASS_POST_BUMP = LLRenderPass::PASS_POST_BUMP,
RENDER_TYPE_PASS_GLOW = LLRenderPass::PASS_GLOW,
RENDER_TYPE_PASS_ALPHA = LLRenderPass::PASS_ALPHA,
RENDER_TYPE_PASS_ALPHA_MASK = LLRenderPass::PASS_ALPHA_MASK,
RENDER_TYPE_PASS_FULLBRIGHT_ALPHA_MASK = LLRenderPass::PASS_FULLBRIGHT_ALPHA_MASK,
RENDER_TYPE_PASS_MATERIAL = LLRenderPass::PASS_MATERIAL,
RENDER_TYPE_PASS_MATERIAL_ALPHA = LLRenderPass::PASS_MATERIAL_ALPHA,
RENDER_TYPE_PASS_MATERIAL_ALPHA_MASK = LLRenderPass::PASS_MATERIAL_ALPHA_MASK,
RENDER_TYPE_PASS_MATERIAL_ALPHA_EMISSIVE= LLRenderPass::PASS_MATERIAL_ALPHA_EMISSIVE,
RENDER_TYPE_PASS_SPECMAP = LLRenderPass::PASS_SPECMAP,
RENDER_TYPE_PASS_SPECMAP_BLEND = LLRenderPass::PASS_SPECMAP_BLEND,
RENDER_TYPE_PASS_SPECMAP_MASK = LLRenderPass::PASS_SPECMAP_MASK,
RENDER_TYPE_PASS_SPECMAP_EMISSIVE = LLRenderPass::PASS_SPECMAP_EMISSIVE,
RENDER_TYPE_PASS_NORMMAP = LLRenderPass::PASS_NORMMAP,
RENDER_TYPE_PASS_NORMMAP_BLEND = LLRenderPass::PASS_NORMMAP_BLEND,
RENDER_TYPE_PASS_NORMMAP_MASK = LLRenderPass::PASS_NORMMAP_MASK,
RENDER_TYPE_PASS_NORMMAP_EMISSIVE = LLRenderPass::PASS_NORMMAP_EMISSIVE,
RENDER_TYPE_PASS_NORMSPEC = LLRenderPass::PASS_NORMSPEC,
RENDER_TYPE_PASS_NORMSPEC_BLEND = LLRenderPass::PASS_NORMSPEC_BLEND,
RENDER_TYPE_PASS_NORMSPEC_MASK = LLRenderPass::PASS_NORMSPEC_MASK,
RENDER_TYPE_PASS_NORMSPEC_EMISSIVE = LLRenderPass::PASS_NORMSPEC_EMISSIVE,
// Following are object types (only used in drawable mRenderType)
RENDER_TYPE_HUD = LLRenderPass::NUM_RENDER_TYPES,
RENDER_TYPE_VOLUME,
RENDER_TYPE_PARTICLES,
RENDER_TYPE_CLOUDS,
RENDER_TYPE_HUD_PARTICLES,
NUM_RENDER_TYPES,
END_RENDER_TYPES = NUM_RENDER_TYPES
};
enum LLRenderDebugFeatureMask
{
RENDER_DEBUG_FEATURE_UI = 0x0001,
RENDER_DEBUG_FEATURE_SELECTED = 0x0002,
RENDER_DEBUG_FEATURE_HIGHLIGHTED = 0x0004,
RENDER_DEBUG_FEATURE_DYNAMIC_TEXTURES = 0x0008,
// RENDER_DEBUG_FEATURE_HW_LIGHTING = 0x0010,
RENDER_DEBUG_FEATURE_FLEXIBLE = 0x0010,
RENDER_DEBUG_FEATURE_FOG = 0x0020,
RENDER_DEBUG_FEATURE_FR_INFO = 0x0080,
RENDER_DEBUG_FEATURE_FOOT_SHADOWS = 0x0100,
};
// Bit flags selecting debug visualizations; explicitly U64-backed because the
// flag space has outgrown 32 bits (see IMPOSTORS/TEXTURE_SIZE below 0x1_0000_0000+).
// Stored in mRenderDebugMask / mOldRenderDebugMask.
enum LLRenderDebugMask: U64
{
	RENDER_DEBUG_COMPOSITION		= 0x00000001,
	RENDER_DEBUG_VERIFY				= 0x00000002,
	RENDER_DEBUG_BBOXES				= 0x00000004,
	RENDER_DEBUG_OCTREE				= 0x00000008,
	RENDER_DEBUG_WIND_VECTORS		= 0x00000010,
	RENDER_DEBUG_OCCLUSION			= 0x00000020,
	RENDER_DEBUG_POINTS				= 0x00000040,
	RENDER_DEBUG_TEXTURE_PRIORITY	= 0x00000080,
	RENDER_DEBUG_TEXTURE_AREA		= 0x00000100,
	RENDER_DEBUG_FACE_AREA			= 0x00000200,
	RENDER_DEBUG_PARTICLES			= 0x00000400,
	RENDER_DEBUG_GLOW				= 0x00000800, // not used
	RENDER_DEBUG_TEXTURE_ANIM		= 0x00001000,
	RENDER_DEBUG_LIGHTS				= 0x00002000,
	RENDER_DEBUG_BATCH_SIZE			= 0x00004000,
	RENDER_DEBUG_ALPHA_BINS			= 0x00008000, // not used
	RENDER_DEBUG_RAYCAST			= 0x00010000,
	RENDER_DEBUG_AVATAR_DRAW_INFO	= 0x00020000,
	RENDER_DEBUG_SHADOW_FRUSTA		= 0x00040000,
	RENDER_DEBUG_SCULPTED			= 0x00080000,
	RENDER_DEBUG_AVATAR_VOLUME		= 0x00100000,
	RENDER_DEBUG_AVATAR_JOINTS		= 0x00200000,
	RENDER_DEBUG_BUILD_QUEUE		= 0x00400000,
	RENDER_DEBUG_AGENT_TARGET		= 0x00800000,
	RENDER_DEBUG_UPDATE_TYPE		= 0x01000000,
	RENDER_DEBUG_PHYSICS_SHAPES		= 0x02000000,
	RENDER_DEBUG_NORMALS			= 0x04000000,
	RENDER_DEBUG_LOD_INFO			= 0x08000000,
	RENDER_DEBUG_RENDER_COMPLEXITY	= 0x10000000,
	RENDER_DEBUG_ATTACHMENT_BYTES	= 0x20000000, // not used
	RENDER_DEBUG_TEXEL_DENSITY		= 0x40000000,
	RENDER_DEBUG_TRIANGLE_COUNT		= 0x80000000,
	// 64-bit-only flags start here
	RENDER_DEBUG_IMPOSTORS			= 0x100000000,
	RENDER_DEBUG_TEXTURE_SIZE		= 0x200000000
};
public:
LLSpatialPartition* getSpatialPartition(LLViewerObject* vobj);
void updateCamera(bool reset = false);
LLVector3 mFlyCamPosition;
LLQuaternion mFlyCamRotation;
bool mBackfaceCull;
S32 mMatrixOpCount;
S32 mTextureMatrixOps;
S32 mNumVisibleNodes;
S32 mDebugTextureUploadCost;
S32 mDebugSculptUploadCost;
S32 mDebugMeshUploadCost;
S32 mNumVisibleFaces;
static S32 sCompiles;
static bool sShowHUDAttachments;
static bool sForceOldBakedUpload; // If true will not use capabilities to upload baked textures.
static S32 sUseOcclusion; // 0 = no occlusion, 1 = read only, 2 = read/write
static bool sDelayVBUpdate;
static bool sAutoMaskAlphaDeferred;
static bool sAutoMaskAlphaNonDeferred;
static bool sDisableShaders; // if true, rendering will be done without shaders
static bool sRenderTransparentWater;
static bool sRenderBump;
static bool sBakeSunlight;
static bool sNoAlpha;
static bool sUseTriStrips;
static bool sUseFarClip;
static bool sShadowRender;
static bool sWaterReflections;
static bool sDynamicLOD;
static bool sPickAvatar;
static bool sReflectionRender;
static bool sDistortionRender;
static bool sImpostorRender;
static bool sImpostorRenderAlphaDepthPass;
static bool sUnderWaterRender;
static bool sRenderGlow;
static bool sTextureBindTest;
static bool sRenderFrameTest;
static bool sRenderAttachedLights;
static bool sRenderAttachedParticles;
static bool sRenderDeferred;
static S32 sVisibleLightCount;
static F32 sMinRenderSize;
static bool sRenderingHUDs;
static F32 sDistortionWaterClipPlaneMargin;
static F32 sVolumeSAFrame;
static bool sRenderParticles; // <FS:LO> flag to hold correct, user selected, status of particles
// [SL:KB] - Patch: Render-TextureToggle (Catznip-4.0)
static bool sRenderTextures;
// [/SL:KB]
// [RLVa:KB] - @setsphere
static bool sUseDepthTexture;
// [/RLVa:KB]
static LLTrace::EventStatHandle<S64> sStatBatchSize;
//screen texture
U32 mScreenWidth;
U32 mScreenHeight;
LLRenderTarget mScreen;
LLRenderTarget mUIScreen;
LLRenderTarget mDeferredScreen;
LLRenderTarget mFXAABuffer;
LLRenderTarget mEdgeMap;
LLRenderTarget mDeferredDepth;
LLRenderTarget mOcclusionDepth;
LLRenderTarget mDeferredLight;
LLRenderTarget mHighlight;
LLRenderTarget mPhysicsDisplay;
LLCullResult mSky;
LLCullResult mReflectedObjects;
LLCullResult mRefractedObjects;
//utility buffer for rendering post effects, gets abused by renderDeferredLighting
LLPointer<LLVertexBuffer> mDeferredVB;
//utility buffer for rendering cubes, 8 vertices are corners of a cube [-1, 1]
LLPointer<LLVertexBuffer> mCubeVB;
// <FS:Ansariel> FIRE-16829: Visual Artifacts with ALM enabled on AMD graphics
LLPointer<LLVertexBuffer> mAuxiliaryVB;
//sun shadow map
LLRenderTarget mShadow[6];
LLRenderTarget mShadowOcclusion[6];
std::vector<LLVector3> mShadowFrustPoints[4];
LLVector4 mShadowError;
LLVector4 mShadowFOV;
LLVector3 mShadowFrustOrigin[4];
LLCamera mShadowCamera[8];
LLVector3 mShadowExtents[4][2];
glh::matrix4f mSunShadowMatrix[6];
glh::matrix4f mShadowModelview[6];
glh::matrix4f mShadowProjection[6];
glh::matrix4f mReflectionModelView;
LLPointer<LLDrawable> mShadowSpotLight[2];
F32 mSpotLightFade[2];
LLPointer<LLDrawable> mTargetShadowSpotLight[2];
LLVector4 mSunClipPlanes;
LLVector4 mSunOrthoClipPlanes;
LLVector2 mScreenScale;
//water reflection texture
LLRenderTarget mWaterRef;
LLRenderTarget mWaterDeferredScreen;
LLRenderTarget mWaterDeferredDepth;
LLRenderTarget mWaterOcclusionDepth;
LLRenderTarget mWaterDeferredLight;
//water distortion texture (refraction)
LLRenderTarget mWaterDis;
LLRenderTarget mBake;
//texture for making the glow
LLRenderTarget mGlow[3];
// texture for SH indirect sky contribution
LLRenderTarget mSkySH;
//noise map
U32 mNoiseMap;
U32 mTrueNoiseMap;
U32 mLightFunc;
LLColor4 mSunDiffuse;
LLColor4 mMoonDiffuse;
LLVector4 mSunDir;
LLVector4 mMoonDir;
bool mNeedsShadowTargetClear;
LLVector4 mTransformedSunDir;
LLVector4 mTransformedMoonDir;
bool mInitialized;
bool mVertexShadersEnabled;
S32 mVertexShadersLoaded; // 0 = no, 1 = yes, -1 = failed
U32 mTransformFeedbackPrimitives; //number of primitives expected to be generated by transform feedback
protected:
bool mRenderTypeEnabled[NUM_RENDER_TYPES];
std::stack<std::string> mRenderTypeEnableStack;
U32 mRenderDebugFeatureMask;
U64 mRenderDebugMask;
U64 mOldRenderDebugMask;
std::stack<U32> mRenderDebugFeatureStack;
/////////////////////////////////////////////
//
//
LLDrawable::drawable_vector_t mMovedList;
LLDrawable::drawable_vector_t mMovedBridge;
LLDrawable::drawable_vector_t mShiftList;
/////////////////////////////////////////////
//
//
	// Couples a light-emitting drawable with a distance metric and a fade
	// factor; element type of light_set_t (mNearbyLights, "lights near camera").
	struct Light
	{
		Light(LLDrawable* ptr, F32 d, F32 f = 0.0f)
		:	drawable(ptr),
			dist(d),
			fade(f)
		{}

		LLPointer<LLDrawable> drawable;	// keeps the light's drawable alive while tracked
		F32 dist;						// distance metric; smaller sorts first
		F32 fade;						// current fade amount

		// Strict weak ordering for the containing std::set: primarily by
		// distance, with the drawable pointer as a tie-breaker so two distinct
		// lights at the same distance never compare equivalent (and thus are
		// never collapsed into one set entry).
		struct compare
		{
			bool operator()(const Light& a, const Light& b) const
			{
				if ( a.dist < b.dist )
					return true;
				else if ( a.dist > b.dist )
					return false;
				else
					return a.drawable < b.drawable;
			}
		};
	};
typedef std::set< Light, Light::compare > light_set_t;
LLDrawable::drawable_set_t mLights;
light_set_t mNearbyLights; // lights near camera
LLColor4 mHWLightColors[8];
/////////////////////////////////////////////
//
// Different queues of drawables being processed.
//
LLDrawable::drawable_list_t mBuildQ1; // priority
LLDrawable::drawable_list_t mBuildQ2; // non-priority
LLSpatialGroup::sg_vector_t mGroupQ1; //priority
LLSpatialGroup::sg_vector_t mGroupQ2; // non-priority
LLSpatialGroup::sg_vector_t mGroupSaveQ1; // a place to save mGroupQ1 until it is safe to unref
LLSpatialGroup::sg_vector_t mMeshDirtyGroup; //groups that need rebuildMesh called
U32 mMeshDirtyQueryObject;
// <FS:ND> A vector is much better suited for the use case of mPartitionQ
// LLDrawable::drawable_list_t mPartitionQ; //drawables that need to update their spatial partition radius
LLDrawable::drawable_vector_t mPartitionQ; //drawables that need to update their spatial partition radius
// </FS:ND>
bool mGroupQ2Locked;
bool mGroupQ1Locked;
bool mResetVertexBuffers; //if true, clear vertex buffers on next update
LLViewerObject::vobj_list_t mCreateQ;
LLDrawable::drawable_set_t mRetexturedList;
	// One entry of mHighlightSet: a drawable being highlighted plus a fade
	// amount. mFade is mutable so it can be advanced in place while the item
	// sits inside a std::set (whose elements are otherwise immutable).
	class HighlightItem
	{
	public:
		const LLPointer<LLDrawable> mItem;	// highlighted drawable (ref-counted, identity key)
		mutable F32 mFade;					// fade amount, clamped to [0, 1]

		HighlightItem(LLDrawable* item)
		: mItem(item), mFade(0)
		{
		}

		// Set ordering/equality is by drawable pointer: one entry per drawable.
		bool operator<(const HighlightItem& rhs) const
		{
			return mItem < rhs.mItem;
		}

		bool operator==(const HighlightItem& rhs) const
		{
			return mItem == rhs.mItem;
		}

		// Advances the fade by val (may be negative), clamped to [0, 1].
		// const so it can be called on set elements; only touches mutable mFade.
		void incrFade(F32 val) const
		{
			mFade = llclamp(mFade+val, 0.f, 1.f);
		}
	};
std::set<HighlightItem> mHighlightSet;
LLPointer<LLDrawable> mHighlightObject;
//////////////////////////////////////////////////
//
// Draw pools are responsible for storing all rendered data,
// and performing the actual rendering of objects.
//
struct compare_pools
{
bool operator()(const LLDrawPool* a, const LLDrawPool* b) const
{
if (!a)
return true;
else if (!b)
return false;
else
{
S32 atype = a->getType();
S32 btype = b->getType();
if (atype < btype)
return true;
else if (atype > btype)
return false;
else
return a->getId() < b->getId();
}
}
};
typedef std::set<LLDrawPool*, compare_pools > pool_set_t;
pool_set_t mPools;
LLDrawPool* mLastRebuildPool;
// For quick-lookups into mPools (mapped by texture pointer)
std::map<uintptr_t, LLDrawPool*> mTerrainPools;
std::map<uintptr_t, LLDrawPool*> mTreePools;
LLDrawPoolAlpha* mAlphaPool;
LLDrawPool* mSkyPool;
LLDrawPool* mTerrainPool;
LLDrawPool* mWaterPool;
LLDrawPool* mGroundPool;
LLRenderPass* mSimplePool;
LLRenderPass* mGrassPool;
LLRenderPass* mAlphaMaskPool;
LLRenderPass* mFullbrightAlphaMaskPool;
LLRenderPass* mFullbrightPool;
LLDrawPool* mInvisiblePool;
LLDrawPool* mGlowPool;
LLDrawPool* mBumpPool;
LLDrawPool* mMaterialsPool;
LLDrawPool* mWLSkyPool;
// Note: no need to keep an quick-lookup to avatar pools, since there's only one per avatar
public:
std::vector<LLFace*> mHighlightFaces; // highlight faces on physical objects
protected:
std::vector<LLFace*> mSelectedFaces;
	// A colored debug marker dropped at a world position; mAge accumulates
	// after creation (presumably elapsed seconds, updated elsewhere — confirm
	// with the blip-update code) so stale blips can be expired.
	class DebugBlip
	{
	public:
		LLColor4 mColor;	// marker color
		LLVector3 mPosition;	// world position of the marker
		F32 mAge;			// age accumulator, starts at zero

		DebugBlip(const LLVector3& position, const LLColor4& color)
		: mColor(color), mPosition(position), mAge(0.f)
		{ }
	};
std::list<DebugBlip> mDebugBlips;
LLPointer<LLViewerFetchedTexture> mFaceSelectImagep;
U32 mLightMask;
U32 mLightMovingMask;
S32 mLightingDetail;
static bool sRenderPhysicalBeacons;
static bool sRenderMOAPBeacons;
static bool sRenderScriptedTouchBeacons;
static bool sRenderScriptedBeacons;
static bool sRenderParticleBeacons;
static bool sRenderSoundBeacons;
public:
static bool sRenderBeacons;
static bool sRenderHighlight;
// Determines which set of UVs to use in highlight display
//
static LLRender::eTexIndex sRenderHighlightTextureChannel;
//debug use
static U32 sCurRenderPoolType ;
//cached settings
static bool WindLightUseAtmosShaders;
static bool RenderAvatarVP;
static bool RenderDeferred;
static F32 RenderDeferredSunWash;
static U32 RenderFSAASamples;
static U32 RenderResolutionDivisor;
// [SL:KB] - Patch: Settings-RenderResolutionMultiplier | Checked: Catznip-5.4
static F32 RenderResolutionMultiplier;
// [/SL:KB]
static bool RenderUIBuffer;
static S32 RenderShadowDetail;
static bool RenderDeferredSSAO;
static F32 RenderShadowResolutionScale;
static bool RenderLocalLights;
static bool RenderDelayCreation;
// static bool RenderAnimateRes; <FS:Beq> FIRE-23122 BUG-225920 Remove broken RenderAnimateRes functionality.
static bool FreezeTime;
static S32 DebugBeaconLineWidth;
static F32 RenderHighlightBrightness;
static LLColor4 RenderHighlightColor;
static F32 RenderHighlightThickness;
static bool RenderSpotLightsInNondeferred;
static LLColor4 PreviewAmbientColor;
static LLColor4 PreviewDiffuse0;
static LLColor4 PreviewSpecular0;
static LLColor4 PreviewDiffuse1;
static LLColor4 PreviewSpecular1;
static LLColor4 PreviewDiffuse2;
static LLColor4 PreviewSpecular2;
static LLVector3 PreviewDirection0;
static LLVector3 PreviewDirection1;
static LLVector3 PreviewDirection2;
static F32 RenderGlowMinLuminance;
static F32 RenderGlowMaxExtractAlpha;
static F32 RenderGlowWarmthAmount;
static LLVector3 RenderGlowLumWeights;
static LLVector3 RenderGlowWarmthWeights;
static S32 RenderGlowResolutionPow;
static S32 RenderGlowIterations;
static F32 RenderGlowWidth;
static F32 RenderGlowStrength;
static bool RenderDepthOfField;
static bool RenderDepthOfFieldInEditMode;
//<FS:TS> FIRE-16251: Depth of Field does not work underwater
static bool FSRenderDepthOfFieldUnderwater;
//</FS:TS> FIRE-16251
// <FS:Beq> FIRE-16728
static bool FSFocusPointLocked;
static bool FSFocusPointFollowsPointer;
// </FS:Beq>
static F32 CameraFocusTransitionTime;
static F32 CameraFNumber;
static F32 CameraFocalLength;
static F32 CameraFieldOfView;
static F32 RenderShadowNoise;
static F32 RenderShadowBlurSize;
static F32 RenderSSAOScale;
static U32 RenderSSAOMaxScale;
static F32 RenderSSAOFactor;
static LLVector3 RenderSSAOEffect;
static F32 RenderShadowOffsetError;
static F32 RenderShadowBiasError;
static F32 RenderShadowOffset;
static F32 RenderShadowBias;
static F32 RenderSpotShadowOffset;
static F32 RenderSpotShadowBias;
static LLDrawable* RenderSpotLight;
static F32 RenderEdgeDepthCutoff;
static F32 RenderEdgeNormCutoff;
static LLVector3 RenderShadowGaussian;
static F32 RenderShadowBlurDistFactor;
static bool RenderDeferredAtmospheric;
static S32 RenderReflectionDetail;
static F32 RenderHighlightFadeTime;
static LLVector3 RenderShadowClipPlanes;
static LLVector3 RenderShadowOrthoClipPlanes;
static LLVector3 RenderShadowNearDist;
static F32 RenderFarClip;
static LLVector3 RenderShadowSplitExponent;
static F32 RenderShadowErrorCutoff;
static F32 RenderShadowFOVCutoff;
static bool CameraOffset;
static F32 CameraMaxCoF;
static F32 CameraDoFResScale;
static F32 RenderAutoHideSurfaceAreaLimit;
};
void render_bbox(const LLVector3 &min, const LLVector3 &max);
void render_hud_elements();
extern LLPipeline gPipeline;
extern bool gDebugPipeline;
extern const LLMatrix4* gGLLastMatrix;
#endif
| 36.30754 | 221 | 0.763758 | [
"render",
"object",
"vector",
"transform"
] |
d9d3f2b852d0558bee38369cd4c4f7b2f49e0808 | 1,777 | h | C | frameworks/render/libs/util/dmzRenderPickUtil.h | tongli/dmz | f2242027a17ea804259f9412b07d69f719a527c5 | [
"MIT"
] | 1 | 2016-05-08T22:02:35.000Z | 2016-05-08T22:02:35.000Z | frameworks/render/libs/util/dmzRenderPickUtil.h | ashok/dmz | 2f8d4bced646f25abf2e98bdc0d378dafb4b32ed | [
"MIT"
] | null | null | null | frameworks/render/libs/util/dmzRenderPickUtil.h | ashok/dmz | 2f8d4bced646f25abf2e98bdc0d378dafb4b32ed | [
"MIT"
] | null | null | null | #ifndef DMZ_RENDER_PICK_2D_UTIL_DOT_H
#define DMZ_RENDER_PICK_2D_UTIL_DOT_H
#include <dmzRenderUtilExport.h>
#include <dmzRenderPick.h>
namespace dmz {
   //! \brief Utility base class implementing the RenderPick interface by
   //! delegating to a registered RenderModulePick. Subclasses are notified of
   //! module registration/removal through the _store/_remove hooks.
   class DMZ_RENDER_UTIL_LINK_SYMBOL RenderPickUtil : public RenderPick {

      public:
         //! Returns the currently stored pick module (may be null if none registered).
         RenderModulePick *get_render_module_pick ();

         // RenderPick Interface.
         //! Stores \a module as the active pick module and notifies the subclass.
         virtual void store_render_module_pick (
            const String &Name,
            RenderModulePick &module);

         //! Removes \a module and notifies the subclass.
         virtual void remove_render_module_pick (
            const String &Name,
            RenderModulePick &module);

         //! Converts a screen position into a world position and the handle of
         //! the picked object. Returns False on failure (presumably when
         //! nothing is hit — confirm with the concrete implementation).
         virtual Boolean screen_to_world (
            const Int32 ScreenPosX,
            const Int32 ScreenPosY,
            Vector &worldPosition,
            Handle &objectHandle);

         //! Projects a world position back into screen coordinates.
         virtual Boolean world_to_screen (
            const Vector &WorldPosition,
            Int32 &screenPosX,
            Int32 &screenPosY);

         //! Converts a source (input-device) position into a world position
         //! and picked object handle.
         virtual Boolean source_to_world (
            const Int32 SourcePosX,
            const Int32 SourcePosY,
            Vector &worldPosition,
            Handle &objectHandle);

         //! Projects a world position back into source coordinates.
         virtual Boolean world_to_source (
            const Vector &WorldPosition,
            Int32 &sourcePosX,
            Int32 &sourcePosY);

      protected:
         RenderPickUtil (const PluginInfo &Info, const Config &Init);
         ~RenderPickUtil ();

         //! Hook invoked when a pick module is stored; default does nothing.
         virtual void _store_render_module_pick (RenderModulePick &pickMod) {;}
         //! Hook invoked when a pick module is removed; default does nothing.
         virtual void _remove_render_module_pick (RenderModulePick &pickMod) {;}

      private:
         struct State;
         // Non-copyable; default construction disabled (pimpl requires Info/Init).
         RenderPickUtil ();
         RenderPickUtil (const RenderPickUtil &);
         RenderPickUtil &operator= (const RenderPickUtil &);
         State &__state;	// pimpl: hides implementation details from the header
   };
};
#endif // DMZ_RENDER_PICK_2D_UTIL_DOT_H
| 27.765625 | 80 | 0.630276 | [
"vector"
] |
d9d590a79b946191e2314be22d132987e8a1213b | 619 | h | C | rushhour/rushhour_results_stats.h | nwbruce/rushhour | d1d378f1c68a1fea3cb096c67301d7d882d79fcf | [
"Apache-2.0"
] | null | null | null | rushhour/rushhour_results_stats.h | nwbruce/rushhour | d1d378f1c68a1fea3cb096c67301d7d882d79fcf | [
"Apache-2.0"
] | null | null | null | rushhour/rushhour_results_stats.h | nwbruce/rushhour | d1d378f1c68a1fea3cb096c67301d7d882d79fcf | [
"Apache-2.0"
] | null | null | null | #ifndef RUSHHOUR_RUSHHOUR_RESULTS_STATS_H_
#define RUSHHOUR_RUSHHOUR_RESULTS_STATS_H_
#include <cstdint>
#include <vector>
namespace rushhour {
namespace results {
// Accumulates a stream of double-valued samples and exposes summary
// statistics over everything seen so far. Samples are retained in data_;
// the mutable members suggest const queries sort the data lazily on demand
// (see data_unsorted_) — confirm with the implementation.
class Stats {
 public:
  Stats();
  // Appends one sample; returns *this so calls can be chained.
  Stats& operator<<(double value);
  std::size_t count() const;   // number of samples recorded
  double average() const;      // arithmetic mean of the samples
  double min() const;          // smallest sample seen
  double max() const;          // largest sample seen
  double stdev() const;        // standard deviation
  double percentile(double k) const;  // k-th percentile (range of k: see implementation)
 private:
  double sum_;                          // running sum for average()
  double min_;
  double max_;
  mutable std::vector<double> data_;    // all samples; may be sorted in place by const queries
  mutable bool data_unsorted_;          // true when data_ needs (re-)sorting
};
} // namespace results
} // namespace rushhour
#endif // RUSHHOUR_RUSHHOUR_RESULTS_STATS_H_
| 18.205882 | 45 | 0.735057 | [
"vector"
] |
d9dd67adfe86d40752b13302f0a31f7a71844b1c | 55,786 | c | C | code/src/sound/orxSound.c | iar-wain/orx-test | 55c8a4008375da2f28e692666255c4dbb3727edf | [
"Zlib"
] | null | null | null | code/src/sound/orxSound.c | iar-wain/orx-test | 55c8a4008375da2f28e692666255c4dbb3727edf | [
"Zlib"
] | null | null | null | code/src/sound/orxSound.c | iar-wain/orx-test | 55c8a4008375da2f28e692666255c4dbb3727edf | [
"Zlib"
] | null | null | null | /* Orx - Portable Game Engine
*
* Copyright (c) 2008-2015 Orx-Project
*
* This software is provided 'as-is', without any express or implied
* warranty. In no event will the authors be held liable for any damages
* arising from the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software. If you use this software
* in a product, an acknowledgment in the product documentation would be
* appreciated but is not required.
*
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
*
* 3. This notice may not be removed or altered from any source
* distribution.
*/
/**
* @file orxSound.c
* @date 13/07/2008
* @author iarwain@orx-project.org
*
*/
#include "sound/orxSound.h"
#include "debug/orxDebug.h"
#include "debug/orxProfiler.h"
#include "core/orxConfig.h"
#include "core/orxClock.h"
#include "core/orxEvent.h"
#include "core/orxResource.h"
#include "memory/orxMemory.h"
#include "memory/orxBank.h"
#include "object/orxObject.h"
#include "object/orxStructure.h"
#include "utils/orxHashTable.h"
#include "utils/orxString.h"
/** Module flags
*/
#define orxSOUND_KU32_STATIC_FLAG_NONE 0x00000000 /**< No flags */
#define orxSOUND_KU32_STATIC_FLAG_READY 0x00000001 /**< Ready flag */
#define orxSOUND_KU32_STATIC_MASK_ALL 0xFFFFFFFF /**< All mask */
/** Flags
*/
#define orxSOUND_KU32_FLAG_NONE 0x00000000 /**< No flags */
#define orxSOUND_KU32_FLAG_HAS_SAMPLE 0x00000001 /**< Has referenced sample flag */
#define orxSOUND_KU32_FLAG_HAS_STREAM 0x00000002 /**< Has referenced stream flag */
#define orxSOUND_KU32_FLAG_BACKUP_PLAY 0x10000000 /**< Backup play flag */
#define orxSOUND_KU32_FLAG_BACKUP_PAUSE 0x20000000 /**< Backup pause flag */
#define orxSOUND_KU32_MASK_BACKUP_ALL 0x30000000 /**< Backup all mask */
#define orxSOUND_KU32_MASK_ALL 0xFFFFFFFF /**< All mask */
/** Misc defines
*/
#define orxSOUND_KU32_SAMPLE_BANK_SIZE 32
#define orxSOUND_KU32_BANK_SIZE 64 /**< Bank size */
#define orxSOUND_KZ_STREAM_DEFAULT_CHANNEL_NUMBER 1
#define orxSOUND_KZ_STREAM_DEFAULT_SAMPLE_RATE 44100
#define orxSOUND_KZ_CONFIG_SOUND "Sound"
#define orxSOUND_KZ_CONFIG_MUSIC "Music"
#define orxSOUND_KZ_CONFIG_LOOP "Loop"
#define orxSOUND_KZ_CONFIG_PITCH "Pitch"
#define orxSOUND_KZ_CONFIG_VOLUME "Volume"
#define orxSOUND_KZ_CONFIG_EMPTY_STREAM "empty"
#define orxSOUND_KZ_CONFIG_REFERENCE_DISTANCE "RefDistance"
#define orxSOUND_KZ_CONFIG_ATTENUATION "Attenuation"
#define orxSOUND_KZ_CONFIG_KEEP_IN_CACHE "KeepInCache"
/***************************************************************************
* Structure declaration *
***************************************************************************/
/** Sound sample structure
 * One cache entry of the module's sample table, keyed by the file name's
 * string ID and shared between sounds through reference counting
 */
typedef struct __orxSOUND_SAMPLE_t
{
  orxSOUNDSYSTEM_SAMPLE *pstData;                     /**< Sound system sample data */
  orxU32                 u32ID;                       /**< String ID of the sample's file name (cache key) */
  orxU32                 u32Counter;                  /**< Reference counter */
  orxBOOL                bInternal;                   /**< orxTRUE if this module owns pstData and must delete it */

} orxSOUND_SAMPLE;
/** Sound structure
 */
struct __orxSOUND_t
{
  orxSTRUCTURE          stStructure;                  /**< Public structure, first structure member */
  const orxSTRING       zReference;                   /**< Config section name this sound was created from */
  orxSOUNDSYSTEM_SOUND *pstData;                      /**< Sound system data (sample instance or stream) */
  orxSOUND_SAMPLE      *pstSample;                    /**< Referenced cached sample, orxNULL for streams */
  orxSOUND_STATUS       eStatus;                      /**< Last observed status, used for change detection */
  orxFLOAT              fPitch;                       /**< Shadowed pitch, combined with clock modifiers at update time */
};
/** Static structure
 * Module-wide state: the shared sample cache and the module status flags
 */
typedef struct __orxSOUND_STATIC_t
{
  orxHASHTABLE *pstSampleTable;                       /**< Sample hash table: file name string ID -> orxSOUND_SAMPLE */
  orxBANK      *pstSampleBank;                        /**< Memory bank backing the orxSOUND_SAMPLE entries */
  orxU32        u32Flags;                             /**< Control flags (orxSOUND_KU32_STATIC_FLAG_*) */

} orxSOUND_STATIC;
/***************************************************************************
* Static variables *
***************************************************************************/
/** static data
*/
static orxSOUND_STATIC sstSound;
/***************************************************************************
* Private functions *
***************************************************************************/
/** Loads a sound sample, sharing it through the module's sample cache
 * @param[in] _zFileName    Sample file name, used as cache key
 * @param[in] _bKeepInCache If orxTRUE, an extra reference is kept so the sample outlives its last user
 * @return orxSOUND_SAMPLE / orxNULL
 */
static orxINLINE orxSOUND_SAMPLE *orxSound_LoadSample(const orxSTRING _zFileName, orxBOOL _bKeepInCache)
{
  orxSOUND_SAMPLE *pstResult;
  orxU32           u32ID;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);

  /* Gets its ID */
  u32ID = orxString_GetID(_zFileName);

  /* Looks for sample */
  pstResult = (orxSOUND_SAMPLE *)orxHashTable_Get(sstSound.pstSampleTable, u32ID);

  /* Found? */
  if(pstResult != orxNULL)
  {
    /* Increases its reference counter */
    pstResult->u32Counter++;
  }
  else
  {
    /* Allocates a sample */
    pstResult = (orxSOUND_SAMPLE *)orxBank_Allocate(sstSound.pstSampleBank);

    /* Valid? */
    if(pstResult != orxNULL)
    {
      /* Loads its data */
      pstResult->pstData = orxSoundSystem_LoadSample(_zFileName);

      /* Adds it to sample table */
      if((pstResult->pstData != orxNULL)
      && (orxHashTable_Add(sstSound.pstSampleTable, u32ID, pstResult) != orxSTATUS_FAILURE))
      {
        /* Inits its reference counter: a cached sample starts with one self-reference */
        pstResult->u32Counter = (_bKeepInCache != orxFALSE) ? 1 : 0;

        /* Stores its ID */
        pstResult->u32ID = u32ID;

        /* Updates its status */
        pstResult->bInternal = orxTRUE;
      }
      else
      {
        /* Data was loaded but registration failed? */
        if(pstResult->pstData != orxNULL)
        {
          /* Unloads it (fix: previously leaked when orxHashTable_Add failed) */
          orxSoundSystem_DeleteSample(pstResult->pstData);
        }

        /* Deletes it */
        orxBank_Free(sstSound.pstSampleBank, pstResult);

        /* Updates result */
        pstResult = orxNULL;

        /* Logs message */
        orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Failed to add sound to hashtable.");
      }
    }
  }

  /* Done! */
  return pstResult;
}
/** Releases one reference on a sound sample, destroying it once unused
 */
static orxINLINE void orxSound_UnloadSample(orxSOUND_SAMPLE *_pstSample)
{
  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxASSERT(_pstSample != orxNULL);

  /* Still referenced elsewhere? */
  if(_pstSample->u32Counter > 0)
  {
    /* Simply drops one reference */
    _pstSample->u32Counter--;
  }
  else
  {
    /* Owns loaded data? */
    if((_pstSample->bInternal != orxFALSE) && (_pstSample->pstData != orxNULL))
    {
      /* Destroys it */
      orxSoundSystem_DeleteSample(_pstSample->pstData);
    }

    /* Unregisters it from the sample cache */
    orxHashTable_Remove(sstSound.pstSampleTable, _pstSample->u32ID);

    /* Releases its memory cell */
    orxBank_Free(sstSound.pstSampleBank, _pstSample);
  }

  /* Done! */
  return;
}
/** Unloads all the sound samples
 */
static orxINLINE void orxSound_UnloadAllSample()
{
  orxSOUND_SAMPLE *pstSample;

  /* Keeps unloading the bank's head entry until the bank is empty; a single
     pass may only decrement a sample's reference counter, in which case the
     same entry is fetched again on the next iteration */
  for(pstSample = (orxSOUND_SAMPLE *)orxBank_GetNext(sstSound.pstSampleBank, orxNULL);
      pstSample != orxNULL;
      pstSample = (orxSOUND_SAMPLE *)orxBank_GetNext(sstSound.pstSampleBank, orxNULL))
  {
    /* Unloads it */
    orxSound_UnloadSample(pstSample);
  }

  /* Done! */
  return;
}
/** Processes a sound's config data
 * Unless _bOnlySettings is set, (re)creates the sound's data from its config
 * section (Sound -> cached sample, Music -> stream, "empty" -> empty stream),
 * then (re)applies Loop/Volume/Pitch/Attenuation/RefDistance settings.
 * @param[in] _pstSound      Concerned sound (must have a config reference to succeed)
 * @param[in] _bOnlySettings If orxTRUE, keeps current data and only re-applies settings
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE
 */
static orxSTATUS orxFASTCALL orxSound_ProcessConfigData(orxSOUND *_pstSound, orxBOOL _bOnlySettings)
{
  orxSTATUS eResult = orxSTATUS_FAILURE;

  /* Has reference? */
  if((_pstSound->zReference != orxNULL)
  && (*(_pstSound->zReference) != orxCHAR_NULL))
  {
    /* Pushes its config section */
    orxConfig_PushSection(_pstSound->zReference);

    /* Don't process only settings? */
    if(_bOnlySettings == orxFALSE)
    {
      const orxSTRING zName;

      /* Has data? */
      if(_pstSound->pstData != orxNULL)
      {
        /* Deletes it */
        orxSoundSystem_Delete(_pstSound->pstData);
        _pstSound->pstData = orxNULL;
      }

      /* Has a referenced sample? */
      if(orxStructure_TestFlags(_pstSound, orxSOUND_KU32_FLAG_HAS_SAMPLE))
      {
        /* Unloads it */
        orxSound_UnloadSample(_pstSound->pstSample);
        _pstSound->pstSample = orxNULL;
      }

      /* Updates flags */
      orxStructure_SetFlags(_pstSound, orxSOUND_KU32_FLAG_NONE, orxSOUND_KU32_FLAG_HAS_SAMPLE | orxSOUND_KU32_FLAG_HAS_STREAM);

      /* Is a sound? (Sound takes precedence over Music when both are set) */
      if(((zName = orxConfig_GetString(orxSOUND_KZ_CONFIG_SOUND)) != orxSTRING_EMPTY)
      && (*zName != orxCHAR_NULL))
      {
        /* Profiles */
        orxPROFILER_PUSH_MARKER("orxSound_CreateFromConfig (Sound)");

        /* Loads its corresponding sample */
        _pstSound->pstSample = orxSound_LoadSample(zName, orxConfig_GetBool(orxSOUND_KZ_CONFIG_KEEP_IN_CACHE));

        /* Valid? */
        if(_pstSound->pstSample != orxNULL)
        {
          /* Creates sound data based on it */
          _pstSound->pstData = orxSoundSystem_CreateFromSample(_pstSound->pstSample->pstData);

          /* Valid? */
          if(_pstSound->pstData != orxNULL)
          {
            /* Updates its status */
            orxStructure_SetFlags(_pstSound, orxSOUND_KU32_FLAG_HAS_SAMPLE, orxSOUND_KU32_FLAG_NONE);
          }
          else
          {
            /* Unloads its sample */
            orxSound_UnloadSample(_pstSound->pstSample);

            /* Removes its reference */
            _pstSound->pstSample = orxNULL;

            /* Updates its status */
            orxStructure_SetFlags(_pstSound, orxSOUND_KU32_FLAG_NONE, orxSOUND_KU32_FLAG_HAS_SAMPLE);
          }
        }

        /* Profiles */
        orxPROFILER_POP_MARKER();
      }
      /* Is a music? */
      else if(((zName = orxConfig_GetString(orxSOUND_KZ_CONFIG_MUSIC)) != orxSTRING_EMPTY)
           && (*zName != orxCHAR_NULL))
      {
        /* Profiles */
        orxPROFILER_PUSH_MARKER("orxSound_CreateFromConfig (Music)");

        /* Is empty stream ? */
        if(orxString_ICompare(zName, orxSOUND_KZ_CONFIG_EMPTY_STREAM) == 0)
        {
          /* Creates empty stream (mono @ 44100Hz, see module defines) */
          _pstSound->pstData = orxSoundSystem_CreateStream(orxSOUND_KZ_STREAM_DEFAULT_CHANNEL_NUMBER, orxSOUND_KZ_STREAM_DEFAULT_SAMPLE_RATE, _pstSound->zReference);
        }
        else
        {
          /* Loads it */
          _pstSound->pstData = orxSoundSystem_CreateStreamFromFile(zName, _pstSound->zReference);
        }

        /* Valid? */
        if(_pstSound->pstData != orxNULL)
        {
          /* Updates its status */
          orxStructure_SetFlags(_pstSound, orxSOUND_KU32_FLAG_HAS_STREAM, orxSOUND_KU32_FLAG_NONE);
        }
        else
        {
          /* Updates its status */
          orxStructure_SetFlags(_pstSound, orxSOUND_KU32_FLAG_NONE, orxSOUND_KU32_FLAG_HAS_STREAM);
        }

        /* Profiles */
        orxPROFILER_POP_MARKER();
      }
    }

    /* Valid content? */
    if(_pstSound->pstData != orxNULL)
    {
      /* Should loop? */
      if(orxConfig_GetBool(orxSOUND_KZ_CONFIG_LOOP) != orxFALSE)
      {
        /* Updates looping status */
        orxSoundSystem_Loop(_pstSound->pstData, orxTRUE);
      }
      else
      {
        /* Updates looping status */
        orxSoundSystem_Loop(_pstSound->pstData, orxFALSE);
      }

      /* Has volume? */
      if(orxConfig_HasValue(orxSOUND_KZ_CONFIG_VOLUME) != orxFALSE)
      {
        /* Updates volume */
        orxSoundSystem_SetVolume(_pstSound->pstData, orxConfig_GetFloat(orxSOUND_KZ_CONFIG_VOLUME));
      }
      else
      {
        /* Updates volume */
        orxSoundSystem_SetVolume(_pstSound->pstData, orxFLOAT_1);
      }

      /* Has pitch? */
      if(orxConfig_HasValue(orxSOUND_KZ_CONFIG_PITCH) != orxFALSE)
      {
        /* Updates pitch (updating internal shadowing for time-stretching purpose) */
        orxSound_SetPitch(_pstSound, orxConfig_GetFloat(orxSOUND_KZ_CONFIG_PITCH));
      }
      else
      {
        /* Updates pitch (updating internal shadowing for time-stretching purpose) */
        orxSound_SetPitch(_pstSound, orxFLOAT_1);
      }

      /* Has attenuation? */
      if(orxConfig_HasValue(orxSOUND_KZ_CONFIG_ATTENUATION) != orxFALSE)
      {
        /* Updates attenuation */
        orxSoundSystem_SetAttenuation(_pstSound->pstData, orxConfig_GetFloat(orxSOUND_KZ_CONFIG_ATTENUATION));
      }
      else
      {
        /* Updates attenuation */
        orxSoundSystem_SetAttenuation(_pstSound->pstData, orxFLOAT_1);
      }

      /* Has reference distance? */
      if(orxConfig_HasValue(orxSOUND_KZ_CONFIG_REFERENCE_DISTANCE) != orxFALSE)
      {
        /* Updates distance */
        orxSoundSystem_SetReferenceDistance(_pstSound->pstData, orxConfig_GetFloat(orxSOUND_KZ_CONFIG_REFERENCE_DISTANCE));
      }
      else
      {
        /* Updates distance */
        orxSoundSystem_SetReferenceDistance(_pstSound->pstData, orxFLOAT_1);
      }

      /* Updates result */
      eResult = orxSTATUS_SUCCESS;
    }

    /* Pops config section */
    orxConfig_PopSection();
  }

  /* Done! */
  return eResult;
}
/** Event handler for resource hot-reload (add/update) events
 * Config-group resource changed: every sound whose config section originates
 * from that resource is stopped, re-processed from config and restored to its
 * previous play/pause/stop state.
 * Sound-group resource changed: the matching cached sample is reloaded; every
 * sound using it backs up its play state in its structure flags, drops its
 * data, then gets its data recreated and state restored (or is detached from
 * the sample if the reload failed).
 * @param[in] _pstEvent Concerned event
 * @return orxSTATUS_SUCCESS
 */
static orxSTATUS orxFASTCALL orxSound_EventHandler(const orxEVENT *_pstEvent)
{
  orxSTATUS eResult = orxSTATUS_SUCCESS;

  /* Add or update? */
  if((_pstEvent->eID == orxRESOURCE_EVENT_ADD) || (_pstEvent->eID == orxRESOURCE_EVENT_UPDATE))
  {
    orxRESOURCE_EVENT_PAYLOAD *pstPayload;

    /* Gets payload */
    pstPayload = (orxRESOURCE_EVENT_PAYLOAD *)_pstEvent->pstPayload;

    /* Is config group? */
    if(pstPayload->u32GroupID == orxString_ToCRC(orxCONFIG_KZ_RESOURCE_GROUP))
    {
      orxSOUND *pstSound;

      /* For all sounds */
      for(pstSound = orxSOUND(orxStructure_GetFirst(orxSTRUCTURE_ID_SOUND));
          pstSound != orxNULL;
          pstSound = orxSOUND(orxStructure_GetNext(pstSound)))
      {
        /* Has reference? */
        if((pstSound->zReference != orxNULL) && (pstSound->zReference != orxSTRING_EMPTY))
        {
          /* Matches? */
          if(orxConfig_GetOriginID(pstSound->zReference) == pstPayload->u32NameID)
          {
            orxSOUND_STATUS eStatus;

            /* Gets current status */
            eStatus = orxSound_GetStatus(pstSound);

            /* Stops sound */
            orxSound_Stop(pstSound);

            /* Re-processes its config data */
            orxSound_ProcessConfigData(pstSound, orxFALSE);

            /* Depending on previous status */
            switch(eStatus)
            {
              case orxSOUND_STATUS_PLAY:
              {
                /* Updates sound */
                orxSound_Play(pstSound);

                break;
              }

              case orxSOUND_STATUS_PAUSE:
              {
                /* Updates sound (has to be playing before it can pause) */
                orxSound_Play(pstSound);
                orxSound_Pause(pstSound);

                break;
              }

              case orxSOUND_STATUS_STOP:
              default:
              {
                /* Updates sound */
                orxSound_Stop(pstSound);

                break;
              }
            }
          }
        }
      }
    }
    /* Is sound group? */
    else if(pstPayload->u32GroupID == orxString_ToCRC(orxSOUND_KZ_RESOURCE_GROUP))
    {
      orxHANDLE        hIterator;
      orxU64           u64Key;
      orxSOUND_SAMPLE *pstSample;

      /* Looks for matching sample (empty loop body: iteration does the search) */
      for(hIterator = orxHashTable_GetNext(sstSound.pstSampleTable, orxHANDLE_UNDEFINED, &u64Key, (void **)&pstSample);
          (hIterator != orxHANDLE_UNDEFINED) && (pstSample->u32ID != pstPayload->u32NameID);
          hIterator = orxHashTable_GetNext(sstSound.pstSampleTable, hIterator, &u64Key, (void **)&pstSample));

      /* Found? */
      if(hIterator != orxHANDLE_UNDEFINED)
      {
        orxSOUND *pstSound;
        orxBOOL   bLoaded;

        /* First pass: for all sounds using this sample, back up play state and release data */
        for(pstSound = orxSOUND(orxStructure_GetFirst(orxSTRUCTURE_ID_SOUND));
            pstSound != orxNULL;
            pstSound = orxSOUND(orxStructure_GetNext(pstSound)))
        {
          /* Use concerned sample? */
          if(pstSound->pstSample == pstSample)
          {
            orxU32 u32BackupFlags;

            /* Depending on its status */
            switch(orxSound_GetStatus(pstSound))
            {
              case orxSOUND_STATUS_PLAY:
              {
                /* Updates flags */
                u32BackupFlags = orxSOUND_KU32_FLAG_BACKUP_PLAY;

                break;
              }

              case orxSOUND_STATUS_PAUSE:
              {
                /* Updates flags */
                u32BackupFlags = orxSOUND_KU32_FLAG_BACKUP_PAUSE;

                break;
              }

              default:
              case orxSOUND_STATUS_STOP:
              {
                /* Updates flags */
                u32BackupFlags = orxSOUND_KU32_FLAG_NONE;
              }
            }

            /* Stops it */
            orxSound_Stop(pstSound);

            /* Deletes its data */
            orxSoundSystem_Delete(pstSound->pstData);
            pstSound->pstData = orxNULL;

            /* Updates flags */
            orxStructure_SetFlags(pstSound, u32BackupFlags, orxSOUND_KU32_MASK_BACKUP_ALL);
          }
        }

        /* Updates sample (reloads from the updated resource) */
        orxSoundSystem_DeleteSample(pstSample->pstData);
        pstSample->pstData = orxSoundSystem_LoadSample(orxString_GetFromID(pstSample->u32ID));

        /* Updates load status */
        bLoaded = (pstSample->pstData != orxNULL) ? orxTRUE : orxFALSE;

        /* Second pass: recreate each user's data and restore its play state */
        for(pstSound = orxSOUND(orxStructure_GetFirst(orxSTRUCTURE_ID_SOUND));
            pstSound != orxNULL;
            pstSound = orxSOUND(orxStructure_GetNext(pstSound)))
        {
          /* Use concerned sample? */
          if(pstSound->pstSample == pstSample)
          {
            /* Was sample loaded? */
            if(bLoaded != orxFALSE)
            {
              /* Recreates sound data based on sample */
              pstSound->pstData = orxSoundSystem_CreateFromSample(pstSound->pstSample->pstData);
            }
            else
            {
              /* Clears data */
              pstSound->pstData = orxNULL;
            }

            /* Success? */
            if(pstSound->pstData != orxNULL)
            {
              /* Re-processes its config data (settings only, data was just recreated) */
              orxSound_ProcessConfigData(pstSound, orxTRUE);

              /* Depending on previous status */
              switch(orxStructure_GetFlags(pstSound, orxSOUND_KU32_MASK_BACKUP_ALL))
              {
                case orxSOUND_KU32_FLAG_BACKUP_PLAY:
                {
                  /* Plays sound */
                  orxSound_Play(pstSound);

                  break;
                }

                case orxSOUND_KU32_FLAG_BACKUP_PAUSE:
                {
                  /* Pauses sound */
                  orxSound_Play(pstSound);
                  orxSound_Pause(pstSound);

                  break;
                }

                default:
                {
                  break;
                }
              }
            }
            else
            {
              /* Removes its reference */
              pstSound->pstSample = orxNULL;

              /* Updates its status */
              orxStructure_SetFlags(pstSound, orxSOUND_KU32_FLAG_NONE, orxSOUND_KU32_FLAG_HAS_SAMPLE);
            }

            /* Clears backup flags */
            orxStructure_SetFlags(pstSound, orxSOUND_KU32_FLAG_NONE, orxSOUND_KU32_MASK_BACKUP_ALL);
          }
        }

        /* Failed loading? */
        if(bLoaded == orxFALSE)
        {
          /* Resets its reference counter (forces actual destruction below) */
          pstSample->u32Counter = 0;

          /* Unloads it */
          orxSound_UnloadSample(pstSample);
        }
      }
    }
  }

  /* Done! */
  return eResult;
}
/** Deletes all the sounds
 */
static orxINLINE void orxSound_DeleteAll()
{
  orxSOUND *pstSound;

  /* Repeatedly deletes the first registered sound until none remains */
  while((pstSound = orxSOUND(orxStructure_GetFirst(orxSTRUCTURE_ID_SOUND))) != orxNULL)
  {
    /* Deletes it */
    orxSound_Delete(pstSound);
  }

  /* Done! */
  return;
}
/** Updates the sound (callback registered for generic structure update calls)
 * Syncs the emitter's position with its owner object, applies the owner clock's
 * multiply modifier to the pitch, and emits orxSOUND_EVENT_START / STOP events
 * when the playback status changes.
 * @param[in] _pstStructure Generic Structure or the concerned Body (expected to be an orxSOUND)
 * @param[in] _pstCaller Structure of the caller (asserted below to be an orxOBJECT)
 * @param[in] _pstClockInfo Clock info used for time updates (may be orxNULL)
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE
 */
static orxSTATUS orxFASTCALL orxSound_Update(orxSTRUCTURE *_pstStructure, const orxSTRUCTURE *_pstCaller, const orxCLOCK_INFO *_pstClockInfo)
{
orxVECTOR vPosition;
orxSOUND *pstSound;
orxOBJECT *pstObject;
orxSOUND_STATUS eNewStatus;
orxSTATUS eResult = orxSTATUS_SUCCESS;
/* Profiles */
orxPROFILER_PUSH_MARKER("orxSound_Update");
/* Checks */
orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
orxSTRUCTURE_ASSERT(_pstStructure);
orxSTRUCTURE_ASSERT(_pstCaller);
orxASSERT(orxOBJECT(_pstCaller) != orxNULL);
/* Gets calling object */
pstObject = orxOBJECT(_pstCaller);
/* Gets sound */
pstSound = orxSOUND(_pstStructure);
/* Follows the owner object's world position */
/* NOTE(review): pstData is dereferenced unconditionally here, unlike most accessors in this module which guard against orxNULL — confirm updates only run on sounds with data */
orxSoundSystem_SetPosition(pstSound->pstData, orxObject_GetWorldPosition(pstObject, &vPosition));
/* Has clock? */
if(_pstClockInfo != orxNULL)
{
orxFLOAT fPitchCoef;
/* Gets pitch coef: only a multiply clock modifier scales the pitch, anything else leaves it untouched */
fPitchCoef = (_pstClockInfo->eModType == orxCLOCK_MOD_TYPE_MULTIPLY) ? _pstClockInfo->fModValue : orxFLOAT_1;
/* Applies the coef on top of the sound's own pitch */
orxSoundSystem_SetPitch(pstSound->pstData, pstSound->fPitch * fPitchCoef);
}
/* Gets new status */
eNewStatus = orxSound_GetStatus(pstSound);
/* Changed since last update? */
if(eNewStatus != pstSound->eStatus)
{
/* Stores new status */
pstSound->eStatus = eNewStatus;
/* Depending on status */
switch(eNewStatus)
{
/* Payload lives in the switch's scope so both event-sending cases share it */
orxSOUND_EVENT_PAYLOAD stPayload;
case orxSOUND_STATUS_PLAY:
{
/* Inits event payload */
orxMemory_Zero(&stPayload, sizeof(orxSOUND_EVENT_PAYLOAD));
stPayload.pstSound = pstSound;
/* Sends start event, owner object as both sender and recipient */
orxEVENT_SEND(orxEVENT_TYPE_SOUND, orxSOUND_EVENT_START, pstObject, pstObject, &stPayload);
break;
}
case orxSOUND_STATUS_STOP:
{
/* Inits event payload */
orxMemory_Zero(&stPayload, sizeof(orxSOUND_EVENT_PAYLOAD));
stPayload.pstSound = pstSound;
/* Sends stop event */
orxEVENT_SEND(orxEVENT_TYPE_SOUND, orxSOUND_EVENT_STOP, pstObject, pstObject, &stPayload);
break;
}
default:
{
/* Other transitions (eg. to pause) don't emit events */
break;
}
}
}
/* Profiles */
orxPROFILER_POP_MARKER();
/* Done! */
return eResult;
}
/***************************************************************************
* Public functions *
***************************************************************************/
/** Sound module setup
 * Declares every module the sound module depends on, in the same order as before.
 */
void orxFASTCALL orxSound_Setup()
{
  /* Dependency list, iterated in declaration order */
  static const orxMODULE_ID saeDependencyList[] =
  {
    orxMODULE_ID_MEMORY,
    orxMODULE_ID_BANK,
    orxMODULE_ID_STRING,
    orxMODULE_ID_STRUCTURE,
    orxMODULE_ID_PROFILER,
    orxMODULE_ID_SOUNDSYSTEM,
    orxMODULE_ID_CONFIG,
    orxMODULE_ID_EVENT,
    orxMODULE_ID_RESOURCE,
    orxMODULE_ID_CLOCK
  };
  orxU32 i;

  /* Adds module dependencies */
  for(i = 0; i < sizeof(saeDependencyList) / sizeof(saeDependencyList[0]); i++)
  {
    orxModule_AddDependency(orxMODULE_ID_SOUND, saeDependencyList[i]);
  }

  /* Done! */
  return;
}
/** Inits the sound module
 * Creates the sample table and bank, registers the sound structure type and
 * installs the resource event handler used for sample reloading.
 * Fix: the event handler used to be installed even when structure registration
 * failed — in that case the READY flag was never set, so orxSound_Exit() would
 * never remove the handler, and the freshly created table/bank leaked. The
 * handler is now installed only on success and both containers are cleaned up
 * on the registration failure path.
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE
 */
orxSTATUS orxFASTCALL orxSound_Init()
{
  orxSTATUS eResult = orxSTATUS_FAILURE;

  /* Not already Initialized? */
  if(!orxFLAG_TEST(sstSound.u32Flags, orxSOUND_KU32_STATIC_FLAG_READY))
  {
    /* Cleans control structure */
    orxMemory_Zero(&sstSound, sizeof(orxSOUND_STATIC));

    /* Creates sample table */
    sstSound.pstSampleTable = orxHashTable_Create(orxSOUND_KU32_SAMPLE_BANK_SIZE, orxHASHTABLE_KU32_FLAG_NONE, orxMEMORY_TYPE_MAIN);

    /* Valid? */
    if(sstSound.pstSampleTable != orxNULL)
    {
      /* Creates sample bank */
      sstSound.pstSampleBank = orxBank_Create(orxSOUND_KU32_SAMPLE_BANK_SIZE, sizeof(orxSOUND_SAMPLE), orxBANK_KU32_FLAG_NONE, orxMEMORY_TYPE_MAIN);

      /* Valid? */
      if(sstSound.pstSampleBank != orxNULL)
      {
        /* Registers structure type */
        eResult = orxSTRUCTURE_REGISTER(SOUND, orxSTRUCTURE_STORAGE_TYPE_LINKLIST, orxMEMORY_TYPE_MAIN, orxSOUND_KU32_BANK_SIZE, &orxSound_Update);

        /* Success? */
        if(eResult != orxSTATUS_FAILURE)
        {
          /* Adds event handler: only now that the module is known functional,
             so orxSound_Exit() is guaranteed to remove it */
          orxEvent_AddHandler(orxEVENT_TYPE_RESOURCE, orxSound_EventHandler);
        }
        else
        {
          /* Deletes sample bank & table: don't leak them on registration failure */
          orxBank_Delete(sstSound.pstSampleBank);
          orxHashTable_Delete(sstSound.pstSampleTable);

          /* Logs message */
          orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Failed to register sound structure.");
        }
      }
      else
      {
        /* Deletes sample table */
        orxHashTable_Delete(sstSound.pstSampleTable);

        /* Logs message */
        orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Failed to create sample bank.");
      }
    }
    else
    {
      /* Logs message */
      orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Failed to create reference table.");
    }
  }
  else
  {
    /* Logs message */
    orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Tried to initialize sound module when it was already initialized.");

    /* Already initialized */
    eResult = orxSTATUS_SUCCESS;
  }

  /* Initialized? */
  if(eResult != orxSTATUS_FAILURE)
  {
    /* Inits Flags */
    orxFLAG_SET(sstSound.u32Flags, orxSOUND_KU32_STATIC_FLAG_READY, orxSOUND_KU32_STATIC_FLAG_NONE);
  }

  /* Done! */
  return eResult;
}
/** Exits from the sound module
 * Tears everything down in reverse order of creation: event handler, sounds,
 * samples, containers, structure registration, then clears the static flags.
 */
void orxFASTCALL orxSound_Exit()
{
  /* Not initialized? Nothing to tear down */
  if(!orxFLAG_TEST(sstSound.u32Flags, orxSOUND_KU32_STATIC_FLAG_READY))
  {
    /* Logs message */
    orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Tried to exit from sound module when it wasn't initialized.");

    return;
  }

  /* Removes resource event handler */
  orxEvent_RemoveHandler(orxEVENT_TYPE_RESOURCE, orxSound_EventHandler);

  /* Deletes all sounds, then all remaining samples */
  orxSound_DeleteAll();
  orxSound_UnloadAllSample();

  /* Deletes sample table & bank */
  orxHashTable_Delete(sstSound.pstSampleTable);
  orxBank_Delete(sstSound.pstSampleBank);

  /* Unregisters structure type */
  orxStructure_Unregister(orxSTRUCTURE_ID_SOUND);

  /* Clears all static flags */
  orxFLAG_SET(sstSound.u32Flags, orxSOUND_KU32_STATIC_FLAG_NONE, orxSOUND_KU32_STATIC_MASK_ALL);

  /* Done! */
  return;
}
/** Creates an empty sound
 * The returned sound has no data linked yet and its status is cleared.
 * @return Created orxSOUND / orxNULL
 */
orxSOUND *orxFASTCALL orxSound_Create()
{
  orxSOUND *pstResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);

  /* Allocates the structure */
  pstResult = orxSOUND(orxStructure_Create(orxSTRUCTURE_ID_SOUND));

  /* Failure? */
  if(pstResult == orxNULL)
  {
    /* Logs message */
    orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Failed to create structure for sound.");
  }
  else
  {
    /* Takes one reference & starts with no status */
    orxStructure_IncreaseCounter(pstResult);
    pstResult->eStatus = orxSOUND_STATUS_NONE;
  }

  /* Done! */
  return pstResult;
}
/** Creates a sound with an empty stream (ie. you'll need to provide actual sound data for each packet sent to the sound card using the event system)
 * Fix: the result of orxSoundSystem_CreateStream() was never checked — on
 * failure the function returned a sound flagged HAS_STREAM but with orxNULL
 * data. It now deletes the empty shell and returns orxNULL instead.
 * @param[in] _u32ChannelNumber Number of channels of the stream
 * @param[in] _u32SampleRate Sampling rate of the stream (ie. number of frames per second)
 * @param[in] _zName Name to associate with this sound
 * @return orxSOUND / orxNULL
 */
orxSOUND *orxFASTCALL orxSound_CreateWithEmptyStream(orxU32 _u32ChannelNumber, orxU32 _u32SampleRate, const orxSTRING _zName)
{
  orxSOUND *pstResult = orxNULL;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxASSERT(_zName != orxNULL);

  /* Valid name? */
  if(_zName != orxSTRING_EMPTY)
  {
    /* Creates sound */
    pstResult = orxSound_Create();

    /* Valid? */
    if(pstResult != orxNULL)
    {
      /* Creates empty stream */
      pstResult->pstData = orxSoundSystem_CreateStream(_u32ChannelNumber, _u32SampleRate, _zName);

      /* Success? */
      if(pstResult->pstData != orxNULL)
      {
        /* Stores its reference */
        pstResult->zReference = orxString_Store(_zName);

        /* Updates its status */
        orxStructure_SetFlags(pstResult, orxSOUND_KU32_FLAG_HAS_STREAM, orxSOUND_KU32_FLAG_NONE);
      }
      else
      {
        /* Logs message */
        orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Can't create sound <%s>: failed to create stream.", _zName);

        /* Deletes the empty shell instead of returning a sound without data */
        orxSound_Delete(pstResult);
        pstResult = orxNULL;
      }
    }
  }

  /* Done! */
  return pstResult;
}
/** Creates a sound from config
 * Pushes the named config section, creates a sound referencing it and applies
 * its config content; any failure yields orxNULL.
 * @param[in] _zConfigID Config ID
 * @return orxSOUND / orxNULL
 */
orxSOUND *orxFASTCALL orxSound_CreateFromConfig(const orxSTRING _zConfigID)
{
  orxSOUND *pstResult = orxNULL;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxASSERT((_zConfigID != orxNULL) && (_zConfigID != orxSTRING_EMPTY));

  /* Section missing or can't be pushed? */
  if((orxConfig_HasSection(_zConfigID) == orxFALSE)
  || (orxConfig_PushSection(_zConfigID) == orxSTATUS_FAILURE))
  {
    /* Logs message */
    orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Couldn't find sound section (%s) in config.", _zConfigID);
  }
  else
  {
    /* Allocates the sound */
    pstResult = orxSound_Create();

    /* Allocation failed? */
    if(pstResult == orxNULL)
    {
      /* Logs message */
      orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Can't create sound <%s>: can't allocate memory.", _zConfigID);
    }
    else
    {
      /* Remembers its config section name */
      pstResult->zReference = orxString_Store(orxConfig_GetCurrentSection());

      /* Applies config content, discarding the sound on failure */
      if(orxSound_ProcessConfigData(pstResult, orxFALSE) == orxSTATUS_FAILURE)
      {
        /* Logs message */
        orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Can't create sound <%s>: invalid content.", _zConfigID);

        /* Deletes it */
        orxSound_Delete(pstResult);
        pstResult = orxNULL;
      }
    }

    /* Restores previous config section */
    orxConfig_PopSection();
  }

  /* Done! */
  return pstResult;
}
/** Deletes a sound
 * Drops one reference; the sound is actually destroyed (data deleted, linked
 * sample unloaded) only when no reference remains.
 * @param[in] _pstSound Concerned Sound
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (still referenced)
 */
orxSTATUS orxFASTCALL orxSound_Delete(orxSOUND *_pstSound)
{
  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Decreases counter */
  orxStructure_DecreaseCounter(_pstSound);

  /* Still referenced by others? */
  if(orxStructure_GetRefCounter(_pstSound) != 0)
  {
    return orxSTATUS_FAILURE;
  }

  /* Halts playback first */
  orxSound_Stop(_pstSound);

  /* Releases sound data if any */
  if(_pstSound->pstData != orxNULL)
  {
    orxSoundSystem_Delete(_pstSound->pstData);
  }

  /* Releases linked sample if any */
  if(orxStructure_TestFlags(_pstSound, orxSOUND_KU32_FLAG_HAS_SAMPLE))
  {
    orxSound_UnloadSample(_pstSound->pstSample);
  }

  /* Deletes structure */
  orxStructure_Delete(_pstSound);

  /* Done! */
  return orxSTATUS_SUCCESS;
}
/** Creates a sample
 * Registers a user-created (non internal) sample under its name's CRC; fails
 * when the name is empty, already taken, or allocation fails.
 * @param[in] _u32ChannelNumber Number of channels of the sample
 * @param[in] _u32FrameNumber Number of frame of the sample (number of "samples" = number of frames * number of channels)
 * @param[in] _u32SampleRate Sampling rate of the sample (ie. number of frames per second)
 * @param[in] _zName Name to associate with the sample
 * @return orxSOUNDSYSTEM_SAMPLE / orxNULL
 */
orxSOUNDSYSTEM_SAMPLE *orxFASTCALL orxSound_CreateSample(orxU32 _u32ChannelNumber, orxU32 _u32FrameNumber, orxU32 _u32SampleRate, const orxSTRING _zName)
{
  orxSOUNDSYSTEM_SAMPLE *pstResult = orxNULL;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxASSERT(_zName != orxNULL);

  /* Valid name? */
  if(_zName != orxSTRING_EMPTY)
  {
    orxU32 u32Key;

    /* Hashes the name to get its lookup key */
    u32Key = orxString_ToCRC(_zName);

    /* Name already taken? */
    if(orxHashTable_Get(sstSound.pstSampleTable, u32Key) != orxNULL)
    {
      /* Logs message */
      orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Can't create sample <%s>: a sample with the same name is already present.", _zName);
    }
    else
    {
      orxSOUNDSYSTEM_SAMPLE *pstNewSample;

      /* Creates the low level sample */
      pstNewSample = orxSoundSystem_CreateSample(_u32ChannelNumber, _u32FrameNumber, _u32SampleRate);

      /* Success? */
      if(pstNewSample != orxNULL)
      {
        orxSOUND_SAMPLE *pstEntry;

        /* Allocates its bookkeeping entry */
        pstEntry = (orxSOUND_SAMPLE *)orxBank_Allocate(sstSound.pstSampleBank);

        /* Allocation failed? */
        if(pstEntry == orxNULL)
        {
          /* Logs message & rolls the low level sample back */
          orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Can't create sample <%s>: couldn't allocate internal structure.", _zName);
          orxSoundSystem_DeleteSample(pstNewSample);
        }
        else
        {
          /* Inits entry: user-created samples aren't internal and start unreferenced */
          pstEntry->pstData = pstNewSample;
          pstEntry->u32Counter = 0;
          pstEntry->u32ID = u32Key;
          pstEntry->bInternal = orxFALSE;

          /* Registers it */
          orxHashTable_Add(sstSound.pstSampleTable, u32Key, pstEntry);

          /* Updates result */
          pstResult = pstNewSample;
        }
      }
    }
  }

  /* Done! */
  return pstResult;
}
/** Gets a sample
 * Looks a previously created/loaded sample up by name.
 * @param[in] _zName Sample's name
 * @return orxSOUNDSYSTEM_SAMPLE / orxNULL
 */
orxSOUNDSYSTEM_SAMPLE *orxFASTCALL orxSound_GetSample(const orxSTRING _zName)
{
  orxSOUNDSYSTEM_SAMPLE *pstResult = orxNULL;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxASSERT(_zName != orxNULL);

  /* Valid name? */
  if(_zName != orxSTRING_EMPTY)
  {
    orxSOUND_SAMPLE *pstEntry;

    /* Looks the entry up by its name's CRC */
    pstEntry = (orxSOUND_SAMPLE *)orxHashTable_Get(sstSound.pstSampleTable, orxString_ToCRC(_zName));

    /* Extracts its data when found */
    pstResult = (pstEntry != orxNULL) ? pstEntry->pstData : orxNULL;
  }

  /* Done! */
  return pstResult;
}
/** Deletes a sample
 * Destroys a named sample, provided no sound currently references it.
 * @param[in] _zName Sample's name
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE
 */
orxSTATUS orxFASTCALL orxSound_DeleteSample(const orxSTRING _zName)
{
  orxSTATUS eResult = orxSTATUS_FAILURE;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxASSERT(_zName != orxNULL);

  /* Valid name? */
  if(_zName != orxSTRING_EMPTY)
  {
    orxSOUND_SAMPLE *pstEntry;

    /* Fetches the entry by its name's CRC */
    pstEntry = (orxSOUND_SAMPLE *)orxHashTable_Get(sstSound.pstSampleTable, orxString_ToCRC(_zName));

    /* Found? */
    if(pstEntry != orxNULL)
    {
      /* Still used by a sound? */
      if(pstEntry->u32Counter != 0)
      {
        /* Logs message */
        orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Can't delete sample <%s>: sample is still in use by at least a sound.", _zName);
      }
      else
      {
        /* Releases low level data, unregisters then frees the entry */
        orxSoundSystem_DeleteSample(pstEntry->pstData);
        orxHashTable_Remove(sstSound.pstSampleTable, pstEntry->u32ID);
        orxBank_Free(sstSound.pstSampleBank, pstEntry);

        /* Updates result */
        eResult = orxSTATUS_SUCCESS;
      }
    }
  }

  /* Done! */
  return eResult;
}
/** Links a sample to a sound
 * Any previously linked sample is unlinked first; on success the sound owns a
 * playable instance created from the named sample and carries its reference.
 * @param[in] _pstSound Concerned sound
 * @param[in] _zSampleName Name of the sample to link (must already be loaded/created)
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE
 */
orxSTATUS orxFASTCALL orxSound_LinkSample(orxSOUND *_pstSound, const orxSTRING _zSampleName)
{
orxSTATUS eResult = orxSTATUS_FAILURE;
/* Checks */
orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
orxSTRUCTURE_ASSERT(_pstSound);
orxASSERT(_zSampleName != orxNULL);
/* Unlink previous sample if needed (no-op when none is linked) */
orxSound_UnlinkSample(_pstSound);
/* Has no sample nor stream now? (a stream-backed sound can't take a sample; unlink doesn't clear streams) */
if(orxStructure_TestFlags(_pstSound, orxSOUND_KU32_FLAG_HAS_SAMPLE | orxSOUND_KU32_FLAG_HAS_STREAM) == orxFALSE)
{
orxSOUND_SAMPLE *pstSoundSample;
/* Loads corresponding sample (increases its reference counter) */
pstSoundSample = orxSound_LoadSample(_zSampleName, orxFALSE);
/* Found? */
if(pstSoundSample != orxNULL)
{
/* Stores it */
_pstSound->pstSample = pstSoundSample;
/* Creates sound data based on it */
_pstSound->pstData = orxSoundSystem_CreateFromSample(pstSoundSample->pstData);
/* Valid? */
if(_pstSound->pstData != orxNULL)
{
/* Stores its reference */
_pstSound->zReference = orxString_Store(_zSampleName);
/* Updates its status */
orxStructure_SetFlags(_pstSound, orxSOUND_KU32_FLAG_HAS_SAMPLE, orxSOUND_KU32_FLAG_NONE);
/* Updates result */
eResult = orxSTATUS_SUCCESS;
}
else
{
/* Rolls back: unloads sound sample (drops the reference taken above) */
orxSound_UnloadSample(pstSoundSample);
/* Removes its reference */
_pstSound->pstSample = orxNULL;
}
}
else
{
/* Logs message */
orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Can't link sample <%s> to sound: sample not found.", _zSampleName);
}
}
else
{
/* Logs message */
orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Can't link sample <%s>: sound is already linked to another sample or a stream.", _zSampleName);
}
/* Done! */
return eResult;
}
/** Unlinks (and deletes if not used anymore) a sample
 * Stops the sound, releases its data and drops its sample reference.
 * @param[in] _pstSound Concerned sound
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (no sample linked)
 */
orxSTATUS orxFASTCALL orxSound_UnlinkSample(orxSOUND *_pstSound)
{
  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* No sample currently linked? */
  if(orxStructure_TestFlags(_pstSound, orxSOUND_KU32_FLAG_HAS_SAMPLE) == orxFALSE)
  {
    return orxSTATUS_FAILURE;
  }

  /* Halts playback before tearing data down */
  orxSound_Stop(_pstSound);

  /* Drops reference name */
  _pstSound->zReference = orxNULL;

  /* Releases sound data if any */
  if(_pstSound->pstData != orxNULL)
  {
    orxSoundSystem_Delete(_pstSound->pstData);
    _pstSound->pstData = orxNULL;
  }

  /* Unloads sound sample & clears the link */
  orxSound_UnloadSample(_pstSound->pstSample);
  _pstSound->pstSample = orxNULL;

  /* Clears sample flag */
  orxStructure_SetFlags(_pstSound, orxSOUND_KU32_FLAG_NONE, orxSOUND_KU32_FLAG_HAS_SAMPLE);

  /* Done! */
  return orxSTATUS_SUCCESS;
}
/** Is a stream (ie. music)?
 * @param[in] _pstSound Concerned Sound
 * @return orxTRUE / orxFALSE
 */
orxBOOL orxFASTCALL orxSound_IsStream(orxSOUND *_pstSound)
{
  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Reports the stream flag directly */
  return orxStructure_TestFlags(_pstSound, orxSOUND_KU32_FLAG_HAS_STREAM);
}
/** Plays sound
 * @param[in] _pstSound Concerned Sound
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (no sound data)
 */
orxSTATUS orxFASTCALL orxSound_Play(orxSOUND *_pstSound)
{
  orxSTATUS eResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Delegates to the sound system when data exists, fails otherwise */
  eResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_Play(_pstSound->pstData) : orxSTATUS_FAILURE;

  /* Done! */
  return eResult;
}
/** Pauses sound
 * @param[in] _pstSound Concerned Sound
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (no sound data)
 */
orxSTATUS orxFASTCALL orxSound_Pause(orxSOUND *_pstSound)
{
  orxSTATUS eResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Delegates to the sound system when data exists, fails otherwise */
  eResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_Pause(_pstSound->pstData) : orxSTATUS_FAILURE;

  /* Done! */
  return eResult;
}
/** Stops sound
 * @param[in] _pstSound Concerned Sound
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (no sound data)
 */
orxSTATUS orxFASTCALL orxSound_Stop(orxSOUND *_pstSound)
{
  orxSTATUS eResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Delegates to the sound system when data exists, fails otherwise */
  eResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_Stop(_pstSound->pstData) : orxSTATUS_FAILURE;

  /* Done! */
  return eResult;
}
/** Starts recording
 * Thin pass-through to the sound system plugin.
 * @param[in] _zName Name for the recorded sound/file
 * @param[in] _bWriteToFile Should write to file?
 * @param[in] _u32SampleRate Sample rate, 0 for default rate (44100Hz)
 * @param[in] _u32ChannelNumber Channel number, 0 for default mono channel
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE
 */
orxSTATUS orxFASTCALL orxSound_StartRecording(const orxCHAR *_zName, orxBOOL _bWriteToFile, orxU32 _u32SampleRate, orxU32 _u32ChannelNumber)
{
  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);

  /* Done! */
  return orxSoundSystem_StartRecording(_zName, _bWriteToFile, _u32SampleRate, _u32ChannelNumber);
}
/** Stops recording
 * Thin pass-through to the sound system plugin.
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE
 */
orxSTATUS orxFASTCALL orxSound_StopRecording()
{
  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);

  /* Done! */
  return orxSoundSystem_StopRecording();
}
/** Is recording possible on the current system?
 * Thin pass-through to the sound system plugin.
 * @return orxTRUE / orxFALSE
 */
orxBOOL orxFASTCALL orxSound_HasRecordingSupport()
{
  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);

  /* Done! */
  return orxSoundSystem_HasRecordingSupport();
}
/** Sets sound volume
 * @param[in] _pstSound Concerned Sound
 * @param[in] _fVolume Desired volume (0.0 - 1.0)
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (negative volume or no sound data)
 */
orxSTATUS orxFASTCALL orxSound_SetVolume(orxSOUND *_pstSound, orxFLOAT _fVolume)
{
  orxSTATUS eResult = orxSTATUS_FAILURE;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Negative volume? */
  if(_fVolume < orxFLOAT_0)
  {
    /* Logs message */
    orxDEBUG_PRINT(orxDEBUG_LEVEL_SOUND, "Volume (%f) for sound <%s> must be >= 0.0.", _fVolume, orxSound_GetName(_pstSound));
  }
  else if(_pstSound->pstData != orxNULL)
  {
    /* Applies it */
    eResult = orxSoundSystem_SetVolume(_pstSound->pstData, _fVolume);
  }

  /* Done! */
  return eResult;
}
/** Sets sound pitch
 * The pitch is cached on the sound so clock modifiers can be applied on top of it.
 * @param[in] _pstSound Concerned Sound
 * @param[in] _fPitch Desired pitch
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (no sound data)
 */
orxSTATUS orxFASTCALL orxSound_SetPitch(orxSOUND *_pstSound, orxFLOAT _fPitch)
{
  orxSTATUS eResult = orxSTATUS_FAILURE;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Has sound? */
  if(_pstSound->pstData != orxNULL)
  {
    /* Applies it */
    eResult = orxSoundSystem_SetPitch(_pstSound->pstData, _fPitch);

    /* Caches it on success */
    if(eResult != orxSTATUS_FAILURE)
    {
      _pstSound->fPitch = _fPitch;
    }
  }

  /* Done! */
  return eResult;
}
/** Sets sound position
 * @param[in] _pstSound Concerned Sound
 * @param[in] _pvPosition Desired position
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (no sound data)
 */
orxSTATUS orxFASTCALL orxSound_SetPosition(orxSOUND *_pstSound, const orxVECTOR *_pvPosition)
{
  orxSTATUS eResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);
  orxASSERT(_pvPosition != orxNULL);

  /* Delegates to the sound system when data exists, fails otherwise */
  eResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_SetPosition(_pstSound->pstData, _pvPosition) : orxSTATUS_FAILURE;

  /* Done! */
  return eResult;
}
/** Sets sound attenuation
 * @param[in] _pstSound Concerned Sound
 * @param[in] _fAttenuation Desired attenuation
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (no sound data)
 */
orxSTATUS orxFASTCALL orxSound_SetAttenuation(orxSOUND *_pstSound, orxFLOAT _fAttenuation)
{
  orxSTATUS eResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Sets its attenuation when data exists, fails otherwise */
  eResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_SetAttenuation(_pstSound->pstData, _fAttenuation) : orxSTATUS_FAILURE;

  /* Done! */
  return eResult;
}
/** Sets sound reference distance
 * @param[in] _pstSound Concerned Sound
 * @param[in] _fDistance Within this distance, sound is perceived at its maximum volume
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (no sound data)
 */
orxSTATUS orxFASTCALL orxSound_SetReferenceDistance(orxSOUND *_pstSound, orxFLOAT _fDistance)
{
  orxSTATUS eResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Sets its reference distance when data exists, fails otherwise */
  eResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_SetReferenceDistance(_pstSound->pstData, _fDistance) : orxSTATUS_FAILURE;

  /* Done! */
  return eResult;
}
/** Loops sound
 * @param[in] _pstSound Concerned Sound
 * @param[in] _bLoop orxTRUE / orxFALSE
 * @return orxSTATUS_SUCCESS / orxSTATUS_FAILURE (no sound data)
 */
orxSTATUS orxFASTCALL orxSound_Loop(orxSOUND *_pstSound, orxBOOL _bLoop)
{
  orxSTATUS eResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Sets its looping status when data exists, fails otherwise */
  eResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_Loop(_pstSound->pstData, _bLoop) : orxSTATUS_FAILURE;

  /* Done! */
  return eResult;
}
/** Gets sound volume
 * @param[in] _pstSound Concerned Sound
 * @return orxFLOAT (orxFLOAT_0 when no sound data)
 */
orxFLOAT orxFASTCALL orxSound_GetVolume(const orxSOUND *_pstSound)
{
  orxFLOAT fResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Queries the sound system when data exists, 0 otherwise */
  fResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_GetVolume(_pstSound->pstData) : orxFLOAT_0;

  /* Done! */
  return fResult;
}
/** Gets sound pitch
 * Returns the cached pitch (the value last set through orxSound_SetPitch).
 * @param[in] _pstSound Concerned Sound
 * @return orxFLOAT (orxFLOAT_0 when no sound data)
 */
orxFLOAT orxFASTCALL orxSound_GetPitch(const orxSOUND *_pstSound)
{
  orxFLOAT fResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Uses the cached pitch when data exists, 0 otherwise */
  fResult = (_pstSound->pstData != orxNULL) ? _pstSound->fPitch : orxFLOAT_0;

  /* Done! */
  return fResult;
}
/** Gets sound position
 * @param[in] _pstSound Concerned Sound
 * @param[out] _pvPosition Sound's position
 * @return orxVECTOR / orxNULL (no sound data)
 */
orxVECTOR *orxFASTCALL orxSound_GetPosition(const orxSOUND *_pstSound, orxVECTOR *_pvPosition)
{
  orxVECTOR *pvResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);
  orxASSERT(_pvPosition != orxNULL);

  /* Queries the sound system when data exists, orxNULL otherwise */
  pvResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_GetPosition(_pstSound->pstData, _pvPosition) : orxNULL;

  /* Done! */
  return pvResult;
}
/** Gets sound attenuation
 * @param[in] _pstSound Concerned Sound
 * @return orxFLOAT (orxFLOAT_0 when no sound data)
 */
orxFLOAT orxFASTCALL orxSound_GetAttenuation(const orxSOUND *_pstSound)
{
  orxFLOAT fResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Queries the sound system when data exists, 0 otherwise */
  fResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_GetAttenuation(_pstSound->pstData) : orxFLOAT_0;

  /* Done! */
  return fResult;
}
/** Gets sound reference distance
 * @param[in] _pstSound Concerned Sound
 * @return orxFLOAT (orxFLOAT_0 when no sound data)
 */
orxFLOAT orxFASTCALL orxSound_GetReferenceDistance(const orxSOUND *_pstSound)
{
  orxFLOAT fResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Queries the sound system when data exists, 0 otherwise */
  fResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_GetReferenceDistance(_pstSound->pstData) : orxFLOAT_0;

  /* Done! */
  return fResult;
}
/** Is sound looping?
 * @param[in] _pstSound Concerned Sound
 * @return orxTRUE / orxFALSE (orxFALSE when no sound data)
 */
orxBOOL orxFASTCALL orxSound_IsLooping(const orxSOUND *_pstSound)
{
  orxBOOL bResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Queries the sound system when data exists, orxFALSE otherwise */
  bResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_IsLooping(_pstSound->pstData) : orxFALSE;

  /* Done! */
  return bResult;
}
/** Gets sound duration
 * @param[in] _pstSound Concerned Sound
 * @return orxFLOAT (orxFLOAT_0 when no sound data)
 */
orxFLOAT orxFASTCALL orxSound_GetDuration(const orxSOUND *_pstSound)
{
  orxFLOAT fResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Queries the sound system when data exists, 0 otherwise */
  fResult = (_pstSound->pstData != orxNULL) ? orxSoundSystem_GetDuration(_pstSound->pstData) : orxFLOAT_0;

  /* Done! */
  return fResult;
}
/** Gets sound status
 * Maps the sound system's status onto the sound status enum; any unknown or
 * stopped system status reads as orxSOUND_STATUS_STOP.
 * @param[in] _pstSound Concerned Sound
 * @return orxSOUND_STATUS (orxSOUND_STATUS_NONE when no sound data)
 */
orxSOUND_STATUS orxFASTCALL orxSound_GetStatus(const orxSOUND *_pstSound)
{
  orxSOUND_STATUS eResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* No data? */
  if(_pstSound->pstData == orxNULL)
  {
    /* Updates result */
    eResult = orxSOUND_STATUS_NONE;
  }
  else
  {
    orxSOUNDSYSTEM_STATUS eSystemStatus;

    /* Queries the sound system */
    eSystemStatus = orxSoundSystem_GetStatus(_pstSound->pstData);

    /* Maps it, defaulting to stopped */
    eResult = (eSystemStatus == orxSOUNDSYSTEM_STATUS_PLAY)
              ? orxSOUND_STATUS_PLAY
              : (eSystemStatus == orxSOUNDSYSTEM_STATUS_PAUSE)
                ? orxSOUND_STATUS_PAUSE
                : orxSOUND_STATUS_STOP;
  }

  /* Done! */
  return eResult;
}
/** Gets sound config name
 * @param[in] _pstSound Concerned sound
 * @return orxSTRING / orxSTRING_EMPTY (no reference stored)
 */
const orxSTRING orxFASTCALL orxSound_GetName(const orxSOUND *_pstSound)
{
  const orxSTRING zResult;

  /* Checks */
  orxASSERT(sstSound.u32Flags & orxSOUND_KU32_STATIC_FLAG_READY);
  orxSTRUCTURE_ASSERT(_pstSound);

  /* Has a stored reference? */
  if(_pstSound->zReference != orxNULL)
  {
    zResult = _pstSound->zReference;
  }
  else
  {
    /* Falls back on the empty string */
    zResult = orxSTRING_EMPTY;
  }

  /* Done! */
  return zResult;
}
| 27.699106 | 166 | 0.611264 | [
"object"
] |
d9f0f397e85e7b2400a8e2bf5fafc68f2102d352 | 1,701 | h | C | DKParseAuth/DKParseAuth.h | dkhamsing/DKParseAuth | eac01602884b4ac1da6341b9539587765e9b36f4 | [
"MIT"
] | 1 | 2019-06-30T13:00:53.000Z | 2019-06-30T13:00:53.000Z | DKParseAuth/DKParseAuth.h | dkhamsing/DKParseAuth | eac01602884b4ac1da6341b9539587765e9b36f4 | [
"MIT"
] | 1 | 2015-10-19T21:28:49.000Z | 2015-10-19T21:28:49.000Z | DKParseAuth/DKParseAuth.h | dkhamsing/DKParseAuth | eac01602884b4ac1da6341b9539587765e9b36f4 | [
"MIT"
] | null | null | null | //
// DKParseAuth.h
//
// Created by Daniel Khamsing on 9/30/15.
// Copyright © 2015 Daniel Khamsing. All rights reserved.
//
@import UIKit;
// Protocols
#import "DKHudProtocol.h"
/** Simple Parse Authentication (shared-instance helper for sign in / sign up). */
@interface DKParseAuth : NSObject
/**
 Input cell background color (optional).
 */
@property (nonatomic, strong) UIColor *cellBackgroundColor;
/**
 Shared instance.
 @return Shared instance.
 */
+ (instancetype)sharedInstance;
/**
 Configure Parse credentials (call before any other authentication method).
 @param parseClientId Client id.
 @param parseClientKey Client key.
 */
- (void)configureWithParseClientId:(NSString *)parseClientId parseClientKey:(NSString *)parseClientKey;
/**
 Configure Twitter credentials (optional).
 @param twitterConsumerKey Consumer key.
 @param twitterConsumerSecret Consumer secret.
 */
- (void)configureWithTwitterConsumerKey:(NSString *)twitterConsumerKey twitterConsumerSecret:(NSString *)twitterConsumerSecret;
/**
 Returns a view controller for Parse authentication (sign in, sign up).
 @param passwordLength Minimum password length.
 @param hud Hud object that implements the hud protocol (use `nil` for no hud).
 @param successBlock Block to execute on auth success; `user` is the authenticated user and `signup` specifies whether the user has just signed up.
 @return View controller to present for authentication.
 */
- (UIViewController *)authViewControllerWithPasswordLength:(NSInteger)passwordLength hud:(id<DKHudProtocol>)hud successBlock:(void (^)(id user, BOOL signup))successBlock;
/**
 Hud accessor. NOTE(review): the original comment described this as "a view controller ... with no hud", which doesn't match the return type — presumably it returns the hud currently in use; confirm against the implementation.
 */
- (id<DKHudProtocol>)hud;
/**
 Specifies whether the user is authenticated with Parse.
 @return Boolean that specifies if the user is authenticated.
 */
- (BOOL)authenticated;
/**
 Log the user out.
 */
- (void)logout;
@end
| 25.38806 | 170 | 0.753086 | [
"object"
] |
d9fa9dbe9e62a33f70be547b5b3c8ee5ba930497 | 29,427 | h | C | src/Magnum/Shader.h | costashatz/magnum | 8f87ca92334b326a54d27789f370fd8556d557de | [
"MIT"
] | null | null | null | src/Magnum/Shader.h | costashatz/magnum | 8f87ca92334b326a54d27789f370fd8556d557de | [
"MIT"
] | null | null | null | src/Magnum/Shader.h | costashatz/magnum | 8f87ca92334b326a54d27789f370fd8556d557de | [
"MIT"
] | null | null | null | #ifndef Magnum_Shader_h
#define Magnum_Shader_h
/*
This file is part of Magnum.
Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018
Vladimír Vondruš <mosra@centrum.cz>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
/** @file
* @brief Class @ref Magnum::Shader
*/
#include <functional>
#include <string>
#include <vector>
#include <Corrade/Containers/ArrayView.h>
#include "Magnum/AbstractObject.h"
#include "Magnum/Magnum.h"
namespace Magnum {
/**
@brief Shader
See @ref AbstractShaderProgram for usage information.
## Performance optimizations
Shader limits and implementation-defined values (such as @ref maxUniformComponents())
are cached, so repeated queries don't result in repeated @fn_gl{Get} calls.
*/
class MAGNUM_EXPORT Shader: public AbstractObject {
    public:
        /**
         * @brief Shader type
         *
         * @see @ref Shader(Version, Type),
         *      @ref maxAtomicCounterBuffers(),
         *      @ref maxAtomicCounters(),
         *      @ref maxImageUniforms()
         *      @ref maxShaderStorageBlocks(),
         *      @ref maxTextureImageUnits(),
         *      @ref maxUniformBlocks(),
         *      @ref maxUniformComponents(),
         *      @ref maxCombinedUniformComponents()
         */
        enum class Type: GLenum {
            Vertex = GL_VERTEX_SHADER,  /**< Vertex shader */

            #if !defined(MAGNUM_TARGET_GLES2) && !defined(MAGNUM_TARGET_WEBGL)
            /**
             * Tessellation control shader
             * @requires_gl40 Extension @extension{ARB,tessellation_shader}
             * @requires_gles30 Not defined in OpenGL ES 2.0.
             * @requires_es_extension Extension @extension{ANDROID,extension_pack_es31a}/
             *      @extension{EXT,tessellation_shader}
             * @requires_gles Tessellation shaders are not available in WebGL.
             */
            #ifndef MAGNUM_TARGET_GLES
            TessellationControl = GL_TESS_CONTROL_SHADER,
            #else
            TessellationControl = GL_TESS_CONTROL_SHADER_EXT,
            #endif

            /**
             * Tessellation evaluation shader
             * @requires_gl40 Extension @extension{ARB,tessellation_shader}
             * @requires_gles30 Not defined in OpenGL ES 2.0.
             * @requires_es_extension Extension @extension{ANDROID,extension_pack_es31a}/
             *      @extension{EXT,tessellation_shader}
             * @requires_gles Tessellation shaders are not available in WebGL.
             */
            #ifndef MAGNUM_TARGET_GLES
            TessellationEvaluation = GL_TESS_EVALUATION_SHADER,
            #else
            TessellationEvaluation = GL_TESS_EVALUATION_SHADER_EXT,
            #endif

            /**
             * Geometry shader
             * @requires_gl32 Extension @extension{ARB,geometry_shader4}
             * @requires_gles30 Not defined in OpenGL ES 2.0.
             * @requires_es_extension Extension @extension{ANDROID,extension_pack_es31a}/
             *      @extension{EXT,geometry_shader}
             * @requires_gles Geometry shaders are not available in WebGL.
             */
            #ifndef MAGNUM_TARGET_GLES
            Geometry = GL_GEOMETRY_SHADER,
            #else
            Geometry = GL_GEOMETRY_SHADER_EXT,
            #endif

            /**
             * Compute shader
             * @requires_gl43 Extension @extension{ARB,compute_shader}
             * @requires_gles31 Compute shaders are not available in OpenGL ES
             *      3.0 and older.
             * @requires_gles Compute shaders are not available in WebGL.
             */
            Compute = GL_COMPUTE_SHADER,
            #endif

            Fragment = GL_FRAGMENT_SHADER   /**< Fragment shader */
        };

        /**
         * @brief Max supported component count on vertex shader output
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. In OpenGL ES 2.0 the four-component vector count is
         * queried and multiplied by 4.
         * @see @fn_gl{Get} with @def_gl{MAX_VERTEX_OUTPUT_COMPONENTS},
         *      @def_gl{MAX_VARYING_COMPONENTS} in OpenGL <3.2 or
         *      @def_gl{MAX_VARYING_VECTORS} in OpenGL ES 2.0
         */
        static Int maxVertexOutputComponents();

        /** @todo `GL_MAX_PATCH_VERTICES`, `GL_MAX_TESS_GEN_LEVEL`, `GL_MAX_TESS_PATCH_COMPONENTS` when @extension{ARB,tessellation_shader} is done */

        #if !defined(MAGNUM_TARGET_GLES2) && !defined(MAGNUM_TARGET_WEBGL)
        /**
         * @brief Max supported component count of tessellation control shader input vertex
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither @extension{ARB,tessellation_shader} (part
         * of OpenGL 4.0) nor @extension{ANDROID,extension_pack_es31a}/
         * @extension{EXT,tessellation_shader} ES extension is available,
         * returns `0`.
         * @see @fn_gl{Get} with @def_gl{MAX_TESS_CONTROL_INPUT_COMPONENTS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gl Tessellation shaders are not available in WebGL.
         */
        static Int maxTessellationControlInputComponents();

        /**
         * @brief Max supported component count of tessellation control shader output vertex
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither @extension{ARB,tessellation_shader} (part
         * of OpenGL 4.0) nor @extension{ANDROID,extension_pack_es31a}/
         * @extension{EXT,tessellation_shader} ES extension is available,
         * returns `0`.
         * @see @fn_gl{Get} with @def_gl{MAX_TESS_CONTROL_OUTPUT_COMPONENTS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gl Tessellation shaders are not available in WebGL.
         */
        static Int maxTessellationControlOutputComponents();

        /**
         * @brief Max supported component count of all tessellation control shader output vertices combined
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither @extension{ARB,tessellation_shader} (part
         * of OpenGL 4.0) nor @extension{ANDROID,extension_pack_es31a}/
         * @extension{EXT,tessellation_shader} ES extension is available,
         * returns `0`.
         * @see @fn_gl{Get} with @def_gl{MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gl Tessellation shaders are not available in WebGL.
         */
        static Int maxTessellationControlTotalOutputComponents();

        /**
         * @brief Max supported component count of tessellation evaluation shader input vertex
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither @extension{ARB,tessellation_shader} (part
         * of OpenGL 4.0) nor @extension{ANDROID,extension_pack_es31a}/
         * @extension{EXT,tessellation_shader} ES extension is available,
         * returns `0`.
         * @see @fn_gl{Get} with @def_gl{MAX_TESS_EVALUATION_INPUT_COMPONENTS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gl Tessellation shaders are not available in WebGL.
         */
        static Int maxTessellationEvaluationInputComponents();

        /**
         * @brief Max supported component count of tessellation evaluation shader output vertex
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither @extension{ARB,tessellation_shader} (part
         * of OpenGL 4.0) nor @extension{ANDROID,extension_pack_es31a}/
         * @extension{EXT,tessellation_shader} ES extension is available,
         * returns `0`.
         * @see @fn_gl{Get} with @def_gl{MAX_TESS_EVALUATION_OUTPUT_COMPONENTS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gl Tessellation shaders are not available in WebGL.
         */
        static Int maxTessellationEvaluationOutputComponents();

        /**
         * @brief Max supported component count of geometry shader input vertex
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither @extension{ARB,geometry_shader4} (part of
         * OpenGL 3.2) nor @extension{ANDROID,extension_pack_es31a}/
         * @extension{EXT,geometry_shader} ES extension is available,
         * returns `0`.
         * @see @fn_gl{Get} with @def_gl{MAX_GEOMETRY_INPUT_COMPONENTS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Geometry shaders are not available in WebGL.
         */
        static Int maxGeometryInputComponents();

        /**
         * @brief Max supported component count of geometry shader output vertex
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither @extension{ARB,geometry_shader4} (part of
         * OpenGL 3.2) nor @extension{ANDROID,extension_pack_es31a}/
         * @extension{EXT,geometry_shader} ES extension is available,
         * returns `0`.
         * @see @fn_gl{Get} with @def_gl{MAX_GEOMETRY_OUTPUT_COMPONENTS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Geometry shaders are not available in WebGL.
         */
        static Int maxGeometryOutputComponents();

        /**
         * @brief Max supported component count of all geometry shader output vertices combined
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither @extension{ARB,geometry_shader4} (part of
         * OpenGL 3.2) nor @extension{ANDROID,extension_pack_es31a}/
         * @extension{EXT,geometry_shader} ES extension is available,
         * returns `0`.
         * @see @fn_gl{Get} with @def_gl{MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Geometry shaders are not available in WebGL.
         */
        static Int maxGeometryTotalOutputComponents();
        #endif

        /**
         * @brief Max supported component count on fragment shader input
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. In OpenGL ES 2.0 the four-component vector count is
         * queried and multiplied by 4.
         * @see @fn_gl{Get} with @def_gl{MAX_FRAGMENT_INPUT_COMPONENTS},
         *      @def_gl{MAX_VARYING_COMPONENTS} in OpenGL <3.2 or
         *      @def_gl{MAX_VARYING_VECTORS} in OpenGL ES 2.0
         */
        static Int maxFragmentInputComponents();

        /**
         * @brief Max supported uniform component count in default block
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If particular shader stage is not available, returns
         * `0`. In OpenGL ES 2.0 the four-component vector count is queried and
         * multiplied by 4.
         * @see @ref maxCombinedUniformComponents(),
         *      @fn_gl{Get} with @def_gl{MAX_VERTEX_UNIFORM_COMPONENTS},
         *      @def_gl{MAX_TESS_CONTROL_UNIFORM_COMPONENTS},
         *      @def_gl{MAX_TESS_EVALUATION_UNIFORM_COMPONENTS},
         *      @def_gl{MAX_GEOMETRY_UNIFORM_COMPONENTS},
         *      @def_gl{MAX_COMPUTE_UNIFORM_COMPONENTS},
         *      @def_gl{MAX_FRAGMENT_UNIFORM_COMPONENTS} or
         *      @def_gl{MAX_VERTEX_UNIFORM_VECTORS},
         *      @def_gl{MAX_FRAGMENT_UNIFORM_VECTORS} in OpenGL ES 2.0
         */
        static Int maxUniformComponents(Type type);

        #if !defined(MAGNUM_TARGET_GLES2) && !defined(MAGNUM_TARGET_WEBGL)
        /**
         * @brief Max supported atomic counter buffer count
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither extension @extension{ARB,shader_atomic_counters}
         * (part of OpenGL 4.2) nor OpenGL ES 3.1 is available or if particular
         * shader stage is not available, returns `0`.
         * @see @ref maxCombinedAtomicCounterBuffers(), @ref maxAtomicCounters(),
         *      @fn_gl{Get} with @def_gl{MAX_VERTEX_ATOMIC_COUNTER_BUFFERS},
         *      @def_gl{MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS},
         *      @def_gl{MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS},
         *      @def_gl{MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS},
         *      @def_gl{MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS} or
         *      @def_gl{MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Atomic counters are not available in WebGL.
         */
        static Int maxAtomicCounterBuffers(Type type);

        /**
         * @brief Max supported atomic counter buffer count for all stages combined
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither extension @extension{ARB,shader_atomic_counters}
         * (part of OpenGL 4.2) nor OpenGL ES 3.1 is available, returns `0`.
         * @see @ref maxAtomicCounterBuffers(), @ref maxCombinedAtomicCounters(),
         *      @fn_gl{Get} with @def_gl{MAX_COMBINED_ATOMIC_COUNTER_BUFFERS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Atomic counters are not available in WebGL.
         */
        static Int maxCombinedAtomicCounterBuffers();

        /**
         * @brief Max supported atomic counter count
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither extension @extension{ARB,shader_atomic_counters}
         * (part of OpenGL 4.2) nor OpenGL ES 3.1 is available or if particular
         * shader stage is not available, returns `0`.
         * @see @ref maxCombinedAtomicCounters(), @ref maxAtomicCounterBuffers(),
         *      @fn_gl{Get} with @def_gl{MAX_VERTEX_ATOMIC_COUNTERS},
         *      @def_gl{MAX_TESS_CONTROL_ATOMIC_COUNTERS},
         *      @def_gl{MAX_TESS_EVALUATION_ATOMIC_COUNTERS},
         *      @def_gl{MAX_GEOMETRY_ATOMIC_COUNTERS},
         *      @def_gl{MAX_COMPUTE_ATOMIC_COUNTERS} or
         *      @def_gl{MAX_FRAGMENT_ATOMIC_COUNTERS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Atomic counters are not available in WebGL.
         */
        static Int maxAtomicCounters(Type type);

        /**
         * @brief Max supported atomic counter count for all stages combined
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither extension @extension{ARB,shader_atomic_counters}
         * (part of OpenGL 4.2) nor OpenGL ES 3.1 is available, returns `0`.
         * @see @ref maxAtomicCounters(), @ref maxCombinedAtomicCounterBuffers(),
         *      @fn_gl{Get} with @def_gl{MAX_COMBINED_ATOMIC_COUNTERS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Atomic counters are not available in WebGL.
         */
        static Int maxCombinedAtomicCounters();

        /**
         * @brief Max supported image uniform count
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither extension @extension{ARB,shader_image_load_store}
         * (part of OpenGL 4.2) nor OpenGL ES 3.1 is available or if particular
         * shader stage is not available, returns `0`.
         * @see @ref maxCombinedImageUniforms(),
         *      @fn_gl{Get} with @def_gl{MAX_VERTEX_IMAGE_UNIFORMS},
         *      @def_gl{MAX_TESS_CONTROL_IMAGE_UNIFORMS},
         *      @def_gl{MAX_TESS_EVALUATION_IMAGE_UNIFORMS},
         *      @def_gl{MAX_GEOMETRY_IMAGE_UNIFORMS},
         *      @def_gl{MAX_COMPUTE_IMAGE_UNIFORMS} or
         *      @def_gl{MAX_FRAGMENT_IMAGE_UNIFORMS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Shader image load/store is not available in WebGL.
         */
        static Int maxImageUniforms(Type type);

        /**
         * @brief Max supported image uniform count for all stages combined
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither extension @extension{ARB,shader_image_load_store}
         * (part of OpenGL 4.2) nor OpenGL ES 3.1 is available, returns `0`.
         * @see @ref maxImageUniforms(),
         *      @fn_gl{Get} with @def_gl{MAX_COMBINED_IMAGE_UNIFORMS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Shader image load/store is not available in WebGL.
         */
        static Int maxCombinedImageUniforms();

        /**
         * @brief Max supported shader storage block count
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither extension @extension{ARB,shader_storage_buffer_object}
         * (part of OpenGL 4.3) nor OpenGL ES 3.1 is available or if particular
         * shader stage is not available, returns `0`.
         * @see @ref maxCombinedShaderStorageBlocks(),
         *      @fn_gl{Get} with @def_gl{MAX_VERTEX_SHADER_STORAGE_BLOCKS},
         *      @def_gl{MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS},
         *      @def_gl{MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS},
         *      @def_gl{MAX_GEOMETRY_SHADER_STORAGE_BLOCKS},
         *      @def_gl{MAX_COMPUTE_SHADER_STORAGE_BLOCKS} or
         *      @def_gl{MAX_FRAGMENT_SHADER_STORAGE_BLOCKS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Shader image load/store is not available in WebGL.
         */
        static Int maxShaderStorageBlocks(Type type);

        /**
         * @brief Max supported shader storage block count for all stages combined
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If neither extension @extension{ARB,shader_storage_buffer_object}
         * (part of OpenGL 4.3) nor OpenGL ES 3.1 is available, returns `0`.
         * @see @ref maxShaderStorageBlocks(),
         *      @fn_gl{Get} with @def_gl{MAX_COMBINED_SHADER_STORAGE_BLOCKS}
         * @requires_gles30 Not defined in OpenGL ES 2.0.
         * @requires_gles Shader storage is not available in WebGL.
         */
        static Int maxCombinedShaderStorageBlocks();
        #endif

        /**
         * @brief Max supported texture image unit count
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If particular shader stage is not available, returns
         * `0`.
         * @see @ref maxCombinedTextureImageUnits(),
         *      @fn_gl{Get} with @def_gl{MAX_VERTEX_TEXTURE_IMAGE_UNITS},
         *      @def_gl{MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS},
         *      @def_gl{MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS},
         *      @def_gl{MAX_GEOMETRY_TEXTURE_IMAGE_UNITS},
         *      @def_gl{MAX_COMPUTE_TEXTURE_IMAGE_UNITS},
         *      @def_gl{MAX_TEXTURE_IMAGE_UNITS}
         */
        static Int maxTextureImageUnits(Type type);

        /**
         * @brief Max supported texture image unit count for all stages combined
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls.
         * @see @ref maxTextureImageUnits(), @fn_gl{Get} with
         *      @def_gl{MAX_COMBINED_TEXTURE_IMAGE_UNITS}
         */
        static Int maxCombinedTextureImageUnits();

        #ifndef MAGNUM_TARGET_GLES2
        /**
         * @brief Max supported uniform block count
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If extension @extension{ARB,uniform_buffer_object}
         * (part of OpenGL 3.1) or particular shader stage is not available,
         * returns `0`.
         * @see @ref maxCombinedUniformBlocks(), @ref maxUniformComponents(),
         *      @ref maxCombinedUniformComponents(),
         *      @fn_gl{Get} with @def_gl{MAX_VERTEX_UNIFORM_BLOCKS},
         *      @def_gl{MAX_TESS_CONTROL_UNIFORM_BLOCKS},
         *      @def_gl{MAX_TESS_EVALUATION_UNIFORM_BLOCKS},
         *      @def_gl{MAX_GEOMETRY_UNIFORM_BLOCKS},
         *      @def_gl{MAX_COMPUTE_UNIFORM_BLOCKS} or
         *      @def_gl{MAX_FRAGMENT_UNIFORM_BLOCKS}
         * @requires_gles30 Uniform blocks are not available in OpenGL ES 2.0.
         * @requires_webgl20 Uniform blocks are not available in WebGL 1.0.
         */
        static Int maxUniformBlocks(Type type);

        /**
         * @brief Max supported uniform block count for all stages combined
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If extension @extension{ARB,uniform_buffer_object}
         * (part of OpenGL 3.1) is not available, returns `0`.
         * @see @ref maxUniformBlocks(), @ref maxUniformComponents(),
         *      @ref maxCombinedUniformComponents(),
         *      @fn_gl{Get} with @def_gl{MAX_COMBINED_UNIFORM_BLOCKS}
         * @requires_gles30 Uniform blocks are not available in OpenGL ES 2.0.
         * @requires_webgl20 Uniform blocks are not available in WebGL 1.0.
         */
        static Int maxCombinedUniformBlocks();

        /**
         * @brief Max supported uniform component count in all blocks combined
         *
         * The result is cached, repeated queries don't result in repeated
         * OpenGL calls. If extension @extension{ARB,uniform_buffer_object}
         * (part of OpenGL 3.1) or particular shader stage is not available,
         * returns `0`.
         * @see @ref maxUniformComponents(), @ref maxUniformBlocks(),
         *      @fn_gl{Get} with @def_gl{MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS},
         *      @def_gl{MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS},
         *      @def_gl{MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS},
         *      @def_gl{MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS},
         *      @def_gl{MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS} or
         *      @def_gl{MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS}
         * @requires_gles30 Uniform blocks are not available in OpenGL ES 2.0.
         * @requires_webgl20 Uniform blocks are not available in WebGL 1.0.
         */
        static Int maxCombinedUniformComponents(Type type);
        #endif

        /**
         * @brief Compile multiple shaders simultaneously
         *
         * Returns `false` if compilation of any shader failed, `true` if
         * everything succeeded. Compiler messages (if any) are printed to
         * error output. The operation is batched in a way that allows the
         * driver to perform multiple compilations simultaneously (i.e. in
         * multiple threads).
         * @see @fn_gl{ShaderSource}, @fn_gl{CompileShader}, @fn_gl{GetShader}
         *      with @def_gl{COMPILE_STATUS} and @def_gl{INFO_LOG_LENGTH},
         *      @fn_gl{GetShaderInfoLog}
         */
        static bool compile(std::initializer_list<std::reference_wrapper<Shader>> shaders);

        /**
         * @brief Constructor
         * @param version   Target version
         * @param type      Shader type
         *
         * Creates empty OpenGL shader and adds @c \#version directive
         * corresponding to @p version parameter at the beginning. If
         * @ref Version::None is specified, (not) adding the @c \#version
         * directive is left to the user.
         * @see @fn_gl{CreateShader}
         */
        explicit Shader(Version version, Type type);

        /** @brief Copying is not allowed */
        Shader(const Shader&) = delete;

        /** @brief Move constructor */
        Shader(Shader&& other) noexcept;

        /**
         * @brief Destructor
         *
         * Deletes associated OpenGL shader.
         * @see @fn_gl{DeleteShader}
         */
        ~Shader();

        /** @brief Copying is not allowed */
        Shader& operator=(const Shader&) = delete;

        /** @brief Move assignment */
        Shader& operator=(Shader&& other) noexcept;

        /** @brief OpenGL shader ID */
        GLuint id() const { return _id; }

        #ifndef MAGNUM_TARGET_WEBGL
        /**
         * @brief Shader label
         *
         * The result is *not* cached, repeated queries will result in repeated
         * OpenGL calls. If OpenGL 4.3 is not supported and neither
         * @extension{KHR,debug} (covered also by @extension{ANDROID,extension_pack_es31a})
         * nor @extension{EXT,debug_label} desktop or ES extension is
         * available, this function returns empty string.
         * @see @fn_gl{GetObjectLabel} with @def_gl{SHADER} or
         *      @fn_gl_extension{GetObjectLabel,EXT,debug_label} with
         *      @def_gl{SHADER_OBJECT_EXT}
         * @requires_gles Debug output is not available in WebGL.
         */
        std::string label() const;

        /**
         * @brief Set shader label
         * @return Reference to self (for method chaining)
         *
         * Default is empty string. If OpenGL 4.3 is not supported and neither
         * @extension{KHR,debug} (covered also by @extension{ANDROID,extension_pack_es31a})
         * nor @extension{EXT,debug_label} desktop or ES extension is
         * available, this function does nothing.
         * @see @ref maxLabelLength(), @fn_gl{ObjectLabel} with
         *      @def_gl{SHADER} or @fn_gl_extension{LabelObject,EXT,debug_label}
         *      with @def_gl{SHADER_OBJECT_EXT}
         * @requires_gles Debug output is not available in WebGL.
         */
        Shader& setLabel(const std::string& label) {
            return setLabelInternal({label.data(), label.size()});
        }

        /** @overload */
        /* The size - 1 strips the implicit null terminator of the literal */
        template<std::size_t size> Shader& setLabel(const char(&label)[size]) {
            return setLabelInternal({label, size - 1});
        }
        #endif

        /** @brief Shader type */
        Type type() const { return _type; }

        /** @brief Shader sources */
        std::vector<std::string> sources() const;

        /**
         * @brief Add shader source
         * @param source    String with shader source
         * @return Reference to self (for method chaining)
         *
         * Adds given source to source list, preceded with @c \#line directive
         * marking first line of the source as `n(1)` where n is number of
         * added source. The source number `0` is @c \#version directive added
         * in constructor, if any. If passed string is empty, the function does
         * nothing.
         * @see @ref addFile()
         */
        Shader& addSource(std::string source);

        /**
         * @brief Add source file
         * @param filename  Name of source file to read from
         * @return Reference to self (for method chaining)
         *
         * The file must exist and must be readable. Calls @ref addSource()
         * with the contents.
         */
        Shader& addFile(const std::string& filename);

        /**
         * @brief Compile shader
         *
         * Compiles single shader. Prefer to compile multiple shaders at once
         * using @ref compile(std::initializer_list<std::reference_wrapper<Shader>>)
         * for improved performance, see its documentation for more
         * information.
         */
        bool compile() { return compile({*this}); }

    private:
        Shader& setLabelInternal(Containers::ArrayView<const char> label);

        Type _type;
        GLuint _id;

        std::vector<std::string> _sources;
};
/** @debugoperatorclassenum{Magnum::Shader,Magnum::Shader::Type} */
MAGNUM_EXPORT Debug& operator<<(Debug& debug, Shader::Type value);
/* Move constructor: takes over the GL object ID and the source list */
inline Shader::Shader(Shader&& other) noexcept: _type(other._type), _id(other._id), _sources(std::move(other._sources)) {
    /* Zero out the moved-from ID so the other instance's destructor doesn't
       delete the shader object that now belongs to this instance */
    other._id = 0;
}
/* Move assignment: exchanges state with @p other, so the previously owned GL
   object (if any) is destroyed together with @p other */
inline Shader& Shader::operator=(Shader&& other) noexcept {
    /* The three members are independent, so plain std::swap on each suffices */
    std::swap(_sources, other._sources);
    std::swap(_id, other._id);
    std::swap(_type, other._type);
    return *this;
}
}
#endif
| 45.133436 | 150 | 0.635097 | [
"geometry",
"vector"
] |
d9fbab2ca63f392c4252473715a9bd7f79f0550b | 1,771 | h | C | libmcell/api/base_introspection_class.h | mcellteam/mcell | 3920aec22c55013b78f7d6483b81f70a0d564d22 | [
"MIT"
] | 25 | 2015-03-25T16:36:01.000Z | 2022-01-17T14:28:43.000Z | libmcell/api/base_introspection_class.h | mcellteam/mcell | 3920aec22c55013b78f7d6483b81f70a0d564d22 | [
"MIT"
] | 31 | 2015-02-12T22:15:18.000Z | 2022-03-30T22:43:24.000Z | libmcell/api/base_introspection_class.h | mcellteam/mcell | 3920aec22c55013b78f7d6483b81f70a0d564d22 | [
"MIT"
] | 12 | 2016-01-15T23:20:19.000Z | 2021-02-10T06:18:00.000Z | /******************************************************************************
*
* Copyright (C) 2020 by
* The Salk Institute for Biological Studies
*
* Use of this source code is governed by an MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT.
*
******************************************************************************/
#ifndef LIBMCELL_API_BASE_INTROSPECTION_CLASS_H_
#define LIBMCELL_API_BASE_INTROSPECTION_CLASS_H_
#include "api/api_common.h"
#include "base_data_class.h"
namespace MCell {
class World;
namespace API {
// Base class for all introspection classes that hold model input data.
class BaseIntrospectionClass: public BaseDataClass {
public:
  BaseIntrospectionClass() : world(nullptr) {
    name = INTROSPECTED_OBJECT;
    // Introspected objects are treated as initialized right away: they are
    // returned by API methods and therefore do not depend on model
    // initialization. The set_* methods check this flag and would ignore
    // attribute writes if it were false.
    initialized = true;
  }

  virtual ~BaseIntrospectionClass() = default;

  // Verifies that this object was obtained through the Model API (i.e. that
  // the internal world pointer was filled in); throws RuntimeError otherwise.
  void check_initialization() const {
    if (world != nullptr) {
      return;
    }
    throw RuntimeError(
        "Object of class " + class_name + " was not correctly initialized. "
        "Introspection objects cannot be created independently. they must always be retrieved through "
        "methods of the " + NAME_CLASS_MODEL + " class."
    );
  }

  // Resets all inherited attributes and drops the world reference.
  void set_all_attributes_as_default_or_unset() {
    BaseDataClass::set_all_attributes_as_default_or_unset();
    world = nullptr;
  }

  // internal World pointer (set by the API, never deleted here)
  World* world;
};
} // namespace API
} // namespace MCell
#endif /* LIBMCELL_API_BASE_INTROSPECTION_CLASS_H_ */
| 28.564516 | 105 | 0.653868 | [
"object",
"model"
] |
d9ffadadc66613636374cb402dafa86880cd9ef5 | 786 | h | C | simulation/neuralNetGR.h | lis-epfl/Tensoft-G21 | 7a83c5dabc12906c0a6bd1da0a28a131e9d5e144 | [
"Apache-2.0"
] | 1 | 2021-08-03T10:52:20.000Z | 2021-08-03T10:52:20.000Z | simulation/neuralNetGR.h | lis-epfl/Tensoft-G21 | 7a83c5dabc12906c0a6bd1da0a28a131e9d5e144 | [
"Apache-2.0"
] | null | null | null | simulation/neuralNetGR.h | lis-epfl/Tensoft-G21 | 7a83c5dabc12906c0a6bd1da0a28a131e9d5e144 | [
"Apache-2.0"
] | 1 | 2021-09-18T07:23:35.000Z | 2021-09-18T07:23:35.000Z | /**
* @author Enrico Zardini
*/
#ifndef ROBOT_SIMULATION_NEURALNETGR_H
#define ROBOT_SIMULATION_NEURALNETGR_H
#include <map>
#include <vector>
/**
 * Single node of the network: a bias plus weighted connections to other
 * nodes, and the most recently computed output value.
 */
struct Neuron
{
    int node_id;                                // identifier of this node
    double bias;                                // additive bias term
    std::vector<std::pair<int,double>> inputs;  // (source node id, connection weight) pairs
    double output;                              // last computed activation value
};
/**
 * Neural network loaded from a file, used to compute per-module control
 * parameters from input values.
 */
class neuralNetGR
{
public:
    /**
     * Constructor: loads the network description from the given file path.
     * @param nn_path path of the file describing the network
     */
    neuralNetGR(char *nn_path);

    // Destructor
    ~neuralNetGR();

    /**
     * Command provider: evaluates the network on the given inputs and writes
     * the resulting module parameters into @p module_params.
     * @param input_values  network input values (one per input node — confirm size against input_keys)
     * @param module_params output buffer for the computed parameters
     * @param num_outputs   number of entries to write into @p module_params
     */
    void compute_module_params(double input_values[], double module_params[], int num_outputs);

    /** Prints the network structure (debug helper). */
    void printNN();

private:
    std::vector<int> input_keys;        // ids of the input nodes
    std::vector<int> output_keys;       // ids of the output nodes
    std::vector<int> proc_order;        // node evaluation order
    std::map<int,Neuron*> network;      // node id -> neuron
    std::vector<double> frequencies;    // presumably per-module oscillation frequencies — confirm with implementation
    std::vector<double> phases;         // presumably per-module phases — confirm with implementation
};
#endif //ROBOT_SIMULATION_NEURALNETGR_H
| 17.086957 | 95 | 0.693384 | [
"vector"
] |
8a00f887fa931a5e0806de587dfa64cac236d8c8 | 1,951 | h | C | sp/src/public/tier0/validator.h | tingtom/Fodder | 3250572dbc56547709f564ba68e451b21660cdb4 | [
"Unlicense"
] | 15 | 2016-04-07T21:29:55.000Z | 2022-03-18T08:03:31.000Z | sp/src/public/tier0/validator.h | tingtom/Fodder | 3250572dbc56547709f564ba68e451b21660cdb4 | [
"Unlicense"
] | 31 | 2016-11-27T14:38:02.000Z | 2020-06-03T11:11:29.000Z | sp/src/public/tier0/validator.h | tingtom/Fodder | 3250572dbc56547709f564ba68e451b21660cdb4 | [
"Unlicense"
] | 6 | 2015-02-20T06:11:13.000Z | 2018-11-15T08:22:01.000Z | //========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//=============================================================================//
#include "valobject.h"
#ifndef VALIDATOR_H
#define VALIDATOR_H
#ifdef _WIN32
#pragma once
#endif
#ifdef DBGFLAG_VALIDATE
/**
 * Tracks object validation and memory ownership during a validation pass,
 * reporting per-object statistics and leaked memory blocks.
 */
class CValidator
{
public:
	// Constructors & destructors
	CValidator( void );
	~CValidator( void );

	// Call this each time we enter a new Validate function
	void Push( tchar *pchType, void *pvObj, tchar *pchName );
	// Call this each time we exit a Validate function
	void Pop( void );

	// Claim ownership of a memory block
	void ClaimMemory( void *pvMem );

	// Finish performing a check and perform necessary computations
	void Finalize( void );

	// Render our results to the console:
	void RenderObjects( int cubThreshold );			// Render all reported objects (at or above the given size threshold)
	void RenderLeaks( void );						// Render all memory leaks

	// List manipulation functions:
	CValObject *FindObject( void *pvObj );			// Returns CValObject containing pvObj, or NULL.
	void DiffAgainst( CValidator *pOtherValidator );	// Removes any entries from this validator that are also present in the other.

	// Accessors
	bool BMemLeaks( void ) { return m_bMemLeaks; };
	CValObject *PValObjectFirst( void ) { return m_pValObjectFirst; };

	void Validate( CValidator &validator, tchar *pchName );		// Validate our internal structures

private:
	CValObject *m_pValObjectFirst;	// Linked list of all ValObjects
	CValObject *m_pValObjectLast;	// Last ValObject on the linked list
	CValObject *m_pValObjectCur;	// Object we're currently processing

	int m_cpvOwned;					// Total # of blocks owned

	int m_cpubLeaked;				// # of leaked memory blocks
	int m_cubLeaked;				// Amount of leaked memory (bytes)

	bool m_bMemLeaks;				// Has any memory leaked?
};
#endif // DBGFLAG_VALIDATE
#endif // VALIDATOR_H
| 26.364865 | 129 | 0.667863 | [
"render",
"object"
] |
8a04c4404e8d11c880b124049f983ec71b817004 | 92,429 | c | C | sdk-6.5.20/src/examples/sand/cint_vswitch_vpls_1plus1_protection.c | copslock/broadcom_cpri | 8e2767676e26faae270cf485591902a4c50cf0c5 | [
"Spencer-94"
] | null | null | null | sdk-6.5.20/src/examples/sand/cint_vswitch_vpls_1plus1_protection.c | copslock/broadcom_cpri | 8e2767676e26faae270cf485591902a4c50cf0c5 | [
"Spencer-94"
] | null | null | null | sdk-6.5.20/src/examples/sand/cint_vswitch_vpls_1plus1_protection.c | copslock/broadcom_cpri | 8e2767676e26faae270cf485591902a4c50cf0c5 | [
"Spencer-94"
] | null | null | null | /*~~~~~~~~~~~~~~~~~~~~~~~Mulitpoint VPLS Service~~~~~~~~~~~~~~~~~~~~~~~~~~*/
/*
* This license is set out in https://raw.githubusercontent.com/Broadcom-Network-Switching-Software/OpenBCM/master/Legal/LICENSE file.
*
* Copyright 2007-2020 Broadcom Inc. All rights reserved.
* File: cint_vswitch_vpls_1plus1_protection.c
* Purpose: Example of Open Multi-Point VPLS service in which PWE works as 1+1 protection
*
* Attachment circuit (AC): Ethernet port attached to the service based on port-vlan-vlan,
* connect from access side.
* PWE: Virtual circuit attached to the service based on VC-label. Connect to the MPLS
* network side.
*
* For this service, multiple logical ports can be attached where each logical port can be:
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* | . . . . . . |
* | . . . . |
* | . . +---------+ . . . |
* | . | | /\ . . |
* | . | PE3 | \\ . |
* | . /\ | Switch |/\ \\ . |
* | . || +---------+\\ \\ . |
* | . || \\ +------+ . |
* | . +------+ \\ |TUNNEL| . |
* | . |TUNNEL| \\+------+. |
* | . +------+ \\ \\ . . . . |
* | . . . || \\ \\ . . . . . . |
* | . . . . . /\/ +------------+ \/\ \/\ . . +----+ . . . |
* | . . . +----+ . +---------+ <---| - - - - - -|---- +---------+ . |UNIc| . |
* | . . |UNIa|--------| | ----| - - - - - -|---> | |----- +----+ +----+ . |
* | . +----+ +----+ . | PE1 | | MPLS | | PE2 |--------------|UNId| . |
* | . |UNIb|-----------------| Switch | <---| - TUNNEL- -|---- | Switch |--- +----+ +----+ . |
* | . +----+ . +---------+ ----| - - - - - -|---> +---------+ . |UNIe| . |
* |. ETHERNET . . . . +------------+ . . +----+ ETHERNET . |
* | . . . . . . . . . . . . . . . . . |
* | . . . . . . . . . . . . . . . . . . . . . . . . . . |
* | |
* | +-------------+ +---------------------------------+ +-------------------------+ |
* | | Ethr | Data | | Ethr | MPLS | PWE | Ethr | Data | | Ethr |S-TAG|C-TAG| Data | |
* | +-------------+ +---------------------------------+ +-------------------------+ |
* | |
* | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ |
* | | Figure 13: Multipoint VPLS Service Attachment | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This CINT configures a provider edge as shown in Figure 14.
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* | Access-P1: Access port, defined on port1 with outer-VID10 and inner-VID20 |
* | . . . . . |
* | . . . @ . . |
* | . . /PWE-2 . |
* | . / . |
* | . +---------------+ . |
* | . . . . . . <-|---PWE(2009)---|- . |
* | . . . . . -|---PWE(1981)---|-> |
* | . +--+ . . +---------------+. |
* | . | |----------------------- +---------+ /TUNNELS(300,400) . |
* | . +--+ Access-P1<1,10,20> | |/ . |
* | . . | PE1 @ . |
* | . +--+ . | Switch |\ . |
* | . | |---------------------- +---------+ \ . |
* | . +--+ Access-P2<1,15,30> . \TUNNELS(1000,1002) . |
* | . . . +---------------------+ . |
* | . ETHERNET . . <-|---PWE(2010,in_port)-|- . |
* | . . . . . . -|---PWE(1982,out_port)|-> . |
* | . +---------------------+ . |
* | . \ . |
* | . \PWE-1 . . |
* | . . @ . |
* | . . . . . . |
* | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ |
* | | Figure 14: CINT Provider Edge Configuration | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Explanation:
* - Access-P2: Access port, defined on port 1 with outer-VID 15 and inner-VID 30.
* - PWE-1: network port with incoming VC = 2010, egress-VC = 1982 defined over two tunnels 1000 and 1002.
* - PWE-2: network port with incoming VC = 2009, egress-VC = 1981 defined over two tunnels, 300 and 400.
* - For access ports outer-Tag TPID is 0x8100 and inner Tag TPID is 0x9100.
* - Access-P1 and Access-P2 refer to the logical interface (attachment circuit).
* - P2 refers to the physical ports Access-P1 and Access-P2 are attached to.
* - PWE1 and PWE2 refers to MPLS network ports.
* - P1 refers to the physical ports PWE1 and PWE2 are attached to.
*
* Headers:
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | | DA |SA||TIPD1 |Prio|VID|| MPLS | MPLS || PWE ||Eth||Data ||
* | |0:11| ||0x8100| |100||Label:1000|Label:1002||Lable:2010|| || ||
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ |
* | | Figure 15: Packets Received from PWE1 | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | |DA|SA||TIPD1 |Prio|VID|| MPLS | MPLS || PWE ||Eth||Data | |
* | | | ||0x8100| |100||Label:1000|Label:1002||Lable:1982|| || | |
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ |
* | | Figure 16: Packets Transmitted to PWE1 | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | | DA |SA||TIPD1 |Prio|VID|| MPLS | MPLS || PWE ||Eth||Data | |
* | |0:11| ||0x8100| |100||Label:300|Label:400||Lable:2009|| || | |
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ |
* | | Figure 17: Packets Received from PWE2 | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Packets Transmitted to PWE2 same as Packets Transmitted to PWE1, see Figure 16
*
* Access side packets:
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* | tag1 tag2 |
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | | DA | SA || TIPD1 | Prio | VID || TIPD2 | Prio | VID || Data | |
* | | | || 0x8100| | 10 || 0x9100| | 20 || | |
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ |
* | | Figure 18: Packets Received/Transmitted on Access Port 1 | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* | tag1 tag2 |
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | | DA | SA || TIPD1 | Prio | VID || TIPD2 | Prio | VID || Data | |
* | | | || 0x8100| | 15 || 0x9100| | 30 || | |
* | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
* | +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ |
* | | Figure 19: Packets Received/Transmitted on Access Port 2 | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Calling sequence:
* - Port TPIDs setting:
* - For P2 set outer TPID to 0x8100 and inner TPID to 0x9100 (using cint_port_tpid.c).
* - For P1 set outer TPID to 0x8100 and no inner TPID (using cint_port_tpid.c).
* - Set up MPLS tunnels. Refer to mpls_tunnels_config().
* - Set MPLS L2 termination (on ingress).
* - MPLS packets arriving with DA 00:00:00:00:00:11 and VID 100 causes L2 termination.
* - Calls bcm_l2_tunnel_add().
* - Add pop entries to MPLS switch.
* - MPLS packet arriving from the MPLS network side with labels 1000 or 1002 the label/s
* are popped (refer to mpls_add_pop_entry).
* - The same applies to MPLS packets arriving from the MPLS network side
* with labels 400 or 300.
* - Calls bcm_mpls_tunnel_switch_create().
* - Create MPLS L3 interface (on egress).
* - Packet routed to this L3 interface is set with this MAC.
* - Calls bcm_l3_intf_create().
* - Create MPLs tunnels over the created l3 interface.
* - Packet routed to above interface is tunneled into MPLS tunnels. Where labels set to
* 1000 and 1002, respectively.
* - Calls bcm_mpls_tunnel_initiator_create().
* - Create egress object points to the above l3-interface.
* - Packet routed to this egress-object is forwarded to P1 tunneled with above MPLS tunnels
* and with the L3-interface VID and MAC.
* - Calls bcm_l3_egress_create().
* - Create multipoint VSI #6202 (refer to mpls_port__vswitch_vpls_vpn_create__set).
* - calling bcm_mpls_vpn_id_create().
* - You has to supply VPN ID and multicast groups. For this purpose, the following flags have
* to be present BCM_MPLS_VPN_VPLS|BCM_MPLS_VPN_WITH_ID.
* Note that uc/mc/bc group have the same value as VSI. Another option is to set uc_group = VSI,
* and mc = uc + 4k, and bc = mc + 4k.
* - Use open_egress_mc_group() to open multicast group for flooding.
* - Multicast group ID is equal to VPN ID.
* - Add MPLS-ports to the VSI.
* - vswitch_vpls_add_access_port_1/2 creates attachment circuit and add them to the Vswitch and
* update the Multicast group.
* - create failover with type BCM_FAILOVER_INGRESS
* - create multicast group 6021,
* - vswitch_vpls_add_network_port_1/2 creates PWE and add them to the VSI and update the
* multicast group, remember that setting the failover_mc_group as 6021, failover id as above failover
* - call multicast__mpls_port_add, adding 2 PWE ports into the group 6021.
* Traffic:
* Flooding packets incoming from MPLS network from PWE1
* this mac address should be learned with MC type destination
*
* Sending to known DA
* - Send Ethernet packet from any logical interface AC with known DA on VSI 6202
* the packet will be forwarded to the MC group 6021 and replicated 2 copies to PWE1
* and PWE 2.
* Sending from protection group
* - sending packet from PWE1, the packet should be forward to AC(unicast or flood)
sending packet from PWE2, the packet should be dropped
* - do failover switch, and repeat above step, this packet should be dropped.
sending packet from PWE2, the packet should be forward to AC
*
* To Activate Above Settings Run:
* BCM> cint examples/sand/utility/cint_sand_utils_global.c
* BCM> cint examples/dpp/utility/cint_sand_utils_vlan.c
* BCM> cint examples/sand/utility/cint_sand_utils_mpls.c
* BCM> cint examples/dpp/utility/cint_utils_mpls_port.c
* BCM> cint examples/dpp/utility/cint_utils_multicast.c
* BCM> cint examples/dpp/utility/cint_utils_l2.c
* BCM> cint examples/dpp/utility/cint_utils_l3.c
* BCM> cint examples/dpp/cint_port_tpid.c
* BCM> cint examples/dpp/cint_advanced_vlan_translation_mode.c
* BCM> cint examples/dpp/cint_qos.c
* BCM> cint examples/dpp/cint_mpls_lsr.c
* BCM> cint examples/dpp/cint_vswitch_metro_mp.c
* BCM> cint examples/dpp/cint_vswitch_vpls_1plus1_protection.c
* BCM> cint
* cint> int rv;
* cint> rv = vswitch_vpls_1plus1_protection_run_with_defaults_dvapi(unit, acP,pwP);
*
* Script adjustment:
* User can adjust the following attributes of the application:
* - vpn_id: VPN id to create
* - ac_in_port: access port: physical port connected to the customer
* - pwe_in_port: network port: physical port connected to the MPLS network (for incoming packets)
* - pwe_out_port: network port: physical port connected to the MPLS network (for outgoing packets)
* - in_vc_label: incoming VC label
* - eg_vc_label: egress VC label
* - in_tunnel_label: inner tunnel label
* - out_tunnel_label: outer tunnel label
* - pwe_cw: adding a control word (CW) to all the PWEs
* - is_gal: configure PW-ACH termination for BFD VCCV Type 4 packets.
* - pwe_php: no MPLS tunnels for the PWE interface
* Note: in this case VLAN must be valid in bcm_l3_egress_create in order to infer the l3 interface.
*/
/* Verbosity switch for the debug prints in this cint. */
int verbose1 = 1;
/* VPN/VSI id used when the caller passes a negative vpn_id. */
int default_vpn_id = 6202;
/* When set, PWE ingress and egress are created as two separate mpls_ports. */
int ingress_egress_separate_ports = 0;
int encap_optimized = 0;
/* Cached soc properties controlling the MPLS termination (VTT) mode;
 * refreshed in mpls_add_pop_entry(). */
int mpls_termination_label_index_enable = 0;
int mpls_termination_label_index_database_mode = 0;
bcm_if_t pwe_encap_id;
/* Tunnel id of the second MPLS tunnel pair (extend_example path). */
bcm_if_t second_mpls_intf;
bcm_gport_t network_port_id;
/* JR1 only: allocate the AC 1+1 FEC with an explicit id. */
int jr1_ac_1plus1_fec_with_id = 0;
/* Used for multicast configuration */
int vlan_port_encap_id1;
int vlan_port_encap_id2;
int mpls_port_encap_id1;
int mpls_port_encap_id2;
int mpls_tunnel_encap_id1;
int mpls_tunnel_encap_id2;
int fec_id_with_ac_1plus1_group = 0;
/* Records (label, tunnel_id) pairs returned by bcm_mpls_tunnel_switch_create
 * so tests can later look up or destroy the termination entries.
 * NOTE(review): only label1/id1 are written in this file (mpls_add_pop_entry);
 * the remaining pairs appear to be for use by other sourced cints — confirm. */
struct vswitch_vpls_label_id_s {
    int label1;
    int id1;
    int label2;
    int id2;
    int label3;
    int id3;
};
/* Last (label, tunnel_id) pair recorded by mpls_add_pop_entry(). */
vswitch_vpls_label_id_s vswitch_vpls_pairs;
/* Egress-only mpls_port id, exported for use by other cints/tests. */
bcm_gport_t exported_egress_mpls_port_id;
/* Per-PWE configuration: physical ports, VC labels, MPLS tunnel labels and
 * the attachment-circuit VLANs of the service. Two instances exist below:
 * vswitch_vpls_info_1 (PWE-1) and vswitch_vpls_info_2 (PWE-2). */
struct vswitch_vpls_info_s {
    int in_vc_label;            /* incoming VC label */
    int eg_vc_label;            /* egress VC label */
    int ac_in_port;             /* access port: physical port connected to customer */
    int pwe_in_port;            /* network port: physical port connected to MPLS network (in) */
    int pwe_out_port;           /* network port: physical port connected to MPLS network (out) */
    bcm_vpn_t vpn_id;           /* VPN id to open */
    /* tunnels info */
    int in_tunnel_label;        /* inner MPLS tunnel label */
    int out_tunnel_label;       /* outer MPLS tunnel label */
    int tunnel_id;              /* tunnel id returned by the tunnel-initiator create */
    int ac_port1_outer_vlan;    /* AC 1 outer VID */
    int ac_port1_inner_vlan;    /* AC 1 inner VID */
    int ac_port2_outer_vlan;    /* AC 2 outer VID */
    int ac_port2_inner_vlan;    /* AC 2 inner VID */
    int access_index_1 ;        /* termination label indices for indexed-label VTT modes */
    int access_index_2 ;
    int access_index_3 ;
    int ac_port1_flags;         /* extra flags for AC 1 creation */
    int ac_port2_flags;         /* extra flags for AC 2 creation */
    bcm_gport_t access_port_id; /* gport of the created attachment circuit */
};
/* Primary PWE + tunnel configuration (PWE-1). */
vswitch_vpls_info_s vswitch_vpls_info_1;
/* Secondary PWE + tunnel configuration (PWE-2), used when extend_example is set. */
vswitch_vpls_info_s vswitch_vpls_info_2;
/* 1+1 ingress failover id shared by the two PWEs (0 = not created yet). */
bcm_failover_t ingress_failover = 0;
/* Multicast group id used as the 1+1 ingress protection destination. */
int ingress_protection_group_id = 6021;
/* mpls_port id of the protected PW (primary member of the failover pair). */
bcm_gport_t protected_pw_port_id = 0;
/* Return the "my MAC" address used for MPLS/PWE termination.
 * Delegates to the LSR utility state configured by mpls_lsr_init(). */
bcm_mac_t vswitch_vpls_my_mac_get() {
    return mpls_lsr_my_mac_get();
}
/* Populate the global vswitch_vpls_info_1 (PWE-1) structure — and, when
 * extend_example is set, vswitch_vpls_info_2 (PWE-2) — with the ports,
 * VC labels, tunnel labels and AC VLANs used by the rest of this cint.
 * A negative vpn_id selects default_vpn_id. Always returns BCM_E_NONE. */
int
vswitch_vlps_info_init(int extend_example, int ac_port, int pwe_in_port, int pwe_out_port, int ac_port1_outer_vlan, int ac_port1_inner_vlan,
                       int ac_port2_outer_vlan, int ac_port2_inner_vlan, int vpn_id) {

    /* VPN id to create; negative value means "use the default". */
    vswitch_vpls_info_1.vpn_id = (vpn_id < 0) ? default_vpn_id : vpn_id;

    /* PWE-1: physical ports and VC labels. */
    vswitch_vpls_info_1.ac_in_port   = ac_port;
    vswitch_vpls_info_1.pwe_in_port  = pwe_in_port;
    vswitch_vpls_info_1.pwe_out_port = pwe_out_port;
    vswitch_vpls_info_1.in_vc_label  = 2010;
    vswitch_vpls_info_1.eg_vc_label  = 1982;

    /* PWE-1: MPLS tunnel labels. */
    vswitch_vpls_info_1.in_tunnel_label  = 1000;
    vswitch_vpls_info_1.out_tunnel_label = 1002;

    /* Attachment-circuit VLANs. */
    vswitch_vpls_info_1.ac_port1_outer_vlan = ac_port1_outer_vlan;
    vswitch_vpls_info_1.ac_port1_inner_vlan = ac_port1_inner_vlan;
    vswitch_vpls_info_1.ac_port2_outer_vlan = ac_port2_outer_vlan;
    vswitch_vpls_info_1.ac_port2_inner_vlan = ac_port2_inner_vlan;

    /* Termination label indices (indexed-label VTT modes). */
    vswitch_vpls_info_1.access_index_1 = 2;
    vswitch_vpls_info_1.access_index_2 = 3;
    vswitch_vpls_info_1.access_index_3 = 3;

    if (extend_example) {
        /* PWE-2 shares the same physical ports as PWE-1... */
        vswitch_vpls_info_2.ac_in_port   = ac_port;
        vswitch_vpls_info_2.pwe_in_port  = pwe_in_port;
        vswitch_vpls_info_2.pwe_out_port = pwe_out_port;

        /* ...but uses its own VC labels and tunnel labels. */
        vswitch_vpls_info_2.in_vc_label = 2009;
        vswitch_vpls_info_2.eg_vc_label = 1981;
        vswitch_vpls_info_2.in_tunnel_label  = 400;
        vswitch_vpls_info_2.out_tunnel_label = 300;

        vswitch_vpls_info_2.access_index_1 = 2;
        vswitch_vpls_info_2.access_index_2 = 3;
        vswitch_vpls_info_2.access_index_3 = 3;
    }

    printf("vswitch_vpls_info_init %d\n", vswitch_vpls_info_1.vpn_id);

    /* Ingress multicast replication for the flooding groups. */
    egress_mc = 0;

    return BCM_E_NONE;
}
/* For indexed-label VTT database modes 22/23 only: create a double-tagged
 * (port x outer-VID x inner-VID 40) AC LIF on the egress port, mirrored on
 * second_unit (if >= 0) with the same gport id. No-op in other modes. */
int vswitch_vpls_1plus1_double_tag_ac_lif_configuration(int unit,int second_unit){
    int rv=0;
    /* Only relevant when the indexed-label termination database is in mode 22 or 23. */
    if ((mpls_termination_label_index_enable == 1) &&
        ((mpls_termination_label_index_database_mode == 22) || (mpls_termination_label_index_database_mode == 23))) {
        bcm_vlan_port_t vlan_port_1;
        bcm_gport_t vlan_port_id = 0;   /* NOTE(review): unused */
        /* add port, according to port_vlan_vlan */
        bcm_vlan_port_t_init(&vlan_port_1);
        /* set port attributes, key <port-vlan-vlan> */
        vlan_port_1.criteria = BCM_VLAN_PORT_MATCH_PORT_VLAN_STACKED;
        vlan_port_1.vsi = mpls_lsr_info_1.eg_vid;
        vlan_port_1.port = mpls_lsr_info_1.eg_port;
        vlan_port_1.match_vlan = mpls_lsr_info_1.in_vid;
        vlan_port_1.match_inner_vlan = 40;
        /* JR2 does not take egress VIDs on the vlan_port; legacy devices do. */
        vlan_port_1.egress_vlan = is_device_or_above(unit, JERICHO2) ? 0 : mpls_lsr_info_1.eg_vid;
        vlan_port_1.egress_inner_vlan = is_device_or_above(unit, JERICHO2) ? 0 : 40;
        vlan_port_1.flags = 0;
        rv = bcm_vlan_port_create(unit, &vlan_port_1);
        if (rv != BCM_E_NONE) {
            printf("Error, bcm_vlan_port_create\n");
            return rv;
        }
        printf("Unit %d: vlan_port_1.vlan_port_id 0x%08x vlan_port_encap_id1: 0x%08x\n\r", unit, vlan_port_1.vlan_port_id, vlan_port_1.encap_id);
        /* In advanced vlan translation mode, bcm_vlan_port_create does not create ingress / egress
           action mapping. This is here to compensate.*/
        if (advanced_vlan_translation_mode) {
            rv = vlan_translation_vlan_port_create_to_translation(unit, &vlan_port_1);
            if (rv != BCM_E_NONE) {
                printf("Error: vlan_translation_vlan_port_create_to_translation\n");
            }
        }
        if (second_unit >= 0)
        {
            /* Mirror the same AC on the second device, reusing vlan_port_1.vlan_port_id
             * from the first create via BCM_VLAN_PORT_WITH_ID so both units share the id. */
            /* set port attributes, key <port-vlan-vlan> */
            vlan_port_1.criteria = BCM_VLAN_PORT_MATCH_PORT_VLAN_STACKED;
            vlan_port_1.vsi = mpls_lsr_info_1.eg_vid;
            vlan_port_1.port = mpls_lsr_info_1.eg_port;
            vlan_port_1.match_vlan = mpls_lsr_info_1.in_vid;
            vlan_port_1.match_inner_vlan = 40;
            /* NOTE(review): device check uses `unit`, not `second_unit` — presumably both
             * devices are the same generation; confirm for mixed-device systems. */
            vlan_port_1.egress_vlan = is_device_or_above(unit, JERICHO2) ? 0 : mpls_lsr_info_1.eg_vid;
            vlan_port_1.egress_inner_vlan = is_device_or_above(unit, JERICHO2) ? 0 : 40;
            vlan_port_1.flags = 0;
            vlan_port_1.flags |= BCM_VLAN_PORT_WITH_ID;
            rv = bcm_vlan_port_create(second_unit, &vlan_port_1);
            if (rv != BCM_E_NONE) {
                printf("Error, bcm_vlan_port_create\n");
                return rv;
            }
            /* In advanced vlan translation mode, bcm_vlan_port_create does not create ingress / egress
               action mapping. This is here to compensate. */
            if (advanced_vlan_translation_mode) {
                rv = vlan_translation_vlan_port_create_to_translation(second_unit, &vlan_port_1);
                if (rv != BCM_E_NONE) {
                    printf("Error: vlan_translation_vlan_port_create_to_translation\n");
                }
            }
        }
    }
    return rv;
}
/* initialize the tunnels for mpls routing
* set termination, for MPLS label 1000,1002
*/
/*
 * Configure the MPLS transport side of the VPLS service:
 *  - L2 termination (my-MAC + VID) for packets arriving from the MPLS network.
 *  - A two-label LSP tunnel initiator and an egress object (FEC + ARP) per PWE.
 *  - ILM POP entries for the incoming tunnel labels.
 * When extend_example is set, the same is repeated for the second tunnel pair
 * (vswitch_vpls_info_2). When second_unit >= 0 the configuration is mirrored
 * on a second device, reusing the same encap/FEC ids via BCM_L3_WITH_ID.
 * Results are published in vswitch_vpls_shared_info_1.egress_intf/egress_intf2
 * and mpls_tunnel_encap_id1/2.
 *
 * Fixes vs. previous revision:
 *  - second_unit branch added the vlan port members on `unit` instead of
 *    `second_unit` (bcm_vlan_port_add).
 *  - error print for bcm_vlan_port_add referenced an undeclared `vlan`
 *    variable; now prints the vid actually configured.
 *  - missing rv check after the second tunnel-initiator create (extend path).
 *  - removed unused locals (flags / CINT_NO_FLAGS) and dead commented code.
 */
int
mpls_tunnels_config(int unit, int second_unit, int extend_example){
    int ingress_intf;
    int egress_intf;
    int encap_id;
    bcm_mpls_egress_label_t label_array[2];
    bcm_pbmp_t pbmp;
    int rv;
    bcm_mpls_label_t label_for_pop_entry_in, label_for_pop_entry_out;
    create_l3_intf_s intf;

    /* set mpls tunneling information with default values */
    mpls_lsr_init(vswitch_vpls_info_1.pwe_in_port,vswitch_vpls_info_1.pwe_out_port,0x11,0x22,5000,8000,100,200,0);
    mpls_lsr_info_1.eg_port = vswitch_vpls_info_1.pwe_out_port;

    /* configure the AC lif for double tag packets - relevant for termination modes 22-23 only */
    rv = vswitch_vpls_1plus1_double_tag_ac_lif_configuration(unit, second_unit);
    if (rv != BCM_E_NONE) {
        printf("Error, in vswitch_vpls_1plus1_double_tag_ac_lif_configuration \n");
        return rv;
    }

    /* open the PWE vlan and add the network port as a member */
    printf("open vlan %d\n", mpls_lsr_info_1.in_vid);
    rv = bcm_vlan_create(unit,mpls_lsr_info_1.in_vid);
    print rv;
    BCM_PBMP_CLEAR(pbmp);
    BCM_PBMP_PORT_ADD(pbmp, mpls_lsr_info_1.eg_port);
    rv = bcm_vlan_port_add(unit, mpls_lsr_info_1.in_vid, pbmp, pbmp);
    if (rv != BCM_E_NONE) {
        printf("Error, in bcm_vlan_port_add, vlan=%d, \n", mpls_lsr_info_1.in_vid);
        return rv;
    }
    if (second_unit >= 0)
    {
        printf("open vlan %d\n", mpls_lsr_info_1.in_vid);
        rv = bcm_vlan_create(second_unit,mpls_lsr_info_1.in_vid);
        print rv;
        BCM_PBMP_CLEAR(pbmp);
        BCM_PBMP_PORT_ADD(pbmp, mpls_lsr_info_1.eg_port);
        /* FIX: members must be added on the second unit (was: unit) */
        rv = bcm_vlan_port_add(second_unit, mpls_lsr_info_1.in_vid, pbmp, pbmp);
        if (rv != BCM_E_NONE) {
            printf("Error, in bcm_vlan_port_add, vlan=%d, \n", mpls_lsr_info_1.in_vid);
            return rv;
        }
    }

    /* l2 termination for mpls routing: packets received on this VID + my-MAC are L2 terminated */
    intf.vsi = mpls_lsr_info_1.in_vid;
    intf.my_global_mac = mpls_lsr_info_1.my_mac;
    intf.my_lsb_mac = mpls_lsr_info_1.my_mac;
    rv = l3__intf_rif__create(unit, &intf);
    ingress_intf = intf.rif;
    if (rv != BCM_E_NONE) {
        printf("Error, in l3__intf_rif__create\n");
        return rv;
    }
    if (second_unit >= 0)
    {
        rv = l3__intf_rif__create(second_unit, &intf);
        ingress_intf = intf.rif;
        if (rv != BCM_E_NONE) {
            printf("Error, in l3__intf_rif__create\n");
            return rv;
        }
    }

    /* create the egress-side RIF the routed packet will use */
    intf.vsi = mpls_lsr_info_1.eg_vid;
    rv = l3__intf_rif__create(unit, &intf);
    ingress_intf = intf.rif;
    if (rv != BCM_E_NONE) {
        printf("Error, in l3__intf_rif__create\n");
        return rv;
    }
    if (second_unit >= 0)
    {
        rv = l3__intf_rif__create(second_unit, &intf);
        ingress_intf = intf.rif;
        if (rv != BCM_E_NONE) {
            printf("Error, in l3__intf_rif__create\n");
            return rv;
        }
    }

    /* set tunnel over this l3 interface: packets forwarded to it are encapsulated with two labels */
    bcm_mpls_egress_label_t_init(&label_array[0]);
    bcm_mpls_egress_label_t_init(&label_array[1]);

    label_array[0].flags = BCM_MPLS_EGRESS_LABEL_TTL_DECREMENT;
    label_array[1].flags = BCM_MPLS_EGRESS_LABEL_TTL_DECREMENT;
    if (!is_device_or_above(unit, ARAD_PLUS) || mpls_pipe_mode_exp_set) {
        /* pipe EXP model: explicit EXP values */
        label_array[0].exp = 2;
        label_array[1].exp = 4;
        if (is_device_or_above(unit, JERICHO2)) {
            label_array[0].egress_qos_model.egress_qos = bcmQosEgressModelPipeNextNameSpace;
            label_array[1].egress_qos_model.egress_qos = bcmQosEgressModelPipeNextNameSpace;
        } else {
            label_array[0].flags |= BCM_MPLS_EGRESS_LABEL_EXP_SET;
            label_array[1].flags |= BCM_MPLS_EGRESS_LABEL_EXP_SET;
        }
    } else {
        /* uniform EXP model: inherit EXP */
        if (is_device_or_above(unit, JERICHO2)) {
            label_array[0].egress_qos_model.egress_qos = bcmQosEgressModelUniform;
            label_array[1].egress_qos_model.egress_qos = bcmQosEgressModelUniform;
        } else {
            label_array[0].flags |= BCM_MPLS_EGRESS_LABEL_EXP_COPY;
            label_array[1].flags |= BCM_MPLS_EGRESS_LABEL_EXP_COPY;
        }
    }
    /* pipe TTL model: explicit TTL values below */
    if (is_device_or_above(unit, JERICHO2)) {
        label_array[0].egress_qos_model.egress_ttl = bcmQosEgressModelPipeMyNameSpace;
        label_array[1].egress_qos_model.egress_ttl = bcmQosEgressModelPipeMyNameSpace;
    } else {
        label_array[0].flags |= BCM_MPLS_EGRESS_LABEL_TTL_SET;
        label_array[1].flags |= BCM_MPLS_EGRESS_LABEL_TTL_SET;
    }

    label_array[0].label = vswitch_vpls_info_1.in_tunnel_label;
    label_array[0].ttl = 20;
    label_array[1].label = vswitch_vpls_info_1.out_tunnel_label;
    label_array[1].ttl = 40;

    if (!is_device_or_above(unit, JERICHO2)) {
        label_array[1].l3_intf_id = ingress_intf;
        rv = bcm_mpls_tunnel_initiator_create(unit, 0, 2, label_array);
    } else {
        /** In JR2, l3_intf_id is used for next-outlif. It must be LIF type.*/
        label_array[1].l3_intf_id = 0;
        /** To avoid failing legacy tests, we call the sand API in JR2 only.*/
        rv = sand_mpls_tunnel_initiator_create(unit, 0, 2, label_array);
    }
    if (rv != BCM_E_NONE) {
        printf("Error, in bcm_mpls_tunnel_initiator_create\n");
        return rv;
    }
    /* publish the tunnel id of the first (inner) tunnel entry */
    mpls_tunnel_encap_id1 = ingress_intf = label_array[0].tunnel_id;
    vswitch_vpls_info_1.tunnel_id = ingress_intf;

    if (second_unit >= 0) {
        if (!is_device_or_above(unit, JERICHO2)) {
            rv = bcm_mpls_tunnel_initiator_create(second_unit, 0, 2, label_array);
        } else {
            /** To avoid failing legacy tests, we call the sand API in JR2 only.*/
            rv = sand_mpls_tunnel_initiator_create(second_unit, 0, 2, label_array);
        }
        if (rv != BCM_E_NONE) {
            printf("Error, in bcm_mpls_tunnel_initiator_create\n");
            return rv;
        }
    }

    /* create egress object (FEC + ARP) pointing at the tunnel/interface */
    if (second_unit >= 0)
    {
        egress_intf = 0;
        encap_id = 0;
        create_l3_egress_s l3eg;
        l3eg.out_tunnel_or_rif = ingress_intf;
        sal_memcpy(l3eg.next_hop_mac_addr, mpls_lsr_info_1.next_hop_mac, 6);
        l3eg.vlan = mpls_lsr_info_1.eg_vid;
        l3eg.arp_encap_id = encap_id;
        l3eg.fec_id = egress_intf;
        l3eg.out_gport = mpls_lsr_info_1.eg_port;
        rv = l3__egress__create(unit,&l3eg);
        if (rv != BCM_E_NONE) {
            printf("Error, in l3__egress__create\n");
            return rv;
        }
        /* mirror on the second unit with the same allocated ids */
        l3eg.allocation_flags = BCM_L3_WITH_ID;
        rv = l3__egress__create(second_unit, &l3eg);
        if (rv != BCM_E_NONE) {
            printf("Error, in create_l3_egress\n");
            return rv;
        }
        encap_id = l3eg.arp_encap_id;
        egress_intf = l3eg.fec_id;
    }
    else
    {
        create_l3_egress_s l3eg;
        egress_intf = 0;
        encap_id = 0;
        l3eg.out_tunnel_or_rif = ingress_intf;
        sal_memcpy(l3eg.next_hop_mac_addr, mpls_lsr_info_1.next_hop_mac, 6);
        l3eg.vlan = mpls_lsr_info_1.eg_vid;
        l3eg.arp_encap_id = encap_id;
        l3eg.fec_id = egress_intf;
        l3eg.out_gport = mpls_lsr_info_1.eg_port;
        rv = l3__egress__create(unit,&l3eg);
        if (rv != BCM_E_NONE) {
            printf("Error, in l3__egress__create\n");
            return rv;
        }
        encap_id = l3eg.arp_encap_id;
        egress_intf = l3eg.fec_id;
    }
    mpls_lsr_info_1.encap_id = encap_id;

    /* add ILM entries to POP the first tunnel pair's labels */
    label_for_pop_entry_out = vswitch_vpls_info_1.out_tunnel_label;
    label_for_pop_entry_in = vswitch_vpls_info_1.in_tunnel_label;
    rv = mpls_add_pop_entry(unit, label_for_pop_entry_out);
    if (rv != BCM_E_NONE) {
        printf("Error, in mpls_add_pop_entry\n");
        return rv;
    }
    if (second_unit >= 0)
    {
        rv = mpls_add_pop_entry(second_unit,label_for_pop_entry_out);
        if (rv != BCM_E_NONE) {
            printf("Error, in mpls_add_pop_entry on second unit\n");
            return rv;
        }
    }
    rv = mpls_add_pop_entry(unit,label_for_pop_entry_in);
    if (rv != BCM_E_NONE) {
        printf("Error, in mpls_add_pop_entry\n");
        return rv;
    }
    if (second_unit >= 0)
    {
        rv = mpls_add_pop_entry(second_unit,label_for_pop_entry_in);
        if (rv != BCM_E_NONE) {
            printf("Error, in mpls_add_pop_entry\n");
            return rv;
        }
    }
    vswitch_vpls_shared_info_1.egress_intf = egress_intf;

    /* if extend example: repeat the whole flow for the second tunnel pair (PWE-2) */
    if (extend_example) {
        label_for_pop_entry_out = vswitch_vpls_info_2.out_tunnel_label;
        label_for_pop_entry_in = vswitch_vpls_info_2.in_tunnel_label;
        rv = mpls_add_pop_entry(unit,label_for_pop_entry_out);
        if (rv != BCM_E_NONE) {
            printf("Error, in mpls_add_pop_entry2\n");
            return rv;
        }
        if (second_unit >= 0)
        {
            rv = mpls_add_pop_entry(second_unit,label_for_pop_entry_out);
            if (rv != BCM_E_NONE) {
                printf("Error, in mpls_add_pop_entry2 second unit\n");
                return rv;
            }
        }
        rv = mpls_add_pop_entry(unit,label_for_pop_entry_in);
        if (rv != BCM_E_NONE) {
            printf("Error, in mpls_add_pop_entry2\n");
            return rv;
        }
        if (second_unit >= 0)
        {
            rv = mpls_add_pop_entry(second_unit,label_for_pop_entry_in);
            if (rv != BCM_E_NONE) {
                printf("Error, in mpls_add_pop_entry2\n");
                return rv;
            }
        }

        /* L2 termination RIF for the second pair */
        intf.vsi = mpls_lsr_info_1.in_vid;
        intf.my_global_mac = mpls_lsr_info_1.my_mac;
        intf.my_lsb_mac = mpls_lsr_info_1.my_mac;
        rv = l3__intf_rif__create(unit, &intf);
        ingress_intf = intf.rif;
        if (rv != BCM_E_NONE) {
            printf("Error, in l3__intf_rif__create\n");
            return rv;
        }
        if (second_unit >= 0)
        {
            rv = l3__intf_rif__create(second_unit, &intf);
            ingress_intf = intf.rif;
            if (rv != BCM_E_NONE) {
                printf("Error, in l3__intf_rif__create\n");
                return rv;
            }
        }

        /* second tunnel initiator: same QOS/TTL models, PWE-2 labels */
        bcm_mpls_egress_label_t_init(&label_array[0]);
        bcm_mpls_egress_label_t_init(&label_array[1]);

        label_array[0].flags = BCM_MPLS_EGRESS_LABEL_TTL_DECREMENT;
        label_array[1].flags = BCM_MPLS_EGRESS_LABEL_TTL_DECREMENT;
        if (!is_device_or_above(unit, ARAD_PLUS) || mpls_pipe_mode_exp_set) {
            label_array[0].exp = 2;
            label_array[1].exp = 4;
            if (is_device_or_above(unit, JERICHO2)) {
                label_array[0].egress_qos_model.egress_qos = bcmQosEgressModelPipeNextNameSpace;
                label_array[1].egress_qos_model.egress_qos = bcmQosEgressModelPipeNextNameSpace;
            } else {
                label_array[0].flags |= BCM_MPLS_EGRESS_LABEL_EXP_SET;
                label_array[1].flags |= BCM_MPLS_EGRESS_LABEL_EXP_SET;
            }
        } else {
            if (is_device_or_above(unit, JERICHO2)) {
                label_array[0].egress_qos_model.egress_qos = bcmQosEgressModelUniform;
                label_array[1].egress_qos_model.egress_qos = bcmQosEgressModelUniform;
            } else {
                label_array[0].flags |= BCM_MPLS_EGRESS_LABEL_EXP_COPY;
                label_array[1].flags |= BCM_MPLS_EGRESS_LABEL_EXP_COPY;
            }
        }
        if (is_device_or_above(unit, JERICHO2)) {
            label_array[0].egress_qos_model.egress_ttl = bcmQosEgressModelPipeMyNameSpace;
            label_array[1].egress_qos_model.egress_ttl = bcmQosEgressModelPipeMyNameSpace;
        } else {
            label_array[0].flags |= BCM_MPLS_EGRESS_LABEL_TTL_SET;
            label_array[1].flags |= BCM_MPLS_EGRESS_LABEL_TTL_SET;
        }

        label_array[0].label = vswitch_vpls_info_2.in_tunnel_label;
        label_array[0].ttl = 20;
        label_array[1].label = vswitch_vpls_info_2.out_tunnel_label;
        label_array[1].ttl = 40;

        if (!is_device_or_above(unit, JERICHO2)) {
            label_array[1].l3_intf_id = ingress_intf;
            rv = bcm_mpls_tunnel_initiator_create(unit, 0, 2, label_array);
        } else {
            /** In JR2, l3_intf_id is used for next-outlif. It must be LIF type.*/
            label_array[1].l3_intf_id = 0;
            /** To avoid failing legacy tests, we call the sand API in JR2 only.*/
            rv = sand_mpls_tunnel_initiator_create(unit, 0, 2, label_array);
        }
        /* FIX: this create's return value was previously not checked */
        if (rv != BCM_E_NONE) {
            printf("Error, in bcm_mpls_tunnel_initiator_create\n");
            return rv;
        }
        second_mpls_intf = label_array[0].tunnel_id;
        vswitch_vpls_info_2.tunnel_id = second_mpls_intf;

        if (second_unit >= 0) {
            if (!is_device_or_above(unit, JERICHO2)) {
                rv = bcm_mpls_tunnel_initiator_create(second_unit, 0, 2, label_array);
            } else {
                /** To avoid failing legacy tests, we call the sand API in JR2 only.*/
                rv = sand_mpls_tunnel_initiator_create(second_unit, 0, 2, label_array);
            }
            if (rv != BCM_E_NONE) {
                printf("Error, in bcm_mpls_tunnel_initiator_create\n");
                return rv;
            }
        }

        /* create egress object pointing at the second tunnel/interface */
        mpls_tunnel_encap_id2 = ingress_intf = label_array[0].tunnel_id;
        if (second_unit >= 0)
        {
            create_l3_egress_s l3eg;
            egress_intf = 0;
            encap_id = 0;
            l3eg.out_tunnel_or_rif = ingress_intf;
            sal_memcpy(l3eg.next_hop_mac_addr,mpls_lsr_info_1.next_hop_mac, 6);
            l3eg.vlan = mpls_lsr_info_1.eg_vid;
            l3eg.arp_encap_id = encap_id;
            l3eg.fec_id = egress_intf;
            l3eg.out_gport = mpls_lsr_info_1.eg_port;
            rv = l3__egress__create(unit,&l3eg);
            if (rv != BCM_E_NONE) {
                printf("Error, in l3__egress__create\n");
                return rv;
            }
            l3eg.allocation_flags = BCM_L3_WITH_ID;
            rv = l3__egress__create(second_unit,&l3eg);
            if (rv != BCM_E_NONE) {
                printf("Error, in l3__egress__create\n");
                return rv;
            }
            encap_id = l3eg.arp_encap_id;
            egress_intf = l3eg.fec_id;
        }
        else
        {
            create_l3_egress_s l3eg;
            egress_intf = 0;
            encap_id = 0;
            l3eg.out_tunnel_or_rif = ingress_intf;
            sal_memcpy(l3eg.next_hop_mac_addr, mpls_lsr_info_1.next_hop_mac, 6);
            l3eg.vlan = mpls_lsr_info_1.eg_vid;
            l3eg.arp_encap_id = encap_id;
            l3eg.fec_id = egress_intf;
            l3eg.out_gport = mpls_lsr_info_1.eg_port;
            rv = l3__egress__create(unit,&l3eg);
            if (rv != BCM_E_NONE) {
                printf("Error, in l3__egress__create\n");
                return rv;
            }
            encap_id = l3eg.arp_encap_id;
            egress_intf = l3eg.fec_id;
        }
        vswitch_vpls_shared_info_1.egress_intf2 = egress_intf;
    }
    return rv;
}
/* add switch entry to perform pop
*/
/* Install an ILM entry that POPs MPLS label in_label on its way up the stack.
 * TTL is decremented; TTL/EXP follow the uniform (inherit) model. In indexed-
 * label VTT modes the label is installed at termination index 1. The created
 * (label, tunnel_id) pair is recorded in vswitch_vpls_pairs for later lookup. */
int
mpls_add_pop_entry(int unit, int in_label)
{
    bcm_mpls_tunnel_switch_t pop_entry;
    int rv;

    /* Refresh the cached termination-mode soc properties (new VTT mode). */
    mpls_termination_label_index_enable = soc_property_get(unit , "mpls_termination_label_index_enable",0);
    mpls_termination_label_index_database_mode = soc_property_get(unit , spn_BCM886XX_MPLS_TERMINATION_DATABASE_MODE, 0);

    bcm_mpls_tunnel_switch_t_init(&pop_entry);

    pop_entry.action = BCM_MPLS_SWITCH_ACTION_POP;
    /* TTL decrement has to be present. */
    pop_entry.flags = BCM_MPLS_SWITCH_TTL_DECREMENT;
    /* Uniform model: inherit TTL and EXP. Legacy devices express it through
     * flags; JR2 expresses it through the egress qos model. */
    if (!is_device_or_above(unit, JERICHO2)) {
        pop_entry.flags |= BCM_MPLS_SWITCH_OUTER_TTL|BCM_MPLS_SWITCH_OUTER_EXP;
    } else {
        pop_entry.egress_label.egress_qos_model.egress_qos = bcmQosEgressModelUniform;
        pop_entry.egress_label.egress_qos_model.egress_ttl = bcmQosEgressModelUniform;
    }

    /* Incoming label; indexed-label mode installs it at index 1. */
    pop_entry.label = in_label;
    if (mpls_termination_label_index_enable) {
        BCM_MPLS_INDEXED_LABEL_SET(pop_entry.label, in_label, 1);
    }

    /* Ingress QOS remark profile (valid when cint_qos.c is sourced). */
    pop_entry.qos_map_id = qos_map_id_mpls_ingress_get(unit);

    /* No egress attributes are needed for a plain POP. */
    rv = bcm_mpls_tunnel_switch_create(unit,&pop_entry);
    if (rv != BCM_E_NONE) {
        printf("Error, in bcm_mpls_tunnel_switch_create\n");
        return rv;
    }

    /* Record the created pair for later lookup/teardown. */
    vswitch_vpls_pairs.id1 = pop_entry.tunnel_id ;
    vswitch_vpls_pairs.label1 = pop_entry.label ;

    return rv;
}
/*
 * Fills the appropriate utility structure with the common PWE parameters.
 * When the global ingress_egress_separate_ports is set, ingress_port and
 * egress_port are populated (split ingress/egress LIF model); otherwise
 * default_port is populated (joined ingress+egress model).
 * port_num_call - indicates which port call is calling the function
 *                 (0 = first PWE port, 1 = second PWE port).
 * Fix: removed the unused local 'egress_label_inheritance_flags'.
 */
void
vswitch_vpls_common_port_properties_init(int unit, mpls_port__ingress_only_info_s *ingress_port, mpls_port__egress_only_info_s *egress_port, mpls_port_utils_s *default_port, int port_num_call){
    /* Incoming VC label; only applied below when port_num_call != 0. */
    int vc_label = vswitch_vpls_info_2.in_vc_label;
    /* Egress VC label comes from the info struct of the calling port. */
    int egress_vc_label = port_num_call ? vswitch_vpls_info_2.eg_vc_label : vswitch_vpls_info_1.eg_vc_label;
    if(ingress_egress_separate_ports) {
        if (ingress_failover) {
            /* 1+1 ingress protection: bind the LIF to the failover group. */
            ingress_port->failover_id = ingress_failover;
            ingress_port->failover_port_id = protected_pw_port_id;
            BCM_MULTICAST_L2_SET(ingress_port->failover_mc_group,ingress_protection_group_id);
        }
        ingress_port->ingress_matching_criteria = BCM_MPLS_PORT_MATCH_LABEL;
        ingress_port->port = port_num_call ? vswitch_vpls_info_2.pwe_in_port : vswitch_vpls_info_1.pwe_in_port;
        ingress_port->vpn = vswitch_vpls_shared_info_1.vpn;
        if(port_num_call) {
            ingress_port->ingress_pwe_label = vc_label;
        }
        /* egress label: in ingress-only it is used for learning;
         * in egress-only it is used for PWE encapsulation information
         * in the EEDB multicast case.
         */
        ingress_port->learn_egress_label.label = egress_vc_label;
        ingress_port->learn_egress_label.ttl = 10;
        ingress_port->learn_egress_label.flags = BCM_MPLS_EGRESS_LABEL_TTL_SET;
        egress_port->egress_label.label = egress_vc_label;
        egress_port->egress_label.ttl = 10;
        egress_port->egress_label.flags = BCM_MPLS_EGRESS_LABEL_TTL_SET;
        default_port->failover_port_id = protected_pw_port_id;
        BCM_MULTICAST_L2_SET(default_port->failover_mc_group,ingress_protection_group_id);
    } else {
        default_port->flags2 |= BCM_MPLS_PORT2_INGRESS_WIDE;
        default_port->ingress_matching_criteria = BCM_MPLS_PORT_MATCH_LABEL;
        default_port->port = port_num_call ? vswitch_vpls_info_2.pwe_in_port : vswitch_vpls_info_1.pwe_in_port;
        default_port->vpn = vswitch_vpls_shared_info_1.vpn;
        if(port_num_call) {
            default_port->ingress_pwe_label = vc_label;
        }
        /* egress label: used for learning on ingress and for PWE
         * encapsulation information on egress.
         */
        default_port->egress_pwe_label = egress_vc_label;
        default_port->ttl = 10;
        default_port->egress_label_flags =0;
        if (ingress_failover) {
            default_port->failover_id = ingress_failover;
            default_port->failover_port_id = protected_pw_port_id;
            BCM_MULTICAST_L2_SET(default_port->failover_mc_group,ingress_protection_group_id);
        }
    }
}
/*
 * Creates ingress-only and egress-only PWE mpls ports using the parameters
 * prepared by vswitch_vpls_common_port_properties_init(). Both LIFs share
 * the same (WITH_ID) gport and encap ids. Updates the mpls_lsr_info_1 and
 * protected_pw_port_id globals as a side effect.
 * port_num_call - indicates which port call is calling the function
 *                 (0 = first PWE port, 1 = second PWE port).
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_ingress_egress_separate_ports_create(int unit_or_second, mpls_port__ingress_only_info_s *ingress_port, mpls_port__egress_only_info_s *egress_port, int port_num_call ){
    int rv;
    /* Ingress_only */
    /* Learning interface: the MPLS tunnel created for the matching network side. */
    ingress_port->learn_egress_if = port_num_call ? vswitch_vpls_shared_info_1.egress_intf2 : vswitch_vpls_shared_info_1.egress_intf;
    if(!port_num_call) {
        ingress_port->ingress_pwe_label = vswitch_vpls_info_1.in_vc_label;
    }
    /* WITH_ID flags is set in the utility, in case of ingress-egress configuration.
     * Per-call fixed gport/encap ids (0x...5000 first port, 0x...5002 second). */
    ingress_port->mpls_port_id = port_num_call ? 0x18805002 : 0x18805000;
    ingress_port->encap_id = port_num_call ? 0x5002 : 0x5000;
    /* 1+1 ingress protection attributes (globals set by earlier failover creation). */
    ingress_port->failover_id = ingress_failover;
    ingress_port->failover_port_id = protected_pw_port_id;
    ingress_port->failover_mc_group = BCM_MULTICAST_L2_GET(ingress_protection_group_id);
    rv = mpls_port__ingress_only_create(unit_or_second, ingress_port);
    if (rv != BCM_E_NONE) {
        printf("Error, mpls_port__ingress_only_create \n");
        return rv;
    }
    if(verbose1){
        bcm_gport_t m_port_id = ingress_port->mpls_port_id;
        bcm_if_t enc_id = ingress_port->encap_id;
        printf("INGRESS: add port 0x%08x to vpn \n\r",m_port_id);
        printf("ENCAP ID: %d \n\r",enc_id);
    }
    if(!port_num_call) {
        mpls_lsr_info_1.mpls_port_id = ingress_port->mpls_port_id;
    }
    /* Egress_only */
    /* The egress LIF reuses the gport/encap ids allocated for the ingress LIF. */
    egress_port->mpls_port_id = ingress_port->mpls_port_id;
    egress_port->encap_id = ingress_port->encap_id;
    egress_port->egress_tunnel_if = vswitch_vpls_info_1.tunnel_id;
    egress_port->encap_optimized = 0;
    rv = mpls_port__egress_only_create(unit_or_second, egress_port);
    if (rv != BCM_E_NONE) {
        printf("Error, mpls_port__egress_only_create \n");
        return rv;
    }
    /* Remember the created gport for later protection configuration. */
    protected_pw_port_id = egress_port->mpls_port_id;
    return rv;
}
/*
 * Creates a joined (ingress+egress) PWE mpls port with the parameters
 * prepared by vswitch_vpls_common_port_properties_init().
 * port_num_call - indicates which port call is calling the function
 *                 (0 = first PWE port, 1 = second PWE port).
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_default_port_create(int unit_or_second, mpls_port_utils_s *default_mpls_port, int port_num_call){
    int rv;
    /* For the first port call, the incoming VC label comes from info_1. */
    if (!port_num_call) {
        default_mpls_port->ingress_pwe_label = vswitch_vpls_info_1.in_vc_label;
    }
    /* Pick the MPLS tunnel created for the corresponding network side. */
    if (port_num_call) {
        default_mpls_port->egress_mpls_tunnel_id = vswitch_vpls_shared_info_1.egress_intf2;
    } else {
        default_mpls_port->egress_mpls_tunnel_id = vswitch_vpls_shared_info_1.egress_intf;
    }
    rv = mpls_port__mp_create_ingress_egress_mpls_port(unit_or_second, default_mpls_port);
    if (rv != BCM_E_NONE) {
        printf("Error, mpls_port__mp_create_ingress_egress_mpls_port \n");
        return rv;
    }
    /* Remember the created gport for later protection configuration. */
    protected_pw_port_id = default_mpls_port->mpls_port_id;
    return rv;
}
/*
 * create mpls-port (PWE network port #1).
 * Creates the PWE either as separate ingress/egress LIFs
 * (ingress_egress_separate_ports != 0) or as one joined mpls port,
 * records the resulting encap/gport ids in the example globals, and adds
 * the egress port to the flooding and ingress-protection multicast groups.
 * Fix: mpls_port_1 was used uninitialized in the two-device path.
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_add_network_port_1(int unit, int second_unit, bcm_gport_t *port_id){
    int rv;
    bcm_mpls_port_t mpls_port_1;
    mpls_port__ingress_only_info_s ingress_port;
    mpls_port__egress_only_info_s egress_port;
    mpls_port_utils_s default_mpls_port;
    int unit_or_second = unit;
    int is_two_dev = 0;
    int port;
    bcm_gport_t egress_mpls_port_id;
    bcm_if_t last_encap_id;
    if (second_unit >= 0)
    {
        unit_or_second = second_unit;
        is_two_dev = 1;
    }
    /* Initialize the common properties of ingress/egress/joined PWE ports */
    vswitch_vpls_common_port_properties_init(unit_or_second, &ingress_port, &egress_port, &default_mpls_port, 0/*Which port calls the function*/);
    if (ingress_egress_separate_ports) {
        rv = vswitch_vpls_ingress_egress_separate_ports_create(unit_or_second,&ingress_port,&egress_port, 0/*Which port calls the function*/);
        if (rv != BCM_E_NONE) {
            printf("Error, vswitch_vpls_ingress_egress_separate_ports_create\n");
            return rv;
        }
        /* handle of the created gport */
        *port_id = ingress_port.mpls_port_id;
        printf("EGRESS: add port 0x%08x to vpn \n\r",egress_port.mpls_port_id);
        egress_mpls_port_id = egress_port.mpls_port_id;
        exported_egress_mpls_port_id = egress_port.mpls_port_id;
        if (is_device_or_above(unit, JERICHO2)) {
            /** In JR2, the egress-only subtype must be encoded into the gport
             *  before it is added to an MC group. */
            BCM_GPORT_SUB_TYPE_LIF_SET(exported_egress_mpls_port_id, BCM_GPORT_SUB_TYPE_LIF_EXC_EGRESS_ONLY, egress_mpls_port_id);
            BCM_GPORT_MPLS_PORT_ID_SET(exported_egress_mpls_port_id, exported_egress_mpls_port_id);
        }
    } else {
        rv = vswitch_vpls_default_port_create(unit_or_second,&default_mpls_port,0/*Which port calls the function*/);
        if (rv != BCM_E_NONE) {
            printf("Error, vswitch_vpls_default_port_create\n");
            return rv;
        }
        if(verbose1){
            printf("add port 0x%08x to vpn \n\r",default_mpls_port.mpls_port_id);
        }
        vswitch_vpls_pairs.id2 = default_mpls_port.mpls_port_id ;
        vswitch_vpls_pairs.label2 = default_mpls_port.ingress_pwe_label ;
        mpls_lsr_info_1.mpls_port_id = default_mpls_port.mpls_port_id;
        /* handle of the created gport */
        *port_id = egress_mpls_port_id = default_mpls_port.mpls_port_id;
        if (is_device_or_above(unit, JERICHO2)) {
            /** In JR2, the egress-only subtype must be encoded into the gport
             *  before it is added to an MC group. */
            BCM_GPORT_SUB_TYPE_LIF_SET(egress_mpls_port_id, BCM_GPORT_SUB_TYPE_LIF_EXC_EGRESS_ONLY, egress_mpls_port_id);
            BCM_GPORT_MPLS_PORT_ID_SET(egress_mpls_port_id, egress_mpls_port_id);
        }
    }
    last_encap_id = ingress_egress_separate_ports ? egress_port.encap_id : default_mpls_port.encap_id;
    BCM_L3_ITF_SET(pwe_encap_id, BCM_L3_ITF_TYPE_LIF, last_encap_id);
    if (is_device_or_above(unit, JERICHO2)) {
        /**In JR2, we record the egress mpls_port_id for using later.*/
        pwe_encap_id = egress_mpls_port_id;
    }
    if (is_two_dev)
    {
        /* Fix: the struct was previously used uninitialized ('flags |=' on
         * garbage); zero it first. NOTE(review): no other field is populated
         * before the add below - confirm the intended two-device contents. */
        bcm_mpls_port_t_init(&mpls_port_1);
        mpls_port_1.flags |= BCM_MPLS_PORT_WITH_ID;
        if (is_device_or_above(unit, JERICHO2)) {
            rv = bcm_mpls_port_add(unit, vswitch_vpls_shared_info_1.vpn, &mpls_port_1);
        } else {
            rv = sand_mpls_port_add(unit, vswitch_vpls_shared_info_1.vpn, &mpls_port_1);
        }
        if (rv != BCM_E_NONE) {
            printf("Error, bcm_mpls_port_add two devices\n");
            return rv;
        }
    }
    mpls_port_encap_id1 = last_encap_id;
    if(verbose1){
        printf("mpls_port_encap_id1: 0x%08x\n\r",mpls_port_encap_id1);
    }
    /* update Multicast to have the added port */
    port = vswitch_vpls_info_1.pwe_in_port;
    /* Adding egress mpls port to multicast because when egress and ingress are
       configured separately the multicast should point to the outlif */
    rv = multicast__mpls_port_add(unit_or_second, vswitch_vpls_shared_info_1.vpn, port, egress_mpls_port_id, 0);
    if (rv != BCM_E_NONE) {
        printf("Error, multicast__mpls_port_add\n");
        return rv;
    }
    /*Adding port to protection group*/
    rv = multicast__mpls_port_add(unit_or_second, ingress_protection_group_id, port, egress_mpls_port_id, 0);
    if (rv != BCM_E_NONE) {
        printf("Adding port to protection group Error, multicast__mpls_port_add\n");
        return rv;
    }
    if (is_two_dev)
    {
        rv = multicast__mpls_port_add(unit, vswitch_vpls_shared_info_1.vpn, port, egress_mpls_port_id, egress_mc);
        if (rv != BCM_E_NONE) {
            printf("Error, multicast__mpls_port_add\n");
            return rv;
        }
        rv = multicast__mpls_port_add(unit, ingress_protection_group_id, port, egress_mpls_port_id, egress_mc);
        if (rv != BCM_E_NONE) {
            printf("Adding port to protection group Error, multicast__mpls_port_add\n");
            return rv;
        }
    }
    if(verbose1){
        printf("add port 0x%08x to multicast \n\r",egress_mpls_port_id);
    }
    return rv;
}
/*
 * Creates PWE network port #2 (separate or joined LIF model, mirroring
 * vswitch_vpls_add_network_port_1 for the second PWE), records the encap id
 * in mpls_port_encap_id2 and adds the egress port to the flooding and
 * ingress-protection multicast groups.
 * Fix: removed unused locals (mpls_port_1, vc_label).
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_add_network_port_2(int unit, bcm_gport_t *port_id){
    int rv;
    mpls_port__ingress_only_info_s ingress_port;
    mpls_port__egress_only_info_s egress_port;
    mpls_port_utils_s default_mpls_port;
    bcm_gport_t egress_mpls_port_id;
    vswitch_vpls_common_port_properties_init(unit, &ingress_port, &egress_port, &default_mpls_port, 1/*Which port calls the function*/);
    if (ingress_egress_separate_ports) {
        rv = vswitch_vpls_ingress_egress_separate_ports_create(unit,&ingress_port,&egress_port, 1/*Which port calls the function*/);
        if (rv != BCM_E_NONE) {
            printf("Error, vswitch_vpls_ingress_egress_separate_ports_create\n");
            return rv;
        }
        /* handle of the created gport */
        *port_id = ingress_port.mpls_port_id;
        printf("EGRESS: add port 0x%08x to vpn \n\r",egress_port.mpls_port_id);
        egress_mpls_port_id = egress_port.mpls_port_id;
        if (is_device_or_above(unit, JERICHO2)) {
            /** In JR2, the egress-only subtype must be encoded into the gport
             *  before it is added to an MC group. */
            BCM_GPORT_SUB_TYPE_LIF_SET(egress_mpls_port_id, BCM_GPORT_SUB_TYPE_LIF_EXC_EGRESS_ONLY, egress_mpls_port_id);
            BCM_GPORT_MPLS_PORT_ID_SET(egress_mpls_port_id, egress_mpls_port_id);
        }
    } else {
        rv = vswitch_vpls_default_port_create(unit,&default_mpls_port,1/*Which port calls the function*/);
        if (rv != BCM_E_NONE) {
            printf("Error, vswitch_vpls_default_port_create\n");
            return rv;
        }
        if(verbose1){
            printf("add port 0x%08x to vpn \n\r",default_mpls_port.mpls_port_id);
        }
        /* handle of the created gport */
        *port_id = egress_mpls_port_id = default_mpls_port.mpls_port_id;
        if (is_device_or_above(unit, JERICHO2)) {
            /** In JR2, the egress-only subtype must be encoded into the gport
             *  before it is added to an MC group. */
            BCM_GPORT_SUB_TYPE_LIF_SET(egress_mpls_port_id, BCM_GPORT_SUB_TYPE_LIF_EXC_EGRESS_ONLY, egress_mpls_port_id);
            BCM_GPORT_MPLS_PORT_ID_SET(egress_mpls_port_id, egress_mpls_port_id);
        }
    }
    mpls_port_encap_id2 = ingress_egress_separate_ports ? egress_port.encap_id : default_mpls_port.encap_id;
    if(verbose1){
        printf("mpls_port_encap_id2: 0x%08x\n\r",mpls_port_encap_id2);
    }
    /* update Multicast to have the added port */
    rv = multicast__mpls_port_add(unit, vswitch_vpls_shared_info_1.vpn, vswitch_vpls_info_2.pwe_in_port , egress_mpls_port_id, egress_mc);
    if (rv != BCM_E_NONE) {
        printf("Error, multicast__mpls_port_add\n");
        return rv;
    }
    /*Adding port to protection group*/
    rv = multicast__mpls_port_add(unit, ingress_protection_group_id, vswitch_vpls_info_2.pwe_in_port , egress_mpls_port_id, egress_mc);
    if (rv != BCM_E_NONE) {
        printf("Adding port to protection group Error, multicast__mpls_port_add\n");
        return rv;
    }
    if(verbose1){
        printf("add port 0x%08x to multicast \n\r",egress_mpls_port_id);
    }
    return rv;
}
/*
 * create vlan-port
 * vlan-port: logical interface identified by (port-vlan-vlan).
 * Creates AC port #1, attaches it to the vswitch, optionally mirrors it
 * (WITH_ID) on a second device, and adds it to the flooding MC group.
 * Fix: corrected the error message after the second bcm_vlan_port_create
 * (previously said "bcm_vlan_port_add").
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_add_access_port_1(int unit, int second_unit, bcm_gport_t *port_id){
    int rv;
    bcm_vlan_port_t vlan_port_1;
    int unit_or_second = unit;
    int is_two_dev = 0;
    int port;
    port = vswitch_vpls_info_1.ac_in_port;
    if (second_unit >= 0)
    {
        unit_or_second = second_unit;
        is_two_dev = 1;
    }
    /* add port, according to port_vlan_vlan */
    bcm_vlan_port_t_init(&vlan_port_1);
    /* set port attributes, key <port-vlan-vlan> */
    vlan_port_1.criteria = BCM_VLAN_PORT_MATCH_PORT_VLAN_STACKED;
    vlan_port_1.port = vswitch_vpls_info_1.ac_in_port;
    vlan_port_1.match_vlan = vswitch_vpls_info_1.ac_port1_outer_vlan;
    vlan_port_1.match_inner_vlan = vswitch_vpls_info_1.ac_port1_inner_vlan;
    /* In JR2 the egress vlans must be 0 at creation time (set below for EVE). */
    vlan_port_1.egress_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port1_outer_vlan;
    vlan_port_1.egress_inner_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port1_inner_vlan;
    vlan_port_1.flags = vswitch_vpls_info_1.ac_port1_flags;
    rv = bcm_vlan_port_create(unit, &vlan_port_1);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_vlan_port_create\n");
        return rv;
    }
    *port_id = vlan_port_1.vlan_port_id;
    /* egress_vlan and egress_inner_vlan will be used at eve */
    vlan_port_1.egress_vlan = vswitch_vpls_info_1.ac_port1_outer_vlan;
    vlan_port_1.egress_inner_vlan = vswitch_vpls_info_1.ac_port1_inner_vlan;
    vlan_port_encap_id1 = vlan_port_1.encap_id;
    if(verbose1){
        printf("vlan_port_encap_id1: 0x%08x\n\r",vlan_port_encap_id1);
    }
    /* In advanced vlan translation mode, bcm_vlan_port_create does not create ingress / egress
       action mapping. This is here to compensate. */
    if (advanced_vlan_translation_mode) {
        rv = vlan_translation_vlan_port_create_to_translation(unit, &vlan_port_1);
        if (rv != BCM_E_NONE) {
            printf("Error: vlan_translation_vlan_port_create_to_translation\n");
        }
    }
    rv = bcm_vswitch_port_add(unit, vswitch_vpls_shared_info_1.vpn, vlan_port_1.vlan_port_id);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_vswitch_port_add\n");
        return rv;
    }
    if (is_two_dev)
    {
        /* Mirror the AC on the second device, reusing the same gport id. */
        vlan_port_1.flags |= BCM_VLAN_PORT_WITH_ID;
        rv = bcm_vlan_port_create(second_unit, &vlan_port_1);
        if (rv != BCM_E_NONE) {
            printf("Error, bcm_vlan_port_create\n");
            return rv;
        }
        /* In advanced vlan translation mode, bcm_vlan_port_create does not create ingress / egress
           action mapping. This is here to compensate. */
        if (advanced_vlan_translation_mode) {
            rv = vlan_translation_vlan_port_create_to_translation(second_unit, &vlan_port_1);
            if (rv != BCM_E_NONE) {
                printf("Error: vlan_translation_vlan_port_create_to_translation\n");
            }
        }
        rv = bcm_vswitch_port_add(second_unit, vswitch_vpls_shared_info_1.vpn, vlan_port_1.vlan_port_id);
        if (rv != BCM_E_NONE) {
            printf("Error, bcm_vswitch_port_add\n");
            return rv;
        }
    }
    /* update Multicast to have the added port */
    rv = multicast__vlan_port_add(unit, vswitch_vpls_info_1.vpn_id, port , vlan_port_1.vlan_port_id, egress_mc);
    if (rv != BCM_E_NONE) {
        printf("Error, multicast__vlan_port_add\n");
        return rv;
    }
    if (is_two_dev)
    {
        rv = multicast__vlan_port_add(second_unit, vswitch_vpls_info_1.vpn_id, port , vlan_port_1.vlan_port_id, egress_mc);
        if (rv != BCM_E_NONE) {
            printf("Error, multicast__vlan_port_add\n");
            return rv;
        }
    }
    if(verbose1){
        printf("add port 0x%08x to multicast \n\r",vlan_port_1.vlan_port_id);
    }
    return rv;
}
/*
 * Creates AC port #2 (key <port, outer vlan, inner vlan>), attaches it to
 * the vswitch and adds it to the flooding MC group.
 * Fixes: removed stray double semicolons; corrected the error message after
 * bcm_vlan_port_create (previously said "bcm_vlan_port_add").
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_add_access_port_2(int unit, bcm_gport_t *port_id){
    int rv;
    bcm_vlan_port_t vlan_port_1;
    /* add port, according to port_vlan_vlan */
    bcm_vlan_port_t_init(&vlan_port_1);
    /* set port attributes, key <port-vlan-vlan> */
    vlan_port_1.criteria = BCM_VLAN_PORT_MATCH_PORT_VLAN_STACKED;
    vlan_port_1.port = vswitch_vpls_info_2.ac_in_port;
    vlan_port_1.match_vlan = vswitch_vpls_info_1.ac_port2_outer_vlan;
    vlan_port_1.match_inner_vlan = vswitch_vpls_info_1.ac_port2_inner_vlan;
    /* In JR2 the egress vlans must be 0 at creation time. */
    vlan_port_1.egress_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port2_outer_vlan;
    vlan_port_1.egress_inner_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port2_inner_vlan;
    vlan_port_1.flags = vswitch_vpls_info_1.ac_port2_flags;
    rv = bcm_vlan_port_create(unit, &vlan_port_1);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_vlan_port_create\n");
        return rv;
    }
    rv = bcm_vswitch_port_add(unit, vswitch_vpls_shared_info_1.vpn, vlan_port_1.vlan_port_id);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_vswitch_port_add\n");
        return rv;
    }
    /* In advanced vlan translation mode, bcm_vlan_port_create does not create ingress / egress
       action mapping. This is here to compensate. */
    if (advanced_vlan_translation_mode) {
        rv = vlan_translation_vlan_port_create_to_translation(unit, &vlan_port_1);
        if (rv != BCM_E_NONE) {
            printf("Error: vlan_translation_vlan_port_create_to_translation\n");
        }
    }
    vlan_port_encap_id2 = vlan_port_1.encap_id;
    if(verbose1){
        printf("vlan_port_encap_id2: 0x%08x\n\r",vlan_port_encap_id2);
    }
    *port_id = vlan_port_1.vlan_port_id;
    /* update Multicast to have the added port */
    rv = multicast__vlan_port_add(unit, vswitch_vpls_info_1.vpn_id, vswitch_vpls_info_2.ac_in_port , vlan_port_1.vlan_port_id, egress_mc);
    if (rv != BCM_E_NONE) {
        printf("Error, multicast__vlan_port_add\n");
        return rv;
    }
    if(verbose1){
        printf("add port 0x%08x to multicast \n\r",vlan_port_1.vlan_port_id);
    }
    return rv;
}
/*
 * Per-port TPID and VLAN initialization for the VPLS example:
 * configures TPIDs for the PWE and AC in-ports (JR1 only; JR2 uses the
 * system defaults), compensates for advanced vlan translation mode, and
 * opens the four AC VLANs (on both devices if second_unit >= 0).
 * extend_example - also initialize the second PWE/AC in-ports.
 * Fix: the function previously fell off the end without returning a value
 * (and rv could be read uninitialized); it now returns BCM_E_NONE on success.
 */
int
vswitch_vlan_init(int unit, int second_unit, int extend_example){
    int rv = BCM_E_NONE;
    int unit_or_second = second_unit;
    if (second_unit < 0)
    {
        unit_or_second = unit;
    }
    /* JR2 always operates in advanced vlan translation mode. */
    advanced_vlan_translation_mode |= is_device_or_above(unit, JERICHO2);
    /** Configure port TPIDs for JR1. For JR2, use system default configuration.*/
    if (!is_device_or_above(unit, JERICHO2)) {
        printf("vswitch_vlan_init %d\n", vswitch_vpls_info_1.vpn_id);
        /* VTT database modes 22/23 require double-tag lookup on the PWE port. */
        if ((mpls_termination_label_index_enable == 1) &&
            ((mpls_termination_label_index_database_mode == 22) || (mpls_termination_label_index_database_mode == 23))) {
            port_tpid_init(vswitch_vpls_info_1.pwe_in_port,1,1);
        } else {
            port_tpid_init(vswitch_vpls_info_1.pwe_in_port,1,0);
        }
        rv = port_tpid_set(unit_or_second);
        if (rv != BCM_E_NONE) {
            printf("Error, port_tpid_set\n");
            return rv;
        }
        port_tpid_init(vswitch_vpls_info_1.ac_in_port,1,1);
        rv = port_tpid_set(unit);
        if (rv != BCM_E_NONE) {
            printf("Error, port_tpid_set\n");
            return rv;
        }
    }
    /* In advanced vlan translation mode, the default ingress/egress actions and mapping
       are not configured. This is here to compensate. */
    if (advanced_vlan_translation_mode) {
        rv = vlan_translation_default_mode_init(unit);
        if (rv != BCM_E_NONE) {
            printf("Error, in vlan_translation_default_mode_init\n");
            return rv;
        }
    }
    /* Init vlan */
    vlan__init_vlan(unit,vswitch_vpls_info_1.ac_port1_outer_vlan);
    vlan__init_vlan(unit,vswitch_vpls_info_1.ac_port1_inner_vlan);
    vlan__init_vlan(unit,vswitch_vpls_info_1.ac_port2_outer_vlan);
    vlan__init_vlan(unit,vswitch_vpls_info_1.ac_port2_inner_vlan);
    if (second_unit >= 0)
    {
        vlan__init_vlan(second_unit,vswitch_vpls_info_1.ac_port1_outer_vlan);
        vlan__init_vlan(second_unit,vswitch_vpls_info_1.ac_port1_inner_vlan);
        vlan__init_vlan(second_unit,vswitch_vpls_info_1.ac_port2_outer_vlan);
        vlan__init_vlan(second_unit,vswitch_vpls_info_1.ac_port2_inner_vlan);
    }
    if (extend_example) {
        /** Configure port TPIDs for JR1. For JR2, use system default configuration.*/
        if (!is_device_or_above(unit, JERICHO2)) {
            /* define one more pwe_in_port and one more ac_in_port */
            if ((mpls_termination_label_index_enable == 1) &&
                ((mpls_termination_label_index_database_mode == 22) || (mpls_termination_label_index_database_mode == 23))) {
                port_tpid_init(vswitch_vpls_info_2.pwe_in_port,1,1);
            } else {
                port_tpid_init(vswitch_vpls_info_2.pwe_in_port,1,0);
            }
            rv = port_tpid_set(unit_or_second);
            if (rv != BCM_E_NONE) {
                printf("Error, port_tpid_set\n");
                return rv;
            }
            port_tpid_init(vswitch_vpls_info_2.ac_in_port,1,1);
            rv = port_tpid_set(unit);
            if (rv != BCM_E_NONE) {
                printf("Error, port_tpid_set\n");
                return rv;
            }
        }
        /* In advanced vlan translation mode, the default ingress/egress actions and mapping
           are not configured. This is here to compensate. */
        if (advanced_vlan_translation_mode) {
            rv = vlan_translation_default_mode_init(unit);
            if (rv != BCM_E_NONE) {
                printf("Error, in vlan_translation_default_mode_init\n");
                return rv;
            }
        }
    }
    /* Fix: always return a status. */
    return rv;
}
/*
 * Main flow of the VPLS 1+1 protection example:
 *   1. VLAN/TPID init.
 *   2. MPLS tunnel configuration.
 *   3. Protection MC group and VPN creation (also on second device if given).
 *   4. Ingress failover id creation.
 *   5. AC port #1/#2 and PWE network port #1/#2 creation.
 * second_unit < 0 means single-device operation.
 * Fix: removed unused locals (vpn_info, mpls_port_1, mpls_port_2, vpn, reps).
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_run(int unit, int second_unit, int extend_example){
    bcm_gport_t access_port_id2;
    bcm_gport_t network_port_id2;
    int rv;
    int unit_or_second = second_unit;
    vswitch_vlan_init(unit,second_unit,extend_example);
    if (second_unit < 0)
    {
        unit_or_second = unit;
    }
    printf("vswitch_vpls_run configure mpls tunnels\n");
    /* configure MPLS tunnels */
    rv = mpls_tunnels_config(unit, second_unit, extend_example);
    if (rv != BCM_E_NONE) {
        printf("Error, in mpls_tunnels_config\n");
        return rv;
    }
    /* create the ingress-protection multicast group */
    rv = mpls_port__vswitch_vpls_vpn_create__set(unit, ingress_protection_group_id);
    if (rv != BCM_E_NONE) {
        printf("Create protection multicast group Error, in mpls_port__vswitch_vpls_vpn_create__set\n");
        return rv;
    }
    if (second_unit >= 0)
    {
        rv = mpls_port__vswitch_vpls_vpn_create__set(second_unit, ingress_protection_group_id);
        if (rv != BCM_E_NONE) {
            printf("Create protection multicast group Error, in mpls_port__vswitch_vpls_vpn_create__set\n");
            return rv;
        }
    }
    printf("vswitch_vpls_run create vswitch\n");
    /* create vswitch */
    rv = mpls_port__vswitch_vpls_vpn_create__set(unit, vswitch_vpls_info_1.vpn_id);
    if (rv != BCM_E_NONE) {
        printf("Error, in mpls_port__vswitch_vpls_vpn_create__set\n");
        return rv;
    }
    if (second_unit >= 0)
    {
        rv = mpls_port__vswitch_vpls_vpn_create__set(second_unit, vswitch_vpls_info_1.vpn_id);
        if (rv != BCM_E_NONE) {
            printf("Error, in mpls_port__vswitch_vpls_vpn_create__set\n");
            return rv;
        }
    }
    /* create failover id for MPLS */
    rv = bcm_failover_create(unit, BCM_FAILOVER_INGRESS, &ingress_failover);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_failover_create for Ingress protection, rv - %d\n", rv);
        return rv;
    }
    printf("Ingress Failover id: 0x%x\n", ingress_failover);
    printf("vswitch_vpls_run add vlan access port\n");
    /* add mpls access port */
    rv = vswitch_vpls_add_access_port_1(unit, second_unit, &vswitch_vpls_info_1.access_port_id);
    if (rv != BCM_E_NONE) {
        printf("Error, in vswitch_vpls_add_access_port_1\n");
        return rv;
    }
    printf("Added access port\n");
    printf("vswitch_vpls_run add mpls network port\n");
    /* add mpls network port */
    rv = vswitch_vpls_add_network_port_1(unit, second_unit, &network_port_id);
    if (rv != BCM_E_NONE) {
        printf("Error, in vswitch_vpls_add_network_port_1\n");
        return rv;
    }
    printf("Added network port\n");
    /* add mpls access port */
    rv = vswitch_vpls_add_access_port_2(unit_or_second, &access_port_id2);
    if (rv != BCM_E_NONE) {
        printf("Error, in vswitch_vpls_add_access_port_2\n");
        return rv;
    }
    /* add mpls network port */
    rv = vswitch_vpls_add_network_port_2(unit, &network_port_id2);
    if (rv != BCM_E_NONE) {
        printf("Error, in vswitch_vpls_add_network_port_2\n");
        return rv;
    }
    return rv;
}
/* Switch PWE1 from mpls tunnel (1000,1002) to the other (300,400) that was previously configured for PWE2.
   This configures only the multicast traffic to go through the new tunnel; unicast traffic is not affected. */
int
switch_pwe_tunnel(int unit) {
    int rv = BCM_E_NONE;
    bcm_mpls_egress_label_t label_array[1];
    int label_count;
    /* Fix: pass the array itself (decays to bcm_mpls_egress_label_t*);
     * '&label_array' had the wrong pointer type. */
    rv = bcm_mpls_tunnel_initiator_get(unit, pwe_encap_id, 1, label_array, &label_count);
    if (rv != BCM_E_NONE) {
        printf("Error, in bcm_mpls_tunnel_initiator_get\n");
        return rv;
    }
    /* REPLACE + WITH_ID: overwrite the existing encap entry in place,
     * pointing it at the second MPLS tunnel interface. */
    label_array[0].flags |= BCM_MPLS_EGRESS_LABEL_WITH_ID | BCM_MPLS_EGRESS_LABEL_REPLACE;
    label_array[0].tunnel_id = pwe_encap_id;
    label_array[0].l3_intf_id = second_mpls_intf;
    rv = bcm_mpls_tunnel_initiator_create(unit, 0, 1, label_array);
    if (rv != BCM_E_NONE) {
        printf("Error, in bcm_mpls_tunnel_initiator_create\n");
        return rv;
    }
    return rv;
}
/* Delete PWE1; all the traffic should then go through PWE2 (termination). */
int
delete_pwe_lif(int unit) {
    int rv = BCM_E_NONE;
    /* Fix: removed unused locals (label_array, label_count). */
    rv = bcm_mpls_port_delete(unit, vswitch_vpls_shared_info_1.vpn, network_port_id);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_mpls_port_delete\n");
        return rv;
    }
    return rv;
}
/*
 * Pre-populate the MAC table for the vswitch: one static entry pointing at
 * the AC gport and one pointing at the PWE network gport.
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_fill_mact(int unit){
    bcm_mac_t da_toward_ac = {0x07,0x08,0x09,0x0A,0x0B,0x0C};
    bcm_mac_t da_toward_pwe = {0x01,0x02,0x03,0x04,0x05,0x06};
    int rv;

    /* DA forwarded toward the attachment circuit. */
    rv = vswitch_add_l2_addr_to_gport(unit, vswitch_vpls_info_1.access_port_id, da_toward_ac, vswitch_vpls_shared_info_1.vpn);
    if (rv != BCM_E_NONE) {
        printf("Error, in vswitch_add_l2_addr_to_gport\n");
        return rv;
    }
    /* DA forwarded toward the PWE network port. */
    rv = vswitch_add_l2_addr_to_gport(unit, network_port_id, da_toward_pwe, vswitch_vpls_shared_info_1.vpn);
    if (rv != BCM_E_NONE) {
        printf("Error, in vswitch_add_l2_addr_to_gport\n");
        return rv;
    }
    return rv;
}
/*
 * Configure the PWE and AC ports for double-tagged packets. Only relevant
 * when the termination-label-index database mode is 22 or 23; otherwise a
 * no-op. Returns BCM_E_NONE on success.
 */
int vswitch_1plus1_double_tag_port_configuration(int unit, int acP, int pweP){
    int rv = BCM_E_NONE;
    int cfg_ports[2];
    int i;

    /* read mpls index soc property */
    mpls_termination_label_index_enable = soc_property_get(unit , "mpls_termination_label_index_enable",0);
    mpls_termination_label_index_database_mode = soc_property_get(unit , spn_BCM886XX_MPLS_TERMINATION_DATABASE_MODE, 0);
    if ((mpls_termination_label_index_enable == 1) &&
        ((mpls_termination_label_index_database_mode == 22) || (mpls_termination_label_index_database_mode == 23))) {
        /* Same configuration for the PWE port first, then the AC port. */
        cfg_ports[0] = pweP;
        cfg_ports[1] = acP;
        for (i = 0; i < 2; i++) {
            rv = bcm_vlan_control_port_set(unit, cfg_ports[i], bcmVlanPortDoubleLookupEnable, 1);
            if (rv != BCM_E_NONE) {
                printf("(%s) \n",bcm_errmsg(rv));
                return rv;
            }
            /* when a port is configured with "bcmVlanPortDoubleLookupEnable" the VLAN domain must be unique in the device */
            rv = bcm_port_class_set(unit, cfg_ports[i], bcmPortClassId, cfg_ports[i]);
            if (rv != BCM_E_NONE) {
                printf("Error, bcm_port_class_set (%s) \n",bcm_errmsg(rv));
                return rv;
            }
        }
    }
    return rv;
}
/*
 * Test entry point: run the full 1+1 protection VPLS example with default
 * VLANs (10/20 and 15/30) on a single device, using acP as the AC in-port
 * and pweP as both PWE in-ports.
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_1plus1_protection_run_with_defaults_dvapi(int unit, int acP, int pweP){
    int second_unit = -1;
    int extend_example = 1;
    int rv = BCM_E_NONE;
    rv = mpls__mpls_pipe_mode_exp_set(unit);
    if (rv != BCM_E_NONE) {
        printf("Error, in mpls__mpls_pipe_mode_exp_set\n");
        return rv;
    }
    /* configure the port for double tag packets - this function relevant for termination mode 22-23 only */
    rv = vswitch_1plus1_double_tag_port_configuration(unit,acP,pweP);
    if (rv != BCM_E_NONE) {
        printf("Error, in vswitch_1plus1_double_tag_port_configuration\n");
        return rv;
    }
    /* Fix: pass 'unit' as the first argument, consistent with the other
     * vswitch_vlps_info_init() call sites in this file ('extend_example'
     * was passed by mistake). */
    vswitch_vlps_info_init(unit,acP,pweP,pweP,10,20,15,30,default_vpn_id);
    return vswitch_vpls_run(unit, second_unit, extend_example);
}
/*
 * Example of push-profile allocation by the user.
 * bcm_mpls_port_add is used here to allocate a push profile with an id
 * chosen by the caller. The profile can later be referenced by
 * bcm_mpls_port_add (PWE creation) and bcm_mpls_tunnel_initiator, provided
 * the same properties (exp/ttl/control-word) are configured in those APIs.
 * Returns BCM_E_NONE on success.
 */
int
vswitch_vpls_allocate_push_profile(int unit, int exp, int ttl, int has_cw, int push_profile){
    bcm_mpls_port_t push_port;
    int rv = BCM_E_NONE;

    bcm_mpls_port_t_init(&push_port);
    /* WITH_ID: the caller selects the profile id. */
    push_port.flags = BCM_MPLS_PORT_WITH_ID;
    if (has_cw) {
        push_port.flags |= BCM_MPLS_PORT_CONTROL_WORD;
    }
    push_port.mpls_port_id = push_profile;
    push_port.egress_label.exp = exp;
    push_port.egress_label.ttl = ttl;
    /* TUNNEL_PUSH_INFO marks this call as a push-profile allocation only. */
    push_port.flags2 = BCM_MPLS_PORT2_TUNNEL_PUSH_INFO;
    rv = bcm_mpls_port_add(unit, 0, &push_port);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_mpls_port_add\n");
        return rv;
    }
    return rv;
}
int vswitch_vpls_1plus1_failover_set(int unit, int enable){
int rv;
rv = bcm_failover_set(unit, ingress_failover, enable);
if (rv != BCM_E_NONE) {
printf("Error, in bcm_failover_set failover_id: 0x%08x \n", failover_id);
return rv;
}
return rv;
}
/*
 * PWE 1+1 ingress path-selection example using ingress-only LIFs.
 * Sets up MPLS tunnels, an ingress failover id, a vswitch (default_vpn_id),
 * one AC port, and two ingress-only PWE mpls ports - backup (label 0x1111)
 * and primary (label 0x2222) - sharing the same ingress failover id so only
 * the active path's traffic is accepted.
 * Returns BCM_E_NONE on success.
 */
int vswitch_vpls_pwe_ingress_only_path_selection(int unit,int acP, int pweP){
    int rv;
    /* Cache the termination-label-index soc properties consulted below. */
    mpls_termination_label_index_enable = soc_property_get(unit , "mpls_termination_label_index_enable",0);
    mpls_termination_label_index_database_mode = soc_property_get(unit, spn_BCM886XX_MPLS_TERMINATION_DATABASE_MODE, 0);
    vswitch_vlps_info_init(unit,acP,pweP,pweP,10,20,15,30,default_vpn_id);
    vswitch_vlan_init(unit,-1,0);
    printf("vswitch_vpls_run configure mpls tunnels\n");
    /* configure MPLS tunnels */
    rv = mpls_tunnels_config(unit, -1, 0);
    if (rv != BCM_E_NONE) {
        printf("Error, in mpls_tunnels_config\n");
        return rv;
    }
    /* create failover id for MPLS */
    rv = bcm_failover_create(unit, BCM_FAILOVER_INGRESS, &ingress_failover);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_failover_create for Ingress protection, rv - %d\n", rv);
        return rv;
    }
    printf("Ingress Failover id: 0x%x\n", ingress_failover);
    /* create the vswitch the AC and PWEs will be attached to */
    rv = mpls_port__vswitch_vpls_vpn_create__set(unit, default_vpn_id);
    if (rv != BCM_E_NONE) {
        printf("Create protection multicast group Error, in mpls_port__vswitch_vpls_vpn_create__set\n");
        return rv;
    }
    bcm_gport_t access_port_id;
    rv = vswitch_vpls_add_access_port_1(unit, -1, &access_port_id);
    if (rv != BCM_E_NONE) {
        printf("Error, in vswitch_vpls_add_access_port_1\n");
        return rv;
    }
    printf("vlan port id: 0x%x\n", access_port_id);
    /* Backup PWE: ingress-only LIF matching label 0x1111 (fixed WITH_ID gport). */
    bcm_mpls_port_t mpls_port;
    bcm_mpls_port_t_init(&mpls_port);
    mpls_port.criteria = BCM_MPLS_PORT_MATCH_LABEL;
    mpls_port.flags |= BCM_MPLS_PORT_EGRESS_TUNNEL | BCM_MPLS_PORT_WITH_ID;
    mpls_port.flags2 |= BCM_MPLS_PORT2_INGRESS_ONLY;
    mpls_port.match_label = 0x1111;
    if (mpls_termination_label_index_enable) {
        /* In label-index mode the match label carries a database index that
         * depends on the configured VTT database mode. */
        if ((mpls_termination_label_index_database_mode >= 20) && (mpls_termination_label_index_database_mode <= 21)) {
            BCM_MPLS_INDEXED_LABEL_SET(mpls_port.match_label, 0x1111, 3);
        } else if ((mpls_termination_label_index_database_mode >= 22) && (mpls_termination_label_index_database_mode <= 23)) {
            BCM_MPLS_INDEXED_LABEL_SET(mpls_port.match_label, 0x1111, 2);
        } else {
            BCM_MPLS_INDEXED_LABEL_SET(mpls_port.match_label, 0x1111, 1);
        }
    }
    /* Forwarding destination is a FEC (0x1000) shared by both PWE LIFs. */
    BCM_GPORT_FORWARD_PORT_SET(mpls_port.failover_port_id,0x1000);
    mpls_port.mpls_port_id=0x18907000;
    mpls_port.ingress_failover_id=ingress_failover;
    rv = bcm_mpls_port_add(unit,default_vpn_id,&mpls_port);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_mpls_port_add for Ingress path selection, rv - %d\n", rv);
        return rv;
    }
    printf("backup Ingress mpls port id: 0x%x\n", mpls_port.mpls_port_id);
    /* Primary PWE: ingress-only LIF matching label 0x2222;
     * ingress_failover_port_id=1 marks it as the primary member. */
    bcm_mpls_port_t_init(&mpls_port);
    mpls_port.criteria = BCM_MPLS_PORT_MATCH_LABEL;
    mpls_port.flags |= BCM_MPLS_PORT_EGRESS_TUNNEL | BCM_MPLS_PORT_WITH_ID;
    mpls_port.flags2 |= BCM_MPLS_PORT2_INGRESS_ONLY;
    mpls_port.match_label = 0x2222;
    if (mpls_termination_label_index_enable) {
        /* Same database-index encoding as for the backup LIF. */
        if ((mpls_termination_label_index_database_mode >= 20) && (mpls_termination_label_index_database_mode <= 21)) {
            BCM_MPLS_INDEXED_LABEL_SET(mpls_port.match_label, 0x2222, 3);
        } else if ((mpls_termination_label_index_database_mode >= 22) && (mpls_termination_label_index_database_mode <= 23)) {
            BCM_MPLS_INDEXED_LABEL_SET(mpls_port.match_label, 0x2222, 2);
        } else {
            BCM_MPLS_INDEXED_LABEL_SET(mpls_port.match_label, 0x2222, 1);
        }
    }
    BCM_GPORT_FORWARD_PORT_SET(mpls_port.failover_port_id,0x1000);
    mpls_port.mpls_port_id=0x18907002;
    mpls_port.ingress_failover_id=ingress_failover;
    mpls_port.ingress_failover_port_id=1;
    rv = bcm_mpls_port_add(unit,default_vpn_id,&mpls_port);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_mpls_port_add for Ingress path selection, rv - %d\n", rv);
        return rv;
    }
    printf("primary Ingress mpls port id: 0x%x\n", mpls_port.mpls_port_id);
    return rv;
}
/*
 * vswitch_vpls_ac_1plus1_run
 *
 * Main flow of the VPLS AC 1+1 ingress-protection example:
 *   1. Set MPLS EXP pipe mode.
 *   2. Configure the access port for double-tagged packets (only relevant
 *      for termination database modes 22-23).
 *   3. Initialize the example's VPLS/VLAN parameters.
 *   4. Build the MPLS tunnels and create the VPLS vswitch (VPN).
 *   5. Open the ingress protection MC group (on JR2, also open the
 *      fabric/ingress MC needed for egress MC) and create the ingress
 *      failover id.
 *   6. Add the protected access ACs and the PWE network port, then add a
 *      static MAC entry pointing at the network port.
 *
 * Parameters:
 *   unit - device unit number.
 *   acP  - access-side physical port.
 *   pweP - PWE (network-side) physical port.
 * Returns BCM_E_NONE on success, otherwise the failing API's error code.
 */
int vswitch_vpls_ac_1plus1_run(int unit, int acP, int pweP){
int rv;
/* MAC address that will be bound to the PWE network port below. */
bcm_mac_t pwe_mac = {0x00, 0x011,0x022,0x033,0x044,0x055};
rv = mpls__mpls_pipe_mode_exp_set(unit);
if (rv != BCM_E_NONE) {
printf("Error, in mpls__mpls_pipe_mode_exp_set\n");
return rv;
}
/* configure the port for double tag packets - this function relevant for termination mode 22-23 only */
rv = vswitch_1plus1_double_tag_port_configuration(unit,acP,pweP);
if (rv != BCM_E_NONE) {
printf("Error, in vswitch_1plus1_double_tag_port_configuration\n");
return rv;
}
/* Populate the global example structures (ports, vlans, vpn id). */
vswitch_vlps_info_init(unit,acP,pweP,pweP,10,20,15,30,default_vpn_id);
vswitch_vlan_init(unit,-1,1);
printf("vswitch_vpls_ac_1plus1_run configure mpls tunnels\n");
/* configure MPLS tunnels */
rv = mpls_tunnels_config(unit, -1, 0);
if (rv != BCM_E_NONE) {
printf("Error, in mpls_tunnels_config\n");
return rv;
}
/* create vswitch */
printf("vswitch_vpls_ac_1plus1_run create vswitch\n");
rv = mpls_port__vswitch_vpls_vpn_create__set(unit, vswitch_vpls_info_1.vpn_id);
if (rv != BCM_E_NONE) {
printf("Create protection multicast group Error, in mpls_port__vswitch_vpls_vpn_create__set\n");
return rv;
}
/*create ac proctection multicast group*/
/* Use an egress multicast group for the 1+1 protection replication. */
egress_mc = 1;
rv = multicast__open_mc_group(unit, &ingress_protection_group_id, (is_device_or_above(unit, JERICHO2)?0:BCM_MULTICAST_TYPE_L2));
if (rv != BCM_E_NONE) {
printf("Error, multicast__open_mc_group\n");
return rv;
}
/** JR2 egress MC needs fabric replication for each egress core*/
if (egress_mc) {
rv = multicast__open_fabric_mc_or_ingress_mc_for_egress_mc(unit, ingress_protection_group_id, ingress_protection_group_id);
if (rv != BCM_E_NONE) {
printf("Error, multicast__open_fabric_mc_or_ingress_mc_for_egress_mc \n");
return rv;
}
}
/* create failover id for AC */
rv = bcm_failover_create(unit, BCM_FAILOVER_INGRESS, &ingress_failover);
if (rv != BCM_E_NONE) {
printf("Error, bcm_failover_create for Ingress protection, rv - %d\n", rv);
return rv;
}
printf("Ingress Failover id: 0x%x\n", ingress_failover);
printf("vswitch_vpls_ac_1plus1_run add vlan access port\n");
/* add mpls access port */
rv = vswitch_vpls_ac_1plus1_add_access_port(unit);
if (rv != BCM_E_NONE) {
printf("Error, in vswitch_vpls_ac_1plus1_add_access_port\n");
return rv;
}
printf("Added access port\n");
printf("vswitch_vpls_ac_1plus1_run add mpls network port\n");
/* add mpls network port */
rv = vswitch_vpls_ac_1plus1_add_network_port(unit);
if (rv != BCM_E_NONE) {
printf("Error, in vswitch_vpls_ac_1plus1_add_network_port\n");
return rv;
}
printf("Added network port\n");
/* Static MAC entry so traffic toward pwe_mac is forwarded to the PWE port. */
rv = vswitch_add_l2_addr_to_gport(unit, network_port_id, pwe_mac, vswitch_vpls_shared_info_1.vpn);
if (rv != BCM_E_NONE) {
printf("Error, in vswitch_add_l2_addr_to_gport\n");
return rv;
}
return rv;
}
/*
 * vswitch_vpls_ac_1plus1_add_network_port
 *
 * Adds the PWE (network-side) MPLS port to the VPLS VPN: matches the
 * incoming VC label, and pushes the egress VC label (ttl=10, exp=3) over
 * the configured egress tunnel.  On JR2 the add goes through
 * sand_mpls_port_add with pipe QoS model; on earlier devices the legacy
 * bcm_mpls_port_add with BCM_MPLS_EGRESS_LABEL_TTL_SET is used.
 *
 * Side effects: stores the created gport in the globals `network_port_id`
 * and `mpls_port_encap_id1`, and refreshes the global
 * `mpls_termination_label_index_enable` from the soc property.
 *
 * Parameters:
 *   unit - device unit number.
 * Returns BCM_E_NONE on success, a BCM error code otherwise.
 */
int
vswitch_vpls_ac_1plus1_add_network_port(int unit){
int rv = BCM_E_NONE;
bcm_mpls_port_t mpls_port;
bcm_mpls_port_t_init(&mpls_port);
/* Initialize the common properties of ingress/egress/joined PWE ports */
mpls_port.criteria = BCM_MPLS_PORT_MATCH_LABEL;
mpls_port.port = vswitch_vpls_info_1.pwe_in_port;
mpls_port.flags |= BCM_MPLS_PORT_EGRESS_TUNNEL;
mpls_port.egress_label.ttl = 10;
mpls_port.egress_label.exp = 3;
mpls_port.egress_label.label = vswitch_vpls_info_1.eg_vc_label;
mpls_port.match_label = vswitch_vpls_info_1.in_vc_label;
mpls_port.egress_tunnel_if = vswitch_vpls_info_1.tunnel_id;
mpls_termination_label_index_enable = soc_property_get(unit , "mpls_termination_label_index_enable",0);
/* We check if mpls_termination_label_index_enable is set, if so, we check if the label passed to the utility call has been indexed already.
If so, we do nothing, if not, we index it with index=2, as PWE labels are mostly looked at in SEM B */
if (mpls_termination_label_index_enable) {
int label_val = vswitch_vpls_info_1.in_vc_label;
if ((mpls_termination_label_index_database_mode >= 20) && (mpls_termination_label_index_database_mode <= 21)) {
BCM_MPLS_INDEXED_LABEL_SET(mpls_port.match_label, label_val, 3);
} else {
BCM_MPLS_INDEXED_LABEL_SET(mpls_port.match_label, label_val, 2);
}
}
if (is_device_or_above(unit, JERICHO2)) {
/* JR2: pipe (MyNameSpace) QoS model for TTL and QoS on the egress label. */
mpls_port.egress_label.egress_qos_model.egress_ttl = bcmQosEgressModelPipeMyNameSpace;
mpls_port.egress_label.egress_qos_model.egress_qos = bcmQosEgressModelPipeMyNameSpace;
/** to avoid updating in loading cints, we call sand_mpls_port_add in JR2 only.*/
rv = sand_mpls_port_add(unit, vswitch_vpls_info_1.vpn_id, mpls_port);
} else {
mpls_port.egress_label.flags = BCM_MPLS_EGRESS_LABEL_TTL_SET;
rv = bcm_mpls_port_add(unit, vswitch_vpls_info_1.vpn_id, mpls_port);
}
if (rv != BCM_E_NONE) {
printf("Error, bcm_mpls_port_add\n");
print rv;
return rv;
}
/* handle of the created gport */
network_port_id = mpls_port.mpls_port_id;
mpls_port_encap_id1 = mpls_port.encap_id;
if(verbose1){
printf("mpls_port_encap_id1: 0x%08x\n\r",mpls_port_encap_id1);
}
if(verbose1){
printf("add mpls port 0x%08x \n\r",network_port_id);
}
return rv;
}
/*
 * vswitch_vpls_ac_1plus1_add_access_port
 *
 * Creates the two attachment circuits (ACs) on the access port used for 1+1
 * ingress protection: vlan_port_1 is the standby AC and vlan_port_2 the
 * primary AC.  Both ACs are added to the VPLS vswitch and to the ingress
 * protection multicast group.  For termination database modes 22/23 the ACs
 * match double-tagged packets (<port, outer-vlan, inner-vlan>); otherwise
 * they match single-tagged packets (<port, outer-vlan>).
 *
 * Side effects: sets globals fec_id_with_ac_1plus1_group,
 * vlan_port_encap_id1/2 and vswitch_vpls_info_1.access_port_id.
 *
 * Fix: the verbose printout after creating the primary AC was labeled
 * "vlan_port_encap_id1" while printing vlan_port_encap_id2.
 *
 * Parameters:
 *   unit - device unit number.
 * Returns BCM_E_NONE on success, a BCM error code otherwise.
 */
int vswitch_vpls_ac_1plus1_add_access_port(int unit)
{
    int rv;
    bcm_vlan_port_t vlan_port_1;
    bcm_vlan_port_t vlan_port_2;
    bcm_gport_t port_id;
    bcm_if_t fec_id;
    bcm_gport_t fec_gport_id = BCM_GPORT_INVALID;
    int port;
    port = vswitch_vpls_info_1.ac_in_port;
    /* Resolve the FEC used for learning when ingress protection is enabled. */
    if (ingress_failover || ingress_protection_group_id)
    {
        if ((soc_property_get(unit, spn_SYSTEM_HEADERS_MODE, 1) == 0)&&
            is_device_or_above(unit, JERICHO2)) {
            /**
             * For JR2:
             * JR1 mode: FEC is used as learning, FEC is created explicitly
             * JR2 mode: MC Group is used as learning
             */
            create_l3_egress_s l3_egress;
            sal_memset(&l3_egress, 0, sizeof(l3_egress));
            BCM_GPORT_MCAST_SET(l3_egress.out_gport, ingress_protection_group_id);
            rv = l3__egress_only_fec__create(unit, &l3_egress);
            if (rv != BCM_E_NONE) {
                printf("Error, l3__egress_only_fec__create for FEC\n");
                return rv;
            }
            fec_id = l3_egress.fec_id;
        }
        else if(jr1_ac_1plus1_fec_with_id)
        {
            BCM_L3_ITF_SET(fec_id, BCM_L3_ITF_TYPE_FEC, 0x1200);
        }
        /* NOTE(review): if neither branch above is taken, fec_id is read
         * uninitialized here -- confirm that configuration cannot occur. */
        fec_id_with_ac_1plus1_group = fec_id;
    }
    if ((mpls_termination_label_index_enable == 1) &&
        ((mpls_termination_label_index_database_mode == 22) || (mpls_termination_label_index_database_mode == 23))) {
        /* Standby AC keyed on <port, outer-vlan, inner-vlan>. */
        bcm_vlan_port_t_init(&vlan_port_1);
        vlan_port_1.criteria = BCM_VLAN_PORT_MATCH_PORT_VLAN_STACKED;
        vlan_port_1.port = vswitch_vpls_info_1.ac_in_port;
        vlan_port_1.vsi = 0;
        vlan_port_1.match_vlan = vswitch_vpls_info_1.ac_port1_outer_vlan;
        vlan_port_1.match_inner_vlan = vswitch_vpls_info_1.ac_port1_inner_vlan;
        vlan_port_1.egress_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port1_outer_vlan;
        vlan_port_1.egress_inner_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port1_inner_vlan;
        vlan_port_1.flags = vswitch_vpls_info_1.ac_port1_flags;
        vlan_port_1.ingress_failover_id = ingress_failover;
        if (!is_device_or_above(unit, JERICHO2) && jr1_ac_1plus1_fec_with_id)
        {
            vlan_port_1.failover_port_id = fec_id;
        }
        vlan_port_1.failover_mc_group = ingress_protection_group_id;
    } else {
        /* Standby AC keyed on <port, outer-vlan>. */
        bcm_vlan_port_t_init(&vlan_port_1);
        vlan_port_1.criteria = BCM_VLAN_PORT_MATCH_PORT_VLAN;
        vlan_port_1.port = vswitch_vpls_info_1.ac_in_port;
        vlan_port_1.vsi = 0;
        vlan_port_1.match_vlan = vswitch_vpls_info_1.ac_port1_outer_vlan;
        vlan_port_1.egress_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port1_outer_vlan;
        vlan_port_1.flags = vswitch_vpls_info_1.ac_port1_flags;
        vlan_port_1.ingress_failover_id = ingress_failover;
        if (!is_device_or_above(unit, JERICHO2))
        {
            vlan_port_1.failover_port_id = jr1_ac_1plus1_fec_with_id?fec_id:0;
            vlan_port_1.failover_mc_group = ingress_protection_group_id;
        }
        else
        {
            /** In case of JR2, failover_port_id should be type of FEC.*/
            BCM_L3_ITF_FEC_TO_GPORT_FORWARD_GROUP(fec_gport_id, fec_id);
            vlan_port_1.failover_port_id = soc_property_get(unit, spn_SYSTEM_HEADERS_MODE, 1)?0:fec_gport_id;
            vlan_port_1.failover_mc_group = soc_property_get(unit, spn_SYSTEM_HEADERS_MODE, 1)?ingress_protection_group_id:0;
        }
    }
    rv = bcm_vlan_port_create(unit, &vlan_port_1);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_vlan_port_create for standby AC\n");
        return rv;
    }
    /* egress_vlan and egress_inner_vlan will be used at eve */
    vlan_port_1.egress_vlan = vswitch_vpls_info_1.ac_port1_outer_vlan;
    if ((mpls_termination_label_index_enable == 1) &&
        ((mpls_termination_label_index_database_mode == 22) || (mpls_termination_label_index_database_mode == 23))) {
        vlan_port_1.egress_inner_vlan = vswitch_vpls_info_1.ac_port1_inner_vlan;
    }
    port_id = vlan_port_1.vlan_port_id;
    vlan_port_encap_id1 = vlan_port_1.encap_id;
    if(verbose1){
        printf("vlan_port_encap_id1: 0x%08x\n\r",vlan_port_encap_id1);
    }
    /* In advanced vlan translation mode, bcm_vlan_port_create does not create ingress / egress
       action mapping. This is here to compensate. */
    if (advanced_vlan_translation_mode) {
        rv = vlan_translation_vlan_port_create_to_translation(unit, &vlan_port_1);
        if (rv != BCM_E_NONE) {
            printf("Error: vlan_translation_vlan_port_create_to_translation\n");
        }
    }
    rv = bcm_vswitch_port_add(unit, vswitch_vpls_shared_info_1.vpn, vlan_port_1.vlan_port_id);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_vswitch_port_add\n");
        return rv;
    }
    /* update Multicast to have the added port */
    rv = multicast__vlan_port_add(unit, ingress_protection_group_id, port , vlan_port_1.vlan_port_id, 1);
    if (rv != BCM_E_NONE) {
        printf("Error, multicast__vlan_port_add\n");
        return rv;
    }
    if(verbose1){
        printf("add port 0x%08x to multicast \n\r",vlan_port_1.vlan_port_id);
    }
    if ((mpls_termination_label_index_enable == 1) &&
        ((mpls_termination_label_index_database_mode == 22) || (mpls_termination_label_index_database_mode == 23))) {
        /* Primary AC keyed on <port, outer-vlan, inner-vlan>. */
        bcm_vlan_port_t_init(&vlan_port_2);
        vlan_port_2.criteria = BCM_VLAN_PORT_MATCH_PORT_VLAN_STACKED;
        vlan_port_2.port = vswitch_vpls_info_1.ac_in_port;
        vlan_port_2.vsi = 0;
        vlan_port_2.match_vlan = vswitch_vpls_info_1.ac_port2_outer_vlan;
        vlan_port_2.match_inner_vlan = vswitch_vpls_info_1.ac_port2_inner_vlan;
        vlan_port_2.egress_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port2_outer_vlan;
        vlan_port_2.egress_inner_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port2_inner_vlan;
        vlan_port_2.flags = vswitch_vpls_info_1.ac_port2_flags;
        vlan_port_2.ingress_failover_id = ingress_failover;
        vlan_port_2.failover_mc_group = ingress_protection_group_id;
        vlan_port_2.failover_port_id = port_id;
        vlan_port_2.ingress_failover_port_id = port_id;
    } else {
        /* Primary AC keyed on <port, outer-vlan>. */
        bcm_vlan_port_t_init(&vlan_port_2);
        vlan_port_2.criteria = BCM_VLAN_PORT_MATCH_PORT_VLAN;
        vlan_port_2.port = vswitch_vpls_info_1.ac_in_port;
        vlan_port_2.vsi = 0;
        vlan_port_2.match_vlan = vswitch_vpls_info_1.ac_port2_outer_vlan;
        vlan_port_2.egress_vlan = is_device_or_above(unit, JERICHO2) ? 0 : vswitch_vpls_info_1.ac_port2_outer_vlan;
        vlan_port_2.flags = vswitch_vpls_info_1.ac_port2_flags;
        vlan_port_2.ingress_failover_id = ingress_failover;
        vlan_port_2.ingress_failover_port_id = port_id;
        if (!is_device_or_above(unit, JERICHO2))
        {
            vlan_port_2.failover_port_id = port_id;
            vlan_port_2.failover_mc_group = ingress_protection_group_id;
        }
        else
        {
            /** In case of JR2, failover_port_id should be type of FEC.*/
            BCM_L3_ITF_FEC_TO_GPORT_FORWARD_GROUP(fec_gport_id, fec_id);
            vlan_port_2.failover_port_id = soc_property_get(unit, spn_SYSTEM_HEADERS_MODE, 1)?0:fec_gport_id;
            vlan_port_2.failover_mc_group = soc_property_get(unit, spn_SYSTEM_HEADERS_MODE, 1)?ingress_protection_group_id:0;
        }
    }
    rv = bcm_vlan_port_create(unit, &vlan_port_2);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_vlan_port_create for primary AC\n");
        return rv;
    }
    vswitch_vpls_info_1.access_port_id = vlan_port_2.vlan_port_id;
    vlan_port_encap_id2= vlan_port_2.encap_id;
    if(verbose1){
        /* Fix: label previously said "vlan_port_encap_id1". */
        printf("vlan_port_encap_id2: 0x%08x\n\r",vlan_port_encap_id2);
    }
    /* egress_vlan and egress_inner_vlan will be used at eve */
    vlan_port_2.egress_vlan = vswitch_vpls_info_1.ac_port2_outer_vlan;
    if ((mpls_termination_label_index_enable == 1) &&
        ((mpls_termination_label_index_database_mode == 22) || (mpls_termination_label_index_database_mode == 23))) {
        vlan_port_2.egress_inner_vlan = vswitch_vpls_info_1.ac_port2_inner_vlan;
    }
    /* In advanced vlan translation mode, bcm_vlan_port_create does not create ingress / egress
       action mapping. This is here to compensate. */
    if (advanced_vlan_translation_mode) {
        rv = vlan_translation_vlan_port_create_to_translation(unit, &vlan_port_2);
        if (rv != BCM_E_NONE) {
            printf("Error: vlan_translation_vlan_port_create_to_translation\n");
        }
    }
    rv = bcm_vswitch_port_add(unit, vswitch_vpls_shared_info_1.vpn, vlan_port_2.vlan_port_id);
    if (rv != BCM_E_NONE) {
        printf("Error, bcm_vswitch_port_add\n");
        return rv;
    }
    /* update Multicast to have the added port */
    rv = multicast__vlan_port_add(unit, ingress_protection_group_id, port , vlan_port_2.vlan_port_id, 1);
    if (rv != BCM_E_NONE) {
        printf("Error, multicast__vlan_port_add\n");
        return rv;
    }
    if(verbose1){
        printf("add port 0x%08x to multicast \n\r",vlan_port_2.vlan_port_id);
    }
    /*SDK-130008 bcm_port_learn_set will clear learn info, so add verification*/
    print bcm_port_learn_set(unit,vlan_port_1.vlan_port_id,BCM_PORT_LEARN_ARL|BCM_PORT_LEARN_FWD);
    print bcm_port_learn_set(unit,vlan_port_2.vlan_port_id,BCM_PORT_LEARN_ARL|BCM_PORT_LEARN_FWD);
    return rv;
}
| 40.414954 | 193 | 0.625788 | [
"object"
] |
8a08da5a0d152e2d0d25dc8f357a36a7717f4b20 | 4,652 | c | C | src/rpc_rpcd_client.c | twoporeguys/librpc | 0b1795b3d4f4e3a7d7b6c17e5725769d233a1e6f | [
"BSD-2-Clause"
] | 7 | 2017-08-26T00:05:13.000Z | 2021-04-21T13:40:52.000Z | src/rpc_rpcd_client.c | twoporeguys/librpc | 0b1795b3d4f4e3a7d7b6c17e5725769d233a1e6f | [
"BSD-2-Clause"
] | 21 | 2018-01-05T19:41:27.000Z | 2019-06-05T16:00:23.000Z | src/rpc_rpcd_client.c | twoporeguys/librpc | 0b1795b3d4f4e3a7d7b6c17e5725769d233a1e6f | [
"BSD-2-Clause"
] | 4 | 2019-03-27T17:16:07.000Z | 2021-04-13T00:50:11.000Z | /*
* Copyright 2015-2017 Two Pore Guys, Inc.
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted providing that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <errno.h>
#include <rpc/object.h>
#include <rpc/client.h>
#include <rpc/connection.h>
#include <rpc/rpcd.h>
#include "internal.h"
static const char *rpcd_get_socket_location(void);
/*
 * Resolve the rpcd control socket URI: honor the environment override
 * named by RPCD_SOCKET_ENV when present, otherwise fall back to the
 * compiled-in default location.
 */
static const char *
rpcd_get_socket_location(void)
{
	const char *override = getenv(RPCD_SOCKET_ENV);

	return (override != NULL ? override : RPCD_SOCKET_LOCATION);
}
/*
 * Connect to a named service through the rpcd instance at rpcd_uri
 * (or at the default socket location when rpcd_uri is NULL).
 *
 * The daemon's "connect" call either hands us a file descriptor (a new
 * client is created over it and the bootstrap client is closed) or replies
 * "BRIDGED", in which case the bootstrap connection itself carries the
 * bridged traffic and is returned to the caller.
 *
 * Returns an owned rpc_client_t on success, NULL on failure (the last RPC
 * error is recorded when the daemon returned an error object).
 *
 * Fix: the path string built with g_strdup_printf() was never freed.
 */
rpc_client_t
rpcd_connect_to(const char *rpcd_uri, const char *service_name)
{
	rpc_client_t client;
	rpc_connection_t conn;
	rpc_auto_object_t result = NULL;
	char *path;

	if (rpcd_uri == NULL)
		rpcd_uri = rpcd_get_socket_location();

	client = rpc_client_create(rpcd_uri, NULL);
	if (client == NULL)
		return (NULL);

	conn = rpc_client_get_connection(client);
	path = g_strdup_printf("/%s", service_name);
	result = rpc_connection_call_syncp(conn, path, RPCD_SERVICE_INTERFACE,
	    "connect", "[]");
	g_free(path);	/* fix: previously leaked */

	if (result == NULL) {
		rpc_client_close(client);
		return (NULL);
	}

	if (rpc_is_error(result)) {
		rpc_client_close(client);
		rpc_set_last_rpc_error(result);
		return (NULL);
	}

	if (rpc_get_type(result) == RPC_TYPE_FD) {
		/* File descriptor was passed to us */
		rpc_client_close(client);
		return (rpc_client_create("socket://", result));
	}

	if (g_strcmp0(rpc_string_get_string_ptr(result), "BRIDGED") == 0) {
		/* Bi-dir bridging */
		return (client);
	}

	/* The daemon answered with something we do not understand. */
	g_assert_not_reached();
}
/*
 * Enumerate the services known to the rpcd instance at rpcd_uri (or at the
 * default socket location when rpcd_uri is NULL) and invoke `applier` with
 * each service's name and description.
 *
 * Returns 0 on success, -1 on connection or call failure (the last RPC
 * error is recorded when the daemon returned an error object).
 *
 * Fixes: the "get_instances" result was previously used without checking
 * for NULL/error, and the bootstrap client was never closed (leak).
 */
int
rpcd_services_apply(const char *rpcd_uri, rpcd_service_applier_t applier)
{
	rpc_client_t client;
	rpc_connection_t conn;
	rpc_auto_object_t result = NULL;

	if (rpcd_uri == NULL)
		rpcd_uri = rpcd_get_socket_location();

	client = rpc_client_create(rpcd_uri, NULL);
	if (client == NULL)
		return (-1);

	conn = rpc_client_get_connection(client);
	result = rpc_connection_call_syncp(conn, "/",
	    RPC_DISCOVERABLE_INTERFACE, "get_instances", "[]");

	if (result == NULL) {
		rpc_client_close(client);
		return (-1);
	}

	if (rpc_is_error(result)) {
		rpc_client_close(client);
		rpc_set_last_rpc_error(result);
		return (-1);
	}

	rpc_array_apply(result, ^(size_t idx, rpc_object_t value) {
		const char *path;
		rpc_auto_object_t name;
		rpc_auto_object_t description;

		path = rpc_dictionary_get_string(value, "name");
		name = rpc_connection_get_property(conn, path,
		    RPCD_SERVICE_INTERFACE, "name");
		description = rpc_connection_get_property(conn, path,
		    RPCD_SERVICE_INTERFACE, "description");
		applier(rpc_string_get_string_ptr(name),
		    rpc_string_get_string_ptr(description));
		return ((bool)true);
	});

	rpc_client_close(client);	/* fix: previously leaked */
	return (0);
}
/*
 * Register a service with the local rpcd instance (socket location taken
 * from rpcd_get_socket_location()).
 *
 * uri         - URI at which the service can be reached.
 * name        - service name to register under.
 * description - human-readable service description.
 *
 * Returns 0 on success, -1 on failure (the last RPC error is recorded when
 * the daemon returned an error object).
 *
 * NOTE(review): `client` is not closed on the success path; presumably the
 * connection must stay open for the registration to remain alive -- confirm
 * against rpcd's connection-tracking semantics before "fixing".
 */
int
rpcd_register(const char *uri, const char *name, const char *description)
{
rpc_client_t client;
rpc_connection_t conn;
rpc_auto_object_t result = NULL;
client = rpc_client_create(rpcd_get_socket_location(), NULL);
if (client == NULL)
return (-1);
conn = rpc_client_get_connection(client);
/* Call the manager's register_service with a Service structure. */
result = rpc_connection_call_syncp(conn, "/", RPCD_MANAGER_INTERFACE,
"register_service",
"[<com.twoporeguys.librpc.rpcd.Service>{s,s,s}]",
"uri", uri,
"name", name,
"description", description);
if (result == NULL) {
rpc_client_close(client);
return (-1);
}
if (rpc_is_error(result)) {
rpc_client_close(client);
rpc_set_last_rpc_error(result);
return (-1);
}
return (0);
}
/*
 * Unregister a service by name.
 *
 * NOTE(review): this function is an unfinished stub -- it creates a client
 * and fetches its connection but then unconditionally returns -1 without
 * issuing any unregister call, and leaks `client`.  `conn` and `result`
 * are unused.  TODO: implement (or document) the missing call.
 */
int
rpcd_unregister(const char *name)
{
rpc_client_t client;
rpc_connection_t conn;
rpc_auto_object_t result = NULL;
client = rpc_client_create(rpcd_get_socket_location(), NULL);
if (client == NULL)
return (-1);
return (-1);
}
| 26.582857 | 74 | 0.732158 | [
"object"
] |
8a152f368de7b9a48a3c3986d04bd735f8594853 | 565 | h | C | examples/gltf/shaders.h | cristiandonosoc/rothko | 0d488e10cc3b4150f638da9cf6711e66ba19b1c5 | [
"MIT",
"BSL-1.0",
"BSD-4-Clause",
"Unlicense"
] | 5 | 2019-05-26T00:04:06.000Z | 2020-12-28T19:20:12.000Z | examples/gltf/shaders.h | cristiandonosoc/rothko | 0d488e10cc3b4150f638da9cf6711e66ba19b1c5 | [
"MIT",
"BSL-1.0",
"BSD-4-Clause",
"Unlicense"
] | null | null | null | examples/gltf/shaders.h | cristiandonosoc/rothko | 0d488e10cc3b4150f638da9cf6711e66ba19b1c5 | [
"MIT",
"BSL-1.0",
"BSD-4-Clause",
"Unlicense"
] | 1 | 2019-06-02T19:35:59.000Z | 2019-06-02T19:35:59.000Z | // Copyright 2019, Cristián Donoso.
// This code has a BSD license. See LICENSE.
#pragma once
#include <rothko/graphics/shader.h>
#include <memory>
namespace rothko {
namespace gltf {
// Uniform-buffer layout consumed by the glTF model shader
// (see CreateModelShader below).
// NOTE(review): member order and padding must mirror the shader-side UBO
// declaration -- do not reorder fields.
struct ModelUBO {
/* struct Model { */
/* Mat4 transform; */
/* } model; */
/* struct Node { */
/* Mat4 transform; */
/* } node; */
// Combined model/node transform plus its inverse (for normals).
struct Model {
Mat4 transform;
Mat4 inverse_transform;
} model_transform;
// Fragment-stage material data.
// NOTE(review): Frag is declared but no member of it exists in this UBO --
// presumably pending/unused; confirm against the shader source.
struct Frag {
Vec4 base_color;
};
};
std::unique_ptr<Shader> CreateModelShader(Renderer*);
} // namespace gltf
} // namespace
| 15.27027 | 53 | 0.633628 | [
"model",
"transform"
] |
8a196a93888ae2a3960538e44627a8a2ab1405c7 | 3,648 | h | C | cpp/model/AccountUpdate.h | havenmoney/platform-clients | 60a2553bde73c1f8a2b02e76ad7adcdf04283cd3 | [
"Apache-2.0"
] | null | null | null | cpp/model/AccountUpdate.h | havenmoney/platform-clients | 60a2553bde73c1f8a2b02e76ad7adcdf04283cd3 | [
"Apache-2.0"
] | null | null | null | cpp/model/AccountUpdate.h | havenmoney/platform-clients | 60a2553bde73c1f8a2b02e76ad7adcdf04283cd3 | [
"Apache-2.0"
] | null | null | null | /**
* Haven Money API
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1.0
*
* NOTE: This class is auto generated by OpenAPI-Generator 4.0.2.
* https://openapi-generator.tech
* Do not edit the class manually.
*/
/*
* AccountUpdate.h
*
*
*/
#ifndef DEV_HAVEN_CLIENT_MODEL_AccountUpdate_H_
#define DEV_HAVEN_CLIENT_MODEL_AccountUpdate_H_
#include "../ModelBase.h"
#include "AccountType.h"
#include "AccountSubType.h"
#include "Balance.h"
#include <cpprest/details/basic_types.h>
namespace dev {
namespace haven {
namespace client {
namespace model {
/// <summary>
/// Update payload for an account resource.
/// NOTE: this file is auto-generated by OpenAPI Generator ("Do not edit the
/// class manually") -- documentation added here will be lost on regeneration.
/// </summary>
class AccountUpdate
: public ModelBase
{
public:
AccountUpdate();
virtual ~AccountUpdate();
/////////////////////////////////////////////
/// ModelBase overrides
void validate() override;
web::json::value toJson() const override;
void fromJson(const web::json::value& json) override;
void toMultipart(std::shared_ptr<MultipartFormData> multipart, const utility::string_t& namePrefix) const override;
void fromMultiPart(std::shared_ptr<MultipartFormData> multipart, const utility::string_t& namePrefix) override;
/////////////////////////////////////////////
/// AccountUpdate members
/// <summary>
/// Accessors for the account identifier (m_Id).
/// </summary>
utility::string_t getId() const;
void setId(const utility::string_t& value);
/// <summary>
/// Accessors for the owning user reference (m_User).
/// </summary>
utility::string_t getUser() const;
void setUser(const utility::string_t& value);
/// <summary>
/// Accessors for the short display name (m_DisplayName).
/// </summary>
utility::string_t getDisplayName() const;
void setDisplayName(const utility::string_t& value);
/// <summary>
/// Accessors for the detailed display name (m_DisplayNameDetailed).
/// </summary>
utility::string_t getDisplayNameDetailed() const;
void setDisplayNameDetailed(const utility::string_t& value);
/// <summary>
/// Accessors for the institution reference (m_Institution).
/// </summary>
utility::string_t getInstitution() const;
void setInstitution(const utility::string_t& value);
/// <summary>
/// Accessors for the institution's display name (m_InstitutionName).
/// </summary>
utility::string_t getInstitutionName() const;
void setInstitutionName(const utility::string_t& value);
/// <summary>
/// Accessors for the account type (m_AccountType).
/// </summary>
std::shared_ptr<AccountType> getAccountType() const;
void setAccountType(const std::shared_ptr<AccountType>& value);
/// <summary>
/// Accessors for the account sub-type (m_AccountSubType).
/// </summary>
std::shared_ptr<AccountSubType> getAccountSubType() const;
void setAccountSubType(const std::shared_ptr<AccountSubType>& value);
/// <summary>
/// Accessors for the account balance (m_Balance).
/// </summary>
std::shared_ptr<Balance> getBalance() const;
void setBalance(const std::shared_ptr<Balance>& value);
/// <summary>
/// Accessors for the creation timestamp (m_CreatedAt).
/// </summary>
utility::datetime getCreatedAt() const;
void setCreatedAt(const utility::datetime& value);
/// <summary>
/// Accessors for the last-update timestamp (m_UpdatedAt).
/// </summary>
utility::datetime getUpdatedAt() const;
void setUpdatedAt(const utility::datetime& value);
protected:
utility::string_t m_Id;
utility::string_t m_User;
utility::string_t m_DisplayName;
utility::string_t m_DisplayNameDetailed;
utility::string_t m_Institution;
utility::string_t m_InstitutionName;
std::shared_ptr<AccountType> m_AccountType;
std::shared_ptr<AccountSubType> m_AccountSubType;
std::shared_ptr<Balance> m_Balance;
utility::datetime m_CreatedAt;
utility::datetime m_UpdatedAt;
};
}
}
}
}
#endif /* DEV_HAVEN_CLIENT_MODEL_AccountUpdate_H_ */
| 23.088608 | 119 | 0.635691 | [
"model"
] |
8a29314a0529a06dbc55adcba2362fe15f28312c | 629 | h | C | include/entTypeEnum.h | Kirthos/Aerria | ec9eb6c78690838ba5a3509369e2c4d8428b4dc2 | [
"MIT"
] | null | null | null | include/entTypeEnum.h | Kirthos/Aerria | ec9eb6c78690838ba5a3509369e2c4d8428b4dc2 | [
"MIT"
] | null | null | null | include/entTypeEnum.h | Kirthos/Aerria | ec9eb6c78690838ba5a3509369e2c4d8428b4dc2 | [
"MIT"
] | null | null | null | #include "Entity.hpp"
#include <SFML/Graphics.hpp>
#include "Personnage.hpp"
#include "GlobalSys.hpp"
#include <vector>
#include "allEnum.h"
// Storage of prototype entities indexed by entType.
// (Translated from French:) the entities you create here must have names
// identical to their names in the enum.
class EntityStdStorage //the entities created here must keep the same names as in the entType enum
{
public:
EntityStdStorage();
// Stores the shared systems pointer; presumably also populates
// storageEntity/storageEntitySize -- TODO confirm in the .cpp.
void init(GlobalSys* passValue);
// Returns a copy of the prototype entity registered for `type`.
Entity getEntityCpy(entType type);
// Returns the stored size value for `type` (from storageEntitySize).
int getEntitySize(entType type);
// One prototype pointer and one size per enum value (entType::last inclusive).
Entity* storageEntity[entType::last+1];
int storageEntitySize[entType::last+1];
private:
protected:
GlobalSys* m_globalSys;
};
// Holds live (non-owning per visible declaration -- TODO confirm ownership)
// entity pointers and updates them each frame.
struct EntityPtrContainer
{
void update();
std::vector<Entity*> entityList;
};
| 20.966667 | 109 | 0.72496 | [
"vector"
] |
8a4a58a56740af133e64a3aeedc5d4f9383653b7 | 291 | h | C | src/Bullet.h | seroron/termshooter | 6b5ff5a4af3a2f67255864d5303604fba4dffa99 | [
"MIT"
] | null | null | null | src/Bullet.h | seroron/termshooter | 6b5ff5a4af3a2f67255864d5303604fba4dffa99 | [
"MIT"
] | null | null | null | src/Bullet.h | seroron/termshooter | 6b5ff5a4af3a2f67255864d5303604fba4dffa99 | [
"MIT"
] | null | null | null | #pragma once
#include <string>
#include "Object.h"
// A projectile entity that carries an ASCII-art sprite string.
class Bullet : public Object
{
public:
// x, y: initial position; mx, my: presumably per-update movement delta
// (TODO confirm in Bullet.cpp); aa: ASCII-art sprite to draw.
Bullet(int x, int y, int mx, int my, std::string aa);
virtual ~Bullet();
// Per-frame movement step (overrides Object).
virtual void move(taskarg_sptr arg);
// Renders the sprite (overrides Object).
virtual void draw(taskarg_sptr arg);
private:
std::string aa_; // ASCII-art sprite text
};
"object"
] |
8a4c604f9f48ba200aa27ec3ac0f56c7a75a4378 | 905 | h | C | code/engine.vc2008/xrGame/moving_bones_snd_player.h | Rikoshet-234/xray-oxygen | eaac3fa4780639152684f3251b8b4452abb8e439 | [
"Apache-2.0"
] | 7 | 2018-03-27T12:36:07.000Z | 2020-06-26T11:31:52.000Z | code/engine.vc2008/xrGame/moving_bones_snd_player.h | Rikoshet-234/xray-oxygen | eaac3fa4780639152684f3251b8b4452abb8e439 | [
"Apache-2.0"
] | 2 | 2018-05-26T23:17:14.000Z | 2019-04-14T18:33:27.000Z | code/engine.vc2008/xrGame/moving_bones_snd_player.h | Rikoshet-234/xray-oxygen | eaac3fa4780639152684f3251b8b4452abb8e439 | [
"Apache-2.0"
] | 5 | 2020-10-18T11:55:26.000Z | 2022-03-28T07:21:35.000Z | #pragma once
class IKinematics;
class CInifile;
class CGameObject;
// Plays a sound whose parameters follow the motion of a single skeleton bone:
// tracks the bone's transform between updates and derives a smoothed velocity
// used to drive the sound (details in the .cpp).
class moving_bones_snd_player
{
u16 bone_id; // id of the tracked bone in the kinematics model
float min_factor; // lower clamp for the velocity-derived factor -- TODO confirm semantics
float max_factor; // upper clamp -- TODO confirm semantics
float base_velocity; // reference velocity the factor is scaled against
float smothed_velocity; // [sic] smoothed velocity accumulator
ref_sound sound; // the sound being driven
Fmatrix previous_position; // bone transform from the previous update
IKinematics *kinematics; // visual skeleton the bone belongs to
public:
// Loads settings from `section` of `ini`; `object` is the object's world transform.
moving_bones_snd_player( IKinematics *K, CInifile* ini, LPCSTR section, const Fmatrix &object );
~moving_bones_snd_player();
// Advances the velocity estimate and the sound for the elapsed time.
void update( float time_delta, CGameObject &object );
void play( CGameObject &O );
void stop( );
// NOTE(review): always returns true; the real feedback check is commented out.
IC bool is_active(){ return true;/*!!sound._feedback();*/ }
private:
void load( IKinematics &K, CInifile& ini, LPCSTR section, const Fmatrix &object );
// Current world matrix of the tracked bone.
Fmatrix &bone_matrix( );
};
extern moving_bones_snd_player* create_moving_bones_snd_player( CGameObject &O );
/** Null-safe activity query: a missing player counts as inactive. */
IC bool is_active( moving_bones_snd_player* player )
{
	if (player == NULL)
		return false;
	return player->is_active();
}
"object"
] |
8a4d56754eae79e6655cf61457940d204343f0aa | 7,850 | h | C | ObitSystem/Obit/include/ObitFileFITS.h | sarrvesh/Obit | e4ce6029e9beb2a8c0316ee81ea710b66b2b7986 | [
"Linux-OpenIB"
] | 5 | 2019-08-26T06:53:08.000Z | 2020-10-20T01:08:59.000Z | ObitSystem/Obit/include/ObitFileFITS.h | sarrvesh/Obit | e4ce6029e9beb2a8c0316ee81ea710b66b2b7986 | [
"Linux-OpenIB"
] | null | null | null | ObitSystem/Obit/include/ObitFileFITS.h | sarrvesh/Obit | e4ce6029e9beb2a8c0316ee81ea710b66b2b7986 | [
"Linux-OpenIB"
] | 8 | 2017-08-29T15:12:32.000Z | 2022-03-31T12:16:08.000Z | /* $Id$ */
/*--------------------------------------------------------------------*/
/*; Copyright (C) 2004-2008 */
/*; Associated Universities, Inc. Washington DC, USA. */
/*; */
/*; This program is free software; you can redistribute it and/or */
/*; modify it under the terms of the GNU General Public License as */
/*; published by the Free Software Foundation; either version 2 of */
/*; the License, or (at your option) any later version. */
/*; */
/*; This program is distributed in the hope that it will be useful, */
/*; but WITHOUT ANY WARRANTY; without even the implied warranty of */
/*; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/*; GNU General Public License for more details. */
/*; */
/*; You should have received a copy of the GNU General Public */
/*; License along with this program; if not, write to the Free */
/*; Software Foundation, Inc., 675 Massachusetts Ave, Cambridge, */
/*; MA 02139, USA. */
/*; */
/*; Correspondence about this software should be addressed as follows:*/
/*; Internet email: bcotton@nrao.edu. */
/*; Postal address: William Cotton */
/*; National Radio Astronomy Observatory */
/*; 520 Edgemont Road */
/*; Charlottesville, VA 22903-2475 USA */
/*--------------------------------------------------------------------*/
#ifndef OBITFILEFITS_H
#define OBITFILEFITS_H
#include "fitsio.h"
#include "Obit.h"
#include "ObitErr.h"
#include "ObitThread.h"
#include "ObitInfoList.h"
#include "ObitFile.h"
/*-------- Obit: Merx mollis mortibus nuper ------------------*/
/**
* \file ObitFileFITS.h
* ObitFileFITS class definition.
*
* This class is derived from the #ObitFile class.
*
* This class provides an I/O interface to FITS files.
* This implementation uses cfitsio.
* The structure is also defined in ObitFileFITSDef.h to allow recursive
* definition in derived classes.
*
* \section ObitFileFITSUsage Usage
* Instances of this class are for access to disk files and is used
* for access to AIPS data files.
* Instances can be made using the #newObitFileFITS constructor,
* or the #ObitFileFITSCopy copy constructor and pointers copied
* (with reference pointer update) using #ObitFileFITSRef.
* The destructor (when reference count goes to zero) is
* #ObitIOUnref.
*/
/*----------------- typedefs ---------------------------*/
/*---------------Class Structure---------------------------*/
/** ObitFileFITS Class. */
typedef struct {
#include "ObitFileFITSDef.h" /* actual definition */
} ObitFileFITS;
/*----------------- Macroes ---------------------------*/
/**
* Macro to unreference (and possibly destroy) an ObitFileFITS
* returns a ObitFileFITS*.
* in = object to unreference
*/
#define ObitFileFITSUnref(in) ObitUnref (in)
/**
* Macro to reference (update reference count) an ObitFileFITS.
* returns a ObitFileFITS*.
* in = object to reference
*/
#define ObitFileFITSRef(in) ObitRef (in)
/**
* Macro to determine if an object is the member of this or a
* derived class.
* Returns TRUE if a member, else FALSE
* in = object to reference
*/
#define ObitFileFITSIsA(in) ObitIsA (in, ObitFileFITSGetClass())
/* Private functions are only defined in the .c file */
/*---------------Public functions---------------------------*/
/** Public: Class initializer. */
void ObitFileFITSClassInit (void);
/** Public: Constructor. */
ObitFileFITS* newObitFileFITS (gchar* name);
/** Public: ClassInfo pointer */
gconstpointer ObitFileFITSGetClass (void);
/** Public: destroy */
ObitFileFITS* ObitFileFITSZap (ObitFileFITS *in, ObitErr *err);
/** Public: Destroy current HDU */
ObitIOCode ObitFileFITSZapHDU (ObitFileFITS *in, ObitErr *err);
/** Public: Copy constructor. */
ObitFileFITS* ObitFileFITSCopy (ObitFileFITS *in, ObitFileFITS *out, ObitErr *err);
/** Public: Open */
ObitIOCode
ObitFileFITSOpen (ObitFileFITS *in, gchar *fileName, olong disk,
ObitIOAccess access, ObitErr *err);
/** Public: Close */
ObitIOCode ObitFileFITSClose (ObitFileFITS *in, ObitErr *err);
/** Public: Does a given file exist? */
gboolean ObitFileFITSExist (gchar *fileName, ObitErr *err);
/** Public: Position to given extention number */
ObitIOCode ObitFileFITSNum (ObitFileFITS *in, olong hdunum, olong *hdutype,
ObitErr *err);
/** Public: Position to given extention name */
ObitIOCode ObitFileFITSName (ObitFileFITS *in, olong hdutype, gchar *extname,
olong extver, ObitErr *err);
/** Public: Read String keyword */
ObitIOCode
ObitFileFITSReadKeyStr (ObitFileFITS *in, gchar *Name,
gchar *Value, gchar *Comment, ObitErr *err);
/** Public: Read float keyword */
ObitIOCode
ObitFileFITSReadKeyFlt (ObitFileFITS *in, gchar *Name,
ofloat *Value, gchar *Comment, ObitErr *err);
/** Public: Read double keyword */
ObitIOCode
ObitFileFITSReadKeyDbl (ObitFileFITS *in, gchar *Name,
odouble *Value, gchar *Comment, ObitErr *err);
/** Public: Read long keyword */
ObitIOCode
ObitFileFITSReadKeyLng (ObitFileFITS *in, gchar *Name,
olong *Value, gchar *Comment, ObitErr *err);
/** Public: Read next HISTORY header card */
ObitIOCode
ObitFileFITSReadHistory (ObitFileFITS *in, gchar *hiCard, ObitErr *err);
/** Public: Write String keyword */
ObitIOCode
ObitFileFITSWriteKeyStr (ObitFileFITS *in, gchar *Name, gboolean update,
gchar *Value, gchar *Comment, ObitErr *err);
/** Public: Write float keyword */
ObitIOCode
ObitFileFITSWriteKeyFlt (ObitFileFITS *in, gchar *Name, gboolean update,
ofloat Value, gchar *Comment, ObitErr *err);
/** Public: Write double keyword */
ObitIOCode
ObitFileFITSWriteKeyDbl (ObitFileFITS *in, gchar *Name, gboolean update,
odouble Value, gchar *Comment, ObitErr *err);
/** Public: Write long keyword */
ObitIOCode
ObitFileFITSWriteKeyLng (ObitFileFITS *in, gchar *Name, gboolean update,
olong Value, gchar *Comment, ObitErr *err);
/** Public: Write String HISTORY keyword */
ObitIOCode
ObitFileFITSWriteHisKeyStr (fitsfile *inFptr, gchar *Name, gchar *Value,
gchar *Comment, ObitErr *err);
/** Public: Write float HISTORY keyword */
ObitIOCode
ObitFileFITSWriteHisKeyFlt (fitsfile *inFptr, gchar *Name, ofloat Value,
gchar *Comment, ObitErr *err);
/** Public: Write double HISTORY keyword */
ObitIOCode
ObitFileFITSWriteHisKeyDbl (fitsfile *inFptr, gchar *Name, odouble Value,
gchar *Comment, ObitErr *err);
/** Public: Write long HISTORY keyword */
ObitIOCode
ObitFileFITSWriteHisKeyLng (fitsfile *inFptr, gchar *Name, olong Value,
gchar *Comment, ObitErr *err);
/** Public: Write Date keyword */
ObitIOCode
ObitFileFITSWriteDate (ObitFileFITS *in, ObitErr *err);
/** Public: Write next HISTORY header card */
ObitIOCode
ObitFileFITSWriteHistory (ObitFileFITS *in, gchar *hiCard, ObitErr *err);
/** Public: Expand the current HDR by a specified number of keywords*/
ObitIOCode
ObitFileFITSAddKeys (ObitFileFITS *in, olong morekeys, ObitErr *err);
/*-------------------Class Info--------------------------*/
/**
* ClassInfo Structure.
* Contains class name, a pointer to any parent class
* (NULL if none) and function pointers.
*/
typedef struct {
#include "ObitFileFITSClassDef.h" /* Actual definition */
} ObitFileFITSClassInfo;
#endif /* OBITFILEFITS_H */
| 36.009174 | 84 | 0.62879 | [
"object"
] |
8a4ee791c3ff4a1a0e1c92d1ca75c399b25664c5 | 2,082 | h | C | libs/wgtcc/scope.h | dfranx/ShaderDebugger | c6f0e9390a3e3cca9b0fec538a920cde4fec8cfd | [
"MIT"
] | 355 | 2019-09-27T15:29:05.000Z | 2022-02-08T23:34:46.000Z | libs/wgtcc/scope.h | Vicfred/ShaderDebugger | cfdea282056d5c2aa0b4626d852afe031426f44c | [
"MIT"
] | 5 | 2019-11-08T23:49:48.000Z | 2020-07-13T20:22:19.000Z | libs/wgtcc/scope.h | Vicfred/ShaderDebugger | cfdea282056d5c2aa0b4626d852afe031426f44c | [
"MIT"
] | 18 | 2020-01-26T22:40:19.000Z | 2022-03-29T05:33:22.000Z | #ifndef _WGTCC_SCOPE_H_
#define _WGTCC_SCOPE_H_
#include <iostream>
#include <map>
#include <string>
#include <vector>
namespace pp
{
class Identifier;
class Token;
enum ScopeType {
S_FILE,
S_PROTO,
S_BLOCK,
S_FUNC,
};
class Scope {
friend class StructType;
using TagList = std::vector<Identifier*>;
using IdentMap = std::map<std::string, Identifier*>;
public:
explicit Scope(Scope* parent, enum ScopeType type)
: parent_(parent), type_(type) {}
~Scope() {}
Scope* Parent() { return parent_; }
void SetParent(Scope* parent) { parent_ = parent; }
enum ScopeType Type() const { return type_; }
Identifier* Find(const Token* tok);
Identifier* FindInCurScope(const Token* tok);
Identifier* FindTag(const Token* tok);
Identifier* FindTagInCurScope(const Token* tok);
TagList AllTagsInCurScope() const;
void Insert(Identifier* ident);
void Insert(const std::string& name, Identifier* ident);
void InsertTag(Identifier* ident);
void Print();
bool operator==(const Scope& other) const { return type_ == other.type_; }
IdentMap::iterator begin() { return identMap_.begin(); }
IdentMap::iterator end() { return identMap_.end(); }
size_t size() const { return identMap_.size(); }
private:
Identifier* Find(const std::string& name);
Identifier* FindInCurScope(const std::string& name);
Identifier* FindTag(const std::string& name);
Identifier* FindTagInCurScope(const std::string& name);
std::string TagName(const std::string& name) {
return name + "@:tag";
}
static bool IsTagName(const std::string& name) {
return name.size() > 5 && name[name.size() - 5] == '@';
}
const Scope& operator=(const Scope& other);
Scope(const Scope& scope);
Scope* parent_;
enum ScopeType type_;
IdentMap identMap_;
};
}
#endif
| 28.135135 | 82 | 0.598943 | [
"vector"
] |
8a5bd699fc4e6eb3159a68f2151aba85c7a7a22c | 10,682 | h | C | scripting/javascript/spidermonkey-ios/include/gc/Root.h | pontelua/Lua2D | dde44d78d1600bb9a06e27908cfddf441e1012a6 | [
"Zlib",
"libtiff",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"Libpng",
"curl",
"BSD-3-Clause"
] | 2 | 2015-03-22T00:17:30.000Z | 2017-09-30T03:23:25.000Z | scripting/javascript/spidermonkey-win32/include/gc/Root.h | ReubenBond/cocos2d-x | e153902b2e123155c2beb098aa9fc00e659ff0d3 | [
"Zlib",
"libtiff",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"Libpng",
"curl",
"BSD-3-Clause"
] | null | null | null | scripting/javascript/spidermonkey-win32/include/gc/Root.h | ReubenBond/cocos2d-x | e153902b2e123155c2beb098aa9fc00e659ff0d3 | [
"Zlib",
"libtiff",
"BSD-2-Clause",
"Apache-2.0",
"MIT",
"Libpng",
"curl",
"BSD-3-Clause"
] | null | null | null | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sw=4 et tw=78:
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is SpiderMonkey global object code.
*
* The Initial Developer of the Original Code is
* the Mozilla Foundation.
* Portions created by the Initial Developer are Copyright (C) 2012
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef jsgc_root_h__
#define jsgc_root_h__
#include "jspubtd.h"
#include "js/Utility.h"
#ifdef __cplusplus
namespace JS {
/*
* Moving GC Stack Rooting
*
* A moving GC may change the physical location of GC allocated things, even
* when they are rooted, updating all pointers to the thing to refer to its new
* location. The GC must therefore know about all live pointers to a thing,
* not just one of them, in order to behave correctly.
*
* The classes below are used to root stack locations whose value may be held
* live across a call that can trigger GC (i.e. a call which might allocate any
* GC things). For a code fragment such as:
*
* Foo();
* ... = obj->lastProperty();
*
* If Foo() can trigger a GC, the stack location of obj must be rooted to
* ensure that the GC does not move the JSObject referred to by obj without
* updating obj's location itself. This rooting must happen regardless of
* whether there are other roots which ensure that the object itself will not
* be collected.
*
* If Foo() cannot trigger a GC, and the same holds for all other calls made
* between obj's definitions and its last uses, then no rooting is required.
*
* Several classes are available for rooting stack locations. All are templated
* on the type T of the value being rooted, for which RootMethods<T> must
* have an instantiation.
*
* - Root<T> roots an existing stack allocated variable or other location of
* type T. This is typically used either when a variable only needs to be
* rooted on certain rare paths, or when a function takes a bare GC thing
* pointer as an argument and needs to root it. In the latter case a
* Handle<T> is generally preferred, see below.
*
* - RootedVar<T> declares a variable of type T, whose value is always rooted.
*
* - Handle<T> is a const reference to a Root<T> or RootedVar<T>. Handles are
* coerced automatically from such a Root<T> or RootedVar<T>. Functions which
* take GC things or values as arguments and need to root those arguments
* should generally replace those arguments with handles and avoid any
* explicit rooting. This has two benefits. First, when several such
* functions call each other then redundant rooting of multiple copies of the
* GC thing can be avoided. Second, if the caller does not pass a rooted
* value a compile error will be generated, which is quicker and easier to
* fix than when relying on a separate rooting analysis.
*/
template <typename T> class Root;
template <typename T> class RootedVar;
template <typename T>
struct RootMethods { };
/*
* Reference to a stack location rooted for GC. See the "Moving GC Stack
* Rooting" comment above.
*/
template <typename T>
class Handle
{
public:
/* Copy handles of different types, with implicit coercion. */
template <typename S> Handle(Handle<S> handle) {
testAssign<S>();
ptr = reinterpret_cast<const T *>(handle.address());
}
/* Get a handle from a rooted stack location, with implicit coercion. */
template <typename S> inline Handle(const Root<S> &root);
template <typename S> inline Handle(const RootedVar<S> &root);
const T *address() { return ptr; }
operator T () { return value(); }
T operator ->() { return value(); }
private:
const T *ptr;
T value() { return *ptr; }
template <typename S>
void testAssign() {
#ifdef DEBUG
T a = RootMethods<T>::initial();
S b = RootMethods<S>::initial();
a = b;
(void)a;
#endif
}
};
typedef Handle<JSObject*> HandleObject;
typedef Handle<JSFunction*> HandleFunction;
typedef Handle<JSString*> HandleString;
typedef Handle<jsid> HandleId;
typedef Handle<Value> HandleValue;
template <typename T>
struct RootMethods<T *>
{
static T *initial() { return NULL; }
static ThingRootKind kind() { return T::rootKind(); }
static bool poisoned(T *v) { return IsPoisonedPtr(v); }
};
/*
* Root a stack location holding a GC thing. This takes a stack pointer
* and ensures that throughout its lifetime the referenced variable
* will remain pinned against a moving GC.
*
* It is important to ensure that the location referenced by a Root is
* initialized, as otherwise the GC may try to use the the uninitialized value.
* It is generally preferable to use either RootedVar for local variables, or
* Handle for arguments.
*/
template <typename T>
class Root
{
public:
Root(JSContext *cx_, const T *ptr
JS_GUARD_OBJECT_NOTIFIER_PARAM)
{
#ifdef JSGC_ROOT_ANALYSIS
ContextFriendFields *cx = ContextFriendFields::get(cx_);
ThingRootKind kind = RootMethods<T>::kind();
this->stack = reinterpret_cast<Root<T>**>(&cx->thingGCRooters[kind]);
this->prev = *stack;
*stack = this;
#endif
JS_ASSERT(!RootMethods<T>::poisoned(*ptr));
this->ptr = ptr;
JS_GUARD_OBJECT_NOTIFIER_INIT;
}
~Root()
{
#ifdef JSGC_ROOT_ANALYSIS
JS_ASSERT(*stack == this);
*stack = prev;
#endif
}
#ifdef JSGC_ROOT_ANALYSIS
Root<T> *previous() { return prev; }
#endif
const T *address() const { return ptr; }
private:
#ifdef JSGC_ROOT_ANALYSIS
Root<T> **stack, *prev;
#endif
const T *ptr;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};
template<typename T> template <typename S>
inline
Handle<T>::Handle(const Root<S> &root)
{
testAssign<S>();
ptr = reinterpret_cast<const T *>(root.address());
}
typedef Root<JSObject*> RootObject;
typedef Root<JSFunction*> RootFunction;
typedef Root<JSString*> RootString;
typedef Root<jsid> RootId;
typedef Root<Value> RootValue;
/*
* Mark a stack location as a root for the rooting analysis, without actually
* rooting it in release builds. This should only be used for stack locations
* of GC things that cannot be relocated by a garbage collection, and that
* are definitely reachable via another path.
*/
class SkipRoot
{
#if defined(DEBUG) && defined(JSGC_ROOT_ANALYSIS)
SkipRoot **stack, *prev;
const uint8_t *start;
const uint8_t *end;
public:
template <typename T>
SkipRoot(JSContext *cx_, const T *ptr
JS_GUARD_OBJECT_NOTIFIER_PARAM)
{
ContextFriendFields *cx = ContextFriendFields::get(cx_);
this->stack = &cx->skipGCRooters;
this->prev = *stack;
*stack = this;
this->start = (const uint8_t *) ptr;
this->end = this->start + sizeof(T);
JS_GUARD_OBJECT_NOTIFIER_INIT;
}
~SkipRoot()
{
JS_ASSERT(*stack == this);
*stack = prev;
}
SkipRoot *previous() { return prev; }
bool contains(const uint8_t *v, size_t len) {
return v >= start && v + len <= end;
}
#else /* DEBUG && JSGC_ROOT_ANALYSIS */
public:
template <typename T>
SkipRoot(JSContext *cx, const T *ptr
JS_GUARD_OBJECT_NOTIFIER_PARAM)
{
JS_GUARD_OBJECT_NOTIFIER_INIT;
}
#endif /* DEBUG && JSGC_ROOT_ANALYSIS */
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};
/* Make a local variable which stays rooted throughout its lifetime. */
template <typename T>
class RootedVar
{
public:
RootedVar(JSContext *cx)
: ptr(RootMethods<T>::initial()), root(cx, &ptr)
{}
RootedVar(JSContext *cx, T initial)
: ptr(initial), root(cx, &ptr)
{}
operator T () const { return ptr; }
T operator ->() const { return ptr; }
T * address() { return &ptr; }
const T * address() const { return &ptr; }
T & reference() { return ptr; }
T raw() { return ptr; }
/*
* This method is only necessary due to an obscure C++98 requirement (that
* there be an accessible, usable copy constructor when passing a temporary
* to an implicitly-called constructor for use with a const-ref parameter).
* (Head spinning yet?) We can remove this when we build the JS engine
* with -std=c++11.
*/
operator Handle<T> () const { return Handle<T>(*this); }
T & operator =(T value)
{
JS_ASSERT(!RootMethods<T>::poisoned(value));
ptr = value;
return ptr;
}
T & operator =(const RootedVar &value)
{
ptr = value;
return ptr;
}
private:
T ptr;
Root<T> root;
RootedVar() MOZ_DELETE;
RootedVar(const RootedVar &) MOZ_DELETE;
};
template <typename T> template <typename S>
inline
Handle<T>::Handle(const RootedVar<S> &root)
{
testAssign<S>();
ptr = reinterpret_cast<const T *>(root.address());
}
typedef RootedVar<JSObject*> RootedVarObject;
typedef RootedVar<JSFunction*> RootedVarFunction;
typedef RootedVar<JSString*> RootedVarString;
typedef RootedVar<jsid> RootedVarId;
typedef RootedVar<Value> RootedVarValue;
} /* namespace JS */
#endif /* __cplusplus */
#endif /* jsgc_root_h___ */
| 30.346591 | 79 | 0.678057 | [
"object"
] |
8a61b1594e8cc6e762c210e6efc41136a7244e04 | 24,471 | h | C | libWetCloth/Core/LinearizedImplicitEuler.h | rushmash/libwetcloth | 24f16481c68952c3d2a91acd6e3b74eb091b66bc | [
"BSD-3-Clause-Clear"
] | null | null | null | libWetCloth/Core/LinearizedImplicitEuler.h | rushmash/libwetcloth | 24f16481c68952c3d2a91acd6e3b74eb091b66bc | [
"BSD-3-Clause-Clear"
] | null | null | null | libWetCloth/Core/LinearizedImplicitEuler.h | rushmash/libwetcloth | 24f16481c68952c3d2a91acd6e3b74eb091b66bc | [
"BSD-3-Clause-Clear"
] | null | null | null | //
// This file is part of the libWetCloth open source project
//
// The code is licensed solely for academic and non-commercial use under the
// terms of the Clear BSD License. The terms of the Clear BSD License are
// provided below. Other licenses may be obtained by contacting the faculty
// of the Columbia Computer Graphics Group or a Columbia University licensing officer.
//
// We would like to hear from you if you appreciate this work.
//
// The Clear BSD License
//
// Copyright 2018 Yun (Raymond) Fei, Christopher Batty, Eitan Grinspun, and Changxi Zheng
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted (subject to the limitations in the disclaimer
// below) provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific
// prior written permission.
//
// NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS
// LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
// OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
#ifndef __LINEARIZED_IMPLICIT_EULER__
#define __LINEARIZED_IMPLICIT_EULER__
#include <Eigen/Core>
#include <iostream>
#include "SceneStepper.h"
#include "MathUtilities.h"
#include "StringUtilities.h"
#include "array3.h"
#include "pcgsolver/sparse_matrix.h"
class LinearizedImplicitEuler : public SceneStepper
{
public:
LinearizedImplicitEuler( const scalar& criterion, const scalar& pressure_criterion, const scalar& quasi_static_criterion, const scalar& viscous_criterion, int maxiters, int manifold_substeps, int viscosity_substeps, int surf_tension_substeps );
virtual ~LinearizedImplicitEuler();
virtual bool stepScene( TwoDScene& scene, scalar dt );
virtual bool stepVelocity( TwoDScene& scene, scalar dt );
virtual bool stepVelocityLagrangian( TwoDScene& scene, scalar dt );
virtual bool projectFine( TwoDScene& scene, scalar dt );
virtual bool acceptVelocity( TwoDScene& scene );
virtual bool stepImplicitElasto( TwoDScene& scene, scalar dt );
virtual bool stepImplicitElastoLagrangian( TwoDScene& scene, scalar dt );
virtual bool stepImplicitElastoDiagonalPCR( TwoDScene& scene, scalar dt );
virtual bool stepImplicitElastoDiagonalPCG( TwoDScene& scene, scalar dt );
virtual bool stepImplicitViscosityDiagonalPCG( const TwoDScene& scene,
const std::vector< VectorXs >& node_vel_src_x,
const std::vector< VectorXs >& node_vel_src_y,
const std::vector< VectorXs >& node_vel_src_z,
std::vector< VectorXs >& node_vel_x,
std::vector< VectorXs >& node_vel_y,
std::vector< VectorXs >& node_vel_z,
const scalar& dt );
virtual bool stepImplicitElastoDiagonalPCGCoSolve( TwoDScene& scene, scalar dt );
virtual bool stepImplicitElastoDiagonalPCRCoSolve( TwoDScene& scene, scalar dt );
virtual bool stepImplicitElastoAMGPCG( TwoDScene& scene, scalar dt );
virtual bool applyPressureDragElasto( TwoDScene& scene, scalar dt );
virtual bool applyPressureDragFluid( TwoDScene& scene, scalar dt );
virtual bool solveBiCGSTAB( TwoDScene& scene, scalar dt );
virtual bool manifoldPropagate( TwoDScene& scene, scalar dt );
virtual bool advectSurfTension( TwoDScene& scene, scalar dt );
virtual scalar computeDivergence( TwoDScene& scene );
virtual void pushFluidVelocity();
virtual void popFluidVelocity();
virtual void pushElastoVelocity();
virtual void popElastoVelocity();
virtual std::string getName() const;
private:
void zeroFixedDoFs( const TwoDScene& scene, VectorXs& vec );
void performLocalSolve( const TwoDScene& scene,
const std::vector< VectorXs >& node_rhs_x,
const std::vector< VectorXs >& node_rhs_y,
const std::vector< VectorXs >& node_rhs_z,
const std::vector< VectorXs >& node_mass_x,
const std::vector< VectorXs >& node_mass_y,
const std::vector< VectorXs >& node_mass_z,
std::vector< VectorXs >& out_node_vec_x,
std::vector< VectorXs >& out_node_vec_y,
std::vector< VectorXs >& out_node_vec_z );
void prepareGroupPrecondition(
const TwoDScene& scene,
const std::vector< VectorXs >& node_m_x,
const std::vector< VectorXs >& node_m_y,
const std::vector< VectorXs >& node_m_z,
const scalar& dt );
void performLocalSolveTwist( const TwoDScene& scene, const VectorXs& rhs, const VectorXs& m, VectorXs& out);
void performLocalSolve( const TwoDScene& scene, const VectorXs& rhs, const VectorXs& m, VectorXs& out);
void performInvLocalSolve( const TwoDScene& scene,
const std::vector< VectorXs >& node_rhs_x,
const std::vector< VectorXs >& node_rhs_y,
const std::vector< VectorXs >& node_rhs_z,
const std::vector< VectorXs >& node_inv_mass_x,
const std::vector< VectorXs >& node_inv_mass_y,
const std::vector< VectorXs >& node_inv_mass_z,
std::vector< VectorXs >& out_node_vec_x,
std::vector< VectorXs >& out_node_vec_y,
std::vector< VectorXs >& out_node_vec_z );
void performGroupedLocalSolve( const TwoDScene& scene,
const std::vector< VectorXs >& node_rhs_x,
const std::vector< VectorXs >& node_rhs_y,
const std::vector< VectorXs >& node_rhs_z,
std::vector< VectorXs >& out_node_vec_x,
std::vector< VectorXs >& out_node_vec_y,
std::vector< VectorXs >& out_node_vec_z );
void performGlobalMultiply( const TwoDScene& scene, const scalar& dt,
const std::vector< VectorXs >& node_m_x,
const std::vector< VectorXs >& node_m_y,
const std::vector< VectorXs >& node_m_z,
const std::vector< VectorXs >& node_v_x,
const std::vector< VectorXs >& node_v_y,
const std::vector< VectorXs >& node_v_z,
std::vector< VectorXs >& out_node_vec_x,
std::vector< VectorXs >& out_node_vec_y,
std::vector< VectorXs >& out_node_vec_z );
void performGlobalMultiply( const TwoDScene& scene, const scalar& dt,
const std::vector< VectorXs >& node_m_x,
const std::vector< VectorXs >& node_m_y,
const std::vector< VectorXs >& node_m_z,
const std::vector< VectorXs >& node_v_x,
const std::vector< VectorXs >& node_v_y,
const std::vector< VectorXs >& node_v_z,
std::vector< VectorXs >& out_node_vec_x,
std::vector< VectorXs >& out_node_vec_y,
std::vector< VectorXs >& out_node_vec_z,
const VectorXs& m,
const VectorXs& angular_vec,
VectorXs& out);
void performGlobalMultiply( const TwoDScene& scene, const scalar& dt,
const VectorXs& m,
const VectorXs& vec,
VectorXs& out);
void performAngularGlobalMultiply( const TwoDScene& scene, const scalar& dt,
const VectorXs& m,
const VectorXs& v,
VectorXs& out);
void performGlobalMultiplyBiCGSTAB( const TwoDScene& scene, const scalar& dt,
const std::vector< VectorXs >& node_v_s_x,
const std::vector< VectorXs >& node_v_s_y,
const std::vector< VectorXs >& node_v_s_z,
const std::vector< VectorXs >& node_v_f_x,
const std::vector< VectorXs >& node_v_f_y,
const std::vector< VectorXs >& node_v_f_z,
const std::vector< VectorXs >& node_v_p,
std::vector< VectorXs >& out_node_vec_s_x,
std::vector< VectorXs >& out_node_vec_s_y,
std::vector< VectorXs >& out_node_vec_s_z,
std::vector< VectorXs >& out_node_vec_f_x,
std::vector< VectorXs >& out_node_vec_f_y,
std::vector< VectorXs >& out_node_vec_f_z,
std::vector< VectorXs >& out_node_vec_p);
void constructNodeForceCoarse( TwoDScene& scene, const scalar& dt,
Array3s& node_rhs_fluid_x,
Array3s& node_rhs_fluid_y,
Array3s& node_rhs_fluid_z );
void performInvLocalSolveCoarse( TwoDScene& scene,
const Array3s& node_rhs_x,
const Array3s& node_rhs_y,
const Array3s& node_rhs_z,
const Array3s& node_inv_mass_x,
const Array3s& node_inv_mass_y,
const Array3s& node_inv_mass_z,
Array3s& out_node_vec_x,
Array3s& out_node_vec_y,
Array3s& out_node_vec_z );
void constructHDVCoarse( TwoDScene& scene, const scalar& dt,
Array3s& node_hdv_x,
Array3s& node_hdv_y,
Array3s& node_hdv_z,
Array3s& node_hdvs_x,
Array3s& node_hdvs_y,
Array3s& node_hdvs_z,
const Array3s& node_v_x,
const Array3s& node_v_y,
const Array3s& node_v_z,
const Array3s& node_fluid_v_x,
const Array3s& node_fluid_v_y,
const Array3s& node_fluid_v_z);
void constructInvMDVCoarse( TwoDScene& scene,
Array3s& node_inv_mdv_x,
Array3s& node_inv_mdv_y,
Array3s& node_inv_mdv_z,
const Array3s& node_hdv_x,
const Array3s& node_hdv_y,
const Array3s& node_hdv_z);
void constructMsDVsCoarse( TwoDScene& scene,
Array3s& node_msdv2_x,
Array3s& node_msdv2_y,
Array3s& node_msdv2_z,
Array3s& node_inv_msdv2_x,
Array3s& node_inv_msdv2_y,
Array3s& node_inv_msdv2_z,
const Array3s& node_hdvs_x,
const Array3s& node_hdvs_y,
const Array3s& node_hdvs_z);
void constructPsiSFCoarse( TwoDScene& scene,
Array3s& node_psi_sf_x,
Array3s& node_psi_sf_y,
Array3s& node_psi_sf_z,
const Array3s& node_inv_mdv_x,
const Array3s& node_inv_mdv_y,
const Array3s& node_inv_mdv_z,
const Array3s& node_hdv_x,
const Array3s& node_hdv_y,
const Array3s& node_hdv_z );
void constructPsiFSCoarse( TwoDScene& scene,
Array3s& node_psi_fs_x,
Array3s& node_psi_fs_y,
Array3s& node_psi_fs_z,
const Array3s& node_inv_mdvs_x,
const Array3s& node_inv_mdvs_y,
const Array3s& node_inv_mdvs_z,
const Array3s& node_hdvs_x,
const Array3s& node_hdvs_y,
const Array3s& node_hdvs_z );
void constructNodeForce( TwoDScene& scene, const scalar& dt, std::vector< VectorXs >& node_rhs_x, std::vector< VectorXs >& node_rhs_y, std::vector< VectorXs >& node_rhs_z, std::vector< VectorXs >& node_rhs_fluid_x, std::vector< VectorXs >& node_rhs_fluid_y, std::vector< VectorXs >& node_rhs_fluid_z );
void addSolidDragRHS( TwoDScene& scene,
const std::vector< VectorXs >& node_vel_x,
const std::vector< VectorXs >& node_vel_y,
const std::vector< VectorXs >& node_vel_z,
std::vector< VectorXs >& node_rhs_x,
std::vector< VectorXs >& node_rhs_y,
std::vector< VectorXs >& node_rhs_z );
void addFluidDragRHS( TwoDScene& scene,
const std::vector< VectorXs >& node_fluid_vel_x,
const std::vector< VectorXs >& node_fluid_vel_y,
const std::vector< VectorXs >& node_fluid_vel_z,
std::vector< VectorXs >& node_rhs_x,
std::vector< VectorXs >& node_rhs_y,
std::vector< VectorXs >& node_rhs_z );
void addSolidDrag( TwoDScene& scene,
const std::vector< VectorXs >& node_vel_x,
const std::vector< VectorXs >& node_vel_y,
const std::vector< VectorXs >& node_vel_z,
std::vector< VectorXs >& node_fluid_vel_x,
std::vector< VectorXs >& node_fluid_vel_y,
std::vector< VectorXs >& node_fluid_vel_z );
void constructHDV( TwoDScene& scene, const scalar& dt );
void constructInvMDV( TwoDScene& scene);
void constructMsDVs( TwoDScene& scene);
void constructPsiSF( TwoDScene& scene );
void constructHessianPreProcess( TwoDScene& scene, const scalar& dt );
void constructHessianPostProcess( TwoDScene& scene, const scalar& dt );
void constructAngularHessianPreProcess( TwoDScene& scene, const scalar& dt );
void constructAngularHessianPostProcess( TwoDScene& scene, const scalar& dt );
// std::vector< Eigen::SimplicialLDLT< SparseXs >* > m_local_solvers;
// SparseXs m_A;
std::vector< std::pair<int, int> > m_triA_sup;
std::vector< std::pair<int, int> > m_angular_triA_sup;
TripletXs m_triA;
TripletXs m_angular_triA;
VectorXs m_multiply_buffer;
VectorXs m_pre_mult_buffer;
VectorXs m_angular_moment_buffer;
VectorXs m_angular_v_plus_buffer;
std::vector< std::vector< std::vector< Matrix3s > > > m_gauss_ddphidfdf;
const scalar m_pcg_criterion;
const scalar m_pressure_criterion;
const scalar m_quasi_static_criterion;
const scalar m_viscous_criterion;
const int m_maxiters;
const int m_manifold_substeps;
const int m_viscosity_substeps;
const int m_surf_tension_substeps;
std::vector< VectorXs > m_node_rhs_x;
std::vector< VectorXs > m_node_rhs_y;
std::vector< VectorXs > m_node_rhs_z;
std::vector< VectorXs > m_node_v_plus_x;
std::vector< VectorXs > m_node_v_plus_y;
std::vector< VectorXs > m_node_v_plus_z;
std::vector< VectorXs > m_node_v_fluid_plus_x;
std::vector< VectorXs > m_node_v_fluid_plus_y;
std::vector< VectorXs > m_node_v_fluid_plus_z;
std::vector< VectorXs > m_node_v_0_x;
std::vector< VectorXs > m_node_v_0_y;
std::vector< VectorXs > m_node_v_0_z;
std::vector< VectorXs > m_node_v_tmp_x;
std::vector< VectorXs > m_node_v_tmp_y;
std::vector< VectorXs > m_node_v_tmp_z;
std::vector< VectorXs > m_node_r_x; //r0
std::vector< VectorXs > m_node_r_y;
std::vector< VectorXs > m_node_r_z;
std::vector< VectorXs > m_node_z_x; // s
std::vector< VectorXs > m_node_z_y;
std::vector< VectorXs > m_node_z_z;
std::vector< VectorXs > m_node_p_x; // p
std::vector< VectorXs > m_node_p_y;
std::vector< VectorXs > m_node_p_z;
std::vector< VectorXs > m_node_q_x; // h
std::vector< VectorXs > m_node_q_y;
std::vector< VectorXs > m_node_q_z;
std::vector< VectorXs > m_node_w_x; // v
std::vector< VectorXs > m_node_w_y;
std::vector< VectorXs > m_node_w_z;
std::vector< VectorXs > m_node_t_x; // t
std::vector< VectorXs > m_node_t_y;
std::vector< VectorXs > m_node_t_z;
VectorXs m_angular_r;
VectorXs m_angular_z;
VectorXs m_angular_p;
VectorXs m_angular_q;
VectorXs m_angular_w;
VectorXs m_angular_t;
std::vector< VectorXs > m_node_rhs_p;
std::vector< VectorXs > m_node_r_p;
std::vector< VectorXs > m_node_z_p;
std::vector< VectorXs > m_node_p_p;
std::vector< VectorXs > m_node_q_p;
std::vector< VectorXs > m_node_jacobi_precond;
std::vector< VectorXs > m_node_rhs_sat;
std::vector< VectorXs > m_node_perm_sat_x;
std::vector< VectorXs > m_node_perm_sat_y;
std::vector< VectorXs > m_node_perm_sat_z;
std::vector< VectorXs > m_node_r_sat;
std::vector< VectorXs > m_node_z_sat;
std::vector< VectorXs > m_node_p_sat;
std::vector< VectorXs > m_node_q_sat;
std::vector< VectorXs > m_node_jacobi_precond_sat;
std::vector< VectorXs > m_node_rhs_fluid_x;
std::vector< VectorXs > m_node_rhs_fluid_y;
std::vector< VectorXs > m_node_rhs_fluid_z;
std::vector< VectorXs > m_node_hdvm_x; // hDVm
std::vector< VectorXs > m_node_hdvm_y;
std::vector< VectorXs > m_node_hdvm_z;
std::vector< VectorXs > m_node_epsilon_x; // 1 - psi
std::vector< VectorXs > m_node_epsilon_y;
std::vector< VectorXs > m_node_epsilon_z;
std::vector< VectorXs > m_node_mshdvm_x; // M_s + hDVm
std::vector< VectorXs > m_node_mshdvm_y;
std::vector< VectorXs > m_node_mshdvm_z;
std::vector< VectorXs > m_node_damped_x; // damped M_s
std::vector< VectorXs > m_node_damped_y;
std::vector< VectorXs > m_node_damped_z;
std::vector< VectorXs > m_node_mfhdvm_x; // M_f + hDVm
std::vector< VectorXs > m_node_mfhdvm_y;
std::vector< VectorXs > m_node_mfhdvm_z;
std::vector< VectorXs > m_node_inv_mfhdvm_x; // (M_f+hDVm)^{-1}
std::vector< VectorXs > m_node_inv_mfhdvm_y;
std::vector< VectorXs > m_node_inv_mfhdvm_z;
std::vector< VectorXs > m_node_mfhdvm_hdvm_x; // (M_f+hDVm)^{-1}hDVm
std::vector< VectorXs > m_node_mfhdvm_hdvm_y;
std::vector< VectorXs > m_node_mfhdvm_hdvm_z;
std::vector< VectorXs > m_node_mshdvm_hdvm_x; // (M_s+hDVm)^{-1}hDVm
std::vector< VectorXs > m_node_mshdvm_hdvm_y;
std::vector< VectorXs > m_node_mshdvm_hdvm_z;
std::vector< VectorXs > m_node_inv_C_x; // (M_f+[hdv]*M_s)^{-1}
std::vector< VectorXs > m_node_inv_C_y;
std::vector< VectorXs > m_node_inv_C_z;
std::vector< VectorXs > m_node_Cs_x; // M_s+[hdvs]*M_f
std::vector< VectorXs > m_node_Cs_y;
std::vector< VectorXs > m_node_Cs_z;
std::vector< VectorXs > m_node_inv_Cs_x; // (M_s+[hdvs]*M_f)^{-1}
std::vector< VectorXs > m_node_inv_Cs_y;
std::vector< VectorXs > m_node_inv_Cs_z;
std::vector< VectorXs > m_node_inv_precond_x; // (M_s+[hdvs]*M_f+h^2 diag(W.diag(H).W^T))^{-1}
std::vector< VectorXs > m_node_inv_precond_y;
std::vector< VectorXs > m_node_inv_precond_z;
std::vector< VectorXs > m_node_psi_sf_x;
std::vector< VectorXs > m_node_psi_sf_y;
std::vector< VectorXs > m_node_psi_sf_z;
std::vector< VectorXs > m_node_psi_fs_x;
std::vector< VectorXs > m_node_psi_fs_y;
std::vector< VectorXs > m_node_psi_fs_z;
// bicgstab
std::vector< VectorXs > m_node_bi_r_S_x; //r
std::vector< VectorXs > m_node_bi_r_S_y;
std::vector< VectorXs > m_node_bi_r_S_z;
std::vector< VectorXs > m_node_bi_r_hat_S_x; //r0
std::vector< VectorXs > m_node_bi_r_hat_S_y;
std::vector< VectorXs > m_node_bi_r_hat_S_z;
std::vector< VectorXs > m_node_bi_s_S_x; // s
std::vector< VectorXs > m_node_bi_s_S_y;
std::vector< VectorXs > m_node_bi_s_S_z;
std::vector< VectorXs > m_node_bi_p_S_x; // p
std::vector< VectorXs > m_node_bi_p_S_y;
std::vector< VectorXs > m_node_bi_p_S_z;
std::vector< VectorXs > m_node_bi_h_S_x; // h
std::vector< VectorXs > m_node_bi_h_S_y;
std::vector< VectorXs > m_node_bi_h_S_z;
std::vector< VectorXs > m_node_bi_t_S_x; // h
std::vector< VectorXs > m_node_bi_t_S_y;
std::vector< VectorXs > m_node_bi_t_S_z;
std::vector< VectorXs > m_node_bi_v_S_x; // h
std::vector< VectorXs > m_node_bi_v_S_y;
std::vector< VectorXs > m_node_bi_v_S_z;
std::vector< VectorXs > m_node_bi_r_L_x; //r
std::vector< VectorXs > m_node_bi_r_L_y;
std::vector< VectorXs > m_node_bi_r_L_z;
std::vector< VectorXs > m_node_bi_r_hat_L_x; //r0
std::vector< VectorXs > m_node_bi_r_hat_L_y;
std::vector< VectorXs > m_node_bi_r_hat_L_z;
std::vector< VectorXs > m_node_bi_s_L_x; // s
std::vector< VectorXs > m_node_bi_s_L_y;
std::vector< VectorXs > m_node_bi_s_L_z;
std::vector< VectorXs > m_node_bi_p_L_x; // p
std::vector< VectorXs > m_node_bi_p_L_y;
std::vector< VectorXs > m_node_bi_p_L_z;
std::vector< VectorXs > m_node_bi_h_L_x; // h
std::vector< VectorXs > m_node_bi_h_L_y;
std::vector< VectorXs > m_node_bi_h_L_z;
std::vector< VectorXs > m_node_bi_t_L_x; // h
std::vector< VectorXs > m_node_bi_t_L_y;
std::vector< VectorXs > m_node_bi_t_L_z;
std::vector< VectorXs > m_node_bi_v_L_x; // h
std::vector< VectorXs > m_node_bi_v_L_y;
std::vector< VectorXs > m_node_bi_v_L_z;
std::vector< VectorXs > m_node_bi_r_P; //r
std::vector< VectorXs > m_node_bi_r_hat_P; //r0
std::vector< VectorXs > m_node_bi_s_P; // s
std::vector< VectorXs > m_node_bi_p_P; // p
std::vector< VectorXs > m_node_bi_h_P; // h
std::vector< VectorXs > m_node_bi_t_P; // h
std::vector< VectorXs > m_node_bi_v_P; // h
std::stack< std::vector< VectorXs > > m_fluid_vel_stack;
std::stack< std::vector< VectorXs > > m_elasto_vel_stack;
std::vector<double> m_arr_pressure_rhs;
robertbridson::SparseMatrix<scalar> m_arr_pressure_matrix;
std::vector<double> m_fine_pressure_rhs;
robertbridson::SparseMatrix<scalar> m_fine_pressure_matrix;
std::vector< VectorXi > m_fine_global_indices;
SparseXs m_A;
std::vector< VectorXi > m_node_global_indices_x;
std::vector< VectorXi > m_node_global_indices_y;
std::vector< VectorXi > m_node_global_indices_z;
std::vector< Vector3i > m_effective_node_indices;
std::vector< Vector3i > m_dof_ijk;
TripletXs m_tri_W;
SparseXs m_W;
TripletXs m_tri_M;
SparseXs m_M;
std::vector< double > m_elasto_rhs;
std::vector< double > m_elasto_result;
robertbridson::SparseMatrix<scalar> m_H;
VectorXs m_lagrangian_rhs;
VectorXs m_v_plus;
VectorXs m_r;
VectorXs m_z;
VectorXs m_p;
VectorXs m_q;
std::vector< std::shared_ptr< Eigen::SimplicialLDLT< SparseXs > > > m_group_preconditioners;
std::vector< VectorXi > m_node_visc_indices_x;
std::vector< VectorXi > m_node_visc_indices_y;
std::vector< VectorXi > m_node_visc_indices_z;
robertbridson::SparseMatrix<scalar> m_visc_matrix;
std::vector< scalar > m_visc_rhs;
std::vector< scalar > m_visc_solution;
std::vector< Vector2i > m_effective_node_indices_x;
std::vector< Vector2i > m_effective_node_indices_y;
std::vector< Vector2i > m_effective_node_indices_z;
};
#endif
| 43.00703 | 303 | 0.63704 | [
"vector"
] |
8a6e9e86a054c776e43e1b5b3e5312c18677959e | 701 | c | C | Programming/C/vector.c | alexstrive/Ifmo-Works | 1f4fb67dfde9c4e14130b6614be9ec4b0642d3c0 | [
"Unlicense"
] | 4 | 2017-10-06T08:26:23.000Z | 2017-10-17T12:40:52.000Z | Programming/C/vector.c | novopashin/Ifmo-Works | 1f4fb67dfde9c4e14130b6614be9ec4b0642d3c0 | [
"Unlicense"
] | 5 | 2017-11-16T18:01:51.000Z | 2018-01-15T18:05:58.000Z | Programming/C/vector.c | allordiron/LabWorks | 1f4fb67dfde9c4e14130b6614be9ec4b0642d3c0 | [
"Unlicense"
] | 1 | 2018-05-20T19:30:57.000Z | 2018-05-20T19:30:57.000Z | #include "vector.h"
void readVector(struct Vector *coordinate) {
scanf("%d %d %d", &coordinate->x, &coordinate->y, &coordinate->z);
}
void printVector(struct Vector coordinate) {
printf("[%d, %d, %d]\n", coordinate.x, coordinate.y, coordinate.z);
}
float getVectorLength(struct Vector vector) {
return (float) sqrt(pow(vector.x, 2) + pow(vector.y, 2) + pow(vector.z, 2));
}
void createVector(struct Vector *targetVector, struct Vector startPoint, struct Vector endPoint) {
struct Vector newVector = {0, 0, 0};
newVector.x = endPoint.x - startPoint.x;
newVector.y = endPoint.y - startPoint.y;
newVector.z = endPoint.z - startPoint.z;
*targetVector = newVector;
}
| 28.04 | 98 | 0.676177 | [
"vector"
] |
8a71f62a07d8ce9d08b1fa1c24918fd5404df5e4 | 33,964 | c | C | src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c | PWN-Hunter/mesa3d | be12e189989e3476d7c9d40e1c0c3a35143ee51a | [
"MIT"
] | null | null | null | src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c | PWN-Hunter/mesa3d | be12e189989e3476d7c9d40e1c0c3a35143ee51a | [
"MIT"
] | null | null | null | src/gallium/drivers/etnaviv/etnaviv_compiler_nir.c | PWN-Hunter/mesa3d | be12e189989e3476d7c9d40e1c0c3a35143ee51a | [
"MIT"
] | null | null | null | /*
* Copyright (c) 2012-2019 Etnaviv Project
* Copyright (c) 2019 Zodiac Inflight Innovations
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jonathan Marek <jonathan@marek.ca>
* Wladimir J. van der Laan <laanwj@gmail.com>
*/
#include "etnaviv_compiler.h"
#include "etnaviv_asm.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_disasm.h"
#include "etnaviv_uniforms.h"
#include "etnaviv_util.h"
#include <math.h>
#include "util/u_memory.h"
#include "util/register_allocate.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_worklist.h"
#include "tgsi/tgsi_strings.h"
#include "util/u_half.h"
struct etna_compile {
nir_shader *nir;
#define is_fs(c) ((c)->nir->info.stage == MESA_SHADER_FRAGMENT)
const struct etna_specs *specs;
struct etna_shader_variant *variant;
/* block # to instr index */
unsigned *block_ptr;
/* Code generation */
int inst_ptr; /* current instruction pointer */
struct etna_inst code[ETNA_MAX_INSTRUCTIONS * ETNA_INST_SIZE];
/* constants */
uint64_t consts[ETNA_MAX_IMM];
/* There was an error during compilation */
bool error;
};
/* io related lowering
* run after lower_int_to_float because it adds i2f/f2i ops
*/
static void
etna_lower_io(nir_shader *shader, struct etna_shader_variant *v)
{
nir_foreach_function(function, shader) {
nir_builder b;
nir_builder_init(&b, function->impl);
nir_foreach_block(block, function->impl) {
nir_foreach_instr_safe(instr, block) {
if (instr->type == nir_instr_type_intrinsic) {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_front_face: {
/* HW front_face is 0.0/1.0, not 0/~0u for bool
* lower with a comparison with 0
*/
intr->dest.ssa.bit_size = 32;
b.cursor = nir_after_instr(instr);
nir_ssa_def *ssa = nir_ine(&b, &intr->dest.ssa, nir_imm_int(&b, 0));
if (v->key.front_ccw)
nir_instr_as_alu(ssa->parent_instr)->op = nir_op_ieq;
nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
nir_src_for_ssa(ssa),
ssa->parent_instr);
} break;
case nir_intrinsic_store_deref: {
nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
if (shader->info.stage != MESA_SHADER_FRAGMENT || !v->key.frag_rb_swap)
break;
assert(deref->deref_type == nir_deref_type_var);
if (deref->var->data.location != FRAG_RESULT_COLOR &&
deref->var->data.location != FRAG_RESULT_DATA0)
break;
b.cursor = nir_before_instr(instr);
nir_ssa_def *ssa = nir_mov(&b, intr->src[1].ssa);
nir_alu_instr *alu = nir_instr_as_alu(ssa->parent_instr);
alu->src[0].swizzle[0] = 2;
alu->src[0].swizzle[2] = 0;
nir_instr_rewrite_src(instr, &intr->src[1], nir_src_for_ssa(ssa));
} break;
case nir_intrinsic_load_uniform: {
/* convert indirect load_uniform to load_ubo when possible
* this is required on HALTI5+ because address register is not implemented
* address register loads also arent done optimally
*/
if (v->shader->specs->halti < 2 || nir_src_is_const(intr->src[0]))
break;
nir_intrinsic_instr *load_ubo =
nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
load_ubo->num_components = intr->num_components;
nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
load_ubo->num_components, 32, NULL);
b.cursor = nir_before_instr(instr);
load_ubo->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0));
load_ubo->src[1] = nir_src_for_ssa(nir_iadd(&b,
nir_imul(&b, intr->src[0].ssa, nir_imm_int(&b, 16)),
nir_imm_int(&b, nir_intrinsic_base(intr) * 16)));
nir_builder_instr_insert(&b, &load_ubo->instr);
nir_ssa_def_rewrite_uses(&intr->dest.ssa,
nir_src_for_ssa(&load_ubo->dest.ssa));
nir_instr_remove(&intr->instr);
} break;
case nir_intrinsic_load_ubo: {
nir_const_value *idx = nir_src_as_const_value(intr->src[0]);
assert(idx);
/* offset index by 1, index 0 is used for converted load_uniform */
b.cursor = nir_before_instr(instr);
nir_instr_rewrite_src(instr, &intr->src[0],
nir_src_for_ssa(nir_imm_int(&b, idx[0].u32 + 1)));
} break;
case nir_intrinsic_load_vertex_id:
case nir_intrinsic_load_instance_id:
/* detect use of vertex_id/instance_id */
v->vs_id_in_reg = v->infile.num_reg;
break;
default:
break;
}
}
if (instr->type != nir_instr_type_tex)
continue;
nir_tex_instr *tex = nir_instr_as_tex(instr);
nir_src *coord = NULL;
nir_src *lod_bias = NULL;
unsigned lod_bias_idx;
assert(tex->sampler_index == tex->texture_index);
for (unsigned i = 0; i < tex->num_srcs; i++) {
switch (tex->src[i].src_type) {
case nir_tex_src_coord:
coord = &tex->src[i].src;
break;
case nir_tex_src_bias:
case nir_tex_src_lod:
assert(!lod_bias);
lod_bias = &tex->src[i].src;
lod_bias_idx = i;
break;
case nir_tex_src_comparator:
break;
default:
assert(0);
break;
}
}
if (tex->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
/* use a dummy load_uniform here to represent texcoord scale */
b.cursor = nir_before_instr(instr);
nir_intrinsic_instr *load =
nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_uniform);
nir_intrinsic_set_base(load, ~tex->sampler_index);
load->num_components = 2;
load->src[0] = nir_src_for_ssa(nir_imm_float(&b, 0.0f));
nir_ssa_dest_init(&load->instr, &load->dest, 2, 32, NULL);
nir_intrinsic_set_type(load, nir_type_float);
nir_builder_instr_insert(&b, &load->instr);
nir_ssa_def *new_coord = nir_fmul(&b, coord->ssa, &load->dest.ssa);
nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(new_coord));
}
/* pre HALTI5 needs texture sources in a single source */
if (!lod_bias || v->shader->specs->halti >= 5)
continue;
assert(coord && lod_bias && tex->coord_components < 4);
nir_alu_instr *vec = nir_alu_instr_create(shader, nir_op_vec4);
for (unsigned i = 0; i < tex->coord_components; i++) {
vec->src[i].src = nir_src_for_ssa(coord->ssa);
vec->src[i].swizzle[0] = i;
}
for (unsigned i = tex->coord_components; i < 4; i++)
vec->src[i].src = nir_src_for_ssa(lod_bias->ssa);
vec->dest.write_mask = 0xf;
nir_ssa_dest_init(&vec->instr, &vec->dest.dest, 4, 32, NULL);
nir_tex_instr_remove_src(tex, lod_bias_idx);
nir_instr_rewrite_src(&tex->instr, coord, nir_src_for_ssa(&vec->dest.dest.ssa));
tex->coord_components = 4;
nir_instr_insert_before(&tex->instr, &vec->instr);
}
}
}
}
static bool
etna_alu_to_scalar_filter_cb(const nir_instr *instr, const void *data)
{
const struct etna_specs *specs = data;
if (instr->type != nir_instr_type_alu)
return false;
nir_alu_instr *alu = nir_instr_as_alu(instr);
switch (alu->op) {
case nir_op_frsq:
case nir_op_frcp:
case nir_op_flog2:
case nir_op_fexp2:
case nir_op_fsqrt:
case nir_op_fcos:
case nir_op_fsin:
case nir_op_fdiv:
case nir_op_imul:
return true;
/* TODO: can do better than alu_to_scalar for vector compares */
case nir_op_b32all_fequal2:
case nir_op_b32all_fequal3:
case nir_op_b32all_fequal4:
case nir_op_b32any_fnequal2:
case nir_op_b32any_fnequal3:
case nir_op_b32any_fnequal4:
case nir_op_b32all_iequal2:
case nir_op_b32all_iequal3:
case nir_op_b32all_iequal4:
case nir_op_b32any_inequal2:
case nir_op_b32any_inequal3:
case nir_op_b32any_inequal4:
return true;
case nir_op_fdot2:
if (!specs->has_halti2_instructions)
return true;
break;
default:
break;
}
return false;
}
static void
etna_lower_alu_impl(nir_function_impl *impl, struct etna_compile *c)
{
nir_shader *shader = impl->function->shader;
nir_builder b;
nir_builder_init(&b, impl);
/* in a seperate loop so we can apply the multiple-uniform logic to the new fmul */
nir_foreach_block(block, impl) {
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_alu)
continue;
nir_alu_instr *alu = nir_instr_as_alu(instr);
/* multiply sin/cos src by constant
* TODO: do this earlier (but it breaks const_prop opt)
*/
if (alu->op == nir_op_fsin || alu->op == nir_op_fcos) {
b.cursor = nir_before_instr(instr);
nir_ssa_def *imm = c->specs->has_new_transcendentals ?
nir_imm_float(&b, 1.0 / M_PI) :
nir_imm_float(&b, 2.0 / M_PI);
nir_instr_rewrite_src(instr, &alu->src[0].src,
nir_src_for_ssa(nir_fmul(&b, alu->src[0].src.ssa, imm)));
}
/* change transcendental ops to vec2 and insert vec1 mul for the result
* TODO: do this earlier (but it breaks with optimizations)
*/
if (c->specs->has_new_transcendentals && (
alu->op == nir_op_fdiv || alu->op == nir_op_flog2 ||
alu->op == nir_op_fsin || alu->op == nir_op_fcos)) {
nir_ssa_def *ssa = &alu->dest.dest.ssa;
assert(ssa->num_components == 1);
nir_alu_instr *mul = nir_alu_instr_create(shader, nir_op_fmul);
mul->src[0].src = mul->src[1].src = nir_src_for_ssa(ssa);
mul->src[1].swizzle[0] = 1;
mul->dest.write_mask = 1;
nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, 32, NULL);
ssa->num_components = 2;
mul->dest.saturate = alu->dest.saturate;
alu->dest.saturate = 0;
nir_instr_insert_after(instr, &mul->instr);
nir_ssa_def_rewrite_uses_after(ssa, nir_src_for_ssa(&mul->dest.dest.ssa), &mul->instr);
}
}
}
}
static void etna_lower_alu(nir_shader *shader, struct etna_compile *c)
{
nir_foreach_function(function, shader) {
if (function->impl)
etna_lower_alu_impl(function->impl, c);
}
}
static void
emit_inst(struct etna_compile *c, struct etna_inst *inst)
{
c->code[c->inst_ptr++] = *inst;
}
/* to map nir srcs should to etna_inst srcs */
enum {
SRC_0_1_2 = (0 << 0) | (1 << 2) | (2 << 4),
SRC_0_1_X = (0 << 0) | (1 << 2) | (3 << 4),
SRC_0_X_X = (0 << 0) | (3 << 2) | (3 << 4),
SRC_0_X_1 = (0 << 0) | (3 << 2) | (1 << 4),
SRC_0_1_0 = (0 << 0) | (1 << 2) | (0 << 4),
SRC_X_X_0 = (3 << 0) | (3 << 2) | (0 << 4),
SRC_0_X_0 = (0 << 0) | (3 << 2) | (0 << 4),
};
/* info to translate a nir op to etna_inst */
struct etna_op_info {
uint8_t opcode; /* INST_OPCODE_ */
uint8_t src; /* SRC_ enum */
uint8_t cond; /* INST_CONDITION_ */
uint8_t type; /* INST_TYPE_ */
};
static const struct etna_op_info etna_ops[] = {
[0 ... nir_num_opcodes - 1] = {0xff},
#undef TRUE
#undef FALSE
#define OPCT(nir, op, src, cond, type) [nir_op_##nir] = { \
INST_OPCODE_##op, \
SRC_##src, \
INST_CONDITION_##cond, \
INST_TYPE_##type \
}
#define OPC(nir, op, src, cond) OPCT(nir, op, src, cond, F32)
#define IOPC(nir, op, src, cond) OPCT(nir, op, src, cond, S32)
#define UOPC(nir, op, src, cond) OPCT(nir, op, src, cond, U32)
#define OP(nir, op, src) OPC(nir, op, src, TRUE)
#define IOP(nir, op, src) IOPC(nir, op, src, TRUE)
#define UOP(nir, op, src) UOPC(nir, op, src, TRUE)
OP(mov, MOV, X_X_0), OP(fneg, MOV, X_X_0), OP(fabs, MOV, X_X_0), OP(fsat, MOV, X_X_0),
OP(fmul, MUL, 0_1_X), OP(fadd, ADD, 0_X_1), OP(ffma, MAD, 0_1_2),
OP(fdot2, DP2, 0_1_X), OP(fdot3, DP3, 0_1_X), OP(fdot4, DP4, 0_1_X),
OPC(fmin, SELECT, 0_1_0, GT), OPC(fmax, SELECT, 0_1_0, LT),
OP(ffract, FRC, X_X_0), OP(frcp, RCP, X_X_0), OP(frsq, RSQ, X_X_0),
OP(fsqrt, SQRT, X_X_0), OP(fsin, SIN, X_X_0), OP(fcos, COS, X_X_0),
OP(fsign, SIGN, X_X_0), OP(ffloor, FLOOR, X_X_0), OP(fceil, CEIL, X_X_0),
OP(flog2, LOG, X_X_0), OP(fexp2, EXP, X_X_0),
OPC(seq, SET, 0_1_X, EQ), OPC(sne, SET, 0_1_X, NE), OPC(sge, SET, 0_1_X, GE), OPC(slt, SET, 0_1_X, LT),
OPC(fcsel, SELECT, 0_1_2, NZ),
OP(fdiv, DIV, 0_1_X),
OP(fddx, DSX, 0_X_0), OP(fddy, DSY, 0_X_0),
/* type convert */
IOP(i2f32, I2F, 0_X_X),
UOP(u2f32, I2F, 0_X_X),
IOP(f2i32, F2I, 0_X_X),
UOP(f2u32, F2I, 0_X_X),
UOP(b2f32, AND, 0_X_X), /* AND with fui(1.0f) */
UOP(b2i32, AND, 0_X_X), /* AND with 1 */
OPC(f2b32, CMP, 0_X_X, NE), /* != 0.0 */
UOPC(i2b32, CMP, 0_X_X, NE), /* != 0 */
/* arithmetic */
IOP(iadd, ADD, 0_X_1),
IOP(imul, IMULLO0, 0_1_X),
/* IOP(imad, IMADLO0, 0_1_2), */
IOP(ineg, ADD, X_X_0), /* ADD 0, -x */
IOP(iabs, IABS, X_X_0),
IOP(isign, SIGN, X_X_0),
IOPC(imin, SELECT, 0_1_0, GT),
IOPC(imax, SELECT, 0_1_0, LT),
UOPC(umin, SELECT, 0_1_0, GT),
UOPC(umax, SELECT, 0_1_0, LT),
/* select */
UOPC(b32csel, SELECT, 0_1_2, NZ),
/* compare with int result */
OPC(feq32, CMP, 0_1_X, EQ),
OPC(fne32, CMP, 0_1_X, NE),
OPC(fge32, CMP, 0_1_X, GE),
OPC(flt32, CMP, 0_1_X, LT),
IOPC(ieq32, CMP, 0_1_X, EQ),
IOPC(ine32, CMP, 0_1_X, NE),
IOPC(ige32, CMP, 0_1_X, GE),
IOPC(ilt32, CMP, 0_1_X, LT),
UOPC(uge32, CMP, 0_1_X, GE),
UOPC(ult32, CMP, 0_1_X, LT),
/* bit ops */
IOP(ior, OR, 0_X_1),
IOP(iand, AND, 0_X_1),
IOP(ixor, XOR, 0_X_1),
IOP(inot, NOT, X_X_0),
IOP(ishl, LSHIFT, 0_X_1),
IOP(ishr, RSHIFT, 0_X_1),
UOP(ushr, RSHIFT, 0_X_1),
};
static void
etna_emit_block_start(struct etna_compile *c, unsigned block)
{
c->block_ptr[block] = c->inst_ptr;
}
static void
etna_emit_alu(struct etna_compile *c, nir_op op, struct etna_inst_dst dst,
struct etna_inst_src src[3], bool saturate)
{
struct etna_op_info ei = etna_ops[op];
unsigned swiz_scalar = INST_SWIZ_BROADCAST(ffs(dst.write_mask) - 1);
assert(ei.opcode != 0xff);
struct etna_inst inst = {
.opcode = ei.opcode,
.type = ei.type,
.cond = ei.cond,
.dst = dst,
.sat = saturate,
};
switch (op) {
case nir_op_fdiv:
case nir_op_flog2:
case nir_op_fsin:
case nir_op_fcos:
if (c->specs->has_new_transcendentals)
inst.tex.amode = 1;
/* fall through */
case nir_op_frsq:
case nir_op_frcp:
case nir_op_fexp2:
case nir_op_fsqrt:
case nir_op_imul:
/* scalar instructions we want src to be in x component */
src[0].swiz = inst_swiz_compose(src[0].swiz, swiz_scalar);
src[1].swiz = inst_swiz_compose(src[1].swiz, swiz_scalar);
break;
/* deal with instructions which don't have 1:1 mapping */
case nir_op_b2f32:
inst.src[2] = etna_immediate_float(1.0f);
break;
case nir_op_b2i32:
inst.src[2] = etna_immediate_int(1);
break;
case nir_op_f2b32:
inst.src[1] = etna_immediate_float(0.0f);
break;
case nir_op_i2b32:
inst.src[1] = etna_immediate_int(0);
break;
case nir_op_ineg:
inst.src[0] = etna_immediate_int(0);
src[0].neg = 1;
break;
default:
break;
}
/* set the "true" value for CMP instructions */
if (inst.opcode == INST_OPCODE_CMP)
inst.src[2] = etna_immediate_int(-1);
for (unsigned j = 0; j < 3; j++) {
unsigned i = ((ei.src >> j*2) & 3);
if (i < 3)
inst.src[j] = src[i];
}
emit_inst(c, &inst);
}
static void
etna_emit_tex(struct etna_compile *c, nir_texop op, unsigned texid, unsigned dst_swiz,
struct etna_inst_dst dst, struct etna_inst_src coord,
struct etna_inst_src lod_bias, struct etna_inst_src compare)
{
struct etna_inst inst = {
.dst = dst,
.tex.id = texid + (is_fs(c) ? 0 : c->specs->vertex_sampler_offset),
.tex.swiz = dst_swiz,
.src[0] = coord,
};
if (lod_bias.use)
inst.src[1] = lod_bias;
if (compare.use)
inst.src[2] = compare;
switch (op) {
case nir_texop_tex: inst.opcode = INST_OPCODE_TEXLD; break;
case nir_texop_txb: inst.opcode = INST_OPCODE_TEXLDB; break;
case nir_texop_txl: inst.opcode = INST_OPCODE_TEXLDL; break;
default:
assert(0);
}
emit_inst(c, &inst);
}
static void
etna_emit_jump(struct etna_compile *c, unsigned block, struct etna_inst_src condition)
{
if (!condition.use) {
emit_inst(c, &(struct etna_inst) {.opcode = INST_OPCODE_BRANCH, .imm = block });
return;
}
struct etna_inst inst = {
.opcode = INST_OPCODE_BRANCH,
.cond = INST_CONDITION_NOT,
.type = INST_TYPE_U32,
.src[0] = condition,
.imm = block,
};
inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
emit_inst(c, &inst);
}
static void
etna_emit_discard(struct etna_compile *c, struct etna_inst_src condition)
{
if (!condition.use) {
emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_TEXKILL });
return;
}
struct etna_inst inst = {
.opcode = INST_OPCODE_TEXKILL,
.cond = INST_CONDITION_NZ,
.type = (c->specs->halti < 2) ? INST_TYPE_F32 : INST_TYPE_U32,
.src[0] = condition,
};
inst.src[0].swiz = INST_SWIZ_BROADCAST(inst.src[0].swiz & 3);
emit_inst(c, &inst);
}
static void
etna_emit_output(struct etna_compile *c, nir_variable *var, struct etna_inst_src src)
{
struct etna_shader_io_file *sf = &c->variant->outfile;
if (is_fs(c)) {
switch (var->data.location) {
case FRAG_RESULT_COLOR:
case FRAG_RESULT_DATA0: /* DATA0 is used by gallium shaders for color */
c->variant->ps_color_out_reg = src.reg;
break;
case FRAG_RESULT_DEPTH:
c->variant->ps_depth_out_reg = src.reg;
break;
default:
unreachable("Unsupported fs output");
}
return;
}
switch (var->data.location) {
case VARYING_SLOT_POS:
c->variant->vs_pos_out_reg = src.reg;
break;
case VARYING_SLOT_PSIZ:
c->variant->vs_pointsize_out_reg = src.reg;
break;
default:
sf->reg[sf->num_reg].reg = src.reg;
sf->reg[sf->num_reg].slot = var->data.location;
sf->reg[sf->num_reg].num_components = glsl_get_components(var->type);
sf->num_reg++;
break;
}
}
#define OPT(nir, pass, ...) ({ \
bool this_progress = false; \
NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__); \
this_progress; \
})
#define OPT_V(nir, pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
static void
etna_optimize_loop(nir_shader *s)
{
bool progress;
do {
progress = false;
OPT_V(s, nir_lower_vars_to_ssa);
progress |= OPT(s, nir_opt_copy_prop_vars);
progress |= OPT(s, nir_copy_prop);
progress |= OPT(s, nir_opt_dce);
progress |= OPT(s, nir_opt_cse);
progress |= OPT(s, nir_opt_peephole_select, 16, true, true);
progress |= OPT(s, nir_opt_intrinsics);
progress |= OPT(s, nir_opt_algebraic);
progress |= OPT(s, nir_opt_constant_folding);
progress |= OPT(s, nir_opt_dead_cf);
if (OPT(s, nir_opt_trivial_continues)) {
progress = true;
/* If nir_opt_trivial_continues makes progress, then we need to clean
* things up if we want any hope of nir_opt_if or nir_opt_loop_unroll
* to make progress.
*/
OPT(s, nir_copy_prop);
OPT(s, nir_opt_dce);
}
progress |= OPT(s, nir_opt_loop_unroll, nir_var_all);
progress |= OPT(s, nir_opt_if, false);
progress |= OPT(s, nir_opt_remove_phis);
progress |= OPT(s, nir_opt_undef);
}
while (progress);
}
static int
etna_glsl_type_size(const struct glsl_type *type, bool bindless)
{
return glsl_count_attribute_slots(type, false);
}
static void
copy_uniform_state_to_shader(struct etna_shader_variant *sobj, uint64_t *consts, unsigned count)
{
struct etna_shader_uniform_info *uinfo = &sobj->uniforms;
uinfo->imm_count = count * 4;
uinfo->imm_data = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_data));
uinfo->imm_contents = MALLOC(uinfo->imm_count * sizeof(*uinfo->imm_contents));
for (unsigned i = 0; i < uinfo->imm_count; i++) {
uinfo->imm_data[i] = consts[i];
uinfo->imm_contents[i] = consts[i] >> 32;
}
etna_set_shader_uniforms_dirty_flags(sobj);
}
#include "etnaviv_compiler_nir_emit.h"
bool
etna_compile_shader_nir(struct etna_shader_variant *v)
{
if (unlikely(!v))
return false;
struct etna_compile *c = CALLOC_STRUCT(etna_compile);
if (!c)
return false;
c->variant = v;
c->specs = v->shader->specs;
c->nir = nir_shader_clone(NULL, v->shader->nir);
nir_shader *s = c->nir;
const struct etna_specs *specs = c->specs;
v->stage = s->info.stage;
v->num_loops = 0; /* TODO */
v->vs_id_in_reg = -1;
v->vs_pos_out_reg = -1;
v->vs_pointsize_out_reg = -1;
v->ps_color_out_reg = 0; /* 0 for shader that doesn't write fragcolor.. */
v->ps_depth_out_reg = -1;
/* setup input linking */
struct etna_shader_io_file *sf = &v->infile;
if (s->info.stage == MESA_SHADER_VERTEX) {
nir_foreach_variable(var, &s->inputs) {
unsigned idx = var->data.driver_location;
sf->reg[idx].reg = idx;
sf->reg[idx].slot = var->data.location;
sf->reg[idx].num_components = glsl_get_components(var->type);
sf->num_reg = MAX2(sf->num_reg, idx+1);
}
} else {
unsigned count = 0;
nir_foreach_variable(var, &s->inputs) {
unsigned idx = var->data.driver_location;
sf->reg[idx].reg = idx + 1;
sf->reg[idx].slot = var->data.location;
sf->reg[idx].num_components = glsl_get_components(var->type);
sf->num_reg = MAX2(sf->num_reg, idx+1);
count++;
}
assert(sf->num_reg == count);
}
NIR_PASS_V(s, nir_lower_io, ~nir_var_shader_out, etna_glsl_type_size,
(nir_lower_io_options)0);
OPT_V(s, nir_lower_regs_to_ssa);
OPT_V(s, nir_lower_vars_to_ssa);
OPT_V(s, nir_lower_indirect_derefs, nir_var_all);
OPT_V(s, nir_lower_tex, &(struct nir_lower_tex_options) { .lower_txp = ~0u });
OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);
etna_optimize_loop(s);
OPT_V(s, etna_lower_io, v);
if (v->shader->specs->vs_need_z_div)
NIR_PASS_V(s, nir_lower_clip_halfz);
/* lower pre-halti2 to float (halti0 has integers, but only scalar..) */
if (c->specs->halti < 2) {
/* use opt_algebraic between int_to_float and boot_to_float because
* int_to_float emits ftrunc, and ftrunc lowering generates bool ops
*/
OPT_V(s, nir_lower_int_to_float);
OPT_V(s, nir_opt_algebraic);
OPT_V(s, nir_lower_bool_to_float);
} else {
OPT_V(s, nir_lower_idiv, nir_lower_idiv_fast);
OPT_V(s, nir_lower_bool_to_int32);
}
etna_optimize_loop(s);
if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
nir_print_shader(s, stdout);
while( OPT(s, nir_opt_vectorize) );
OPT_V(s, nir_lower_alu_to_scalar, etna_alu_to_scalar_filter_cb, specs);
NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);
NIR_PASS_V(s, nir_opt_algebraic_late);
NIR_PASS_V(s, nir_move_vec_src_uses_to_dest);
NIR_PASS_V(s, nir_copy_prop);
/* only HW supported integer source mod is ineg for iadd instruction (?) */
NIR_PASS_V(s, nir_lower_to_source_mods, ~nir_lower_int_source_mods);
/* need copy prop after uses_to_dest, and before src mods: see
* dEQP-GLES2.functional.shaders.random.all_features.fragment.95
*/
NIR_PASS_V(s, nir_opt_dce);
NIR_PASS_V(s, etna_lower_alu, c);
if (DBG_ENABLED(ETNA_DBG_DUMP_SHADERS))
nir_print_shader(s, stdout);
unsigned block_ptr[nir_shader_get_entrypoint(s)->num_blocks];
c->block_ptr = block_ptr;
unsigned num_consts;
ASSERTED bool ok = emit_shader(c, &v->num_temps, &num_consts);
assert(ok);
/* empty shader, emit NOP */
if (!c->inst_ptr)
emit_inst(c, &(struct etna_inst) { .opcode = INST_OPCODE_NOP });
/* assemble instructions, fixing up labels */
uint32_t *code = MALLOC(c->inst_ptr * 16);
for (unsigned i = 0; i < c->inst_ptr; i++) {
struct etna_inst *inst = &c->code[i];
if (inst->opcode == INST_OPCODE_BRANCH)
inst->imm = block_ptr[inst->imm];
inst->halti5 = specs->halti >= 5;
etna_assemble(&code[i * 4], inst);
}
v->code_size = c->inst_ptr * 4;
v->code = code;
v->needs_icache = c->inst_ptr > specs->max_instructions;
copy_uniform_state_to_shader(v, c->consts, num_consts);
if (s->info.stage == MESA_SHADER_FRAGMENT) {
v->input_count_unk8 = 31; /* XXX what is this */
assert(v->ps_depth_out_reg <= 0);
ralloc_free(c->nir);
FREE(c);
return true;
}
v->input_count_unk8 = DIV_ROUND_UP(v->infile.num_reg + 4, 16); /* XXX what is this */
/* fill in "mystery meat" load balancing value. This value determines how
* work is scheduled between VS and PS
* in the unified shader architecture. More precisely, it is determined from
* the number of VS outputs, as well as chip-specific
* vertex output buffer size, vertex cache size, and the number of shader
* cores.
*
* XXX this is a conservative estimate, the "optimal" value is only known for
* sure at link time because some
* outputs may be unused and thus unmapped. Then again, in the general use
* case with GLSL the vertex and fragment
* shaders are linked already before submitting to Gallium, thus all outputs
* are used.
*
* note: TGSI compiler counts all outputs (including position and pointsize), here
* v->outfile.num_reg only counts varyings, +1 to compensate for the position output
* TODO: might have a problem that we don't count pointsize when it is used
*/
int half_out = v->outfile.num_reg / 2 + 1;
assert(half_out);
uint32_t b = ((20480 / (specs->vertex_output_buffer_size -
2 * half_out * specs->vertex_cache_size)) +
9) /
10;
uint32_t a = (b + 256 / (specs->shader_core_count * half_out)) / 2;
v->vs_load_balancing = VIVS_VS_LOAD_BALANCING_A(MIN2(a, 255)) |
VIVS_VS_LOAD_BALANCING_B(MIN2(b, 255)) |
VIVS_VS_LOAD_BALANCING_C(0x3f) |
VIVS_VS_LOAD_BALANCING_D(0x0f);
ralloc_free(c->nir);
FREE(c);
return true;
}
void
etna_destroy_shader_nir(struct etna_shader_variant *shader)
{
assert(shader);
FREE(shader->code);
FREE(shader->uniforms.imm_data);
FREE(shader->uniforms.imm_contents);
FREE(shader);
}
extern const char *tgsi_swizzle_names[];
void
etna_dump_shader_nir(const struct etna_shader_variant *shader)
{
if (shader->stage == MESA_SHADER_VERTEX)
printf("VERT\n");
else
printf("FRAG\n");
etna_disasm(shader->code, shader->code_size, PRINT_RAW);
printf("num loops: %i\n", shader->num_loops);
printf("num temps: %i\n", shader->num_temps);
printf("immediates:\n");
for (int idx = 0; idx < shader->uniforms.imm_count; ++idx) {
printf(" [%i].%s = %f (0x%08x) (%d)\n",
idx / 4,
tgsi_swizzle_names[idx % 4],
*((float *)&shader->uniforms.imm_data[idx]),
shader->uniforms.imm_data[idx],
shader->uniforms.imm_contents[idx]);
}
printf("inputs:\n");
for (int idx = 0; idx < shader->infile.num_reg; ++idx) {
printf(" [%i] name=%s comps=%i\n", shader->infile.reg[idx].reg,
(shader->stage == MESA_SHADER_VERTEX) ?
gl_vert_attrib_name(shader->infile.reg[idx].slot) :
gl_varying_slot_name(shader->infile.reg[idx].slot),
shader->infile.reg[idx].num_components);
}
printf("outputs:\n");
for (int idx = 0; idx < shader->outfile.num_reg; ++idx) {
printf(" [%i] name=%s comps=%i\n", shader->outfile.reg[idx].reg,
(shader->stage == MESA_SHADER_VERTEX) ?
gl_varying_slot_name(shader->outfile.reg[idx].slot) :
gl_frag_result_name(shader->outfile.reg[idx].slot),
shader->outfile.reg[idx].num_components);
}
printf("special:\n");
if (shader->stage == MESA_SHADER_VERTEX) {
printf(" vs_pos_out_reg=%i\n", shader->vs_pos_out_reg);
printf(" vs_pointsize_out_reg=%i\n", shader->vs_pointsize_out_reg);
printf(" vs_load_balancing=0x%08x\n", shader->vs_load_balancing);
} else {
printf(" ps_color_out_reg=%i\n", shader->ps_color_out_reg);
printf(" ps_depth_out_reg=%i\n", shader->ps_depth_out_reg);
}
printf(" input_count_unk8=0x%08x\n", shader->input_count_unk8);
}
static const struct etna_shader_inout *
etna_shader_vs_lookup(const struct etna_shader_variant *sobj,
const struct etna_shader_inout *in)
{
for (int i = 0; i < sobj->outfile.num_reg; i++)
if (sobj->outfile.reg[i].slot == in->slot)
return &sobj->outfile.reg[i];
return NULL;
}
bool
etna_link_shader_nir(struct etna_shader_link_info *info,
const struct etna_shader_variant *vs,
const struct etna_shader_variant *fs)
{
int comp_ofs = 0;
/* For each fragment input we need to find the associated vertex shader
* output, which can be found by matching on semantic name and index. A
* binary search could be used because the vs outputs are sorted by their
* semantic index and grouped by semantic type by fill_in_vs_outputs.
*/
assert(fs->infile.num_reg < ETNA_NUM_INPUTS);
info->pcoord_varying_comp_ofs = -1;
for (int idx = 0; idx < fs->infile.num_reg; ++idx) {
const struct etna_shader_inout *fsio = &fs->infile.reg[idx];
const struct etna_shader_inout *vsio = etna_shader_vs_lookup(vs, fsio);
struct etna_varying *varying;
bool interpolate_always = true;
assert(fsio->reg > 0 && fsio->reg <= ARRAY_SIZE(info->varyings));
if (fsio->reg > info->num_varyings)
info->num_varyings = fsio->reg;
varying = &info->varyings[fsio->reg - 1];
varying->num_components = fsio->num_components;
if (!interpolate_always) /* colors affected by flat shading */
varying->pa_attributes = 0x200;
else /* texture coord or other bypasses flat shading */
varying->pa_attributes = 0x2f1;
varying->use[0] = VARYING_COMPONENT_USE_UNUSED;
varying->use[1] = VARYING_COMPONENT_USE_UNUSED;
varying->use[2] = VARYING_COMPONENT_USE_UNUSED;
varying->use[3] = VARYING_COMPONENT_USE_UNUSED;
/* point coord is an input to the PS without matching VS output,
* so it gets a varying slot without being assigned a VS register.
*/
if (fsio->slot == VARYING_SLOT_PNTC) {
varying->use[0] = VARYING_COMPONENT_USE_POINTCOORD_X;
varying->use[1] = VARYING_COMPONENT_USE_POINTCOORD_Y;
info->pcoord_varying_comp_ofs = comp_ofs;
} else {
if (vsio == NULL) { /* not found -- link error */
BUG("Semantic value not found in vertex shader outputs\n");
return true;
}
varying->reg = vsio->reg;
}
comp_ofs += varying->num_components;
}
assert(info->num_varyings == fs->infile.num_reg);
return false;
}
| 33.997998 | 106 | 0.61312 | [
"vector"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.