repo_name
stringlengths 5
122
| path
stringlengths 3
232
| text
stringlengths 6
1.05M
|
|---|---|---|
ivan-mogilko/ags-refactoring
|
Engine/main/config.h
|
<reponame>ivan-mogilko/ags-refactoring<gh_stars>1-10
//=============================================================================
//
// Adventure Game Studio (AGS)
//
// Copyright (C) 1999-2011 <NAME> and 2011-20xx others
// The full list of copyright holders can be found in the Copyright.txt
// file, which is part of this source code distribution.
//
// The AGS source code is provided under the Artistic License 2.0.
// A copy of this license can be found in the file License.txt and at
// http://www.opensource.org/licenses/artistic-license-2.0.php
//
//=============================================================================
//
//
//
//=============================================================================
#ifndef __AGS_EE_MAIN__CONFIG_H
#define __AGS_EE_MAIN__CONFIG_H
#include "main/graphics_mode.h"
#include "util/ini_util.h"
using AGS::Common::String;
using AGS::Common::ConfigTree;
// Set up default config settings
void config_defaults();
// Find the default configuration file (usually located in the game installation directory)
String find_default_cfg_file();
// Find the all-games user configuration file
String find_user_global_cfg_file();
// Find the game-specific user configuration file (located in the writable user directory)
String find_user_cfg_file();
// Apply overriding values from the external config (e.g. for mobile ports)
void override_config_ext(ConfigTree &cfg);
// Setup game using final config tree
void apply_config(const ConfigTree &cfg);
// Fixup game setup parameters
void post_config();
// Write the current settings back out to the user configuration file
void save_config_file();
WindowSetup parse_window_mode(const String &option, bool as_windowed, WindowSetup def_value = WindowSetup());
FrameScaleDef parse_scaling_option(const String &option, FrameScaleDef def_value = kFrame_Undefined);
String make_window_mode_option(const WindowSetup &ws, const Size &game_res, const Size &desktop_res);
String make_scaling_option(FrameScaleDef scale_def);
uint32_t convert_scaling_to_fp(int scale_factor);
int convert_fp_to_scaling(uint32_t scaling);
// INI helpers: read/write typed values in a config tree ("sectn" = section name).
// BUGFIX: "&sectn" had been mangled into "§n" (HTML-entity corruption of
// "&sect"); restored. Also removed a duplicate INIwriteint declaration.
bool INIreaditem(const ConfigTree &cfg, const String &sectn, const String &item, String &value);
int INIreadint(const ConfigTree &cfg, const String &sectn, const String &item, int def_value = 0);
float INIreadfloat(const ConfigTree &cfg, const String &sectn, const String &item, float def_value = 0.f);
String INIreadstring(const ConfigTree &cfg, const String &sectn, const String &item, const String &def_value = "");
void INIwriteint(ConfigTree &cfg, const String &sectn, const String &item, int value);
void INIwritestring(ConfigTree &cfg, const String &sectn, const String &item, const String &value);
#endif // __AGS_EE_MAIN__CONFIG_H
|
valerio-vaccaro/GianGiacomoMora
|
firmware/main/main.c
|
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <string.h>
#include "nvs_flash.h"
#include "esp_err.h"
#include "esp_log.h"
#include "esp_bt.h"
#include "esp_bt_main.h"
#include "esp_gap_ble_api.h"
// array of found devices
#define MAX_DISCOVERED_DEVICES 1000
esp_bd_addr_t discovered_devices[MAX_DISCOVERED_DEVICES];
int discovered_devices_num = 0;
// Raw advertisement payload broadcast by this device.
// NOTE(review): 0x6F,0xFD is the little-endian 16-bit UUID 0xFD6F — this
// looks like an Exposure-Notification-style frame; confirm intended protocol.
uint8_t test_payload[] = {
//0x02,0x01,0x1A,
0x03,0x03,0x6F,0xFD, // AD structure: complete list of 16-bit service UUIDs
0x17,0x16,0x6F,0xFD, // AD structure: service data, 0x17 (23) bytes follow
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x80,0xFF,
0x00,0x00
};
// scan parameters
// (interval/window are in BLE units of 0.625 ms — presumably; verify with IDF docs)
static esp_ble_scan_params_t ble_scan_params = {
.scan_type = BLE_SCAN_TYPE_ACTIVE,
.own_addr_type = BLE_ADDR_TYPE_PUBLIC,
.scan_filter_policy = BLE_SCAN_FILTER_ALLOW_ALL,
.scan_interval = 0x50,
.scan_window = 0x30
};
// advertise parameters
// Non-connectable advertising on all three advertising channels.
static esp_ble_adv_params_t ble_adv_params = {
.adv_int_min = 0x20,
.adv_int_max = 0x40,
.adv_type = ADV_TYPE_NONCONN_IND,
.own_addr_type = BLE_ADDR_TYPE_PUBLIC,
.channel_map = ADV_CHNL_ALL,
.adv_filter_policy = ADV_FILTER_ALLOW_SCAN_ANY_CON_ANY,
};
// check if the device was already discovered
// Check whether the given BD address is already in the discovered-devices list.
// Returns true only when all ESP_BD_ADDR_LEN bytes match a stored entry.
bool alreadyDiscovered(esp_bd_addr_t address) {
    for(int i = 0; i < discovered_devices_num; i++) {
        // BUGFIX: the previous code re-assigned the match flag on every byte,
        // so effectively only the LAST byte of the address was compared.
        if(memcmp(discovered_devices[i], address, ESP_BD_ADDR_LEN) == 0)
            return true;
    }
    return false;
}
// add a new device to the list
// Add a new device address to the discovered-devices list.
// The address is silently dropped when the list is already full.
void addDevice(esp_bd_addr_t address) {
    // BUGFIX: check capacity BEFORE incrementing. The previous code kept
    // incrementing discovered_devices_num past MAX_DISCOVERED_DEVICES, which
    // made alreadyDiscovered() iterate past the end of the array once full.
    if(discovered_devices_num >= MAX_DISCOVERED_DEVICES) return;
    for(int i = 0; i < ESP_BD_ADDR_LEN; i++)
        discovered_devices[discovered_devices_num][i] = address[i];
    discovered_devices_num++;
}
// clean devices list
// Resets the counter only; stale addresses in the array are simply
// overwritten by subsequent scans.
void cleanDevices() {
discovered_devices_num = 0;
}
// FreeRTOS task: waits 10 seconds, stops BLE advertising, then deletes itself.
// Spawned from the GAP callback once the raw advertising data has been set.
void stop_adv_task(void *pvParameter){
// NOTE(review): portTICK_RATE_MS is the legacy name (deprecated in newer
// FreeRTOS in favor of portTICK_PERIOD_MS — same value); confirm IDF version.
vTaskDelay(10*1000 / portTICK_RATE_MS);
ESP_ERROR_CHECK(esp_ble_gap_stop_advertising());
vTaskDelete(NULL);
}
// GAP event callback: drives the scan -> advertise -> scan state machine.
//  - after scan params are set: start a 10 s scan
//  - on each scan result: print devices whose payload prefix matches
//    test_payload, and remember each address so it is reported only once
//  - when the scan completes: configure raw adv data and start advertising
//  - when advertising stops (via stop_adv_task): restart the controller
//    and begin a new scan cycle
static void esp_gap_cb(esp_gap_ble_cb_event_t event, esp_ble_gap_cb_param_t *param)
{
    switch (event) {
    case ESP_GAP_BLE_SCAN_PARAM_SET_COMPLETE_EVT:
        printf("ESP_GAP_BLE_SCAN_PARAM_SET_COMPLETE_EVT\n");
        if(param->scan_param_cmpl.status == ESP_BT_STATUS_SUCCESS) {
            printf("Scan parameters set, start scanning for 10 seconds\n\n");
            esp_ble_gap_start_scanning(10);
        }
        else printf("Unable to set scan parameters, error code %d\n\n", param->scan_param_cmpl.status);
        break;
    case ESP_GAP_BLE_SCAN_START_COMPLETE_EVT:
        printf("ESP_GAP_BLE_SCAN_START_COMPLETE_EVT\n");
        if(param->scan_start_cmpl.status == ESP_BT_STATUS_SUCCESS) {
            printf("Scan started\n\n");
        }
        else printf("Unable to start scan process, error code %d\n\n", param->scan_start_cmpl.status);
        break;
    case ESP_GAP_BLE_SCAN_RESULT_EVT:
        //esp_ble_gap_cb_param_t* scan_result = (esp_ble_gap_cb_param_t*)param;
        switch(param->scan_rst.search_evt) {
        case ESP_GAP_SEARCH_INQ_RES_EVT:
        {
            if(!alreadyDiscovered(param->scan_rst.bda)) {
                //printf("ESP_GAP_BLE_SCAN_RESULT_EVT\n");
                // Only report devices whose first 8 payload bytes match ours.
                if(memcmp(test_payload, param->scan_rst.ble_adv, 8)==0){
                    printf("addr = ");
                    for(int i = 0; i < ESP_BD_ADDR_LEN; i++) {
                        printf("%02X", param->scan_rst.bda[i]);
                        if(i != ESP_BD_ADDR_LEN -1) printf(":");
                    }
                    printf(" payload = ");
                    for(int i = 0; i < param->scan_rst.adv_data_len + param->scan_rst.scan_rsp_len; i++) {
                        printf("%02X", param->scan_rst.ble_adv[i]);
                        if(i != param->scan_rst.adv_data_len + param->scan_rst.scan_rsp_len -1) printf(" ");
                    }
                    printf("\n");
                }
                addDevice(param->scan_rst.bda);
            }
            break;
        }
        case ESP_GAP_SEARCH_INQ_CMPL_EVT:
        {
            printf("Scan complete - advertise\n\n");
            ESP_ERROR_CHECK(esp_ble_gap_config_adv_data_raw(test_payload, 28));
            xTaskCreate(&stop_adv_task, "stop_adv_task", 2048, NULL, 5, NULL);
            break;
        }
        default:
            break;
        }
        break;
    case ESP_GAP_BLE_SCAN_STOP_COMPLETE_EVT:
        printf("ESP_GAP_BLE_SCAN_STOP_COMPLETE_EVT\n\n");
        esp_err_t err;
        if((err = param->scan_stop_cmpl.status) != ESP_BT_STATUS_SUCCESS) {
            printf("Scan stop failed: %s", esp_err_to_name(err));
        }
        else {
            printf("Stop scan successfully");
        }
        break;
    case ESP_GAP_BLE_ADV_DATA_RAW_SET_COMPLETE_EVT:
        printf("ESP_GAP_BLE_ADV_DATA_RAW_SET_COMPLETE_EVT\n");
        esp_ble_gap_start_advertising(&ble_adv_params);
        break;
    case ESP_GAP_BLE_ADV_START_COMPLETE_EVT:
        printf("ESP_GAP_BLE_ADV_START_COMPLETE_EVT\n");
        if(param->adv_start_cmpl.status == ESP_BT_STATUS_SUCCESS) {
            printf("Advertising started\n\n");
        }
        // BUGFIX: this branch previously printed scan_start_cmpl.status —
        // the wrong member of the event-parameter union for this event.
        else printf("Unable to start advertising process, error code %d\n\n", param->adv_start_cmpl.status);
        break;
    case ESP_GAP_BLE_ADV_STOP_COMPLETE_EVT:
        printf("ESP_GAP_BLE_ADV_STOP_COMPLETE_EVT\n");
        printf("advertise complete - scan\n\n");
        // Restart the controller, then kick off the next scan cycle.
        ESP_ERROR_CHECK(esp_bt_controller_disable());
        ESP_ERROR_CHECK(esp_bt_controller_enable(ESP_BT_MODE_BLE));
        ESP_ERROR_CHECK(esp_ble_gap_set_scan_params(&ble_scan_params));
        cleanDevices();
        ESP_ERROR_CHECK(esp_ble_gap_start_scanning(10));
        break;
    default:
        printf("Event %d unhandled\n\n", event);
        break;
    }
}
// Application entry point: bring up NVS, the BT controller and Bluedroid,
// register the GAP callback and start the first scan.
void app_main() {
    printf("BT scan and broadcast\n\n");
    // set components to log only errors
    esp_log_level_set("*", ESP_LOG_ERROR);
    // initialize nvs
    ESP_ERROR_CHECK(nvs_flash_init());
    printf("- NVS init ok\n");
    // release memory reserved for classic BT (not used)
    ESP_ERROR_CHECK(esp_bt_controller_mem_release(ESP_BT_MODE_CLASSIC_BT));
    printf("- Memory for classic BT released\n");
    // initialize the BT controller with the default config
    esp_bt_controller_config_t bt_cfg = BT_CONTROLLER_INIT_CONFIG_DEFAULT();
    // CONSISTENCY: check these return codes too — the surrounding init calls
    // already abort on failure via ESP_ERROR_CHECK, but these were unchecked.
    ESP_ERROR_CHECK(esp_bt_controller_init(&bt_cfg));
    printf("- BT controller init ok\n");
    // enable the BT controller in BLE mode
    ESP_ERROR_CHECK(esp_bt_controller_enable(ESP_BT_MODE_BLE));
    printf("- BT controller enabled in BLE mode\n");
    // initialize Bluedroid library
    ESP_ERROR_CHECK(esp_bluedroid_init());
    ESP_ERROR_CHECK(esp_bluedroid_enable());
    printf("- Bluedroid initialized and enabled\n");
    // register GAP callback function
    ESP_ERROR_CHECK(esp_ble_gap_register_callback(esp_gap_cb));
    printf("- GAP callback registered\n\n");
    // configure scan parameters (scan starts from the GAP callback)
    ESP_ERROR_CHECK(esp_ble_gap_set_scan_params(&ble_scan_params));
}
|
mindspore-ai/mindspore
|
mindspore/ccsrc/minddata/dataset/engine/datasetops/parallel_op.h
|
<filename>mindspore/ccsrc/minddata/dataset/engine/datasetops/parallel_op.h<gh_stars>1000+
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_PARALLEL_OP_H_
#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_PARALLEL_OP_H_
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "minddata/dataset/include/dataset/constants.h"
#include "minddata/dataset/engine/datasetops/dataset_op.h"
#include "minddata/dataset/engine/execution_tree.h"
#include "minddata/dataset/engine/datasetops/source/io_block.h"
#include "minddata/dataset/util/status.h"
namespace mindspore {
namespace dataset {
class ExecutionTree;
// A ParallelOp provides a multi-threaded DatasetOp
template <typename T, typename S>
class ParallelOp : public DatasetOp {
 public:
  /// Constructor
  /// \param num_workers - the number of worker threads
  /// \param op_connector_size - size of the output connector for this operator
  /// \param sampler - The sampler for the op
  ParallelOp(int32_t num_workers, int32_t op_connector_size, std::shared_ptr<SamplerRT> sampler = nullptr)
      : DatasetOp(op_connector_size, sampler),
        // BUGFIX: members are now listed in declaration order. The previous
        // ordering triggered -Wreorder; members are always initialized in
        // declaration order regardless of the init-list order.
        num_workers_paused_(0),
        epoch_sync_flag_(false),
        num_workers_(num_workers),
        worker_connector_size_(op_connector_size) {
    // reduce excessive memory usage with high parallelism
    // when num_workers > 4, reduce op_connector_size to have similar total size if there were only 4 workers
    constexpr int32_t worker_limit = 4;
    if (num_workers_ > worker_limit) {
      // NOTE(review): oc_queue_size_ is not declared in this class — it is
      // presumably inherited from DatasetOp; confirm before refactoring.
      oc_queue_size_ = std::max(1, op_connector_size * worker_limit / num_workers_);
      worker_connector_size_ = std::max(1, op_connector_size * worker_limit / num_workers_);
    }
  }
  // Destructor
  ~ParallelOp() = default;
  /// A print method typically used for debugging
  /// \param out - The output stream to write output to
  /// \param show_all - A bool to control if you want to show all info or just a summary
  void Print(std::ostream &out, bool show_all) const override {
    DatasetOp::Print(out, show_all);
    out << " [workers: " << num_workers_ << "]";
  }
  std::string Name() const override { return kParallelOp; }
  // << Stream output operator overload
  // @notes This allows you to write the debug print info using stream operators
  // @param out - reference to the output stream being overloaded
  // @param po - reference to the ParallelOp to display
  // @return - the output stream must be returned
  friend std::ostream &operator<<(std::ostream &out, const ParallelOp &po) {
    po.Print(out, false);
    return out;
  }
  int32_t NumWorkers() const override { return num_workers_; }

 protected:
  /// Interface for derived classes to implement. All derived classes must provide the entry
  /// function with the main execution loop for worker threads.
  /// \return Status The status code returned
  virtual Status WorkerEntry(int32_t workerId) = 0;
  /// Sets up the worker queues, registers them for interrupt services, and
  /// launches the worker threads plus a single Collector thread.
  /// \return Status The status code returned
  virtual Status RegisterAndLaunchThreads() {
    RETURN_UNEXPECTED_IF_NULL(tree_);
    worker_in_queues_.Init(num_workers_, worker_connector_size_);
    worker_out_queues_.Init(num_workers_, worker_connector_size_);
    // Registers QueueList and individual Queues for interrupt services
    RETURN_IF_NOT_OK(worker_in_queues_.Register(tree_->AllTasks()));
    RETURN_IF_NOT_OK(worker_out_queues_.Register(tree_->AllTasks()));
    RETURN_IF_NOT_OK(wait_for_workers_post_.Register(tree_->AllTasks()));
    RETURN_IF_NOT_OK(tree_->LaunchWorkers(
      num_workers_, std::bind(&ParallelOp::WorkerEntry, this, std::placeholders::_1), Name() + "::WorkerEntry", id()));
    RETURN_IF_NOT_OK(tree_->LaunchWorkers(1, std::bind(&ParallelOp::Collector, this), Name() + "::Collector", id()));
    return Status::OK();
  }
  /// Collects rows from the worker output queues round-robin and forwards them
  /// to the output connector until an EOF row is seen.
  virtual Status Collector() {
    TaskManager::FindMe()->Post();
    uint64_t ctr = 0;
    TensorRow row;
    do {
      RETURN_IF_NOT_OK(worker_out_queues_[ctr++ % num_workers_]->PopFront(&row));
      // skip() rows are dropped; eoe/eof and data rows are forwarded
      if (row.eoe() || row.eof() || !row.skip()) {
        RETURN_IF_NOT_OK(out_connector_->Add(std::move(row)));
      }
    } while (!row.eof());
    return Status::OK();
  }
  // Wait post used to perform the pausing logic
  WaitPost wait_for_workers_post_;
  // Count number of workers that have signaled master
  std::atomic_int num_workers_paused_;
  /// Whether or not to sync worker threads at the end of each epoch
  bool epoch_sync_flag_;
  /// The number of worker threads
  int32_t num_workers_;
  /// The size of input/output worker queues
  int32_t worker_connector_size_;
  /// queues to hold the input rows to workers
  QueueList<T> worker_in_queues_;
  /// queues to hold the output from workers
  QueueList<S> worker_out_queues_;
};
} // namespace dataset
} // namespace mindspore
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_PARALLEL_OP_H_
|
mindspore-ai/mindspore
|
mindspore/lite/src/runtime/runtime_allocator.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_OPTIMIZE_ALLOCATOR_H_
#define MINDSPORE_LITE_SRC_RUNTIME_OPTIMIZE_ALLOCATOR_H_
#include <memory>
#include <map>
#include <unordered_map>
#include "include/api/allocator.h"
#include "include/errorcode.h"
#include "src/tensor.h"
namespace mindspore {
// Allocator used by the "runtime allocator" memory-planning scheme: all
// tensor data lives at fixed offsets inside one pre-planned buffer (data_).
class RuntimeAllocator : public Allocator {
public:
explicit RuntimeAllocator(size_t aligned_size = 32);
~RuntimeAllocator() override;
public:
// The generic Allocator interface is intentionally stubbed out: memory is
// planned ahead of time via MallocTensorData/MallocOptData instead.
void *Malloc(size_t size) override { return nullptr; }
void Free(void *ptr) override { return; }
// NOTE(review): reports RUNTIME_REFCOUNT only when ptr is null — confirm this
// sentinel behavior is intentional for planned tensors.
int RefCount(void *ptr) override { return (ptr == nullptr) ? RUNTIME_REFCOUNT : 0; }
int SetRefCount(void *ptr, int ref_count) override { return 0; }
int IncRefCount(void *ptr, int ref_count) override { return 0; }
int DecRefCount(void *ptr, int ref_count) override { return 0; }
public:
// Record the buffer offset assigned to a tensor's data.
void SetDataOffset(lite::Tensor *tensor, size_t offset);
void MallocTensorData(lite::Tensor *tensor);
void FreeTensorData(lite::Tensor *tensor);
void *MallocOptData();
const std::unordered_map<lite::Tensor *, size_t> &GetOffsetMap() const { return offset_map_; }
private:
// Find the smallest free region able to hold `size` bytes.
size_t FindMinFree(size_t size);
private:
void *data_ = nullptr;
size_t total_size_ = 0;
std::unordered_map<lite::Tensor *, size_t> offset_map_;
std::map<size_t, size_t> free_list_; /* offset, size */
std::map<size_t, size_t> used_list_; /* offset, size */
};
using RuntimeAllocatorPtr = std::shared_ptr<RuntimeAllocator>;
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_RUNTIME_OPTIMIZE_ALLOCATOR_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/ps/worker.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PS_WORKER_H_
#define MINDSPORE_CCSRC_PS_WORKER_H_
#include <utility>
#include <memory>
#include <vector>
#include <string>
#include <numeric>
#include <functional>
#include <algorithm>
#include <map>
#include <mutex>
#include <unordered_set>
#include <unordered_map>
#include "utils/log_adapter.h"
#include "ir/tensor.h"
#include "ps/util.h"
#include "ps/constants.h"
#include "utils/shape_utils.h"
#include "ps/ps_cache/ps_data/ps_data_prefetch.h"
#include "ps/core/worker_node.h"
#include "ps/embedding_table_shard_metadata.h"
#include "proto/comm.pb.h"
#include "proto/ps.pb.h"
#include "ps/ps_context.h"
namespace mindspore {
namespace ps {
// Parameter-server worker: pushes/pulls parameter data, manages embedding
// table shards, and partitions key-value messages across servers.
class Worker {
 public:
  // Process-wide singleton accessor.
  static Worker &GetInstance() {
    static Worker instance;
    return instance;
  }
  using Callback = std::function<void()>;
  using PartitionEmbeddingMessages = std::vector<std::pair<bool, EmbeddingTableLookup>>;
  using PartitionKVMessages = std::vector<std::pair<bool, KVMessage>>;
  using EmbeddingPartitioner = std::function<void(
    const EmbeddingTableLookup &send, PartitionEmbeddingMessages *partition, const std::map<int64_t, int64_t> &attrs)>;
  using KVPartitioner =
    std::function<void(const KVMessage &send, PartitionKVMessages *partition, const std::map<int64_t, int64_t> &attrs)>;
  void Run();
  void Push(const std::vector<size_t> &keys, std::vector<uintptr_t> addrs, const ShapeVector &sizes);
  void Pull(const size_t key, void *dev_addr, const size_t size);
  // BUGFIX: the four declarations below had "&param_name" mangled into
  // "(pilcrow)m_name" (HTML-entity corruption of "&para"); restored.
  size_t SetParamKey(const std::string &param_name);
  size_t GetParamKey(const std::string &param_name);
  void SetParamInitInServer(const std::string &param_name, bool init_in_server);
  bool GetParamInitInServer(const std::string &param_name);
  void SetKeyOptimId(size_t key, const std::string &optimizer_name);
  void SetOptimInputShapes(size_t key, const ShapeVector &shape);
  void AddEmbeddingTable(const Key &key, const size_t &row_count);
  bool InitPSEmbeddingTable(const size_t &key, const std::vector<size_t> &input_shape,
                            const std::vector<size_t> &indices_shape, const std::vector<size_t> &output_shape,
                            const ParamInitInfoMessage &info);
  void InitPSParamAndOptim(const AnfNodePtr &input_node, const tensor::TensorPtr &tensor);
  bool DoPSEmbeddingLookup(const Key &key, const std::vector<int> &lookup_ids, std::vector<float> *lookup_result,
                           int64_t cmd);
  bool UpdateEmbeddingTable(const std::vector<Key> &keys, const std::vector<int> &lookup_ids,
                            const std::vector<float> &vals);
  bool running() { return running_; }
  void Finalize();

 private:
  Worker() : server_num_(-1), running_(false), key_cnt_(0) {}
  ~Worker() = default;
  Worker(const Worker &) = delete;
  Worker &operator=(const Worker &) = delete;
  void Initialize();
  bool IsKeyInit(const size_t key);
  void AddKeyToServerId(const Key &key);
  void AddKeyByHashMod(const Key &key);
  void InitPSOptimId(const size_t param_key);
  void InitPSOptimInputShapes(const size_t key);
  void InitPSParamData(const std::vector<size_t> &keys, void *const origin_addr, size_t size);
  bool IsReadyForPush(const Key &key);
  bool IsReadyForPull(const Key &key);
  void PrepareSparseGradient(const size_t begin, const size_t end, const std::unordered_set<int> &distinct_ids,
                             const std::vector<std::pair<int, float *>> &indice_to_grads, const int *all_indice,
                             const size_t segment_size, float *gradient, int *indices);
  void BuildSparseValue(const std::vector<int> &lengths, const size_t grad_index, const size_t indice_index,
                        const float *original_data, const float *grads, int *indices, std::vector<float> *reduced_data);
  void PushData(const std::vector<Key> &keys, const std::vector<float> &vals, const std::vector<int> &lens = {},
                int command = 0, int64_t priority = 0);
  void PushSparseData(const std::vector<Key> &keys, const std::vector<float> &vals, const std::vector<int> &lens,
                      size_t grad_index, size_t indice_index, size_t first_dim_size, size_t outer_dim_size);
  void PullData(const std::vector<Key> &keys, std::vector<float> *const vals, std::vector<int> *lens = nullptr,
                int cmd = 0, int64_t priority = 0);
  void LookupIdPartitioner(const EmbeddingTableLookup &send, PartitionEmbeddingMessages *partition,
                           const std::map<int64_t, int64_t> &attrs);
  void SparsePartitioner(const KVMessage &send, PartitionKVMessages *partition,
                         const std::map<int64_t, int64_t> &attrs);
  void RoundRobinPartitioner(const KVMessage &send, PartitionKVMessages *partition,
                             const std::map<int64_t, int64_t> &attrs);
  void WorkerInitEmbeddingPartitioner(const KVMessage &send, std::vector<std::pair<bool, KVMessage>> *partition,
                                      const std::map<int64_t, int64_t> &attrs);
  void UpdateEmbeddingPartitioner(const KVMessage &send, PartitionKVMessages *partition,
                                  const std::map<int64_t, int64_t> &attrs);
  void BroadcastPartitioner(const KVMessage &send, PartitionKVMessages *partition,
                            const std::map<int64_t, int64_t> &attrs);
  void SendForPush(int cmd, const KVMessage &send, const KVPartitioner &partitioner,
                   const std::map<int64_t, int64_t> &attrs);
  void SendForPull(int cmd, const KVMessage &send, const KVPartitioner &partitioner,
                   const std::map<int64_t, int64_t> &attrs, std::vector<float> *vals, std::vector<int> *lens);

  int64_t server_num_;
  bool running_;
  std::mutex running_mutex_;
  size_t key_cnt_;
  std::map<std::string, size_t> param_to_key_;
  std::map<size_t, bool> init_keys_;
  std::map<size_t, int64_t> key_to_optimId_;
  std::map<size_t, std::vector<ShapeVector>> key_to_optim_shapes_;
  std::map<std::string, bool> param_to_init_in_server_;
  core::WorkerNode worker_node_;
  EmbeddingPartitioner lookup_partitioner_;
  KVPartitioner sparse_partitioner_;
  KVPartitioner round_robin_partitioner_;
  KVPartitioner worker_init_embedding_partitioner_;
  KVPartitioner update_embedding_partitioner_;
  KVPartitioner broadcast_partitioner_;
  std::unordered_map<Key, int64_t> key_to_server_id_;
  std::unordered_map<Key, size_t> embedding_row_cnt_;
  std::unordered_map<Key, std::shared_ptr<std::vector<EmbeddingTableShardMetadata>>> embedding_table_ranges_;
};
} // namespace ps
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PS_WORKER_H_
|
mindspore-ai/mindspore
|
mindspore/lite/include/ms_tensor.h
|
<gh_stars>1000+
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
#define MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
#include <vector>
#include "include/lite_utils.h"
#include "ir/dtype/type_id.h"
namespace mindspore {
enum Format : int64_t;
namespace tensor {
/// \brief MSTensor defined tensor in MindSpore Lite.
class MS_API MSTensor {
 public:
/// \brief Constructor of MindSpore Lite MSTensor.
///
/// \return Instance of MindSpore Lite MSTensor.
MSTensor() = default;
/// \brief Destructor of MindSpore Lite Model.
virtual ~MSTensor() = default;
/// \brief Create a MSTensor.
///
/// \return Pointer to an instance of MindSpore Lite MSTensor.
static MSTensor *CreateTensor(const String &name, TypeId type, const Vector<int> &shape, const void *data,
size_t data_len);
/// \brief Set memory allocator for current MSTensor.
///
/// \param[in] allocator Define memory allocator, which is shown in allocator.h.
virtual void set_allocator(AllocatorPtr allocator) = 0;
/// \brief Get memory allocator of current MSTensor.
///
/// \return Pointer of memory allocator class.
virtual AllocatorPtr allocator() const = 0;
/// \brief Get data type of the MindSpore Lite MSTensor.
///
/// \note TypeId is defined in mindspore/mindspore/include/api/type_id.h. Only number types in TypeId enum are
/// suitable for MSTensor.
///
/// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor.
virtual TypeId data_type() const = 0;
/// \brief Set data type of current MSTensor.
///
/// \param[in] data_type Define data type, which is shown in type_id.h.
virtual void set_data_type(TypeId data_type) = 0;
/// \brief Set format of current MSTensor.
///
/// \param[in] format Define format of data, which is shown in format.h
virtual void set_format(mindspore::Format format) = 0;
/// \brief Get format of current MSTensor.
///
/// \return format, which is shown in format.h
virtual mindspore::Format format() const = 0;
/// \brief Get shape of the MindSpore Lite MSTensor.
///
/// \return A vector of int as the shape of the MindSpore Lite MSTensor.
virtual Vector<int> shape() const = 0;
/// \brief Set the shape of MSTensor.
virtual void set_shape(const Vector<int> &shape) = 0;
/// \brief Get number of element in MSTensor.
///
/// \return Number of element in MSTensor.
virtual int ElementsNum() const = 0;
/// \brief Get byte size of data in MSTensor.
///
/// \return Byte size of data in MSTensor.
virtual size_t Size() const = 0;
/// \brief Get the name of MSTensor.
///
/// \return the name of MSTensor.
virtual String tensor_name() const = 0;
/// \brief Set the name of MSTensor.
virtual void set_tensor_name(const String &name) = 0;
/// \brief Get the pointer of data in MSTensor.
///
/// \note The data pointer can be used to both write and read data in MSTensor. The memory buffer will be
/// automatically allocated.
///
/// \return the pointer points to data in MSTensor.
virtual void *MutableData() = 0;
/// \brief Get the pointer of data in MSTensor.
///
/// \note The data pointer can be used to both write and read data in MSTensor. No memory buffer will be
/// allocated.
///
/// \return the pointer points to data in MSTensor.
virtual void *data() = 0;
/// \brief Set the data of MSTensor.
virtual void set_data(void *data) = 0;
/// \brief Get the quantization parameters of the MSTensor.
///
/// \return A vector of LiteQuantParam.
virtual Vector<lite::LiteQuantParam> quant_params() const = 0;
/// \brief Set the quantization parameters of the MSTensor.
virtual void set_quant_params(Vector<lite::LiteQuantParam>) = 0;
/// \brief Get whether the MSTensor data is const data
///
/// \return Const flag of MSTensor
virtual bool IsConst() const = 0;
};
} // namespace tensor
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/frontend/parallel/graph_util/pipeline_split_utils.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_GRAPH_UTIL_PIPELINE_SPLIT_UTILS_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_GRAPH_UTIL_PIPELINE_SPLIT_UTILS_H_
#include <utility>
#include <vector>
#include <string>
#include "ir/anf.h"
#include "ir/manager.h"
namespace mindspore {
namespace parallel {
// Helpers for pipeline-parallel graph splitting: micro-batch handling,
// border-node discovery, and forward/backward node reordering.
using PipelinePair = std::pair<std::vector<AnfNodePtr>, std::vector<AnfNodePtr>>;
AnfNodePtr FindAccuGrad(const CNodePtr &cnode);
bool IsLastStage();
void InsertVirtualAssignAdd(const std::pair<AnfNodePtr, int> &node_user, const FuncGraphManagerPtr &manager,
                            const AnfNodePtr &accu_parameter);
// BUGFIX: "&param" / "&parameters" in the two declarations below had been
// mangled into pilcrow characters (HTML-entity corruption of "&para"); restored.
void InsertVirtualAccuGrad(const AnfNodePtr &recv, const FuncGraphManagerPtr &manager, const AnfNodePtr &param);
AnfNodePtr FindGradAccuParameter(const std::vector<AnfNodePtr> &parameters, const std::string &name);
void HandleReceiveParam(const FuncGraphPtr &root, const std::vector<AnfNodePtr> &all_nodes);
void AddVirtualAssignAdd(const FuncGraphPtr &root);
bool CompFunc(const AnfNodePtr &node1, const AnfNodePtr &node2);
void ReorderForForward(const std::vector<AnfNodePtr> &forward_start, const std::vector<AnfNodePtr> &forward_end,
                       const FuncGraphPtr &root);
void ReorderForBackward(const PipelinePair &forward_start_pair, const PipelinePair &forward_end_pair,
                        const PipelinePair &backward_start_pair, const PipelinePair &backward_end_pair,
                        const PipelinePair &forward_end_before_pair, const FuncGraphPtr &root);
void ReorderForParams(const std::vector<AnfNodePtr> &backward_params, const std::vector<AnfNodePtr> &forward_params,
                      const std::vector<AnfNodePtr> &allreduce_params, const PipelinePair &forward_params_pair,
                      const PipelinePair &backward_params_pair, const std::vector<AnfNodePtr> &backward_end,
                      const PipelinePair &forward_start_pair, const FuncGraphPtr &root);
int64_t GetMicroBatch(const AnfNodePtr &node);
void InsertDepend(const AnfNodePtr &prior_node, const AnfNodePtr &post_node, const FuncGraphManagerPtr &manager,
                  const FuncGraphPtr &root);
PipelinePair Deduplicate(const std::vector<AnfNodePtr> &node_vector, const FuncGraphPtr &root, int64_t micro_max);
AnfNodePtr GetActualOp(const AnfNodePtr &node);
void GetBorderNode(std::vector<AnfNodePtr> *forward_start, std::vector<AnfNodePtr> *forward_end,
                   std::vector<AnfNodePtr> *backward_start, std::vector<AnfNodePtr> *backward_end,
                   std::vector<AnfNodePtr> *forward_params, std::vector<AnfNodePtr> *backward_params,
                   std::vector<AnfNodePtr> *allreduce_params, const FuncGraphPtr &root);
void Reorder(const FuncGraphPtr &root);
void ReorderForPredict(const FuncGraphPtr &root, const FuncGraphManagerPtr &manager);
void HandleMicroBatch(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphManagerPtr &manager);
void BroadCastMicroBatch(const CNodePtr &node, NodeUsersMap *node_users_map, const ValuePtr &value, size_t max_depth);
void LabelNeedGrad(const FuncGraphManagerPtr &manager, const FuncGraphPtr &root);
void BroadCastNeedGrad(const AnfNodePtr &node, NodeUsersMap *node_user_map, const FuncGraphPtr &root);
AnfNodePtr GetPreNode(const AnfNodePtr &node);
void LastStageEndNode(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphManagerPtr &manager,
                      const FuncGraphPtr &root);
void SetStridedSliceStrategy(const AnfNodePtr &node);
void ParameterStartNode(const std::vector<AnfNodePtr> &all_nodes, const FuncGraphManagerPtr &manager);
ValuePtr Micro(const CNodePtr &cnode, NodeUsersMap *node_users_map, size_t max_depth);
void CheckBorderNode(const PipelinePair &forward_start_pair, const PipelinePair &forward_end_pair,
                     const PipelinePair &backward_start_pair, const PipelinePair &backward_end_pair, size_t micro_size);
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_GRAPH_UTIL_PIPELINE_SPLIT_UTILS_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/optimizer/graph_kernel/graph_kernel_helper.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_HELPER_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_HELPER_H_
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <unordered_set>
#include <utility>
#include <vector>
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "ir/primitive.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "backend/session/kernel_graph.h"
#include "backend/kernel_compiler/akg/akg_kernel_json_generator.h"
#include <nlohmann/json.hpp>
#include "backend/optimizer/graph_kernel/model/lite_graph.h"
namespace mindspore::graphkernel {
using kernel::DumpOption;
// Node attribute keys attached during graph-kernel processing.
constexpr auto kIsFeatureMapOutput = "IsFeatureMapOutput";
constexpr auto kIsFeatureMapInputList = "IsFeatureMapInputList";
// Python module and callable names reached through the Python bridge.
constexpr auto kGraphKernelModule = "mindspore._extends.graph_kernel";
constexpr auto kGraphKernelEstimateOps = "estimate_ops";
// NOTE(review): "calulation" typo matches the Python-side function name; it is a
// runtime identifier, so do not "fix" it here without changing the Python side too.
constexpr auto kGraphKernelGetNodeCalAmount = "estimate_calulation_amount";
constexpr auto kGraphKernelSplitFunc = "split_with_json";
constexpr auto kGetGraphKernelOpExpander = "get_op_expander";
// JSON keys used in the kernel description payloads.
constexpr auto kJsonKeyMultiGraph = "multi_graph";
constexpr auto kJsonKeyGraphDesc = "graph_desc";
constexpr auto kJsonKeyGraphMode = "graph_mode";
constexpr auto kAllTarget = "ALL";
// Directory name used when dumping graph-kernel debug artifacts.
constexpr auto kGraphKernelDumpPath = "graph_kernel_dump";
// Lightweight descriptor of a tensor value: data format, shape and element type.
struct DataInfo {
  std::string format{kOpFormat_DEFAULT};  // data format; defaults to the framework default layout
  ShapeVector shape{1};                   // defaults to the scalar shape {1}
  TypePtr type{nullptr};                  // element type; must be set by the caller (checked in CreateScalarTensorValueNode)
};
// --- Graph construction / fusion helpers (definitions in the matching .cc) ---
bool ConvertNonscalarTensorToParameter(const FuncGraphPtr &fg, AnfNodePtrList *inputs_ptr);
std::tuple<FuncGraphPtr, AnfNodePtrList, AnfNodePtrList> MixedNodesTransToGraph(const AnfNodePtrList &fuse_nodes,
                                                                                AnfNodePtrList *src_outputs = nullptr);
// Attaches kernel build info to `new_node` derived from the fused graph's inputs/outputs.
void SetNewKernelInfo(const AnfNodePtr &new_node, const FuncGraphPtr &fg, const AnfNodePtrList &inputs,
                      const AnfNodePtrList &outputs);
// Two overloads: the first can consult `node` when building the kernel build info.
kernel::KernelBuildInfoPtr BuildSelectKernelBuildInfo(const std::vector<std::string> &inputs_format,
                                                      const std::vector<TypeId> &inputs_type,
                                                      const std::vector<std::string> &output_formats,
                                                      const std::vector<TypeId> &output_types, const AnfNodePtr &node);
kernel::KernelBuildInfoPtr BuildSelectKernelBuildInfo(const std::vector<std::string> &inputs_format,
                                                      const std::vector<TypeId> &inputs_type,
                                                      const std::vector<std::string> &output_formats,
                                                      const std::vector<TypeId> &output_types);
AnfNodePtr CreateNewFuseCNode(const FuncGraphPtr &kernel_graph, const FuncGraphPtr &fg, const AnfNodePtrList &inputs,
                              const AnfNodePtrList &outputs);
void ReplaceNewFuseCNode(const FuncGraphPtr &kernel_graph, const AnfNodePtr &new_fuse_cnode,
                         const AnfNodePtrList &outputs);
std::tuple<AnfNodePtr, AnfNodePtrList> FuseNodesToSubGraph(const std::vector<AnfNodePtr> &fuse_nodes,
                                                           const FuncGraphPtr &kernel_graph,
                                                           const std::string &postfix = "");
// --- JSON (de)serialization of kernel descriptions ---
bool AnfToJsonDesc(const AnfNodePtrList &nodes, const DumpOption &dump_option, nlohmann::json *op_desc);
bool AnfToJsonDesc(const AnfNodePtrList &nodes, const DumpOption &dump_option, nlohmann::json *op_desc,
                   std::map<std::string, AnfNodePtr> *address_node_map);
bool AnfToJsonDesc(const std::vector<AnfNodePtrList> &graphs, const DumpOption &dump_option, nlohmann::json *op_desc);
FuncGraphPtr JsonDescToAnf(const std::string &json_desc);
// --- Node introspection utilities ---
std::string ExtractGraphKernelName(const AnfNodePtrList &cnodes, const string &prefix = "", const string &postfix = "");
void ResetKernelInfo(const AnfNodePtr &node, KernelType kernel_type = KernelType::UNKNOWN_KERNEL_TYPE);
std::string GetFormat(const AnfNodePtr &node);
TypePtr GetType(const AnfNodePtr &node);
ShapeVector GetShape(const AnfNodePtr &node);
ShapeVector GetDeviceShape(const AnfNodePtr &node);
std::vector<int64_t> GetReduceAxis(const AnfNodePtr &node);
CNodePtr CreateCNode(const std::vector<AnfNodePtr> &inputs, const FuncGraphPtr &func_graph, const DataInfo &out_info,
                     bool use_fake_abstract = false);
// "Safely": presumably clones the primitive before mutating attrs — confirm in .cc.
void SetNodeAttrSafely(const std::string &key, const ValuePtr &value, const AnfNodePtr &node);
bool IsKeepBasicNode(const AnfNodePtr &node);
// Filters `ops` in place by enable/disable name lists.
void OpListFilter(std::vector<PrimitivePtr> *ops, const std::vector<std::string> &enable_ops_only,
                  const std::vector<std::string> &enable_ops, const std::vector<std::string> &disable_ops);
// Builds a ValueNode wrapping a one-element (scalar) tensor holding `value`, and
// attaches kernel info (output format + device type) taken from `info` so the node
// can be consumed directly by kernel selection.
// `data_length` is the byte size of `value` that is copied into the tensor buffer.
// Raises (MS_LOG(EXCEPTION)) on a non-scalar shape, missing type, or copy failure.
template <typename T>
ValueNodePtr CreateScalarTensorValueNode(const DataInfo &info, T value, size_t data_length) {
  // A scalar tensor must have exactly shape {1}. The previous check used `&&`,
  // which wrongly accepted shapes such as {5} or {1, 1} and indexed
  // info.shape[0] on an empty shape; `||` enforces the stated contract.
  if (info.shape.size() != 1 || info.shape[0] != 1) {
    MS_LOG(EXCEPTION) << "Only support create scalar tensor value node!!!";
  }
  if (info.type == nullptr) {
    MS_LOG(EXCEPTION) << "Data type is needed!!!";
  }
  // Create the tensor value and stamp its device-side layout info.
  tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(info.type->type_id(), info.shape);
  MS_EXCEPTION_IF_NULL(tensor);
  tensor::DeviceInfo device_info{info.format, info.type};
  tensor->set_device_info(device_info);
  auto data_ptr = tensor->data_c();
  MS_EXCEPTION_IF_NULL(data_ptr);
  // memcpy_s bounds-checks the copy against the tensor's allocated byte size.
  auto ret_code = memcpy_s(data_ptr, static_cast<size_t>(tensor->data().nbytes()), &value, data_length);
  if (ret_code != 0) {
    MS_LOG(EXCEPTION) << "Failed to copy data into scalar tensor.";
  }
  // Wrap the tensor in a value node and give it an abstract for later inference.
  ValueNodePtr new_value_node = std::make_shared<ValueNode>(tensor);
  new_value_node->set_abstract(tensor->ToAbstract());
  // Attach kernel build info (single output: info.format / info.type).
  auto kernel_info = std::make_shared<device::KernelInfo>();
  new_value_node->set_kernel_info(kernel_info);
  auto kernel_build_info_builder = std::make_shared<kernel::KernelBuildInfo::KernelBuildInfoBuilder>();
  kernel_build_info_builder->SetOutputsFormat(std::vector<std::string>{info.format});
  std::vector<TypeId> types = {info.type->type_id()};
  kernel_build_info_builder->SetOutputsDeviceType(types);
  AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info_builder->Build(), new_value_node.get());
  return new_value_node;
}
// Abstract of output `output_idx` of `node` (handles multi-output nodes).
AbstractBasePtr GetOutputAbstract(const AnfNodePtr &node, size_t output_idx);
// functions to graphkernel model: conversions between AnfGraph and the lite model.
inner::LiteGraphPtr AnfGraph2LiteGraph(const FuncGraphPtr &func_graph);
FuncGraphPtr LiteGraph2AnfGraph(const inner::LiteGraphPtr &lite_graph, AnfNodePtrList *outputs = nullptr);
// remove parameter which is not used
void EliminateRedundantParameters(const FuncGraphPtr &func_graph, AnfNodePtrList *inputs);
// Selects the primitives whose registered level is enabled at `level`.
std::vector<PrimitivePtr> GetValidOps(
  const std::vector<std::tuple<std::string, unsigned int, PrimitivePtr>> &ops_with_level, unsigned int level);
// return a func_graph's manager
FuncGraphManagerPtr GetFuncGraphManager(const FuncGraphPtr &func_graph);
// Re-attaches `func_graph` to `mng` (e.g. after graph surgery) — confirm details in .cc.
void UpdateMng(FuncGraphManagerPtr mng, const FuncGraphPtr &func_graph);
} // namespace mindspore::graphkernel
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_GRAPH_KERNEL_HELPER_H_
|
mindspore-ai/mindspore
|
mindspore/lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TFLITE_LSTM_CELL_FUSION_H_
#define MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TFLITE_LSTM_CELL_FUSION_H_
#include <vector>
#include <memory>
#include <string>
#include "backend/optimizer/common/optimizer.h"
#include "utils/utils.h"
#include "include/errorcode.h"
namespace mindspore {
namespace opt {
// Pattern pass that fuses the TFLite while-loop LSTM-cell subgraph into a single
// LSTM node. Subclasses override the body-graph pattern / node-creation hooks.
class TfliteLstmCellFusion : public PatternProcessPass {
 public:
  // The node/cnode counts describe the expected shape of the while cond/body
  // subgraphs; 0 presumably means "use this class's defaults" — confirm in .cc.
  explicit TfliteLstmCellFusion(const std::string &name = "TfliteLstmCellFusion", bool multigraph = true,
                                int input_length = 0, int var_num = 0, int cond_nodes_num = 0, int cond_cnodes_num = 0,
                                int body_nodes_num = 0, int body_cnodes_num = 0);
  ~TfliteLstmCellFusion() override = default;
  // Matches `pattern` against `func_graph`; returns the variable binding on success.
  static EquivPtr MatchGraph(const FuncGraphPtr &func_graph, const PrimitiveVarMapPtr &primitive_vars,
                             const AnfNodePtr &pattern);
  // Like MatchGraph, but also checks the subgraph's total node/cnode counts.
  static EquivPtr CheckSubGraph(const FuncGraphPtr &func_graph, const AnfNodePtr &pattern,
                                const PrimitiveVarMapPtr &primitive_vars, const AnfNodePtr &anf_sub_graph,
                                size_t cnode_num, size_t all_node_num);
  // Gives `cnode` a tuple abstract with `output_num` elements.
  static lite::STATUS SetAbstractTuple(const CNodePtr &cnode, int output_num);
  // Creates a TupleGetItem extracting output `item_index` of `node`.
  static CNodePtr CreateOutputGetItem(const FuncGraphPtr &func_graph, const CNodePtr &node, int item_index);
 protected:
  bool Init() const;
  const BaseRef DefinePattern() const override;
  const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
  static lite::STATUS GetFloatScalarFromTensorInfo(const AnfNodePtr &tensor_info, float *v);
  static CNodePtr CreateSqueezeNode(const FuncGraphPtr &func_graph, const CNodePtr &input_node,
                                    const std::vector<int> &axis);
  static lite::STATUS AdjustOtherGetItems(const FuncGraphPtr &func_graph, const CNodePtr &while_cnode,
                                          const CNodePtr &lstm_cnode, const CNodePtr &output_get_item);
  static AnfNodePtr GetCondGraphPattern(const PrimitiveVarMapPtr &primitive_vars);
  // Overridable hooks for variants with a different body pattern / LSTM construction.
  virtual AnfNodePtr GetBodyGraphPattern(const PrimitiveVarMapPtr &primitive_vars) const;
  virtual CNodePtr CreateLSTMNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv, const EquivPtr &body_equiv,
                                  const std::string &base_name, float zoneout_cell, float zoneout_hidden) const;
 private:
  CNodePtr GetWhileCnode(const AnfNodePtr &cnode) const;
  bool CheckBodyGraph(const FuncGraphPtr &func_graph, const EquivPtr &equiv, const CNodePtr &while_cnode,
                      float *zoneout_cell, float *zoneout_hidden) const;
  static bool CheckReferencedOutputs(const FuncGraphPtr &func_graph, const CNodePtr &while_cnode);
  static lite::STATUS GetConcatedParam(const std::vector<AnfNodePtr> &params, const ParameterPtr &new_param,
                                       bool is_bias);
 protected:
  // Pattern variables captured during matching (mutable: filled in const methods).
  mutable VarPtr cell_zoneout_old_ = nullptr;
  mutable VarPtr cell_zoneout_new_ = nullptr;
  mutable VarPtr hidden_zoneout_old_ = nullptr;
  mutable VarPtr hidden_zoneout_new_ = nullptr;
  mutable std::vector<VarPtr> while_input_vars_;
 private:
  // Expected while-subgraph dimensions, configured by the constructor.
  size_t while_input_var_num_ = 0;
  size_t while_inputs_num_ = 0;
  size_t cond_nodes_num_ = 0;
  size_t cond_cnodes_num_ = 0;
  size_t body_nodes_num_ = 0;
  size_t body_cnodes_num_ = 0;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TFLITE_LSTM_CELL_FUSION_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.h
|
<filename>mindspore/ccsrc/backend/optimizer/somas/somas_solver_pre.h
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_SOLVER_PRE_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_SOLVER_PRE_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <map>
#include <memory>
#include <stack>
#include <unordered_map>
#include <vector>
#include "backend/session/kernel_graph.h"
using std::unordered_map;
using std::vector;
namespace mindspore {
namespace somas {
// Human-readable names for the heuristics below, indexed by SortingType,
// FittingType and AlgorithmType respectively (used in solver logs).
constexpr char const *sortingNames[6] = {"size(>), index(<)",
                                         "size(>), index(>)",
                                         "size(>), constraints(<), index(<)",
                                         "size(>), constraints(<), index(>)",
                                         "size(>), constraints(>), index(<)",
                                         "size(>), constraints(>), index(>)"};
constexpr char const *branchingNames[4] = {"bestfit", "smallest", "largest", "worstfit"};
constexpr char const *algorithmTypeNames[2] = {"Shared Objects", "Single Object"};
// Below this tensor count the solver runs single-threaded.
constexpr auto kParallelComputeSizeThreshold = 2000;
enum Status { FAILED, SUCCESS };
enum AlgorithmType { kManyObjects = 0, kSingleObject, kNumAlgorithmTypes };
// Tensor orderings tried by the solver; the extra orderings are debug-only.
enum SortingType {
  kGreaterSizeSmallerIndex = 0,
#ifdef SOMAS_DEBUG
  kGreaterSizeGreaterIndex,
  kGreaterSizeSmallerConstraintsSmallerIndex,
  kGreaterSizeSmallerConstraintsGreaterIndex,
  kGreaterSizeGreaterConstraintsSmallerIndex,
  kGreaterSizeGreaterConstraintsGreaterIndex,
#endif
  kNumSortingTypes
};
// Gap-fitting strategies; kLargest/kWorst are debug-only.
enum FittingType {
  kBest = 0,
  kSmallest,
#ifdef SOMAS_DEBUG
  kLargest,
  kWorst,
#endif
  kNumFittingTypes
};
// Fixed-capacity bitset sized at construction time. Bits are packed MSB-first
// into 64-bit words: bit 0 is the most significant bit of word 0.
class DynamicBitSet {
  const size_t bit_width_ = 64;  // bits per storage word
  // Index of the storage word containing bit `index`.
  inline size_t GetIndex(size_t index) const { return index / bit_width_; }
  // Single-bit mask for `index` inside its word (MSB-first layout).
  inline uint64_t GetBitMask(size_t index) const {
    return (((uint64_t)0x1) << (bit_width_ - 1 - (index % bit_width_)));
  }
  // Overwrite every storage word with `val`.
  inline void Reset(uint64_t val) { bit_.assign(bit_size_, val); }

 public:
  size_t bit_size_;            // number of 64-bit storage words
  std::vector<uint64_t> bit_;  // backing storage
  // Allocates enough words to hold `count` bits, all cleared.
  explicit DynamicBitSet(size_t count) : bit_size_((count + bit_width_ - 1) / bit_width_) { Reset(0x0); }
  ~DynamicBitSet() = default;
  // Sets bit `index`; optionally logs the word index and mask used.
  void SetBitTrue(size_t index, bool log = false) {
    if (log) {
      MS_LOG(INFO) << GetIndex(index) << " " << GetBitMask(index);
    }
    bit_[GetIndex(index)] |= GetBitMask(index);
  }
  // Clears bit `index`.
  void SetBitFalse(size_t index) { bit_[GetIndex(index)] &= (~GetBitMask(index)); }
  // Tests bit `index`.
  bool IsBitTrue(size_t index) const { return (bit_[GetIndex(index)] & GetBitMask(index)) != 0x0; }
  // Counts set bits using Kernighan's trick: each iteration clears the lowest set bit.
  size_t CountOnesNum() const {
    size_t total = 0;
    for (uint64_t word : bit_) {
      while (word != 0) {
        word &= word - 1;
        ++total;
      }
    }
    return total;
  }
  // Dumps every storage word in hex to stdout.
  void Log() {
    std::cout << "Start Print Bitset ";
    for (size_t idx = 0; idx < bit_size_; idx++) {
      std::cout << " bit [" << std::dec << idx << "] = " << std::hex << bit_[idx] << std::dec;
    }
    std::cout << std::endl;
  }
  // In-place union: a |= b. Both sets must have the same word count.
  friend void Union(DynamicBitSet *a, DynamicBitSet *b) {
    for (size_t idx = 0; idx < a->bit_size_; idx++) {
      a->bit_[idx] |= b->bit_[idx];
    }
  }
};
// Solver-side view of one tensor: its id, byte size, assigned offset, and the
// neighbour links / flags consulted by the offset-assignment heuristics.
struct SomasSolverTensorDesc {
  size_t index_;
  size_t size_;
  size_t offset_;
  bool lifelong_;
  size_t constraints_;
  using SomasSolverTensorDescPtr = std::shared_ptr<SomasSolverTensorDesc>;
  SomasSolverTensorDescPtr right_;
  SomasSolverTensorDescPtr left_;
  bool blocked_;
  SomasSolverTensorDesc() = default;
  // Caller supplies the scalar fields; neighbour links and flags start cleared.
  SomasSolverTensorDesc(size_t index, size_t size, size_t offset, bool blifelong)
      : index_(index),
        size_(size),
        offset_(offset),
        lifelong_(blifelong),
        constraints_(0),
        right_(nullptr),
        left_(nullptr),
        blocked_(false) {}
  // Refreshes all scalar fields in one call (neighbour links are untouched).
  void Update(size_t index, size_t size, size_t offset, bool blifelong, size_t constraints) {
    index_ = index;
    size_ = size;
    offset_ = offset;
    lifelong_ = blifelong;
    constraints_ = constraints;
  }
  // Text serialization: writes "<index> <size> <offset>\n"; reads the same triple.
  friend std::ostream &operator<<(std::ostream &out, const SomasSolverTensorDescPtr desc) {
    out << desc->index_ << " " << desc->size_ << " " << desc->offset_ << "\n";
    return out;
  }
  friend std::istream &operator>>(std::istream &in, SomasSolverTensorDescPtr desc) {
    in >> desc->index_ >> desc->size_ >> desc->offset_;
    return in;
  }
};
using SomasSolverTensorDescPtr = std::shared_ptr<SomasSolverTensorDesc>;
typedef std::unordered_map<size_t, SomasSolverTensorDescPtr> TensorsDescMap;
class SomasSolverPre {
public:
SomasSolverPre() = default;
~SomasSolverPre() = default;
SomasSolverPre(const SomasSolverPre &) = delete;
SomasSolverPre &operator=(const SomasSolverPre &) = delete;
size_t GetMaxOffset() { return max_offset_; }
Status Solving(const session::KernelGraph *graph, TensorsDescMap *tensors,
const std::vector<DynamicBitSet> *pConstraints, const vector<vector<size_t>> &continuous_v,
bool bVerifySolution, // true -> Check continuous and non overlapping constraints solution
bool ball = true, // true -> run full set of heuristics, false -> run single heuristic specified
SortingType sorting = kGreaterSizeSmallerIndex, FittingType fitting = kBest,
AlgorithmType algorithm = kManyObjects);
void Log(const session::KernelGraph *graph, const TensorsDescMap &tensors,
const std::vector<DynamicBitSet> *pConstraints_v, const vector<vector<size_t>> &continuous_v);
Status CheckTensors(const TensorsDescMap *pTensors, uint32_t index1, uint32_t index2);
Status AddContiguousInfoInMap(const vector<vector<size_t>> &continuous_v, TensorsDescMap *pTensors);
Status AddContiguousInfoInMultiMaps(const vector<vector<size_t>> &continuous_v, vector<TensorsDescMap> *vecTensorsMap,
const TensorsDescMap *pTensors);
private:
size_t max_offset_;
void SolverInputLog(const session::KernelGraph *graph, const TensorsDescMap &tensors,
const vector<vector<size_t>> &continuous_v);
void SolverOutputLog(const session::KernelGraph *graph, const TensorsDescMap &tensors) const;
vector<TensorsDescMap> CreateTensorsMaps(const TensorsDescMap &tensors, size_t total_sol);
void TensorRelationLog(const std::vector<DynamicBitSet> *pConstraints, const session::KernelGraph *graph);
};
using SomasSolverPrePtr = std::shared_ptr<SomasSolverPre>;
} // namespace somas
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_SOLVER_PRE_H_
|
mindspore-ai/mindspore
|
mindspore/lite/src/inner_kernel.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_INNER_KERNEL_H_
#define MINDSPORE_LITE_SRC_INNER_KERNEL_H_
#include <string>
#include <vector>
#include <memory>
#include <utility>
#include <algorithm>
#include "src/common/utils.h"
#include "src/common/log_util.h"
#include "nnacl/op_base.h"
#include "src/inner_context.h"
#include "src/tensor.h"
#include "include/errorcode.h"
#include "schema/model_generated.h"
#include "src/cxx_api/tensor/tensor_impl.h"
#include "include/api/context.h"
#include "include/api/kernel.h"
namespace mindspore::kernel {
// Base class for built-in lite kernels: owns its OpParameter, borrows its
// tensors (freed by the session), and provides the Prepare/PreProcess/Run/
// PostProcess execution skeleton plus train/eval and workspace management.
class InnerKernel : public Kernel {
 public:
  InnerKernel() = default;
  // Takes ownership of `parameter` (released with free() in the destructor, so
  // it must come from a malloc-family allocation). Tensors and `ctx` are borrowed.
  InnerKernel(OpParameter *parameter, std::vector<lite::Tensor *> in_tensors, std::vector<lite::Tensor *> out_tensors,
              const lite::Context *ctx)
      : op_parameter_(parameter),
        in_tensors_(std::move(in_tensors)),
        out_tensors_(std::move(out_tensors)),
        ms_context_(ctx) {}
  virtual ~InnerKernel() {
    // Release the workspace unconditionally: it can be allocated even when
    // op_parameter_ is null (the previous code leaked it in that case).
    FreeWorkspace();
    if (op_parameter_ != nullptr) {
      free(op_parameter_);
      op_parameter_ = nullptr;
    }
  }
  // Full execution: PreProcess -> Run -> PostProcess (see .cc).
  int Execute() override;
  // Actual computation; subclasses must override.
  virtual int Run() { return mindspore::lite::RET_ERROR; }
  int ReSize() override { return mindspore::lite::RET_ERROR; }
  // called before Run
  virtual int PreProcess();
  // called after Run
  virtual int PostProcess() { return FreeInWorkTensor(); }
  // Drops this kernel's reference on each input tensor after use.
  virtual int FreeInWorkTensor() const {
    for (auto &in_tensor : this->in_tensors()) {
      MS_ASSERT(in_tensor != nullptr);
      in_tensor->DecRefCount();
    }
    return lite::RET_OK;
  }
  int Prepare() override { return mindspore::lite::RET_OK; }
  OpParameter *op_parameter() const { return op_parameter_; }
  // True when every output shape is fully known (no -1 dims) and no input is a
  // TensorList (kObjectTypeTensorType), whose element shapes are not static.
  bool InferShapeDone() const {
    if (std::any_of(in_tensors_.begin(), in_tensors_.end(),
                    [](lite::Tensor *input) { return input->data_type() == kObjectTypeTensorType; })) {
      return false;
    }
    // Guard: front() on an empty vector is undefined behavior.
    if (out_tensors_.empty()) {
      return false;
    }
    auto shape = out_tensors_.front()->shape();
    if (std::find(shape.begin(), shape.end(), -1) != shape.end()) {
      return false;
    }
    return true;
  }
  schema::PrimitiveType type() const override {
    return (this->op_parameter_ != nullptr) ? schema::PrimitiveType(this->op_parameter_->type_)
                                            : schema::PrimitiveType_NONE;
  }
  // Replace all inputs/outputs from the public MSTensor API (downcast to lite::Tensor).
  void set_inputs(const std::vector<mindspore::tensor::MSTensor *> &in_tensors) {
    this->in_tensors_.resize(in_tensors.size());
    (void)std::transform(in_tensors.begin(), in_tensors.end(), in_tensors_.begin(),
                         [](mindspore::tensor::MSTensor *tensor) { return static_cast<lite::Tensor *>(tensor); });
  }
  void set_outputs(const std::vector<mindspore::tensor::MSTensor *> &out_tensors) {
    this->out_tensors_.resize(out_tensors.size());
    (void)std::transform(out_tensors.begin(), out_tensors.end(), out_tensors_.begin(),
                         [](mindspore::tensor::MSTensor *tensor) { return static_cast<lite::Tensor *>(tensor); });
  }
  // Lazily-built public-API views of the tensors (cached after the first call).
  const std::vector<mindspore::MSTensor> &inputs() override {
    if (inputs_.empty()) {
      std::transform(in_tensors_.begin(), in_tensors_.end(), std::back_inserter(inputs_), [](lite::Tensor *tensor) {
        return mindspore::MSTensor(std::make_shared<mindspore::MSTensor::Impl>(tensor));
      });
    }
    return inputs_;
  }
  const std::vector<mindspore::MSTensor> &outputs() override {
    if (outputs_.empty()) {
      std::transform(out_tensors_.begin(), out_tensors_.end(), std::back_inserter(outputs_), [](lite::Tensor *tensor) {
        return mindspore::MSTensor(std::make_shared<mindspore::MSTensor::Impl>(tensor));
      });
    }
    return outputs_;
  }
  void set_in_tensors(const std::vector<lite::Tensor *> &in_tensors) { this->in_tensors_ = in_tensors; }
  virtual void set_in_tensor(lite::Tensor *in_tensor, size_t index) {
    if (index >= in_tensors_.size()) {
      MS_LOG(ERROR) << "index: " << index << " larger than in_tensors size: " << in_tensors_.size();
      return;
    }
    this->in_tensors_[index] = in_tensor;
  }
  void set_out_tensors(const std::vector<lite::Tensor *> &out_tensors) { this->out_tensors_ = out_tensors; }
  virtual void set_out_tensor(lite::Tensor *out_tensor, size_t index) {
    if (index >= out_tensors_.size()) {
      MS_LOG(ERROR) << "index: " << index << " larger than out_tensors size: " << out_tensors_.size();
      return;
    }
    this->out_tensors_[index] = out_tensor;
  }
  const std::vector<lite::Tensor *> &in_tensors() const { return in_tensors_; }
  const std::vector<lite::Tensor *> &out_tensors() const { return out_tensors_; }
  // Train/eval mode switches; subclasses may do real work on override.
  virtual int Train() {
    this->train_mode_ = true;
    return mindspore::lite::RET_OK;
  }
  virtual bool IsTrain() const { return this->train_mode_; }
  virtual int Eval() {
    this->train_mode_ = false;
    return mindspore::lite::RET_OK;
  }
  virtual bool IsEval() const { return !this->train_mode_; }
  virtual void SetTrainable(bool trainable = true) { this->trainable_ = trainable; }
  virtual bool IsTrainable() const { return this->trainable_; }
  TypeId registry_data_type(void) { return registry_data_type_; }
  void set_registry_data_type(TypeId data_type) { registry_data_type_ = data_type; }
  // Scratch-buffer (workspace) management; allocation happens in the .cc.
  void set_workspace_size(size_t value) { workspace_size_ = value; }
  virtual size_t workspace_size() { return workspace_size_; }
  void AllocWorkspace();
  void FreeWorkspace();
  void *workspace() { return workspace_; }
  // Accepts an externally-provided workspace only if we did not allocate our own.
  void set_workspace(void *ws) {
    if (ws_allocated_ == false) {
      workspace_ = ws;
    }
  }
  const lite::Context *context() const { return this->ms_context_; }
  bool ws_allocated_ = false;  // true when AllocWorkspace() owns workspace_

 protected:
  OpParameter *op_parameter_ = nullptr;  // owned; freed in the destructor
  // tensor will free in ~lite_session()
  std::vector<lite::Tensor *> in_tensors_;
  std::vector<lite::Tensor *> out_tensors_;
  bool train_mode_ = false;
  bool trainable_ = false;  // parameters of this Kernel are trained in Train Session
  TypeId registry_data_type_ = kTypeUnknown;
  size_t workspace_size_ = 0;
  void *workspace_ = nullptr;
  const lite::Context *ms_context_ = nullptr;  // borrowed execution context
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_INNER_KERNEL_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/intrinsics/avx/TiledC4MatMulFp32.c
|
<gh_stars>1000+
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef ENABLE_AVX
#ifdef _MSC_VER
#include <immintrin.h>
#else
#include <x86intrin.h>
#endif
#include "nnacl/fp32/common_func_fp32.h"
// Horizontal-reduction helper: three levels of _mm256_hadd_ps (which adds
// adjacent pairs within each 128-bit lane, interleaving its two sources)
// collapse the four 8-float inputs so each output element is the sum of four
// floats from one of v0..v3. TiledC4MatmulFp32 relies on this exact
// lane-interleaved element ordering to match the C4 data layout.
static inline __m256 padd(__m256 v0, __m256 v1, __m256 v2, __m256 v3) {
  __m256 h0 = _mm256_hadd_ps(v0, v1);
  __m256 h1 = _mm256_hadd_ps(v2, v3);
  __m256 res = _mm256_hadd_ps(h0, h1);
  return res;
}
// AVX matmul over C4-packed data: for each group of 4 output channels (oc4)
// accumulates over ic4 groups of 4 input channels, processing two 16-float
// tiles of `src` per iteration. Weights are 4x4 blocks, broadcast per 128-bit
// lane so both lanes of a tile share the same weight column.
// NOTE(review): the loop counters are `int` compared against size_t oc4/ic4 —
// a signed/unsigned mismatch; harmless for realistic sizes but worth cleaning up.
void TiledC4MatmulFp32(float *dst, const float *src, const float *weight, size_t dst_step, size_t ic4, size_t oc4) {
  for (int oc = 0; oc < oc4; oc++) {
    float *dst_oc = dst + oc * dst_step;
    const float *weight_oc = weight + oc * ic4 * 16;  // 16 floats per 4x4 weight block
    for (int cur = 0; cur < 2; cur++) {               // two 16-float tiles per call
      float *cur_dst = dst_oc + cur * 16;
      const float *cur_src = src + cur * 16;
      __m256 in0 = _mm256_loadu_ps(cur_src);
      __m256 in1 = _mm256_loadu_ps(cur_src + 8);
      // Broadcast each 4-float weight column into both 128-bit lanes.
      __m256 w0 = _mm256_broadcast_ps((const __m128 *)(weight_oc));
      __m256 w1 = _mm256_broadcast_ps((const __m128 *)(weight_oc + 4));
      __m256 w2 = _mm256_broadcast_ps((const __m128 *)(weight_oc + 8));
      __m256 w3 = _mm256_broadcast_ps((const __m128 *)(weight_oc + 12));
      // First input-channel group initializes the accumulators (plain mul).
      __m256 d00 = _mm256_mul_ps(in0, w0);
      __m256 d01 = _mm256_mul_ps(in0, w1);
      __m256 d02 = _mm256_mul_ps(in0, w2);
      __m256 d03 = _mm256_mul_ps(in0, w3);
      __m256 d10 = _mm256_mul_ps(in1, w0);
      __m256 d11 = _mm256_mul_ps(in1, w1);
      __m256 d12 = _mm256_mul_ps(in1, w2);
      __m256 d13 = _mm256_mul_ps(in1, w3);
      // Remaining input-channel groups accumulate with fused multiply-add.
      for (int ic = 1; ic < ic4; ic++) {
        const float *src_ic = cur_src + ic * 32;  // src advances 32 floats per ic group (both tiles)
        in0 = _mm256_loadu_ps(src_ic);
        in1 = _mm256_loadu_ps(src_ic + 8);
        const float *weight_ic = weight_oc + ic * 16;
        w0 = _mm256_broadcast_ps((const __m128 *)(weight_ic));
        w1 = _mm256_broadcast_ps((const __m128 *)(weight_ic + 4));
        w2 = _mm256_broadcast_ps((const __m128 *)(weight_ic + 8));
        w3 = _mm256_broadcast_ps((const __m128 *)(weight_ic + 12));
        d00 = _mm256_fmadd_ps(in0, w0, d00);
        d01 = _mm256_fmadd_ps(in0, w1, d01);
        d02 = _mm256_fmadd_ps(in0, w2, d02);
        d03 = _mm256_fmadd_ps(in0, w3, d03);
        d10 = _mm256_fmadd_ps(in1, w0, d10);
        d11 = _mm256_fmadd_ps(in1, w1, d11);
        d12 = _mm256_fmadd_ps(in1, w2, d12);
        d13 = _mm256_fmadd_ps(in1, w3, d13);
      }
      // Horizontal-reduce the 8 accumulators into the two 16-float output tiles.
      _mm256_storeu_ps(cur_dst, padd(d00, d01, d02, d03));
      _mm256_storeu_ps(cur_dst + 8, padd(d10, d11, d12, d13));
    }
  }
}
#endif
|
mindspore-ai/mindspore
|
mindspore/ccsrc/runtime/framework/actor/control_flow/exit_actor.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_EXIT_ACTOR_H_
#define MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_EXIT_ACTOR_H_
#include <vector>
#include <string>
#include <memory>
#include <unordered_map>
#include <stack>
#include "runtime/framework/actor/actor_common.h"
#include "runtime/framework/actor/abstract_actor.h"
namespace mindspore {
namespace runtime {
// The exit actor is used to receive a set of result arrow and a branch id in the control flow, and then send the
// node in the result to the corresponding actor. It is the exit of the end of subgraph execution.
class ExitActor : public AbstractActor {
 public:
  // `parameters` are the front-end formal parameter nodes of the subgraph output.
  // NOTE(review): formal_parameters_ is std::vector<KernelWithIndex> but is
  // initialized from std::vector<AnfNodePtr> — confirm KernelWithIndex is
  // implicitly constructible from AnfNodePtr.
  ExitActor(const std::string &name, const std::vector<AnfNodePtr> &parameters)
      : AbstractActor(name, KernelTransformType::kExitActor, nullptr), formal_parameters_(parameters) {}
  ~ExitActor() override = default;
  // The exit actor run when receive the anfnode.
  void CollectRealParameter(const AnfNodePtr &output_node, size_t output_index, size_t output_position,
                            OpContext<DeviceTensor> *const context);
  // The exit actor run when receive the input branch id.
  void CollectBranchId(int branch_id, OpContext<DeviceTensor> *const context);

 private:
  friend class GraphScheduler;
  // Formal parameters of actor, which is the front node.
  std::vector<KernelWithIndex> formal_parameters_;
  // Input data, keyed by a per-run uuid (presumably the sequential-run id —
  // confirm against OpContext), then by output position.
  std::unordered_map<uuids::uuid *, std::unordered_map<size_t, KernelWithIndex>> input_nodes_;
  // Branch ids is used to record the id corresponding to the output branch.
  // In control flow, sub funcgraph may be called in multiple places, and the output must be return to different
  // places. Therefore, the output of each subgraph will be connected to a exit actor, and the caller will send
  // its branch id to the entrance actor of the subgraph. Then branch id will be sent by the entrance actor to
  // the exit actor connected to the output.
  // In a recursive scenario, the exit will sequentially receive the branch ids sent by the caller, and the exit
  // actor needs to store the branch ids in the stack, and pop up in turn when returning.
  std::unordered_map<uuids::uuid *, std::stack<int>> input_branch_ids_;
  // Output arrow: result arrows per destination branch id.
  std::unordered_map<int, std::vector<DataArrowPtr>> output_branch_result_arrows_;
};
using ExitActorPtr = std::shared_ptr<ExitActor>;
} // namespace runtime
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_EXIT_ACTOR_H_
|
mindspore-ai/mindspore
|
mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.h
|
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TRANSFORM_FUSION_H_
#define MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TRANSFORM_FUSION_H_
#include <string>
#include "backend/optimizer/common/optimizer.h"
#include "tools/converter/converter_flags.h"
using mindspore::converter::FmkType;
namespace mindspore::opt {
// Base pass that folds a convolution's following per-channel transform (scale /
// batchnorm-like — presumably, judging from the scale+offset parameters; see the
// concrete subclasses) into the conv's weight and bias tensors.
class ConvTransformFusion : public PatternProcessPass {
 public:
  explicit ConvTransformFusion(bool multigraph = true, const std::string &name = "ConvTransformFusion")
      : PatternProcessPass(name, multigraph) {}
  ~ConvTransformFusion() override = default;

 protected:
  // Subclass hook: fills the per-channel scale/offset arrays for the matched transform node.
  virtual int InitTransParam(const CNodePtr &, int, float *, float *) const = 0;

 private:
  const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
  int GenTransParam(const CNodePtr &, int, float *, float *) const;
  // Rebuilds the conv's weight/bias tensors with the transform folded in.
  int GenNewConvTensor(const FuncGraphPtr &, const CNodePtr &, int, const float *, const float *) const;
  int CalNewWeightTensor(const CNodePtr &, const tensor::TensorPtr &, int, const float *) const;
  int CalNewBiasTensor(float *, int, bool, const float *, const float *) const;
  // Fusion is skipped when the conv weight is not a constant.
  bool IsVariableWeightConv(const CNodePtr &conv_node) const;

 protected:
  FmkType fmk_type_ = converter::kFmkTypeTf;  // source framework; affects weight layout handling
};
} // namespace mindspore::opt
#endif // MINDSPORE_LITE_SRC_PASS_FUSION_CONV_TRANSFORM_FUSION_H_
|
mindspore-ai/mindspore
|
mindspore/lite/src/delegate/tensorrt/tensorrt_runtime.h
|
<filename>mindspore/lite/src/delegate/tensorrt/tensorrt_runtime.h
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_TENSORRT_BUILDER_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_TENSORRT_BUILDER_
#include <NvInfer.h>
#include "include/errorcode.h"
#include "src/delegate/tensorrt/tensorrt_utils.h"
#include "src/delegate/tensorrt/tensorrt_allocator.h"
#define MAX_BATCH_SIZE 64
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
namespace mindspore::lite {
// Bridges TensorRT's internal logging into the MindSpore logging system,
// mapping each nvinfer1 severity onto the closest MS log level.
class TensorRTLogger : public nvinfer1::ILogger {
  void log(Severity severity, const char *msg) noexcept override {
    switch (severity) {
      case Severity::kINTERNAL_ERROR:
      case Severity::kERROR:
        MS_LOG(ERROR) << msg;
        break;
      case Severity::kWARNING:
        MS_LOG(WARNING) << msg;
        break;
      case Severity::kINFO:
        MS_LOG(INFO) << msg;
        break;
      default:
        // Remaining severities (e.g. kVERBOSE) are treated as debug output.
        MS_LOG(DEBUG) << msg;
        break;
    }
  }
};
// Owns the per-process TensorRT build context: the nvinfer1 builder, the
// logger it reports through, and the device allocator shared by engines.
class TensorRTRuntime {
 public:
  TensorRTRuntime() = default;
  ~TensorRTRuntime();
  // Creates the builder and allocator; implemented in the .cc file.
  int Init();
  nvinfer1::IBuilder *GetBuilder() { return this->builder_; }
  int GetBatchSize() { return batch_size_; }
  void SetBatchSize(int batch_size) { batch_size_ = batch_size; }
  TensorRTAllocator *GetAllocator() { return this->allocator_; }

 private:
  // Guards against double initialization in Init().
  bool is_init_ = false;
  nvinfer1::IBuilder *builder_{nullptr};
  TensorRTLogger logger_;
  TensorRTAllocator *allocator_{nullptr};
  int batch_size_{0};
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_TENSORRT_BUILDER_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/pipeline/jit/static_analysis/evaluator.h
|
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PIPELINE_JIT_STATIC_ANALYSIS_EVALUATOR_H_
#define MINDSPORE_CCSRC_PIPELINE_JIT_STATIC_ANALYSIS_EVALUATOR_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include <stack>
#include "pipeline/jit/static_analysis/static_analysis.h"
#include "pipeline/jit/static_analysis/async_eval_result.h"
#include "utils/ms_context.h"
namespace mindspore {
namespace abstract {
using EvaluatorCacheMgrPtr = std::shared_ptr<EvaluatorCacheMgr>;
using EvaluatorAttrMap =
std::unordered_map<AbstractBasePtrList, AttrValueMapPtr, AbstractBasePtrListHasher, AbstractBasePtrListEqual>;
using EvaluatorAttrCache = MultiThreadCache<AbstractBasePtrList, AttrValueMapPtr, EvaluatorAttrMap>;
using EvaluatorAttrCachePtr = std::shared_ptr<EvaluatorAttrCache>;
// Base class for all static-analysis evaluators. An evaluator computes the
// abstract value (EvalResult) of a node given the abstract values of its
// arguments, caching results per argument list.
class Evaluator : public Base {
 public:
  explicit Evaluator(const std::string &id)
      : identifier_(id),
        evaluator_cache_mgr_(std::make_shared<EvaluatorCacheMgr>()),
        attr_cache_(std::make_shared<EvaluatorAttrCache>()) {}
  ~Evaluator() override = default;
  MS_DECLARE_PARENT(Evaluator, Base);
  // Difference between Run() and Eval():
  // Run() is called with a ConfigPtrList, while Eval() is called with already
  // evaluated AbstractBasePtr arguments.
  // Run() modifies the cache member, so it cannot be marked const.
  virtual EvalResultPtr Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                            const AnfNodeConfigPtr &out_conf);
  // Short-circuit variant of Run(); only meaningful for graph evaluators —
  // the base implementation always raises.
  virtual EvalResultPtr RunShortCircuit(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                                        const AnfNodeConfigPtr &out_conf) {
    MS_LOG(EXCEPTION) << "Not support for this evaluator: " << ToString();
  }
  virtual EvalResultPtr Eval(AnalysisEnginePtr engine, const AbstractBasePtrList &args_spec_list,
                             const AnfNodeConfigPtr &out_conf) = 0;
  virtual EvalResultPtr SingleRun(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                                  const AnfNodeConfigPtr &out_conf);
  // Hook for subclasses to canonicalize arguments before cache lookup.
  virtual AbstractBasePtrList NormalizeArgs(const AbstractBasePtrList &args_spec_list) const { return args_spec_list; }
  virtual AbstractBasePtrList BroadenUndeterminedArgs(const AbstractBasePtrList &args_spec_list) {
    return args_spec_list;
  }
  // When sparse support is enabled and any argument has an undetermined type,
  // returns an AbstractUndetermined result; otherwise returns nullptr so the
  // caller performs a normal evaluation.
  virtual EvalResultPtr AbstractEval(const AbstractBasePtrList &args_spec_list) {
    auto context = MsContext::GetInstance();
    MS_EXCEPTION_IF_NULL(context);
    bool enable_sparse = context->get_param<bool>(MS_CTX_ENABLE_SPARSE);
    if (!enable_sparse) {
      return nullptr;
    }
    auto is_abstract = std::any_of(args_spec_list.begin(), args_spec_list.end(), [](auto &arg) {
      if (arg->BuildType()->type_id() == kObjectTypeUndeterminedType) {
        return true;
      }
      return false;
    });
    if (is_abstract) {
      MS_LOG(DEBUG) << "Eval " << identifier_ << " return abstract result";
      return std::make_shared<EvalResult>(std::make_shared<AbstractUndetermined>(), std::make_shared<AttrValueMap>());
    }
    return nullptr;
  }
  std::string ToString() const override { return identifier_; }
  // Node this evaluator is bound to, held weakly to avoid ownership cycles.
  virtual AnfNodePtr bound_node() const { return bound_node_.lock(); }
  virtual void set_bound_node(const AnfNodePtr &node) { bound_node_ = AnfNodeWeakPtr(node); }
  EvaluatorCacheMgrPtr evaluator_cache_mgr() const { return evaluator_cache_mgr_; }
  EvaluatorAttrCachePtr attr_cache() const { return attr_cache_; }
  // Serializes concurrent Run() calls on the same evaluator.
  std::recursive_timed_mutex &eval_lock() { return eval_lock_; }

 protected:
  std::string identifier_;
  AnfNodeWeakPtr bound_node_;
  EvaluatorCacheMgrPtr evaluator_cache_mgr_;
  std::recursive_timed_mutex eval_lock_;

 private:
  EvaluatorAttrCachePtr attr_cache_;
};
// Base for primitive-operator evaluators. Primitives are evaluated through
// Run() directly, so calling Eval() on a PrimEvaluator is an error.
class PrimEvaluator : public Evaluator {
 public:
  explicit PrimEvaluator(const std::string &id) : Evaluator(id) {}
  ~PrimEvaluator() override = default;
  MS_DECLARE_PARENT(PrimEvaluator, Evaluator);
  EvalResultPtr Eval(AnalysisEnginePtr, const AbstractBasePtrList &, const AnfNodeConfigPtr &) final {
    MS_LOG(EXCEPTION) << "Eval() should not be called, Run() method should be called";
  }
};
// Primitive evaluator whose result depends only on the abstract argument
// values (no node configuration needed); subclasses implement EvalPrim().
class TrivialPrimEvaluator : public PrimEvaluator {
 public:
  explicit TrivialPrimEvaluator(const std::string &id) : PrimEvaluator(id) {}
  ~TrivialPrimEvaluator() override = default;
  MS_DECLARE_PARENT(TrivialPrimEvaluator, PrimEvaluator);
  EvalResultPtr Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                    const AnfNodeConfigPtr &out_conf) final;
  virtual EvalResultPtr EvalPrim(const AnalysisEnginePtr &engine, const AbstractBasePtrList &args_spec_list) = 0;
};
// Primitive evaluator that additionally needs the first argument's config and
// the output node config (e.g. for primitives that inspect the graph context).
class TransitionPrimEvaluator : public PrimEvaluator {
 public:
  explicit TransitionPrimEvaluator(const std::string &id) : PrimEvaluator(id) {}
  ~TransitionPrimEvaluator() override = default;
  MS_DECLARE_PARENT(TransitionPrimEvaluator, PrimEvaluator);
  EvalResultPtr Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                    const AnfNodeConfigPtr &out_conf) final;
  // Parameter in_conf0 : the first element in args_conf_list.
  virtual EvalResultPtr EvalPrim(const AnalysisEnginePtr &engine, const AbstractBasePtrList &args_spec_list,
                                 const ConfigPtr &in_conf0, const AnfNodeConfigPtr &out_conf) = 0;
};
// Primitive evaluator that works on the raw argument configs rather than on
// evaluated abstract values.
class SymbolicPrimEvaluator : public PrimEvaluator {
 public:
  explicit SymbolicPrimEvaluator(const std::string &id) : PrimEvaluator(id) {}
  ~SymbolicPrimEvaluator() override = default;
  MS_DECLARE_PARENT(SymbolicPrimEvaluator, PrimEvaluator);
  EvalResultPtr Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                    const AnfNodeConfigPtr &out_conf) final;
  virtual EvalResultPtr EvalPrim(const ConfigPtrList &args_conf_list) = 0;
};
// Evaluator will be stored in AnalysisEngine.evaluators_
using EvaluatorPtrList = std::vector<EvaluatorPtr>;
// Placeholder evaluator whose Eval() always yields nullptr; used where an
// evaluator slot must be filled but no real analysis should happen.
class DummyEvaluator : public Evaluator {
 public:
  DummyEvaluator() : Evaluator("dummy") {}
  ~DummyEvaluator() override = default;
  MS_DECLARE_PARENT(DummyEvaluator, Evaluator);
  EvalResultPtr Eval(AnalysisEnginePtr, const AbstractBasePtrList &, const AnfNodeConfigPtr &) override {
    return nullptr;
  }
};
// Wrap another evaluator to track a subset of uses.
// A TrackedEvaluator has its own cache that maps possible calls to
// their results, but is ultimately backed by a different evaluator.
// Multiple TrackedEvaluators can be backed by the same Evaluator.
// Wrap another evaluator to track a subset of uses.
// A TrackedEvaluator has its own cache that maps possible calls to
// their results, but is ultimately backed by a different evaluator.
// Multiple TrackedEvaluators can be backed by the same Evaluator.
class TrackedEvaluator : public Evaluator {
 public:
  explicit TrackedEvaluator(const EvaluatorPtr &subinf) : Evaluator("TrackedEvaluator"), sub_evaluator_(subinf) {}
  ~TrackedEvaluator() override = default;
  MS_DECLARE_PARENT(TrackedEvaluator, Evaluator);
  // Delegate the bound node to the wrapped evaluator when one is present.
  AnfNodePtr bound_node() const override {
    if (sub_evaluator_ != nullptr) {
      return sub_evaluator_->bound_node();
    }
    return bound_node_.lock();
  }
  void set_bound_node(const AnfNodePtr &node) override {
    if (sub_evaluator_ != nullptr) {
      sub_evaluator_->set_bound_node(node);
    }
    bound_node_ = AnfNodeWeakPtr(node);
  }
  EvalResultPtr Eval(AnalysisEnginePtr, const AbstractBasePtrList &, const AnfNodeConfigPtr &) override {
    MS_LOG(EXCEPTION) << "Eval() should not be called, Run() method should be called";
  }
  EvalResultPtr Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                    const AnfNodeConfigPtr &out_conf) override;
  // Guard against a null wrapped evaluator, consistent with bound_node():
  // the previous code dereferenced sub_evaluator_ unconditionally.
  std::string ToString() const override {
    if (sub_evaluator_ == nullptr) {
      return identifier_;
    }
    return identifier_ + "_" + sub_evaluator_->ToString();
  }

 private:
  EvaluatorPtr sub_evaluator_;  // The evaluator that actually performs analysis.
};
using FuncGraphCacheMap =
std::unordered_map<AbstractBasePtrList, FuncGraphPtr, AbstractBasePtrListHasher, AbstractBasePtrListEqual>;
class StackFrame;
using StackFramePtr = std::shared_ptr<StackFrame>;
// Common machinery for evaluating function graphs: obtains the concrete
// FuncGraph for a set of abstract arguments (GetFuncGraph) and drives its
// evaluation either recursively or via explicit stack frames.
class BaseFuncGraphEvaluator : public Evaluator {
 public:
  explicit BaseFuncGraphEvaluator(const AnalysisContextPtr &context)
      : Evaluator("basegraph"), parent_context_(context) {}

  ~BaseFuncGraphEvaluator() override = default;
  MS_DECLARE_PARENT(BaseFuncGraphEvaluator, Evaluator);

  EvalResultPtr RunShortCircuit(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                                const AnfNodeConfigPtr &out_conf) override;

  EvalResultPtr Eval(AnalysisEnginePtr engine, const AbstractBasePtrList &args_spec_list,
                     const AnfNodeConfigPtr &out_conf) override;

  // Resolve the concrete graph to evaluate for these abstract arguments.
  virtual FuncGraphPtr GetFuncGraph(AnalysisEnginePtr engine, const AbstractBasePtrList &args_spec_list) = 0;

  AnalysisContextPtr parent_context() const { return parent_context_; }
  void set_parent_context(const AnalysisContextPtr &parent_context) { parent_context_ = parent_context; }

 protected:
  AnalysisContextPtr parent_context_;

 private:
  // Direct recursive evaluation of the graph body (may deepen the C++ stack).
  AbstractBasePtr LaunchRecursiveEval(const AnalysisEnginePtr &engine, const FuncGraphPtr &fg,
                                      const AnalysisContextPtr &context);
  // Add functions for stack frame routine.
  // Stack-frame based evaluation: avoids native recursion by managing an
  // explicit frame stack.
  AbstractBasePtr LaunchStackFrame(const AnalysisEnginePtr &engine, const FuncGraphPtr &fg,
                                   const AnalysisContextPtr &context);
  static void EnterStackFrame(const AnalysisEnginePtr &engine, const StackFramePtr &current_stack_frame,
                              const StackFramePtr &new_stack_frame);
  static void LeaveStackFrame(const AnalysisEnginePtr &, const StackFramePtr &current_stack_frame);
};
// Evaluator for a user-defined FuncGraph. Caches graph specializations per
// argument list and tracks previously seen argument lists in trace_ to detect
// recursion that requires broadening.
class FuncGraphEvaluator : public BaseFuncGraphEvaluator {
 public:
  FuncGraphEvaluator(const FuncGraphPtr &func_graph, const AnalysisContextPtr &context)
      : BaseFuncGraphEvaluator(context), func_graph_(func_graph) {}

  ~FuncGraphEvaluator() override = default;
  MS_DECLARE_PARENT(FuncGraphEvaluator, BaseFuncGraphEvaluator);

  FuncGraphPtr GetFuncGraph(AnalysisEnginePtr engine, const AbstractBasePtrList &args_spec_list) override;

  FuncGraphPtr func_graph() { return func_graph_; }

  AbstractBasePtrList NormalizeArgs(const AbstractBasePtrList &args_spec_list) const override;
  AbstractBasePtrList BroadenUndeterminedArgs(const AbstractBasePtrList &args_spec_list) override;
  std::string ToString() const override { return identifier_ + "_" + func_graph_->ToString(); }

 private:
  FuncGraphPtr func_graph_;
  // Specialized graph per normalized argument list.
  FuncGraphCacheMap func_graph_cache_;
  // Argument lists seen so far, used to detect repeated/recursive calls.
  std::vector<AbstractBasePtrList> trace_;
};
using FuncGraphEvaluatorPtr = std::shared_ptr<FuncGraphEvaluator>;
// Evaluator for a MetaFuncGraph (a graph generated on demand from argument
// specs). Uses a dummy analysis context since meta graphs have no parent.
class MetaFuncGraphEvaluator : public BaseFuncGraphEvaluator {
 public:
  // Note: context parameter is not used;
  MetaFuncGraphEvaluator(const MetaFuncGraphPtr &meta_func_graph, const ScopePtr &scope)
      : BaseFuncGraphEvaluator(AnalysisContext::DummyContext()), meta_func_graph_(meta_func_graph), scope_(scope) {}
  ~MetaFuncGraphEvaluator() override = default;
  MS_DECLARE_PARENT(MetaFuncGraphEvaluator, BaseFuncGraphEvaluator);

  FuncGraphPtr GetFuncGraph(AnalysisEnginePtr engine, const AbstractBasePtrList &args_spec_list) override;

  // Return normalized versions of the arguments.
  AbstractBasePtrList NormalizeArgs(const AbstractBasePtrList &args_spec_list) const override {
    return meta_func_graph_->NormalizeArgs(args_spec_list);
  }

  std::string ToString() const override { return identifier_ + "_" + meta_func_graph_->ToString(); }

 private:
  MetaFuncGraphPtr meta_func_graph_;
  // Generated graph per argument list.
  FuncGraphCacheMap func_graph_cache_;
  // Scope the generated graphs are created under.
  ScopePtr scope_;
};
// Evaluator for a partial application: prepends the stored abstract arguments
// to the incoming ones and delegates to the wrapped evaluator.
class PartialAppEvaluator : public Evaluator {
 public:
  PartialAppEvaluator(const EvaluatorPtr &evaluator, const AbstractBasePtrList &args)
      : Evaluator("PartialAppEvaluator"), evaluator_(evaluator), args_spec_list_(args) {}
  ~PartialAppEvaluator() override = default;
  MS_DECLARE_PARENT(PartialAppEvaluator, Evaluator);
  // Delegate the bound node to the wrapped evaluator when one is present.
  AnfNodePtr bound_node() const override {
    if (evaluator_ != nullptr) {
      return evaluator_->bound_node();
    }
    return bound_node_.lock();
  }
  void set_bound_node(const AnfNodePtr &node) override {
    if (evaluator_ != nullptr) {
      evaluator_->set_bound_node(node);
    }
    bound_node_ = AnfNodeWeakPtr(node);
  }
  EvalResultPtr Eval(AnalysisEnginePtr, const AbstractBasePtrList &, const AnfNodeConfigPtr &) override {
    MS_LOG(EXCEPTION) << "Should not be called, Run() method should be called";
  }
  EvalResultPtr Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                    const AnfNodeConfigPtr &out_conf) override;
  // Guard against a null wrapped evaluator, consistent with bound_node():
  // the previous code dereferenced evaluator_ unconditionally.
  std::string ToString() const override {
    if (evaluator_ == nullptr) {
      return identifier_;
    }
    return identifier_ + "_" + evaluator_->ToString();
  }

 private:
  EvaluatorPtr evaluator_;             // Evaluator receiving the full argument list.
  AbstractBasePtrList args_spec_list_; // Pre-bound (partially applied) arguments.
};
// Evaluator with a fixed, pre-declared signature: it checks the incoming
// abstract arguments against args_spec_list_ and yields the stored output
// abstraction (see Eval() in the .cc file).
class VirtualEvaluator : public Evaluator {
 public:
  VirtualEvaluator(const AbstractBasePtrList &args_spec_list, const AbstractBasePtr &output)
      : Evaluator("virtual"), args_spec_list_(args_spec_list), output_(output) {}
  ~VirtualEvaluator() override = default;
  MS_DECLARE_PARENT(VirtualEvaluator, Evaluator);
  EvalResultPtr Eval(AnalysisEnginePtr engine, const AbstractBasePtrList &args_spec_list,
                     const AnfNodeConfigPtr &out_conf) override;
  // NOTE: the former ToString() override duplicated Evaluator::ToString()
  // (both returned identifier_), so it has been removed; the base
  // implementation is inherited unchanged.

 private:
  AbstractBasePtrList args_spec_list_;  // Expected abstract argument types.
  AbstractBasePtr output_;              // Declared abstract result.
};
// Evaluator for the J (autodiff) transform of a function: delegates analysis
// of the original function to the wrapped evaluator and keeps the original
// abstract function for constructing the J result.
class JEvaluator : public Evaluator {
 public:
  JEvaluator(const EvaluatorPtr &evaluator, const AbstractFunctionPtr &orig_func)
      : Evaluator("JEvaluator"), evaluator_(evaluator), orig_func_(orig_func) {}
  ~JEvaluator() override = default;
  MS_DECLARE_PARENT(JEvaluator, Evaluator);
  // Delegate the bound node to the wrapped evaluator when one is present.
  AnfNodePtr bound_node() const override {
    if (evaluator_ != nullptr) {
      return evaluator_->bound_node();
    }
    return bound_node_.lock();
  }
  void set_bound_node(const AnfNodePtr &node) override {
    if (evaluator_ != nullptr) {
      evaluator_->set_bound_node(node);
    }
    bound_node_ = AnfNodeWeakPtr(node);
  }
  EvalResultPtr Eval(AnalysisEnginePtr, const AbstractBasePtrList &, const AnfNodeConfigPtr &) override {
    MS_LOG(EXCEPTION) << "Should not be called, Run() method should be called";
  }
  EvalResultPtr Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list,
                    const AnfNodeConfigPtr &out_conf) override;
  // Guard against a null wrapped evaluator, consistent with bound_node():
  // the previous code dereferenced evaluator_ unconditionally.
  std::string ToString() const override {
    if (evaluator_ == nullptr) {
      return identifier_;
    }
    return identifier_ + "_" + evaluator_->ToString();
  }

 private:
  EvaluatorPtr evaluator_;        // Evaluator for the function being differentiated.
  AbstractFunctionPtr orig_func_; // The original (untransformed) abstract function.
};
void BroadenArgs(const AbstractBasePtrList &args_spec_list, AbstractBasePtrList *broaded_args);
} // namespace abstract
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PIPELINE_JIT_STATIC_ANALYSIS_EVALUATOR_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/runtime/device/memory_scheduler.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_MEMORY_SCHEDULER_H_
#define MINDSPORE_CCSRC_RUNTIME_DEVICE_MEMORY_SCHEDULER_H_
#include <vector>
#include <map>
#include <set>
#include <memory>
#include <utility>
namespace mindspore {
namespace device {
// Abstract device/host memory backend used by MemScheduler: allocation on
// both sides plus async host<->device swapping on a given stream.
class MemHandler {
 public:
  // Virtual destructor: this is a polymorphic base that implementations are
  // deleted through; without it, delete-through-base is undefined behavior.
  virtual ~MemHandler() = default;
  // Bytes currently available on the device.
  virtual size_t GetAvailableMemSize() = 0;
  virtual void *MallocDevice(size_t mem_size) = 0;
  virtual void FreeDevice(void *ptr) = 0;
  virtual void *MallocHost(size_t mem_size) = 0;
  virtual void FreeHost(void *ptr) = 0;
  // Async copy host -> device on `stream`.
  virtual void SwapIn(const void *host_ptr, void *device_ptr, size_t mem_size, void *stream) = 0;
  // Async copy device -> host on `stream`.
  virtual void SwapOut(const void *device_ptr, void *host_ptr, size_t mem_size, void *stream) = 0;
};
enum MemPriority { kMemPriorityLow, kMemPriorityHigh };
// Records per-step memory events (malloc/get/free/swap) during a first
// "recording" pass over the computation, then plans swaps so later passes fit
// within the available device memory.
class MemScheduler {
  enum EventType { kInit, kMalloc, kGet, kFree, kSwapIn, kSwapOut };
  // One memory event on a keyed buffer at a given compute step.
  struct Event {
    Event(const EventType &in_type, size_t in_index) {
      type = in_type;
      index = in_index;
    }
    EventType type;
    size_t index{0};            // Compute step the event belongs to.
    size_t mem_size{0};         // Size of the buffer in bytes.
    const void *key{nullptr};   // Opaque identity of the buffer.
  };

 public:
  MemScheduler() = default;
  ~MemScheduler() = default;
  // True while the first (recording) pass is collecting events.
  bool need_record_event() const { return need_record_event_; }
  void set_need_record_event(bool flag) { need_record_event_ = flag; }
  bool optimized() const { return optimized_; }
  void set_optimized(bool flag) { optimized_ = flag; }
  void SetMemHandler(const std::shared_ptr<MemHandler> &handler) { mem_handler_ = handler; }
  // Registers a buffer with its host backing store before compute starts.
  void Init(const void *key, void *host_ptr, size_t mem_size, MemPriority priority = kMemPriorityLow);
  // Returns the device address for `key`, allocating (and recording) if needed.
  void *GetOrMalloc(const void *key, size_t mem_size, MemPriority priority = kMemPriorityLow);
  // Rewinds to the first compute step for the next pass.
  void Reset() { compute_index_ = 0; }
  // Executes the pre/post events of the current step (swaps, mallocs, frees).
  bool PreCompute(void *stream);
  bool PostCompute(void *stream);
  // Plans swaps from the recorded events so memory usage fits the device.
  void OptMemUsage();
  void Clear();
  bool IsHighPriorityMem(const void *key);
  void SetMemPriority(const void *key, MemPriority priority);
  // Fraction of available memory the plan may use (see mem_used_factor_).
  void SetMemUsedFactor(float factor) { mem_used_factor_ = factor; }
  void SetNeedSwap(bool flag) { need_swap_ = flag; }

 private:
  void Record(const void *key, const EventType &event_type, size_t mem_size = 0);
  void GenComputeMemEvents();
  void CheckMemSize();
  void CountMemUsage();
  void GenEventSpan();
  void GenNoSwapEventSet();
  std::map<const void *, MemPriority> mem_priority_;
  // All events per buffer, in recording order.
  std::map<const void *, std::vector<std::shared_ptr<Event>>> mem_events_;
  // Events to run before/after each compute step (indexed by step).
  std::vector<std::vector<std::shared_ptr<Event>>> pre_compute_events_;
  std::vector<std::vector<std::shared_ptr<Event>>> post_compute_events_;
  // Current device address per buffer key.
  std::map<const void *, void *> mem_result_;
  std::map<const void *, void *> init_host_ptr_;
  std::map<const void *, void *> swap_host_ptr_;
  std::map<const void *, void *> high_priority_device_ptr_;
  size_t compute_index_{0};
  bool need_record_event_{true};
  bool optimized_{false};
  bool has_compute_mem_events_{false};
  std::shared_ptr<MemHandler> mem_handler_{nullptr};
  bool need_swap_{false};
  // Events ordered by their step span, used to pick swap candidates.
  std::multimap<size_t, std::shared_ptr<Event>> event_span_;
  std::set<std::shared_ptr<Event>> no_swap_events_;
  std::vector<size_t> min_mem_used_;
  size_t mem_used_without_swap_{0};
  size_t min_mem_needed_{0};
  // Safety margin: only this fraction of device memory is budgeted.
  float mem_used_factor_{0.9};
};
class MemSchedulerManager {
public:
MemSchedulerManager() = default;
~MemSchedulerManager() = default;
std::shared_ptr<MemScheduler> GetOrCreateMemScheduler(uint64_t uid) {
auto scheduler = GetMemScheduler(uid);
if (scheduler == nullptr) {
scheduler = std::make_shared<MemScheduler>();
graph_mem_scheduler_map_[uid] = scheduler;
}
return scheduler;
}
std::shared_ptr<MemScheduler> GetMemScheduler(uint64_t uid) {
auto iter = graph_mem_scheduler_map_.find(uid);
if (iter != graph_mem_scheduler_map_.end()) {
return iter->second;
}
return nullptr;
}
private:
std::map<uint64_t, std::shared_ptr<MemScheduler>> graph_mem_scheduler_map_;
};
} // namespace device
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_DEVICE_MEMORY_SCHEDULER_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/optimizer/graph_kernel/model/node.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_MODEL_NODE_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_MODEL_NODE_H_
#include <memory>
#include <algorithm>
#include <functional>
#include <sstream>
#include <vector>
#include <unordered_map>
#include <set>
#include <iostream>
#include <utility>
#include <string>
#include <stdexcept>
#include "mindspore/core/ir/dtype/type_id.h"
#include "mindspore/core/ir/value.h"
#include "mindspore/core/ir/tensor.h"
#include "mindspore/core/utils/shape_utils.h"
#include "utils/utils.h"
#include "backend/kernel_compiler/common_utils.h"
namespace mindspore::graphkernel::inner {
// Kind tag for graph-kernel model nodes (see Node::NodeType()).
enum class NType {
  Base,       // Plain base node.
  Primitive,  // Operator node.
  Parameter,  // Graph input.
  Value,      // Constant tensor.
  Output,     // Graph output sentinel.
};
using DFormat = std::string;
using DShape = ShapeVector;
using DAttrs = std::unordered_map<std::string, ValuePtr>;
// Aggregate of the tensor metadata carried by every model node:
// shape, element type and data format.
struct NodeBase {
  DShape shape;
  TypeId type;
  DFormat format;
};
class Node;
using NodePtr = std::shared_ptr<Node>;
using NodePtrList = std::vector<NodePtr>;
// Base node of the graph-kernel lite model. Maintains a doubly-linked
// use-def structure: inputs_ holds producers, users_ maps each consumer to
// the input indices at which this node is used. The user bookkeeping is kept
// consistent exclusively through AddInput/SetInput/SetInputs.
class Node : public NodeBase, public std::enable_shared_from_this<Node> {
 public:
  Node(const NodeBase &baseinfo, const std::string &name) : NodeBase(baseinfo), name_(name) {}
  virtual ~Node() {
    // remove this node from the previous nodes' user.
    SetInputs({});
  }

  // Overwrites this node's shape/type/format from `baseinfo`.
  void SetBaseInfo(NodeBase baseinfo) {
    this->shape = std::move(baseinfo.shape);
    this->type = std::move(baseinfo.type);
    this->format = std::move(baseinfo.format);
  }
  virtual NType NodeType() { return NType::Base; }
  // Streams the node via its virtual Dump().
  friend std::ostream &operator<<(std::ostream &output, const Node &n) {
    std::ostringstream os;
    n.Dump(os);
    output << os.str();
    return output;
  }
  virtual void Dump(std::ostringstream &os) const = 0;
  virtual void DumpTensor(std::ostringstream &os) const;

  // Input editing: each call also updates the producer's users_ map.
  void AddInput(const NodePtr &new_input);
  // i: the index to be set. `i` should be in range [0, inputs_.size())
  void SetInput(size_t i, const NodePtr &new_input);
  void SetInputs(const NodePtrList &inputs);
  // Redirects every user of this node to `other_node`.
  void ReplaceWith(const NodePtr &other_node);
  void SetAttrs(const DAttrs &attrs) { attrs_ = attrs; }
  void SetAttr(const std::string &key, const ValuePtr &value) { attrs_[key] = value; }

  // Downcast helper; caller must know the dynamic type is T.
  template <typename T>
  std::shared_ptr<T> As() {
    return std::static_pointer_cast<T>(shared_from_this());
  }

  const std::string &name() const { return name_; }
  const DAttrs &attrs() const { return attrs_; }
  const NodePtr &input(size_t i) const { return inputs_[i]; }
  const NodePtrList &inputs() const { return inputs_; }
  const std::unordered_map<Node *, std::set<size_t>> &users() const { return users_; }

 protected:
  std::string name_;
  DAttrs attrs_;
  NodePtrList inputs_;
  // Consumer -> set of input indices at which this node appears.
  std::unordered_map<Node *, std::set<size_t>> users_;

 private:
  // the nodes' users are only maintained by AddInput/SetInput.
  void AddUser(Node *user, size_t index) { users_[user].insert(index); }
  void RemoveUser(Node *user, size_t index) {
    if (auto iter = users_.find(user); iter != users_.end()) {
      iter->second.erase(index);
      if (iter->second.empty()) {
        users_.erase(iter);  // drop the consumer entirely once no index remains
      }
    }
  }
};
// Model node wrapping a constant tensor value; shape/type are taken from the
// tensor itself and the format defaults to kOpFormat_DEFAULT.
class ConstTensorNode : public Node {
 public:
  explicit ConstTensorNode(const tensor::TensorPtr &data, const std::string &name = "")
      : Node({data->shape(), data->data_type(), kOpFormat_DEFAULT}, name), data_(data) {}
  ~ConstTensorNode() = default;

  NType NodeType() override { return NType::Value; }
  void Dump(std::ostringstream &os) const override { os << ToString(); }
  void DumpTensor(std::ostringstream &os) const override { os << ToString(); }
  // Renders the tensor data as a string (non-verbose form).
  std::string ToString() const { return data_->data().ToString(this->type, this->shape, false); }
  const tensor::TensorPtr data() const { return data_; }

 protected:
  tensor::TensorPtr data_;
};
// Model node representing a graph input (parameter); carries only metadata.
class ParamNode : public Node {
 public:
  ParamNode(const std::string &name, const NodeBase &baseinfo) : Node(baseinfo, name) {}
  ~ParamNode() = default;

  void Dump(std::ostringstream &os) const override { DumpTensor(os); }
  NType NodeType() override { return NType::Parameter; }
};
// Sentinel node marking the graph outputs; its inputs are the real outputs.
// The base-info values ({1}, kNumberTypeBegin, default format) are
// placeholders — the node itself carries no tensor data, so Dump is a no-op.
class OutputNode : public Node {
 public:
  OutputNode() : Node({{1}, TypeId::kNumberTypeBegin, kOpFormat_DEFAULT}, "Output") {}
  ~OutputNode() = default;

  // Intentionally empty (previously written as "{ ; }" with a stray
  // empty statement).
  void Dump(std::ostringstream &os) const override {}
  NType NodeType() override { return NType::Output; }
};
} // namespace mindspore::graphkernel::inner
#endif
|
mindspore-ai/mindspore
|
mindspore/lite/tools/converter/anf_transform.h
|
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_ANF_TRANSFORM_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_ANF_TRANSFORM_H
#include <memory>
#include <vector>
#include <set>
#include "backend/optimizer/common/optimizer.h"
#include "schema/inner/model_generated.h"
#include "tools/common/storage.h"
#include "tools/converter/converter_flags.h"
#include "ir/anf.h"
#include "tools/converter/quantizer/quantizer.h"
#include "tools/converter/converter_context.h"
namespace mindspore {
namespace lite {
// Driver for the converter's ANF-graph optimization pipeline: fusion,
// graph-level and conversion passes, constant folding, parallelization and
// (optionally) quantization.
class AnfTransform {
 public:
  AnfTransform();
  virtual ~AnfTransform();
  // Runs the full pass pipeline on `old_graph`; returns the transformed graph
  // (or nullptr on failure — see the .cc implementation).
  FuncGraphPtr Transform(const FuncGraphPtr &old_graph, const converter::Flags *config = nullptr);

 private:
  std::unique_ptr<quant::Quantizer> m_quantizer_ = nullptr;
  FuncGraphPtr TransformFuncGraph(const FuncGraphPtr &old_graph, const converter::Flags *config = nullptr);
  static int RunFusionPass(const FuncGraphPtr &old_graph, const converter::Flags *config);
  static int RunGraphPass(const FuncGraphPtr &old_graph, const converter::Flags *config);
  static int RunConvertPass(const FuncGraphPtr &old_graph, const converter::Flags *config);
  // Parameter name fixed from the typo "olde_graph" (declaration-only name,
  // so no callers or the out-of-line definition are affected).
  static int RunConstFoldPass(const FuncGraphPtr &old_graph, const converter::Flags *config);
  static int RunParallelPass(const FuncGraphPtr &old_graph, const converter::Flags *config);
  int DoQuantize(const FuncGraphPtr &old_graph, const converter::Flags *config);
  // Collects `func_graph` and every sub-graph reachable from it.
  static void GetFuncGraphs(const FuncGraphPtr &func_graph, std::set<FuncGraphPtr> *all_func_graphs);
  int DoSingleGraphQuantize(const FuncGraphPtr &old_graph, const converter::Flags *config);
  static bool StoreBuiltinPass(const converter::Flags *config);
  // Training-mode markers: tag nodes that must not be fused/folded.
  static STATUS MarkTrainInputOp(const FuncGraphPtr &func_graph, const CNodePtr &cnode);
  static STATUS MarkTrainWeightSharingOp(const FuncGraphPtr &func_graph, const CNodePtr &cnode);
  static STATUS MarkTrainOp(const FuncGraphPtr &func_graph);
};
} // namespace lite
} // namespace mindspore
#endif
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.h
|
<filename>mindspore/ccsrc/backend/optimizer/somas/somas_solver_core.h
/**
* Copyright 2020 Huawei Technologies Co., Ltd
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_SOLVER_CORE_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_SOLVER_CORE_H_
#include <algorithm>
#include <chrono>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "backend/optimizer/somas/somas_solver_alg.h"
#include "backend/optimizer/somas/somas_solver_pre.h"
namespace mindspore {
namespace somas {
class SomasSolverCore {
 public:
  /// Interface Function: receive parameters, creates the model to solve and then save the result
  SomasSolverCore(const TensorsDescMap &tensors, const std::vector<DynamicBitSet> *constraints, uint32_t sol,
                  bool isMultiThreadValid = true)
      : best_sol_(0),
        sort_strategy_(kGreaterSizeSmallerIndex),
        branching_strategy_(kBest),
        sol_count_(sol),
        algorithm_(kManyObjects),
        tensors_(tensors),
        constraints_(*constraints),
        upperbound_(SIZE_MAX),
        verify_(false),
        all_(true),
        is_multi_thread_valid_(isMultiThreadValid) {}
  ~SomasSolverCore() = default;

  // Main entry: searches offset assignments for all tensors minimizing total memory.
  Status MemoryAllocationSolver();
  // Checks the current solution for overlap/constraint violations.
  Status Verify();
  bool Verify(const size_t &);
  void VerifySolution(const bool verify) { verify_ = verify; }
  void SortTensors();
  void BuildBlocks();
  void Clean();
  void SetBestSolution() { RestoreSolution(best_sol_); }
  void RestoreSolution(uint32_t sol_id);
  void SetSortingStrategy(SortingType sort_strategy) { sort_strategy_ = sort_strategy; }
  void SetFittingStrategy(FittingType branching_strategy) { branching_strategy_ = branching_strategy; }
  void SetAlgorithmStrategy(AlgorithmType algorithm_strategy) { algorithm_ = algorithm_strategy; }
  void SetAllStrategies(bool all) { all_ = all; }
  const size_t &GetUpperbound() const { return upperbound_; }
  const size_t &Getlifelongmemory() const { return lifelong_memory_; }

  // NOTE(review): these members are public and are read/written by the solver
  // driver; consider accessors. Also, several have in-class initializers
  // ({0}) that are shadowed by the constructor init-list above (e.g.
  // upperbound_ is {0} here but SIZE_MAX in the ctor) — the ctor value wins;
  // the duplication looks unintentional, confirm before relying on defaults.
  uint32_t best_sol_{0};
  SortingType sort_strategy_;
  FittingType branching_strategy_;
  uint32_t sol_count_{0};
  AlgorithmType algorithm_;
  int64_t timing_{0};  // elapsed solve time, presumably ms — confirm in .cc

 private:
  const TensorsDescMap &tensors_;
  vector<BlockTensor> block_tensors_;
  const std::vector<DynamicBitSet> &constraints_;
  size_t upperbound_{0};
  size_t lifelong_memory_{0};
  bool verify_{false};
  bool all_{false};
  bool is_multi_thread_valid_{true};
  // Iterates over strategy combinations, keeping the best solution found.
  size_t FindSolutions();
  size_t Search(const std::shared_ptr<FootPrint> &pFootprint);
  void AppendLifelongTensors();
  void Destroy(std::shared_ptr<FootPrint> &);
};
} // namespace somas
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_SOMAS_SOMAS_SOLVER_CORE_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/frontend/parallel/device_manager.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_DEVICE_MANAGER_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_DEVICE_MANAGER_H_
#include <cstdint>
#include <cstring>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "frontend/parallel/device.h"
#include "frontend/parallel/device_matrix.h"
#include "frontend/parallel/group_manager.h"
#include "frontend/parallel/status.h"
#include "frontend/parallel/strategy.h"
#include "utils/convert_utils.h"
#include "utils/ms_utils.h"
namespace mindspore {
namespace parallel {
#define MAX_DEVICE_NUM 4096
constexpr char HCCL_BACKEND[] = "hccl";
constexpr char NCCL_BACKEND[] = "nccl";
constexpr char UNDEFINED_BACKEND[] = "undefined_backend";
class DeviceManager;
using DeviceManagerPtr = std::shared_ptr<DeviceManager>;
// 'g_device_manager' is the globally unique manager to manage the devices.
extern DeviceManagerPtr g_device_manager;
// This method is used for initializing the global DeviceManager 'g_device_manager',
// arguments including 'device_num' and 'global_rank'
bool InitDevice(int64_t device_num, int64_t global_rank, const std::string &backend, const std::vector<int64_t> &stage);
void CheckGlobalDeviceManager();
std::string HashName(const std::string &rank_list_name);
class DeviceManager {
  // This class is used to manage the abstract devices, including group-related and stage-related management.
 public:
  DeviceManager() { gm_ = GroupManager(); }
  ~DeviceManager() = default;
  // Builds the device and stage tables from the flat rank list; 'stage_map' gives the
  // device count of each pipeline stage and 'backend' selects the collective library.
  Status Init(const RankList &devices, int64_t local_device, const RankList &stage_map, const std::string &backend);
  static DeviceManager &GetInstance();
  // Ranks belonging to the given stage / to the stage containing the current rank.
  RankList GetDeviceListByStageId(int64_t stage_id) const;
  RankList GetDeviceListInThisStage() const;
  Device CreateNewDeviceByRank(int64_t rank) const;
  std::vector<Device> CreateDeviceListByRankList(RankList ranks);
  // Derives a communication-group name from a rank list (tracked in the bimap below).
  std::string GenerateGroupNameByRanks(RankList dev_ranks);
  Group CreateGroup(const std::string &group_name, const std::vector<Device> &devices);
  Group CreateGroup(const RankList &dev_ranks);
  // --- trivial accessors for stage/rank bookkeeping ---
  size_t DeviceNum() const { return devices_.size(); }
  int64_t stage_num() const { return stage_num_; }
  int64_t stage_device_num() const { return stage_device_num_; }
  int64_t stage_id() const { return stage_id_; }
  int64_t rank_index_in_stage() const { return rank_index_in_stage_; }
  int64_t global_rank() const { return global_rank_; }
  std::string backend() const { return backend_; }
  GroupManager group_manager() const { return gm_; }
  void set_group_manager(const GroupManager &gm) { gm_ = gm; }
  void Clear();
  std::string world_group() const { return gm_.world_group(); }
  std::vector<std::pair<std::string, std::vector<uint32_t>>> group_info() const { return gm_.group_info(); }
  // Reverse lookup in the bimap below: hash name -> original rank-list name.
  std::string FindRankListNameByHashName(const std::string &hash_name);

 private:
  std::vector<std::shared_ptr<Device>> devices_;
  // each stage has a list of devices
  std::vector<std::vector<int64_t>> stage_devices_;
  std::shared_ptr<Device> device_;  // NOTE(review): presumably the local device -- confirm against Init
  GroupManager gm_;
  std::string backend_;
  // bimap:
  std::map<std::string, std::string> rank_to_group_;  // the key is rank list, value is hash name
  std::map<std::string, std::string> group_to_rank_;  // the key is hash name, value is rank list
  int64_t global_rank_ = 0;          // the real rank in all devices
  int64_t stage_num_ = 1;            // the stage num
  int64_t stage_id_ = 0;             // the stage id of the global_rank_
  int64_t rank_index_in_stage_ = 0;  // the index of this rank in it's stage
  int64_t stage_device_num_ = 0;     // the device num of one stage
};
} // namespace parallel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_PARALLEL_DEVICE_MANAGER_H_
|
mindspore-ai/mindspore
|
mindspore/lite/tools/converter/adapter/acl/common/acl_types.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TOOLS_CONVERTER_ADAPTER_ACL_COMMON_ACL_TYPES_H
#define TOOLS_CONVERTER_ADAPTER_ACL_COMMON_ACL_TYPES_H
#include <string>
#include <vector>
#include <map>
#include "include/api/data_type.h"
namespace mindspore {
namespace lite {
namespace acl {
// Options for converting a model through ACL (Ascend Computing Language).
// NOTE(review): field semantics appear to mirror the ATC converter options of
// the same names -- confirm against the ACL/ATC documentation.
struct AclModelOptionCfg {
  int32_t device_id;
  DataType output_type;                       // data type requested for model outputs
  std::vector<size_t> dynamic_batch_size;     // candidate batch sizes for dynamic-batch models
  std::map<int32_t, std::vector<int32_t>> input_shape_map;  // input index -> explicit shape override
  std::string input_format;
  std::string input_shape;
  std::string precision_mode;
  std::string op_select_impl_mode;
  std::string fusion_switch_config_file_path;
  std::string buffer_optimize;
  std::string insert_op_config_file_path;
};
constexpr auto kOutputShapes = "outputs_shape";
} // namespace acl
} // namespace lite
} // namespace mindspore
#endif // TOOLS_CONVERTER_ADAPTER_ACL_COMMON_ACL_TYPES_H
|
mindspore-ai/mindspore
|
mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h
|
<filename>mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_OPENCL_KERNEL_H_
#define MINDSPORE_LITE_SRC_OPENCL_KERNEL_H_
#define MAX_PROFILING_TIME_MILLI_SECOND 10 * 1000 // 10 seconds
#include <vector>
#include <set>
#include <map>
#include <string>
#include <cfloat>
#include "src/inner_kernel.h"
#include "include/errorcode.h"
#include "src/runtime/gpu/opencl/opencl_runtime.h"
#include "mindspore/lite/src/weight_decoder.h"
#include "src/runtime/kernel/opencl/utils.h"
#include "nnacl/resize_parameter.h"
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
namespace mindspore::kernel {
constexpr int INPUT_TENSOR_SIZE_1 = 1;
constexpr int INPUT_TENSOR_SIZE_2 = 2;
constexpr int INPUT_TENSOR_SIZE_3 = 3;
constexpr int INPUT_TENSOR_SIZE_4 = 4;
constexpr int INPUT_TENSOR_SIZE_5 = 5;
constexpr int INPUT_TENSOR_SIZE_6 = 6;
constexpr int OUTPUT_TENSOR_SIZE_1 = 1;
constexpr int OUTPUT_TENSOR_SIZE_2 = 2;
constexpr int OUTPUT_TENSOR_SIZE_3 = 3;
constexpr int OUTPUT_TENSOR_SIZE_4 = 4;
struct OpenCLToFormatParameter {
OpParameter op_parameter{};
lite::opencl::MemType out_mem_type{lite::opencl::MemType::IMG};
};
// Broadcasts a 1-4 element shape into the fixed NHWC layout expected by the
// GPU kernels: dst[0]=N, dst[1]=H, dst[2]=W, dst[3]=C. Slots not covered by
// 'src' are left untouched. More than 4 dims is unsupported and only logged.
template <typename SrcT, typename DstT>
void Broadcast2GpuShape(DstT *dst, const SrcT *src, int src_num) {
  MS_ASSERT(dst);
  if (src == nullptr || src_num <= 0) {
    return;
  }
  if (src_num == 1) {  // 1 1 1 C
    dst[3] = src[0];
  } else if (src_num == 2) {  // N 1 1 C
    dst[0] = src[0];
    dst[3] = src[1];
  } else if (src_num == 3) {  // N 1 W C
    dst[0] = src[0];
    dst[2] = src[1];
    dst[3] = src[2];
  } else if (src_num == 4) {  // N H W C
    dst[0] = src[0];
    dst[1] = src[1];
    dst[2] = src[2];
    dst[3] = src[3];
  } else {  // src_num > 4
    MS_LOG(ERROR) << "GPU doesn't support ndim>=" << src_num;
  }
}
// Same as the 3-argument overload, but first pre-fills all four NHWC slots
// with 'default_value' so uncovered dims get a defined value.
template <typename SrcT, typename DstT>
void Broadcast2GpuShape(DstT *dst, const SrcT *src, int src_num, DstT default_value) {
  MS_ASSERT(dst);
  constexpr int kNHWCDims = 4;
  for (int idx = 0; idx < kNHWCDims; ++idx) {
    dst[idx] = default_value;
  }
  if (src == nullptr || src_num <= 0) {
    return;
  }
  Broadcast2GpuShape(dst, src, src_num);
}
// Derived layout information for mapping a lite::Tensor onto an OpenCL 2D image.
// N/H/W/C are the NHWC dims after broadcasting (missing dims default to 1);
// Slice = ceil(C / 4) because channels are packed into 4-wide FLT4 texels.
struct GpuTensorInfo {
  GpuTensorInfo() = default;
  explicit GpuTensorInfo(const lite::Tensor *tensor) {
    auto ocl_runtime_wrap_ = lite::opencl::OpenCLRuntimeInnerWrapper();
    if (tensor == nullptr) {
      return;
    }
    auto shape_ori = tensor->shape();
    NDim = shape_ori.size();
    // Broadcast the (possibly <4D) shape into fixed NHWC slots, defaulting to 1.
    cl_int4 shape;
    Broadcast2GpuShape(shape.s, shape_ori.data(), shape_ori.size(), 1);
    N = shape.s[0];
    H = shape.s[1];
    W = shape.s[2];
    C = shape.s[3];
    MS_ASSERT(N > 0);
    MS_ASSERT(H > 0);
    MS_ASSERT(W > 0);
    MS_ASSERT(C > 0);
    Slice = UP_DIV(C, C4NUM);
    // Element size follows the tensor dtype: half for fp16, float otherwise.
    FLT_size = tensor->data_type() == kNumberTypeFloat16 ? sizeof(cl_half) : sizeof(cl_float);
    FLT4_size = FLT_size * 4;
    // Preferred layout is (N*H) x (W*Slice); if that exceeds the device's max
    // image width, fall back to (N*H*W) x Slice; if even that exceeds the max
    // height, mark the image size invalid with -1 (see IsImageSizeValid).
    if (W * Slice <= ocl_runtime_wrap_.GetInstance()->GetMaxImage2DWidth()) {
      height = N * H;
      width = W * Slice;
    } else {
      height = N * H * W;
      width = Slice;
      if (height > ocl_runtime_wrap_.GetInstance()->GetMaxImage2DHeight()) {
        height = -1;
        width = -1;
      }
    }
    ElementsNum = N * H * W * C;
    ElementsC4Num = N * H * W * Slice * C4NUM;  // element count including slice padding
    OriginSize = ElementsNum * FLT_size;
    Image2DSize = height * width * FLT4_size;
  }
  // Bytes per image row, rounded up to the device's pitch alignment.
  size_t RowPitch() const {
    auto runtime_wrapper = lite::opencl::OpenCLRuntimeInnerWrapper();
    int alignment = runtime_wrapper.GetInstance()->GetImagePitchAlignment();
    MS_ASSERT(alignment);
    size_t row_pitch = UP_ROUND(width, alignment) * FLT4_size;
    return row_pitch;
  }
  // Maps an axis of the original NDim-rank tensor to its slot in the padded
  // 4D NHWC layout (scalars/1D map to the channel slot, index 3).
  int AlignAxis(int oriAxis) const {
    if (NDim == 0 || NDim == 1) {
      return 3;
    }
    int no_neg_axis = static_cast<int>((oriAxis + NDim) % NDim);  // normalize negative axis
    if (no_neg_axis == 0) {
      return 0;
    }
    return static_cast<int>(no_neg_axis + 4 - NDim);
  }
  bool IsImageSizeValid() { return width > 0 && height > 0; }
  size_t N{1};
  size_t H{1};
  size_t W{1};
  size_t C{1};
  size_t Slice{};       // ceil(C / 4)
  size_t width{};       // image width in FLT4 texels
  size_t height{};      // image height in rows
  size_t FLT_size{4};   // bytes per scalar element
  size_t FLT4_size{16}; // bytes per 4-channel texel
  size_t ElementsNum{};
  size_t ElementsC4Num{};
  size_t OriginSize{};   // bytes of the unpadded tensor
  size_t Image2DSize{};  // bytes of the padded image representation
  size_t NDim{};
};
// A candidate work-group configuration tried during kernel auto-tuning.
struct BaseTuningParameter {
  std::vector<size_t> local_size;
  // Streams as "LocalSize:" followed by each dimension with a trailing comma.
  friend std::ostream &operator<<(std::ostream &ostrm, const BaseTuningParameter &a) {
    ostrm << "LocalSize:";
    for (size_t idx = 0; idx < a.local_size.size(); ++idx) {
      ostrm << a.local_size[idx] << ",";
    }
    return ostrm;
  }
};
// Base class for all OpenCL-backed lite kernels. Owns the compiled cl::Kernel,
// its global/local NDRange configuration, and the auto-tuning machinery
// (GenerateTuningParam / Tune with a process-wide result cache).
class OpenCLKernel : public InnerKernel {
 public:
  OpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
               const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
      : InnerKernel(parameter, inputs, outputs, ctx) {
    ocl_runtime_ = ocl_runtime_wrap_.GetInstance();
  }
  ~OpenCLKernel() override = default;
  // Rounds 'global' up against 'local' and stores both as cl::NDRange.
  void AlignGlobalLocal(const std::vector<size_t> &global, const std::vector<size_t> &local);
  int Prepare() override { return RET_OK; }
  int PreProcess() override;
  int ReSize() override;
  int Run() override { return RET_ERROR; }  // subclasses must override; base always fails
  virtual int CheckSpecs();
  virtual int InitWeights() { return RET_OK; }
  virtual int SetConstArgs() { return RET_OK; }
  virtual void SetGlobalLocal() {}
  virtual int GetGlobalSize(size_t idx, std::vector<size_t> *global_size) { return RET_ERROR; }
  virtual int GetLocalSize(size_t idx, const std::vector<size_t> &global_size, std::vector<size_t> *local_size) {
    return RET_ERROR;
  }
  virtual std::vector<BaseTuningParameter> GenerateTuningParam();
  virtual int AssignTuningParam(const BaseTuningParameter &param);
  virtual int Tune();
  virtual int StoreConstData() { return RET_OK; }
  int GetImageSize(size_t idx, lite::opencl::ImageSize *img_size);
  void PrintOutput(int print_num = 10, const std::string &out_file = "");
  lite::opencl::MemType GetMemType() { return out_mem_type_; }
  void SetMemType(lite::opencl::MemType mem_type) { out_mem_type_ = mem_type; }
  OpParameter *GetParameter() { return op_parameter_; }
  virtual double GetProfilingTimeMs();
  virtual int InferShape();

 protected:
  static std::set<size_t> GenerateLocalByGlobal(size_t global_i);
  // Cache key for tuned_param_cache_: op type plus the global size dimensions.
  virtual std::string Key() {
    std::string key = schema::EnumNamePrimitiveType(type());
    key += "_global";
    for (auto i : global_size_) {
      key += "_" + std::to_string(i);
    }
    return key;
  }

 protected:
  lite::opencl::OpenCLRuntime *ocl_runtime_;  // non-owning; obtained from ocl_runtime_wrap_
  lite::opencl::MemType out_mem_type_{lite::opencl::MemType::IMG};
  cl::NDRange global_range_{cl::NullRange};
  cl::NDRange local_range_{cl::NullRange};
  std::vector<size_t> global_size_;
  std::vector<size_t> local_size_;
  cl::Kernel kernel_;
  cl::Event event_;
  void *restore_quant_data_{nullptr};  // original weight data kept while dequantized copy is in use
  bool dequant_flag_{false};

 private:
  lite::opencl::OpenCLRuntimeInnerWrapper ocl_runtime_wrap_;
  // Shared across all OpenCL kernels in the process: Key() -> best tuning result.
  static inline std::map<std::string, BaseTuningParameter> tuned_param_cache_;
};
// Factory for OpenCL kernels of concrete type T. Verifies the output shape and
// the kernel's specs before handing ownership of the new kernel to the caller.
// Returns nullptr on any failure (opParameter is freed only when the kernel
// object could not be constructed; otherwise the kernel owns it).
template <class T>
kernel::InnerKernel *OpenCLKernelCreator(const std::vector<lite::Tensor *> &inputs,
                                         const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                         const lite::Context *ctx, const kernel::KernelKey &desc) {
  auto *kernel = new (std::nothrow)
    T(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs, static_cast<const lite::InnerContext *>(ctx));
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "kernel " << opParameter->name_ << "is nullptr.";
    free(opParameter);
    return nullptr;
  }
  auto shape = outputs.front()->shape();
  // -1 means the shape is not inferred yet: defer spec checks and return the kernel as-is.
  if (std::find(shape.begin(), shape.end(), -1) != shape.end()) {
    MS_LOG(WARNING) << "kernel " << opParameter->name_ << "don't infer shape yet!";
    return kernel;
  }
  if (std::find(shape.begin(), shape.end(), 0) != shape.end()) {
    MS_LOG(WARNING) << "kernel " << opParameter->name_ << "don't support output shape has zero.";
    // Fix: the kernel was leaked on this path; every other failure path deletes it.
    delete kernel;
    return nullptr;
  }
  // Check the concrete kernel's specs, then the base-class checks it may have overridden.
  auto ret = kernel->CheckSpecs();
  if (ret != mindspore::lite::RET_OK) {
    MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification failed!";
    delete kernel;
    return nullptr;
  }
  ret = kernel->OpenCLKernel::CheckSpecs();
  if (ret != mindspore::lite::RET_OK) {
    MS_LOG(ERROR) << "Check " << opParameter->name_ << " specification failed!";
    delete kernel;
    return nullptr;
  }
  ret = reinterpret_cast<OpenCLKernel *>(kernel)->StoreConstData();
  if (ret != mindspore::lite::RET_OK) {
    MS_LOG(ERROR) << "Store " << opParameter->name_ << " const data failed!";
    delete kernel;
    return nullptr;
  }
  return kernel;
}
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_OPENCL_KERNEL_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/kernel_compiler/cpu/rank_cpu_kernel.h
|
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RANK_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RANK_CPU_KERNEL_H_
#include <vector>
#include <string>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
#include "backend/kernel_compiler/cpu/nnacl/op_base.h"
namespace mindspore {
namespace kernel {
namespace rank {
// Tie-breaking strategy for equal values when ranking.
// NOTE(review): names suggest the pandas Series.rank 'method' semantics
// (average/max/min/first/dense) -- confirm against the kernel implementation.
enum Method : int {
  Average,
  Max,
  Min,
  First,
  Dense,
  MethodNotDefined,
};
// Placement of NaN values in the ranking result.
// NOTE(review): presumably mirrors pandas' na_option (keep/top/bottom) -- confirm.
enum NaOption : int {
  Keep,
  Top,
  Bottom,
  OptionNotDefined,
};
} // namespace rank
// CPU kernel computing ranks of values along one axis of the input tensor;
// output is always float32. Behavior is configured by rank::Method (tie
// breaking) and rank::NaOption (NaN placement).
template <typename T>
class RankCpuKernel : public CPUKernel {
 public:
  RankCpuKernel() = default;
  ~RankCpuKernel() override = default;
  void InitKernel(const CNodePtr &kernel_node) override;
  void InitInputOutputSize(const CNodePtr &kernel_node) override;
  // Selects func_ according to the configured method/option/order.
  void SetFunc();
  // 1-D launch variants; the float variant additionally carries an is_nan mask
  // (integral inputs presumably cannot contain NaNs -- confirm in the .cc).
  void Launch1DInt(const T *input_addr, size_t *sort_idx, T *values, const AxisIterator &iter,
                   float *output_addr) const;
  void Launch1DFloat(const T *input_addr, size_t *sort_idx, T *values, bool *is_nan, const AxisIterator &iter,
                     float *output_addr) const;
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs) override;

 private:
  // shape info
  AxisIterator axisIterator_{};
  // parameters
  size_t axis_{0};
  rank::Method method_{rank::MethodNotDefined};
  // Rank-assignment callback chosen by SetFunc().
  std::function<void(size_t, int, int, const AxisIterator &, const size_t *const, float *const)> func_;
  rank::NaOption option_{rank::OptionNotDefined};
  bool ascending_{true};
  bool pct_{false};  // NOTE(review): presumably percentage ranks like pandas' pct -- confirm
};
MS_REG_CPU_KERNEL_T(Rank, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
RankCpuKernel, float)
MS_REG_CPU_KERNEL_T(Rank, KernelAttr().AddInputAttr(kNumberTypeFloat64).AddOutputAttr(kNumberTypeFloat32),
RankCpuKernel, double)
MS_REG_CPU_KERNEL_T(Rank, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat32), RankCpuKernel,
int32_t)
MS_REG_CPU_KERNEL_T(Rank, KernelAttr().AddInputAttr(kNumberTypeInt64).AddOutputAttr(kNumberTypeFloat32), RankCpuKernel,
int64_t)
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RANK_CPU_KERNEL_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/kernel_compiler/cpu/rl/buffer_sample_cpu_kernel.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_BUFFER_SAMPLE_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_BUFFER_SAMPLE_CPU_KERNEL_H_
#include <stdlib.h>
#include <memory>
#include <string>
#include <vector>
#include <algorithm>
#include <random>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
// Samples a batch of experiences from a host-memory replay buffer.
// Inputs: one flat buffer per element kind, followed by two int scalars
// 'count' (valid experiences) and 'head'. Outputs: one sampled batch per
// element kind, batch_size_ experiences each.
class BufferCPUSampleKernel : public CPUKernel {
 public:
  BufferCPUSampleKernel() : element_nums_(0), capacity_(0), batch_size_(0), exp_size_(0), seed_(0), unique_(false) {}
  ~BufferCPUSampleKernel() override = default;

  // Reads buffer layout and sampling attributes from the node and fills the
  // input/output size lists.
  void Init(const CNodePtr &kernel_node) {
    auto shapes = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, "buffer_elements");
    auto types = AnfAlgo::GetNodeAttr<std::vector<TypePtr>>(kernel_node, "buffer_dtype");
    capacity_ = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, "capacity");
    seed_ = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, "seed");
    unique_ = AnfAlgo::GetNodeAttr<bool>(kernel_node, "unique");
    batch_size_ = LongToSize(AnfAlgo::GetNodeAttr<int64_t>(kernel_node, "batch_size"));
    element_nums_ = shapes.size();
    for (size_t i = 0; i < element_nums_; i++) {
      exp_element_list.push_back(shapes[i] * UnitSizeInBytes(types[i]->type_id()));
    }
    // init seed for the shuffle engine and uniform distribution; 0 means "seed from clock"
    if (seed_ == 0) {
      std::srand(time(nullptr));
      generator_.seed(time(nullptr));
    } else {
      std::srand(seed_);
      generator_.seed(seed_);
    }
    // buffer size
    for (auto i : exp_element_list) {
      input_size_list_.push_back(i * capacity_);
      output_size_list_.push_back(i * batch_size_);
      exp_size_ += i;
    }
    // count and head
    input_size_list_.push_back(sizeof(int));
    input_size_list_.push_back(sizeof(int));
  }

  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs) {
    auto count_addr = GetDeviceAddress<int>(inputs, element_nums_);
    auto head_addr = GetDeviceAddress<int>(inputs, element_nums_ + 1);
    if ((head_addr[0] > 0 && SizeToLong(batch_size_) > capacity_) ||
        (head_addr[0] == 0 && SizeToLong(batch_size_) > count_addr[0])) {
      MS_LOG(ERROR) << "The batch size " << batch_size_ << " is larger than total buffer size "
                    << std::min(capacity_, IntToLong(count_addr[0]));
      // Fix: previously fell through and copied with out-of-range indexes; abort instead.
      return false;
    }
    // Generate random indexes
    // If unique_ == true, shuffle [0, count) so every sampled index is distinct.
    // If unique_ == false, draw indexes from a uniform distribution; repeats are allowed.
    // Case unique_ == false has a better performance than case unique_ == true.
    std::vector<size_t> indexes;
    if (unique_) {
      for (size_t i = 0; i < IntToSize(count_addr[0]); ++i) {
        (void)indexes.emplace_back(i);
      }
      // Fix: std::random_shuffle is deprecated (C++14) / removed (C++17) and its
      // rand()%i generator is biased; use the already-seeded mt19937 engine.
      std::shuffle(indexes.begin(), indexes.end(), generator_);
    } else {
      // Fix: uniform_int_distribution bounds are inclusive, so the upper bound
      // must be count - 1; a bound of count could produce an index one past the
      // last valid experience.
      std::uniform_int_distribution<> distrib(0, count_addr[0] - 1);
      for (size_t i = 0; i < batch_size_; ++i) {
        (void)indexes.emplace_back(distrib(generator_));
      }
    }
    // Copy every element kind of each sampled experience into the outputs in parallel.
    auto task = [&](size_t start, size_t end) {
      for (size_t j = start; j < end; j++) {
        size_t index = indexes[j];
        for (size_t i = 0; i < element_nums_; i++) {
          auto buffer_addr = GetDeviceAddress<unsigned char>(inputs, i);
          auto output_addr = GetDeviceAddress<unsigned char>(outputs, i);
          auto one_exp_len = exp_element_list[i];
          size_t dist_len = one_exp_len;
          if (memcpy_s(output_addr + j * one_exp_len, one_exp_len, buffer_addr + index * one_exp_len, dist_len) !=
              EOK) {
            MS_LOG(EXCEPTION) << "Launch kernel error: memcpy failed";
          }
        }
      }
    };
    CPUKernelUtils::ParallelFor(task, batch_size_);
    return true;
  }

  void InitKernel(const CNodePtr &kernel_node) { return; }

 protected:
  void InitSizeLists() { return; }

 private:
  size_t element_nums_;  // number of element kinds per experience
  int64_t capacity_;     // maximum experiences the buffer can hold
  size_t batch_size_;    // experiences sampled per launch
  int64_t exp_size_;     // total bytes of one experience (sum over element kinds)
  int64_t seed_;         // 0 => seed from time(nullptr)
  bool unique_;          // whether sampled indexes must be distinct
  std::mt19937 generator_;
  std::vector<size_t> exp_element_list;  // per-element-kind byte length
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_BUFFER_SAMPLE_CPU_KERNEL_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/transform/graph_ir/op_declare/elewise_calculation_ops_declare.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ELEWISE_CALCULATION_OPS_DECLARE_H_
#define MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ELEWISE_CALCULATION_OPS_DECLARE_H_
#include <string>
#include <unordered_map>
#include "transform/graph_ir/op_declare/op_declare_macro.h"
#include "ops/elewise_calculation_ops.h"
namespace mindspore::transform {
DECLARE_OP_ADAPTER(AccumulateNV2)
DECLARE_OP_USE_DYN_INPUT(AccumulateNV2)
DECLARE_OP_USE_OUTPUT(AccumulateNV2)
DECLARE_OP_ADAPTER(ConfusionMulGrad)
DECLARE_OP_USE_OUTPUT(ConfusionMulGrad)
DECLARE_OP_ADAPTER(FakeQuantWithMinMaxVars)
DECLARE_OP_USE_OUTPUT(FakeQuantWithMinMaxVars)
DECLARE_OP_ADAPTER(FakeQuantWithMinMaxVarsGradient)
DECLARE_OP_USE_OUTPUT(FakeQuantWithMinMaxVarsGradient)
DECLARE_OP_ADAPTER(FakeQuantWithMinMaxVarsPerChannel)
DECLARE_OP_USE_OUTPUT(FakeQuantWithMinMaxVarsPerChannel)
DECLARE_OP_ADAPTER(FakeQuantWithMinMaxVarsPerChannelGradient)
DECLARE_OP_USE_OUTPUT(FakeQuantWithMinMaxVarsPerChannelGradient)
DECLARE_OP_ADAPTER(GreaterEqual)
DECLARE_OP_USE_OUTPUT(GreaterEqual)
DECLARE_OP_ADAPTER(AssignAdd)
DECLARE_OP_USE_OUTPUT(AssignAdd)
DECLARE_OP_ADAPTER(AssignSub)
DECLARE_OP_USE_OUTPUT(AssignSub)
DECLARE_OP_ADAPTER(ZerosLike)
DECLARE_OP_USE_OUTPUT(ZerosLike)
DECLARE_OP_ADAPTER(OnesLike)
DECLARE_OP_USE_OUTPUT(OnesLike)
DECLARE_OP_ADAPTER(ArgMaxD)
DECLARE_OP_USE_OUTPUT(ArgMaxD)
DECLARE_OP_ADAPTER(ArgMinD)
DECLARE_OP_USE_OUTPUT(ArgMinD)
DECLARE_OP_ADAPTER(ArgMaxWithValue)
DECLARE_OP_USE_OUTPUT(ArgMaxWithValue)
DECLARE_OP_ADAPTER(ArgMinWithValue)
DECLARE_OP_USE_OUTPUT(ArgMinWithValue)
DECLARE_OP_ADAPTER(Mul)
DECLARE_OP_USE_OUTPUT(Mul)
DECLARE_OP_ADAPTER(MulNoNan)
DECLARE_OP_USE_OUTPUT(MulNoNan)
DECLARE_OP_ADAPTER(AddN)
DECLARE_OP_USE_DYN_INPUT(AddN)
DECLARE_OP_USE_OUTPUT(AddN)
DECLARE_OP_ADAPTER(Less)
DECLARE_OP_USE_OUTPUT(Less)
DECLARE_OP_ADAPTER(Rsqrt)
DECLARE_OP_USE_OUTPUT(Rsqrt)
DECLARE_OP_ADAPTER(Sqrt)
DECLARE_OP_USE_OUTPUT(Sqrt)
DECLARE_OP_ADAPTER(Square)
DECLARE_OP_USE_OUTPUT(Square)
DECLARE_OP_ADAPTER(SquaredDifference)
DECLARE_OP_USE_OUTPUT(SquaredDifference)
DECLARE_OP_ADAPTER(SquareSumAll)
DECLARE_OP_USE_OUTPUT(SquareSumAll)
DECLARE_OP_ADAPTER(Maximum)
DECLARE_OP_USE_OUTPUT(Maximum)
DECLARE_OP_ADAPTER(Minimum)
DECLARE_OP_USE_OUTPUT(Minimum)
DECLARE_OP_ADAPTER(MaximumGrad)
DECLARE_OP_USE_OUTPUT(MaximumGrad)
DECLARE_OP_ADAPTER(MinimumGrad)
DECLARE_OP_USE_OUTPUT(MinimumGrad)
DECLARE_OP_ADAPTER(RealDiv)
DECLARE_OP_USE_OUTPUT(RealDiv)
DECLARE_OP_ADAPTER(BitwiseAnd)
DECLARE_OP_USE_OUTPUT(BitwiseAnd)
DECLARE_OP_ADAPTER(BitwiseOr)
DECLARE_OP_USE_OUTPUT(BitwiseOr)
DECLARE_OP_ADAPTER(BitwiseXor)
DECLARE_OP_USE_OUTPUT(BitwiseXor)
DECLARE_OP_ADAPTER(Rint)
DECLARE_OP_USE_OUTPUT(Rint)
DECLARE_OP_ADAPTER(BesselI0e)
DECLARE_OP_USE_OUTPUT(BesselI0e)
DECLARE_OP_ADAPTER(BesselI1e)
DECLARE_OP_USE_OUTPUT(BesselI1e)
DECLARE_OP_ADAPTER(Inv)
DECLARE_OP_USE_OUTPUT(Inv)
DECLARE_OP_ADAPTER(InvGrad)
DECLARE_OP_USE_OUTPUT(InvGrad)
DECLARE_OP_ADAPTER(Invert)
DECLARE_OP_USE_OUTPUT(Invert)
DECLARE_OP_ADAPTER(Log1p)
DECLARE_OP_USE_OUTPUT(Log1p)
DECLARE_OP_ADAPTER(Ceil)
DECLARE_OP_USE_OUTPUT(Ceil)
DECLARE_OP_ADAPTER(CosineEmbeddingLoss)
DECLARE_OP_USE_OUTPUT(CosineEmbeddingLoss)
DECLARE_OP_ADAPTER(Xdivy)
DECLARE_OP_USE_OUTPUT(Xdivy)
DECLARE_OP_ADAPTER(Mod)
DECLARE_OP_USE_OUTPUT(Mod)
DECLARE_OP_ADAPTER(Cast)
DECLARE_OP_USE_INPUT_ATTR(Cast)
DECLARE_OP_USE_OUTPUT(Cast)
DECLARE_OP_ADAPTER(Reciprocal)
DECLARE_OP_USE_OUTPUT(Reciprocal)
DECLARE_OP_ADAPTER(Neg)
DECLARE_OP_USE_OUTPUT(Neg)
DECLARE_OP_ADAPTER(Sub)
DECLARE_OP_USE_OUTPUT(Sub)
DECLARE_OP_ADAPTER(Pow)
DECLARE_OP_USE_OUTPUT(Pow)
DECLARE_OP_ADAPTER(PopulationCount)
DECLARE_OP_USE_OUTPUT(PopulationCount)
DECLARE_OP_ADAPTER(Equal)
DECLARE_OP_USE_OUTPUT(Equal)
DECLARE_OP_ADAPTER(ApproximateEqual)
DECLARE_OP_USE_OUTPUT(ApproximateEqual)
DECLARE_OP_ADAPTER(NotEqual)
DECLARE_OP_USE_OUTPUT(NotEqual)
DECLARE_OP_ADAPTER(Log)
DECLARE_OP_USE_OUTPUT(Log)
DECLARE_OP_ADAPTER(LogicalAnd)
DECLARE_OP_USE_OUTPUT(LogicalAnd)
DECLARE_OP_ADAPTER(LogicalOr)
DECLARE_OP_USE_OUTPUT(LogicalOr)
DECLARE_OP_ADAPTER(LogicalNot)
DECLARE_OP_USE_OUTPUT(LogicalNot)
DECLARE_OP_ADAPTER(LessEqual)
DECLARE_OP_USE_OUTPUT(LessEqual)
DECLARE_OP_ADAPTER(Assign)
DECLARE_OP_USE_OUTPUT(Assign)
DECLARE_OP_ADAPTER(Add)
DECLARE_OP_USE_OUTPUT(Add)
DECLARE_OP_ADAPTER(Cos)
DECLARE_OP_USE_OUTPUT(Cos)
DECLARE_OP_ADAPTER(Cosh)
DECLARE_OP_USE_OUTPUT(Cosh)
DECLARE_OP_ADAPTER(Acos)
DECLARE_OP_USE_OUTPUT(Acos)
DECLARE_OP_ADAPTER(AcosGrad)
DECLARE_OP_USE_OUTPUT(AcosGrad)
DECLARE_OP_ADAPTER(Acosh)
DECLARE_OP_USE_OUTPUT(Acosh)
DECLARE_OP_ADAPTER(AcoshGrad)
DECLARE_OP_USE_OUTPUT(AcoshGrad)
DECLARE_OP_ADAPTER(Div)
DECLARE_OP_USE_OUTPUT(Div)
DECLARE_OP_ADAPTER(TruncateDiv)
DECLARE_OP_USE_OUTPUT(TruncateDiv)
DECLARE_OP_ADAPTER(TruncateMod)
DECLARE_OP_USE_OUTPUT(TruncateMod)
DECLARE_OP_ADAPTER(Xlogy)
DECLARE_OP_USE_OUTPUT(Xlogy)
DECLARE_OP_ADAPTER(DivNoNan)
DECLARE_OP_USE_OUTPUT(DivNoNan)
DECLARE_OP_ADAPTER(Floor)
DECLARE_OP_USE_OUTPUT(Floor)
DECLARE_OP_ADAPTER(FloorDiv)
DECLARE_OP_USE_OUTPUT(FloorDiv)
DECLARE_OP_ADAPTER(FloorMod)
DECLARE_OP_USE_OUTPUT(FloorMod)
DECLARE_OP_ADAPTER(Sin)
DECLARE_OP_USE_OUTPUT(Sin)
DECLARE_OP_ADAPTER(Sinh)
DECLARE_OP_USE_OUTPUT(Sinh)
DECLARE_OP_ADAPTER(Asin)
DECLARE_OP_USE_OUTPUT(Asin)
DECLARE_OP_ADAPTER(AsinGrad)
DECLARE_OP_USE_OUTPUT(AsinGrad)
DECLARE_OP_ADAPTER(Asinh)
DECLARE_OP_USE_OUTPUT(Asinh)
DECLARE_OP_ADAPTER(AsinhGrad)
DECLARE_OP_USE_OUTPUT(AsinhGrad)
DECLARE_OP_ADAPTER(Exp)
DECLARE_OP_USE_OUTPUT(Exp)
DECLARE_OP_ADAPTER(Expm1)
DECLARE_OP_USE_OUTPUT(Expm1)
DECLARE_OP_ADAPTER(BiasAdd)
DECLARE_OP_USE_OUTPUT(BiasAdd)
DECLARE_OP_ADAPTER(Greater)
DECLARE_OP_USE_OUTPUT(Greater)
DECLARE_OP_ADAPTER(SqrtGrad)
DECLARE_OP_USE_OUTPUT(SqrtGrad)
DECLARE_OP_ADAPTER(ReciprocalGrad)
DECLARE_OP_USE_OUTPUT(ReciprocalGrad)
DECLARE_OP_ADAPTER(RsqrtGrad)
DECLARE_OP_USE_OUTPUT(RsqrtGrad)
DECLARE_OP_ADAPTER(Abs)
DECLARE_OP_USE_OUTPUT(Abs)
DECLARE_OP_ADAPTER(AbsGrad)
DECLARE_OP_USE_OUTPUT(AbsGrad)
DECLARE_OP_ADAPTER(Sign)
DECLARE_OP_USE_OUTPUT(Sign)
DECLARE_OP_ADAPTER(Round)
DECLARE_OP_USE_OUTPUT(Round)
DECLARE_OP_ADAPTER(Tan)
DECLARE_OP_USE_OUTPUT(Tan)
DECLARE_OP_ADAPTER(Atan)
DECLARE_OP_USE_OUTPUT(Atan)
DECLARE_OP_ADAPTER(AtanGrad)
DECLARE_OP_USE_OUTPUT(AtanGrad)
DECLARE_OP_ADAPTER(Atanh)
DECLARE_OP_USE_OUTPUT(Atanh)
DECLARE_OP_ADAPTER(Atan2)
DECLARE_OP_USE_OUTPUT(Atan2)
DECLARE_OP_ADAPTER(LambApplyOptimizerAssign)
DECLARE_OP_USE_OUTPUT(LambApplyOptimizerAssign)
DECLARE_OP_ADAPTER(LambApplyWeightAssign)
DECLARE_OP_USE_OUTPUT(LambApplyWeightAssign)
DECLARE_OP_ADAPTER(Eltwise)
DECLARE_OP_USE_OUTPUT(Eltwise)
} // namespace mindspore::transform
#endif // MINDSPORE_CCSRC_TRANSFORM_GRAPH_IR_OP_DECLARE_ELEWISE_CALCULATION_OPS_DECLARE_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/vm/transform.h
|
<filename>mindspore/ccsrc/vm/transform.h<gh_stars>1000+
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_VM_TRANSFORM_H_
#define MINDSPORE_CCSRC_VM_TRANSFORM_H_
#include <string>
#include <memory>
#include <functional>
#include <utility>
#include <unordered_map>
#include <vector>
#include "vm/vm.h"
#include "ir/anf.h"
#include "frontend/operator/ops.h"
#include "vm/segment_runner.h"
#include "vm/backend.h"
#include "vm/graph_partition.h"
// mindspore namespace is the top level namespace of MindSpore project.
// Other namespace should be a sub namespace of mindspore namespace in the ME project.
namespace mindspore {
extern const char kMsVm[];
extern const char kGeVm[];
// compile namespace
// A sub namespace in ME to support compile related definition.
namespace compile {
extern std::vector<PrimitivePtr> nonlinear_ops;
extern std::vector<PrimitivePtr> control_ops;
const std::vector<PrimitivePtr> &GetMsNonlinearOps();
FuncGraphPtr WrapPrimitives(const FuncGraphPtr &graph);
using VmEvalFunc = std::function<BaseRef(const VectorRef &)>;
using VmEvalFuncPtr = std::shared_ptr<std::function<BaseRef(const VectorRef &)>>;
// Compiles a FuncGraph into a linear instruction list (InstSet) for the final
// VM. Nodes matching 'cut_list' are cut out into segments converted through
// the backend's LinkFuncType; 'slots_' tracks each node's evaluation-stack slot.
class CompileGraph {
 public:
  explicit CompileGraph(const BackendPtr &backend, const std::vector<PrimitivePtr> &cut_list = nonlinear_ops);
  virtual ~CompileGraph() = default;
  InstSet Run(const FuncGraphPtr &func_graph);
  bool IsCut(const AnfNodePtr &node);
  void Push(const AnfNodePtr &node);
  // Alias n2 to n1's stack slot.
  void Tie(const AnfNodePtr &n1, const AnfNodePtr &n2) { slots_[n2] = slots_[n1]; }
  void Ret(int64_t nargs);
  virtual int64_t Ref(const AnfNodePtr &node);
  // Track the current stack height and its high-water mark
  // (NOTE(review): max_height_ presumably sizes the VM frame -- confirm in .cc).
  void set_height(int64_t h) {
    height_ = h;
    if (height_ > max_height_) {
      max_height_ = height_;
    }
  }
  // Clear all per-compilation state.
  void Reset() {
    height_ = 0;
    max_height_ = 0;
    slots_.clear();
    inst_.clear();
  }

 protected:
  virtual void PushParameters(const FuncGraphPtr &func_graph);
  bool Compile(const FuncGraphPtr &func_graph);
  // Convert one backend segment into linked instructions; returns its reference.
  int64_t LinConvert(const FuncGraphPtr &func_graph, const GraphSegmentPtr &segment, const std::string &target = "");
  int64_t InterpretNode(const FuncGraphPtr &func_graph, const CNodePtr &node);
  virtual int64_t AddCall(const FuncGraphPtr &graph, const CNodePtr &node);
  void AddPadStack(int64_t param_height);
  void AddTailCall(const AnfNodePtr &fn, size_t size);
  virtual void AddPartial(const CNodePtr &node);
  void AddMakeTuple(const CNodePtr &node);
  void AddSwitch(const CNodePtr &node);
  void AddSwitchLayer(const CNodePtr &node);
  void AddReturn(const CNodePtr &node);
  void AddPrimitive(const CNodePtr &node, const PrimitivePtr &prim);
  virtual void AddInput(const AnfNodePtr &node);
  virtual void AddExternal(const LinConvertResult &result);
  // Overloads appending one instruction with different argument kinds.
  void AddInst(const Instruction &inst, const int64_t &arg);
  void AddInst(const Instruction &inst, const ValuePtr &arg);
  void AddInst(const Instruction &inst, const VectorRef &args);

  BackendPtr backend_;
  GraphPartitionPtr graph_partition_;
  LinkFuncType lin_convert_;
  int64_t height_{0};      // current evaluation-stack height
  int64_t max_height_{0};  // high-water mark of height_
  std::unordered_map<AnfNodePtr, int64_t> slots_;  // node -> stack slot
  InstSet inst_;
};
using CompileGraphPtr = std::shared_ptr<CompileGraph>;
// CompileGraphs is used to Convert a graph cluster into instruction lists.
// CompileGraphs is used to Convert a graph cluster into instruction lists.
class CompileGraphs {
 public:
  explicit CompileGraphs(const BackendPtr &backend, const std::vector<PrimitivePtr> &cut_list = nonlinear_ops);
  virtual ~CompileGraphs() = default;
  // Drop all compiled instructions and the graph mapping.
  void Reset() {
    insts_.clear();
    mapping_.clear();
  }
  void Compile(const FuncGraphPtr &func_graph);
  // Produces a runnable FinalVM from the accumulated instructions.
  FinalVMPtr Link();
  FinalVMPtr CompileAndLink(const FuncGraphPtr &func_graph);

 protected:
  InstSet insts_;
  // NOTE(review): presumably func graph -> offset of its first instruction in insts_ -- confirm.
  std::unordered_map<FuncGraphPtr, int64_t> mapping_;
  CompileGraphPtr transform_;  // the per-graph compiler doing the actual lowering
  BackendPtr backend_;
};
// Create the default backend instance used by the compile pipeline.
BackendPtr CreateBackend();
// Set whether MindRT is enabled. GPU and CPU use MindRT currently; other hardware will use it in the future.
void SetMindRTEnable();
} // namespace compile
} // namespace mindspore
#endif // MINDSPORE_CCSRC_VM_TRANSFORM_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/runtime/framework/actor/control_flow/entrance_actor.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_ENTRANCE_ACTOR_H_
#define MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_ENTRANCE_ACTOR_H_
#include <vector>
#include <string>
#include <memory>
#include <unordered_map>
#include <stack>
#include <queue>
#include "runtime/framework/actor/actor_common.h"
#include "runtime/framework/actor/abstract_actor.h"
namespace mindspore {
namespace runtime {
// Entrance actor is used in the control flow to receive a set of result arrow and a branch id and then send
// the data to the corresponding actor. It is the entry point for subgraph execution.
class EntranceActor : public AbstractActor {
public:
EntranceActor(const std::string &name, const std::vector<AnfNodePtr> ¶meters)
: AbstractActor(name, KernelTransformType::kEntranceActor, nullptr), formal_parameters_(parameters) {
device_contexts_.resize(parameters.size());
}
~EntranceActor() override = default;
void Init() override;
// The entrance actor run when receive the real parameter nodes and branch id.
void CollectRealParametersAndBranchId(const std::vector<KernelWithIndex> &real_parameters, int branch_id,
OpContext<DeviceTensor> *const context);
protected:
void Run(OpContext<DeviceTensor> *const context) override;
private:
friend class GraphScheduler;
// Formal parameters of actor, which is the front node.
std::vector<KernelWithIndex> formal_parameters_;
// Input data.
std::unordered_map<uuids::uuid *, std::queue<std::vector<KernelWithIndex>>> input_nodes_;
std::unordered_map<uuids::uuid *, std::queue<int>> input_branch_ids_;
std::vector<AID> output_branch_id_arrows_;
// The output_data_ corresponds to the output_data_arrows_ one by one.
std::vector<OpData<DeviceTensor> *> output_data_;
bool is_actor_ready_{true};
};
using EntranceActorPtr = std::shared_ptr<EntranceActor>;
} // namespace runtime
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_ENTRANCE_ACTOR_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/kernel_compiler/tbe/ascend_kernel_compile.h
|
<filename>mindspore/ccsrc/backend/kernel_compiler/tbe/ascend_kernel_compile.h<gh_stars>1000+
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_TBE_ASCEND_KERNEL_COMPILE_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_TBE_ASCEND_KERNEL_COMPILE_H_
#include <string>
#include <map>
#include <tuple>
#include <set>
#include <memory>
#include <vector>
#include <utility>
#include "ir/anf.h"
#include "backend/kernel_compiler/kernel.h"
#include "backend/kernel_compiler/kernel_fusion.h"
#include "backend/kernel_compiler/tbe/tbe_kernel_build.h"
#include "backend/kernel_compiler/tbe/tbe_kernel_parallel_build.h"
#include "backend/session/kernel_graph.h"
namespace mindspore {
namespace kernel {
namespace ascend {
using KernelModMap = std::map<int64_t, KernelModPtr>;
// Result of querying one TBE compile job (populated by QueryResultProcess).
struct TargetJobStatus {
  int target_job_id;       // Id of the compile job being queried.
  std::string job_status;  // Status string reported for the job.
  std::string except_msg;  // Exception/error message, if any.
  std::string json_name;   // Kernel json name associated with the job.
};
// Manager that drives TBE kernel compilation on Ascend: op select/check,
// pre-build, single-op and fusion-op compilation, and result-job querying.
class AscendKernelCompileManager {
 public:
  // Lazily-initialized singleton. NOTE(review): the tbe_init_flag_ check is not
  // synchronized; concurrent first calls could race on TbeInitialize -- confirm
  // this is only reached from a single thread.
  static AscendKernelCompileManager &GetInstance() {
    static AscendKernelCompileManager instance;
    if (!instance.tbe_init_flag_) {
      instance.TbeInitialize();
    }
    return instance;
  }
  void TbeInitialize();
  void TbeFinalize();
  // kernel select
  std::string AscendOpSelectFormat(const AnfNodePtr &node);
  bool AscendOpCheckSupported(const AnfNodePtr &node);
  // pre build
  void AscendPreBuild(const std::shared_ptr<session::KernelGraph> &kernel_graph);
  // single op compile
  bool AscendSingleOpCompile(const std::vector<AnfNodePtr> &anf_nodes);
  // fusion op compile
  KernelModMap AscendFusionOpCompile(const std::vector<FusionScopeInfo> &fusion_scopes);
  // clear prev job's cache
  void ResetOldTask();
 private:
  AscendKernelCompileManager() = default;
  ~AscendKernelCompileManager();
  void GetAllAscendNodes(const std::shared_ptr<session::KernelGraph> &kernel_graph, std::vector<AnfNodePtr> *tbe_nodes);
  void QueryFinishJob(const std::string &type);
  void ParseTargetJobStatus(const std::string &type, const std::string &job_result, std::vector<int> *success_job);
  void QueryPreBuildFinishJob();
  void QueryFusionFinishJob(KernelModMap *kernel_mode_ret);
  // NOTE(review): nlohmann::json is used below but not included directly here;
  // presumably pulled in transitively (e.g. via kernel.h) -- confirm.
  void PrintProcessLog(const nlohmann::json &json, int adjust_log_level);
  void JsonAssemble(const std::string &job_type, const nlohmann::json &src_json, nlohmann::json *dst_json);
  void PrintInitResult(const nlohmann::json &json);
  void PrintCompileResult(const nlohmann::json &json);
  std::string OpSelectAndCheckResultProcess(const nlohmann::json &json, const AnfNodePtr &node);
  void QueryResultProcess(const nlohmann::json &json, TargetJobStatus *task_info);
  nlohmann::json TurnStrToJson(const std::string &str) const;
  static bool tbe_init_flag_;  // Whether TbeInitialize has been performed.
  static bool is_tune_flag_;   // Whether auto-tune mode is active.
  std::set<std::string> single_processed_kernels_;  // Kernels already handled by single-op compile.
  std::set<std::string> fusion_processed_kernels_;  // Kernels already handled by fusion compile.
  std::string op_debug_level_;  // if op_debug_level is "1", skip tbe compile cache and rebuild again.
  std::shared_ptr<ParallelBuildManager> build_manager_ = nullptr;
  std::map<int, nlohmann::json> job_list_;       // In-flight compile jobs keyed by job id.
  std::map<int, AnfNodePtr> job_id_to_node_;     // Job id -> originating node.
  std::map<int, std::string> fusion_op_names_;   // Job id -> fusion op name.
};
} // namespace ascend
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_TBE_ASCEND_KERNEL_COMPILE_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/ps/core/abstract_node.h
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PS_CORE_ABSTRACT_NODE_H_
#define MINDSPORE_CCSRC_PS_CORE_ABSTRACT_NODE_H_
#include <utility>
#include <string>
#include <memory>
#include <map>
#include <vector>
#include <unordered_map>
#include "ps/core/node.h"
#include "ps/core/communicator/message.h"
#include "ps/core/follower_scaler.h"
#include "utils/ms_exception.h"
#include "ps/constants.h"
#include "ps/core/node_info.h"
#include "ps/core/recovery_base.h"
#include "ps/core/communicator/task_executor.h"
#include "ps/core/communicator/communicator_base.h"
namespace mindspore {
namespace ps {
namespace core {
class FollowerScaler;
// Common base for worker/server nodes in the parameter-server cluster: handles
// registration with the scheduler, heartbeats, point-to-point and collective
// messaging, and elastic scale-out/scale-in coordination.
class AbstractNode : public Node {
 public:
  AbstractNode()
      : heart_beat_thread_(nullptr),
        client_to_scheduler_thread_(nullptr),
        client_to_scheduler_(nullptr),
        server_(nullptr),
        server_thread_(nullptr),
        worker_num_(-1),
        server_num_(-1),
        is_current_node_scale_in_(false),
        follower_scaler_(nullptr),
        node_recovery_(nullptr),
        scheduler_ip_(""),
        scheduler_port_(0) {}
  ~AbstractNode() override = default;
  // Handler for responses to requests this node sent to the scheduler.
  typedef void (AbstractNode::*ResponseHandler)(const std::shared_ptr<MessageMeta> &meta, const void *data,
                                                size_t size);
  // Handler for commands arriving on this node's own TCP server.
  typedef void (AbstractNode::*ServerHandler)(const std::shared_ptr<TcpConnection> &conn,
                                              const std::shared_ptr<MessageMeta> &meta, const Protos &protos,
                                              const void *data, size_t size);
  using DataPtr = std::shared_ptr<unsigned char[]>;
  using VectorPtr = std::shared_ptr<std::vector<unsigned char>>;
  using RequestHandler =
    std::function<void(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                       const DataPtr &data, size_t size)>;
  // Broadcast `message` to every node of the given role.
  bool Broadcast(const NodeRole &node_role, const DataPtr &message, size_t size, int command,
                 const uint32_t &timeout = kCommTimeoutInSeconds);
  // When the business layer finish scale out, it should call this function
  void set_ready_for_scale_out();
  // When the business layer finish scale in, it should call this function
  void set_ready_for_scale_in();
  // Send scale_out_done instructions to the scheduler.
  void set_scale_out_done();
  // Send scale_in_done instructions to the scheduler.
  void set_scale_in_done();
  // The worker/server sends the event to the scheduler, and then the scheduler broadcasts this event to all nodes.
  void BroadcastEvent(const uint32_t &event);
  // Set the callback corresponding to the event.
  void RegisterEventCallback(const ClusterEvent &event, const EventCallback &event_cb);
  // Set the callback corresponding to the custom event.
  void RegisterCustomEventCallback(const uint32_t &event, const EventCallback &event_cb);
  // Point-to-point sends; the overloads taking `output` wait for a reply.
  bool Send(const NodeRole &node_role, const uint32_t &rank_id, const DataPtr &data, size_t len, int command,
            const uint32_t &timeout = kTimeoutInSeconds);
  bool Send(const NodeRole &node_role, const std::vector<uint32_t> &rank_ids, const std::vector<DataPtr> &data,
            const std::vector<size_t> &lens, int command, const uint32_t &timeout = kTimeoutInSeconds);
  bool Send(const NodeRole &node_role, const uint32_t &rank_id, const DataPtr &message, size_t len, int command,
            VectorPtr *output, const uint32_t &timeout = kTimeoutInSeconds);
  bool Send(const NodeRole &node_role, const std::vector<uint32_t> &rank_ids, const std::vector<DataPtr> &data,
            const std::vector<size_t> &data_lens, int command, std::vector<VectorPtr> *output,
            const uint32_t &timeout = kTimeoutInSeconds);
  // Asynchronous collective primitives; CollectiveWait blocks on a prior receive.
  uint64_t CollectiveSendAsync(const NodeRole &node_role, const uint32_t &rank_id, const void *data, size_t size);
  std::pair<uint32_t, uint64_t> CollectiveReceiveAsync(const NodeRole &node_role, const uint32_t &rank_id,
                                                       VectorPtr *output);
  bool CollectiveWait(const std::pair<uint32_t, uint64_t> &request_id, const uint32_t &timeout = kCommTimeoutInSeconds);
  // Initialize the scaler for server to process before/after scaling operations.
  bool InitFollowerScaler();
  // Register barriers before scaling operations for server.
  void RegisterFollowerScalerBarrierBeforeScaleOut(const std::string &module, const BarrierBeforeScaleOut &barrier);
  void RegisterFollowerScalerBarrierBeforeScaleIn(const std::string &module, const BarrierBeforeScaleIn &barrier);
  // Register handlers after scaling operations for server.
  void RegisterFollowerScalerHandlerAfterScaleOut(const std::string &module, const HandlerAfterScaleOut &handler);
  void RegisterFollowerScalerHandlerAfterScaleIn(const std::string &module, const HandlerAfterScaleIn &handler);
  int32_t worker_num() const;
  int32_t server_num() const;
  void set_worker_num(const int32_t &worker_num);
  void set_server_num(const int32_t &server_num);
  std::string scheduler_ip() const;
  void set_scheduler_ip(const std::string &scheduler_ip);
  uint16_t scheduler_port() const;
  void set_scheduler_port(const uint16_t &scheduler_port);
  ClusterState cluster_state() const;
  void set_handler(const RequestHandler &handler);
  void Response(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta, const void *data,
                size_t size);
  // Lazily create (and cache) communicators shared by upper layers.
  std::shared_ptr<CommunicatorBase> GetOrCreateHttpComm(const std::string &ip, uint16_t port,
                                                        const std::shared_ptr<TaskExecutor> &task_executor);
  std::shared_ptr<CommunicatorBase> GetOrCreateTcpComm(const std::string &scheduler_ip, std::int16_t scheduler_port,
                                                       uint32_t worker_num, uint32_t server_num,
                                                       const std::shared_ptr<TaskExecutor> &task_executor);
 protected:
  void Register(const std::shared_ptr<TcpClient> &client);
  bool Heartbeat(const std::shared_ptr<TcpClient> &client);
  void FetchServers(const std::shared_ptr<TcpClient> &client);
  // Handlers for scheduler responses (ResponseHandler signatures).
  void ProcessRegisterResp(const std::shared_ptr<MessageMeta> &meta, const void *data, size_t size);
  void ProcessHeartbeatResp(const std::shared_ptr<MessageMeta> &meta, const void *data, size_t size);
  void ProcessFetchServersResp(const std::shared_ptr<MessageMeta> &meta, const void *data, size_t size);
  // Handlers for commands received on this node's server (ServerHandler signatures).
  void ProcessSendMetadata(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                           const Protos &protos, const void *data, size_t size);
  void ProcessFinish(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                     const Protos &protos, const void *data, size_t size);
  void ProcessScaleOut(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                       const Protos &protos, const void *data, size_t size);
  void ProcessScaleIn(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                      const Protos &protos, const void *data, size_t size);
  // The worker/server processes the scale_out_done message from scheduler
  void ProcessScaleOutDone(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                           const Protos &protos, const void *data, size_t size);
  // The worker/server processes the scale_in_done message from scheduler
  void ProcessScaleInDone(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                          const Protos &protos, const void *data, size_t size);
  // The worker/server processes the SEND_EVENT message from scheduler
  void ProcessEvent(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                    const Protos &protos, const void *data, size_t size);
  void StartHeartbeatTimer(const std::shared_ptr<TcpClient> &client);
  void UpdateSchedulerTime();
  bool CheckSchedulerTimeout() const;
  bool Disconnect(const std::shared_ptr<TcpClient> &client, const uint32_t &timeout);
  bool WaitForDisconnect(const uint32_t &timeout);
  bool InitClientToScheduler();
  const std::shared_ptr<TcpClient> &GetOrCreateTcpClient(const uint32_t &rank_id,
                                                         const NodeRole &role = NodeRole::SERVER);
  bool SendMessageSync(const std::shared_ptr<TcpClient> &client, const CommMessage &message,
                       const uint32_t &timeout = kCommTimeoutInSeconds);
  bool SendMessageSync(const std::shared_ptr<TcpClient> &client, const std::shared_ptr<MessageMeta> &meta,
                       const Protos &, const void *, size_t size, const uint32_t &timeout = kCommTimeoutInSeconds);
  uint64_t SendMessageAsync(const std::shared_ptr<TcpClient> &client, const std::shared_ptr<MessageMeta> &meta,
                            const Protos &protos, const void *data, size_t size);
  void ProcessCollectiveSendData(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                                 const void *data, size_t size);
  void ProcessSendData(const std::shared_ptr<TcpConnection> &conn, const std::shared_ptr<MessageMeta> &meta,
                       const Protos &protos, const void *data, size_t size);
  void NotifyMessageArrival(const std::shared_ptr<MessageMeta> &meta);
  void RunReceiveCallback(const std::shared_ptr<MessageMeta> &meta, const Protos &protos, const void *data,
                          size_t size);
  uint64_t NextExpectedRankRequestId(const uint32_t &rank_id);
  uint64_t NextActualRankRequestId(const uint32_t &rank_id);
  void InitCommandHandler();
  void InitServerHandler();
  // when initializing the node, should initializing the node info.
  void InitNodeInfo(const NodeRole &role);
  // Initialize worker num and server num by cluster config.
  void InitNodeNum();
  // Node recover by cluster config.
  bool Recover();
  // Trigger the callback corresponding to the event.
  void OnEventCallback(const ClusterEvent &event);
  // Trigger the callback corresponding to the custom event.
  void OnCustomEventCallback(const uint32_t &event);
  bool IsWorkerOrServer0(const std::unordered_map<std::string, NodeInfo> &info);
  void CreateTcpServer();
  std::unique_ptr<std::thread> heart_beat_thread_;
  std::unique_ptr<std::thread> client_to_scheduler_thread_;
  std::shared_ptr<TcpClient> client_to_scheduler_;
  // the key is: <node_role,rank_id>, the value is: <ip, port>
  std::map<std::pair<NodeRole, uint32_t>, std::pair<std::string, uint16_t>> nodes_address_;
  // the map's key is: rank_id
  std::map<std::pair<NodeRole, uint32_t>, std::shared_ptr<TcpClient>> connected_nodes_;
  // the key is <rank_id, rank_request_id>
  std::map<std::pair<uint32_t, uint64_t>, std::shared_ptr<std::vector<unsigned char>>> received_data_;
  std::mutex receive_callbacks_mutex_;
  // the key is <rank_id, rank_request_id>
  std::map<std::pair<uint32_t, uint64_t>, MessageCallback> receive_callbacks_;
  std::condition_variable receive_cond_;
  // the key is rank_id, the value is rank_id's expected request_id
  std::unordered_map<uint32_t, uint64_t> expected_rank_request_ids_;
  // the key is rank_id, the value is rank_id's actual request_id
  std::unordered_map<uint32_t, uint64_t> actual_rank_request_ids_;
  // NOTE(review): missing trailing '_' vs. other members; renaming would touch
  // subclasses/.cc files, so only flagged here.
  std::mutex rank_request_ids_mutex;
  timeval scheduler_time_{0, 0};
  std::unordered_map<NodeCommand, ResponseHandler> handlers_;
  std::unordered_map<NodeCommand, ServerHandler> server_handler_;
  // Workers and servers launch the server to process command: FINISH,SCALE_OUT,SCALE_IN,SEND_METADATA
  std::shared_ptr<TcpServer> server_;
  std::unique_ptr<std::thread> server_thread_;
  int32_t worker_num_;
  int32_t server_num_;
  // Identify whether the current node is a scale in node.
  std::atomic<bool> is_current_node_scale_in_;
  // Each ClusterEvent corresponds to a EventCallback to process the event.
  std::map<ClusterEvent, EventCallback> event_to_callback_;
  // Each custom event corresponds to a EventCallback to process the event.
  // This event is sent to the scheduler, and then the scheduler broadcasts this event to all nodes.
  // for example:
  // In order to ensure the consistency of the cluster, the server broadcasts an iteration_end event to notify all other
  // nodes to modify the iteration status
  std::map<uint32_t, EventCallback> custom_event_to_callback_;
  // Scaler for worker/server node.
  std::unique_ptr<FollowerScaler> follower_scaler_;
  // Recovery for worker/server node.
  std::unique_ptr<RecoveryBase> node_recovery_;
  // The ip of scheduler.
  std::string scheduler_ip_;
  // The port of scheduler.
  uint16_t scheduler_port_;
  // Synchronize all node metadata from the scheduler.
  std::unordered_map<std::string, NodeInfo> all_nodes_info_;
  RequestHandler request_handler_;
  std::unordered_map<std::string, std::shared_ptr<CommunicatorBase>> communicators_;
  std::mutex communicator_mutex_;
};
} // namespace core
} // namespace ps
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PS_CORE_ABSTRACT_NODE_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/int8/pack_int8.c
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nnacl/int8/pack_int8.h"
#ifdef ENABLE_ARM32
// Computes the per-(plane, output-channel) input sums multiplied by each
// channel's filter zero point, for the 16x4-packed int8 convolution path.
// input_value: packed int8 activations; input_sum: output sums; filter_zp_ptr:
// per-output-channel filter zero points.
void PackInputSum16x4PerChannelArm32(const int8_t *input_value, int32_t *input_sum, const int32_t *filter_zp_ptr,
                                     size_t plane_size, size_t input_channel, size_t output_channel) {
  size_t hw4 = UP_ROUND(plane_size, C4NUM);     /* plane size rounded up to a multiple of 4 */
  size_t ic16 = UP_ROUND(input_channel, C16NUM); /* input channels rounded up to a multiple of 16 */
#ifdef ENABLE_ARM32
  /* NOTE(review): this function is itself compiled only under ENABLE_ARM32
   * (see the surrounding guard), so this inner #ifdef is always true and the
   * scalar #else fallback below is dead code. */
  size_t oc_div2 = output_channel / C2NUM * C2NUM;
  size_t oc_res2 = output_channel - oc_div2;
  size_t inputsun_stride = hw4 * C2NUM * 4 - C4NUM * C2NUM * 4;
  PreSum4x16Int8Peroc(input_value, input_sum, filter_zp_ptr, hw4, ic16, oc_div2, oc_res2, inputsun_stride);
#else
  /* Scalar reference implementation: for each plane row and output channel,
   * accumulate the row's input values then scale by the channel zero point. */
  for (int ri = 0; ri < plane_size; ri++) {
    int ri4div = ri / C4NUM, ri4mod = ri % C4NUM;
    for (int ci = 0; ci < output_channel; ci++) {
      int32_t tmp_sum_value = 0;
      int ci2div = ci / C2NUM, ci2mod = ci % C2NUM;
      int32_t filter_zp = filter_zp_ptr[ci];
      for (int di = 0; di < input_channel; di++) {
        size_t di16div = di / C16NUM, di16mod = di % C16NUM;
        int src_index = ri4div * C4NUM * ic16 + di16div * C16NUM * C4NUM + ri4mod * C16NUM + di16mod;
        tmp_sum_value += input_value[src_index];
      }
      int dst_index = ci2div * C2NUM * hw4 + ri * C2NUM + ci2mod;
      input_sum[dst_index] = tmp_sum_value * filter_zp;
    }
  }
#endif
  return;
}
#endif
// Per-channel variant of the input-sum computation for the 16x4-packed int8
// convolution path. On ARM64 an assembly kernel (PreSum4x16Int8Peroc) is used;
// otherwise a scalar loop computes sum(inputs of row) * filter_zp per channel.
void PackInputSum16x4PerChannel(const int8_t *input_value, int32_t *input_sum, const int32_t *filter_zp_ptr,
                                size_t plane_size, size_t input_channel, size_t output_channel) {
  size_t hw4 = UP_ROUND(plane_size, C4NUM);      /* plane size rounded up to a multiple of 4 */
  size_t ic16 = UP_ROUND(input_channel, C16NUM); /* input channels rounded up to a multiple of 16 */
#ifdef ENABLE_ARM64
  size_t oc_div4 = output_channel / C4NUM * C4NUM;
  size_t oc_res4 = output_channel - oc_div4;
  size_t inputsun_stride = hw4 * C4NUM * 4 - C4NUM * C4NUM * 4;
  PreSum4x16Int8Peroc(input_value, input_sum, filter_zp_ptr, hw4, ic16, oc_div4, oc_res4, inputsun_stride);
#else
  for (int ri = 0; ri < plane_size; ri++) {
    int ri4div = ri / C4NUM, ri4mod = ri % C4NUM;
    for (int ci = 0; ci < output_channel; ci++) {
      int32_t tmp_sum_value = 0;
      int ci4div = ci / C4NUM, ci4mod = ci % C4NUM;
      int32_t filter_zp = filter_zp_ptr[ci];
      /* Accumulate the packed row: source layout is 4 rows x 16 channels tiles. */
      for (int di = 0; di < input_channel; di++) {
        size_t di16div = di / C16NUM, di16mod = di % C16NUM;
        int src_index = ri4div * C4NUM * ic16 + di16div * C16NUM * C4NUM + ri4mod * C16NUM + di16mod;
        tmp_sum_value += input_value[src_index];
      }
      int dst_index = ci4div * C4NUM * hw4 + ri * C4NUM + ci4mod;
      input_sum[dst_index] = tmp_sum_value * filter_zp;
    }
  }
#endif
  return;
}
// Pre-processing for the per-output-channel 1x1 int8 convolution:
// packs `src_input` (row-major, `input_channel` per plane element) into the
// 8-row x 4-channel tiled layout expected by the matmul kernel, and computes
// input_sum[row][oc] = (sum of the row's inputs) * filter_zp[oc].
// On ARM64 the 8-row main loop is done in inline assembly; a scalar fallback
// handles other targets and the tail rows/channels. `inputsum_stride` is the
// distance (in int32 elements) between consecutive 8-output-channel groups.
void Conv1x1PreOptPeroc(const int8_t *src_input, int8_t *packed_input, int32_t *input_sum, size_t input_channel,
                        size_t output_channel, size_t plane_size, const int32_t *filter_zp, size_t inputsum_stride) {
  int ic4 = UP_ROUND(input_channel, C4NUM);
  int oc8 = UP_ROUND(output_channel, C8NUM);
  int hw8 = UP_ROUND(plane_size, C8NUM);
  size_t hw_8div = plane_size / C8NUM * C8NUM;   /* rows handled by the 8-row main loop */
  size_t oc_8div = output_channel / C8NUM * C8NUM;
  size_t oc_8res = output_channel - oc_8div;     /* leftover output channels (< 8) */
  size_t ic_4div = input_channel / C4NUM * C4NUM;
  const int8_t *src_r = src_input;
  int8_t *pack_r = packed_input;
  int32_t *input_sum_r = input_sum;
  /* Main loop: process 8 plane rows at a time. */
  for (int hwi = 0; hwi < hw_8div; hwi += C8NUM) {
    const int8_t *src_ic = src_r;
    int8_t *pack_ic = pack_r;
    int32_t *input_sum_oc = input_sum_r;
#ifdef ENABLE_ARM64
    size_t src_stride = input_channel;
    size_t ic_4res = input_channel - ic_4div;
    size_t input_sum_stride = inputsum_stride * 4 - C8NUM * C8NUM * 4;
    /* Labels 1-6: pack 8 rows x 4 channels per iteration while accumulating
     * per-row sums in v16/v17 (labels 3-5 handle 1/2/3 leftover channels).
     * Labels 7-17: multiply the row sums by each group of 8 filter zero
     * points and store into input_sum (labels 9-15 handle oc tails 1-7). */
    asm volatile(
      "dup v16.4s, wzr \n"
      "dup v17.4s, wzr \n"
      "mov x10, %[src_ic] \n"
      "mov x11, %[pack_ic] \n"
      "mov x0, #0 \n"
      "1: \n"
      "cmp x0, %[ic_4div] \n"
      "add x0, x0, #4\n"
      "mov x12, x10 \n"
      "add x10, x10, #4\n"
      "blt 2f \n"
      "cmp %[ic_4res], #0\n"
      "beq 6f \n"
      "cmp %[ic_4res], #1\n"
      "beq 3f \n"
      "cmp %[ic_4res], #2\n"
      "beq 4f \n"
      "cmp %[ic_4res], #3\n"
      "beq 5f \n"
      "2: \n"
      "ld1 {v0.s}[0], [x12], %[src_stride]\n"
      "ld1 {v0.s}[1], [x12], %[src_stride]\n"
      "ld1 {v0.s}[2], [x12], %[src_stride]\n"
      "ld1 {v0.s}[3], [x12], %[src_stride]\n"
      "ld1 {v1.s}[0], [x12], %[src_stride]\n"
      "ld1 {v1.s}[1], [x12], %[src_stride]\n"
      "ld1 {v1.s}[2], [x12], %[src_stride]\n"
      "ld1 {v1.s}[3], [x12], %[src_stride]\n"
      "st1 {v0.16b}, [x11], #16\n"
      "st1 {v1.16b}, [x11], #16\n"
      "saddlp v4.8h, v0.16b \n"
      "saddlp v5.8h, v1.16b \n"
      "saddlp v0.4s, v4.8h \n"
      "saddlp v1.4s, v5.8h \n"
      "add v16.4s, v16.4s, v0.4s \n"
      "add v17.4s, v17.4s, v1.4s \n"
      "b 1b \n"
      "3: \n" /* col res 1 */
      "dup v0.4s, wzr \n"
      "dup v1.4s, wzr \n"
      "ld1 {v0.b}[0], [x12], %[src_stride]\n"
      "ld1 {v0.b}[4], [x12], %[src_stride]\n"
      "ld1 {v0.b}[8], [x12], %[src_stride]\n"
      "ld1 {v0.b}[12], [x12], %[src_stride]\n"
      "ld1 {v1.b}[0], [x12], %[src_stride]\n"
      "ld1 {v1.b}[4], [x12], %[src_stride]\n"
      "ld1 {v1.b}[8], [x12], %[src_stride]\n"
      "ld1 {v1.b}[12], [x12], %[src_stride]\n"
      "st1 {v0.16b}, [x11], #16\n"
      "st1 {v1.16b}, [x11], #16\n"
      "saddlp v4.8h, v0.16b \n"
      "saddlp v5.8h, v1.16b \n"
      "saddlp v0.4s, v4.8h \n"
      "saddlp v1.4s, v5.8h \n"
      "add v16.4s, v16.4s, v0.4s \n"
      "add v17.4s, v17.4s, v1.4s \n"
      "b 6f \n"
      "4: \n" /* col res 2 */
      "dup v0.4s, wzr \n"
      "dup v1.4s, wzr \n"
      "ld1 {v0.h}[0], [x12], %[src_stride]\n"
      "ld1 {v0.h}[2], [x12], %[src_stride]\n"
      "ld1 {v0.h}[4], [x12], %[src_stride]\n"
      "ld1 {v0.h}[6], [x12], %[src_stride]\n"
      "ld1 {v1.h}[0], [x12], %[src_stride]\n"
      "ld1 {v1.h}[2], [x12], %[src_stride]\n"
      "ld1 {v1.h}[4], [x12], %[src_stride]\n"
      "ld1 {v1.h}[6], [x12], %[src_stride]\n"
      "st1 {v0.16b}, [x11], #16\n"
      "st1 {v1.16b}, [x11], #16\n"
      "saddlp v4.8h, v0.16b \n"
      "saddlp v5.8h, v1.16b \n"
      "saddlp v0.4s, v4.8h \n"
      "saddlp v1.4s, v5.8h \n"
      "add v16.4s, v16.4s, v0.4s \n"
      "add v17.4s, v17.4s, v1.4s \n"
      "b 6f \n"
      "5: \n" /* col res 3 */
      "dup v0.4s, wzr \n"
      "dup v1.4s, wzr \n"
      "add x13, x12, #2 \n"
      "ld1 {v0.h}[0], [x12], %[src_stride]\n"
      "ld1 {v0.b}[2], [x13], %[src_stride]\n"
      "ld1 {v0.h}[2], [x12], %[src_stride]\n"
      "ld1 {v0.b}[6], [x13], %[src_stride]\n"
      "ld1 {v0.h}[4], [x12], %[src_stride]\n"
      "ld1 {v0.b}[10], [x13], %[src_stride]\n"
      "ld1 {v0.h}[6], [x12], %[src_stride]\n"
      "ld1 {v0.b}[14], [x13], %[src_stride]\n"
      "ld1 {v1.h}[0], [x12], %[src_stride]\n"
      "ld1 {v1.b}[2], [x13], %[src_stride]\n"
      "ld1 {v1.h}[2], [x12], %[src_stride]\n"
      "ld1 {v1.b}[6], [x13], %[src_stride]\n"
      "ld1 {v1.h}[4], [x12], %[src_stride]\n"
      "ld1 {v1.b}[10], [x13], %[src_stride]\n"
      "ld1 {v1.h}[6], [x12], %[src_stride]\n"
      "ld1 {v1.b}[14], [x13], %[src_stride]\n"
      "st1 {v0.16b}, [x11], #16\n"
      "st1 {v1.16b}, [x11], #16\n"
      "saddlp v4.8h, v0.16b \n"
      "saddlp v5.8h, v1.16b \n"
      "saddlp v0.4s, v4.8h \n"
      "saddlp v1.4s, v5.8h \n"
      "add v16.4s, v16.4s, v0.4s \n"
      "add v17.4s, v17.4s, v1.4s \n"
      "b 6f \n"
      "6: \n"
      "dup v0.4s, v16.s[0] \n"
      "dup v1.4s, v16.s[1] \n"
      "dup v2.4s, v16.s[2] \n"
      "dup v3.4s, v16.s[3] \n"
      "dup v4.4s, v17.s[0] \n"
      "dup v5.4s, v17.s[1] \n"
      "dup v6.4s, v17.s[2] \n"
      "dup v7.4s, v17.s[3] \n"
      "mov x4, #0 \n"
      "mov x10, %[filter_zp] \n"
      "mov x11, %[input_sum_oc] \n"
      "7: \n"
      "cmp x4, %[oc_8div] \n"
      "beq 8f \n"
      "add x4, x4, #8\n"
      "ld1 {v16.4s}, [x10], #16\n"
      "ld1 {v17.4s}, [x10], #16\n"
      "mul v18.4s, v16.4s, v0.4s \n"
      "mul v19.4s, v17.4s, v0.4s \n"
      "st1 {v18.4s}, [x11], #16 \n"
      "st1 {v19.4s}, [x11], #16 \n"
      "mul v20.4s, v16.4s, v1.4s \n"
      "mul v21.4s, v17.4s, v1.4s \n"
      "st1 {v20.4s}, [x11], #16 \n"
      "st1 {v21.4s}, [x11], #16 \n"
      "mul v22.4s, v16.4s, v2.4s \n"
      "mul v23.4s, v17.4s, v2.4s \n"
      "st1 {v22.4s}, [x11], #16 \n"
      "st1 {v23.4s}, [x11], #16 \n"
      "mul v24.4s, v16.4s, v3.4s \n"
      "mul v25.4s, v17.4s, v3.4s \n"
      "st1 {v24.4s}, [x11], #16 \n"
      "st1 {v25.4s}, [x11], #16 \n"
      "mul v18.4s, v16.4s, v4.4s \n"
      "mul v19.4s, v17.4s, v4.4s \n"
      "st1 {v18.4s}, [x11], #16 \n"
      "st1 {v19.4s}, [x11], #16 \n"
      "mul v20.4s, v16.4s, v5.4s \n"
      "mul v21.4s, v17.4s, v5.4s \n"
      "st1 {v20.4s}, [x11], #16 \n"
      "st1 {v21.4s}, [x11], #16 \n"
      "mul v22.4s, v16.4s, v6.4s \n"
      "mul v23.4s, v17.4s, v6.4s \n"
      "st1 {v22.4s}, [x11], #16 \n"
      "st1 {v23.4s}, [x11], #16 \n"
      "mul v24.4s, v16.4s, v7.4s \n"
      "mul v25.4s, v17.4s, v7.4s \n"
      "st1 {v24.4s}, [x11], #16 \n"
      "st1 {v25.4s}, [x11], #16 \n"
      "add x11, x11, %[input_sum_stride] \n"
      "b 7b \n"
      "8: \n"
      "cmp %[oc_8res], #0\n"
      "beq 17f \n"
      "dup v16.4s, wzr \n"
      "dup v17.4s, wzr \n"
      "cmp %[oc_8res], #1\n"
      "beq 9f \n"
      "cmp %[oc_8res], #2\n"
      "beq 10f \n"
      "cmp %[oc_8res], #3\n"
      "beq 11f \n"
      "cmp %[oc_8res], #4\n"
      "beq 12f \n"
      "cmp %[oc_8res], #5\n"
      "beq 13f \n"
      "cmp %[oc_8res], #6\n"
      "beq 14f \n"
      "cmp %[oc_8res], #7\n"
      "beq 15f \n"
      "9: \n"
      "ld1 {v16.s}[0], [x10] \n"
      "b 16f \n"
      "10: \n"
      "ld1 {v16.d}[0], [x10] \n"
      "b 16f \n"
      "11: \n"
      "ld1 {v16.d}[0], [x10] \n"
      "add x10, x10, #8 \n"
      "ld1 {v16.s}[2], [x10] \n"
      "b 16f \n"
      "12: \n"
      "ld1 {v16.4s}, [x10] \n"
      "b 16f \n"
      "13: \n"
      "ld1 {v16.4s}, [x10], #16\n"
      "ld1 {v17.s}[0], [x10] \n"
      "b 16f \n"
      "14: \n"
      "ld1 {v16.4s}, [x10], #16\n"
      "ld1 {v17.d}[0], [x10] \n"
      "b 16f \n"
      "15: \n"
      "ld1 {v16.4s}, [x10], #16\n"
      "ld1 {v17.d}[0], [x10] \n"
      "add x10, x10, #8 \n"
      "ld1 {v17.s}[2], [x10] \n"
      "b 16f \n"
      "16: \n"
      "mul v18.4s, v16.4s, v0.4s \n"
      "mul v19.4s, v17.4s, v0.4s \n"
      "mul v20.4s, v16.4s, v1.4s \n"
      "mul v21.4s, v17.4s, v1.4s \n"
      "mul v22.4s, v16.4s, v2.4s \n"
      "mul v23.4s, v17.4s, v2.4s \n"
      "mul v24.4s, v16.4s, v3.4s \n"
      "mul v25.4s, v17.4s, v3.4s \n"
      "st1 {v18.4s}, [x11], #16 \n"
      "st1 {v19.4s}, [x11], #16 \n"
      "st1 {v20.4s}, [x11], #16 \n"
      "st1 {v21.4s}, [x11], #16 \n"
      "st1 {v22.4s}, [x11], #16 \n"
      "st1 {v23.4s}, [x11], #16 \n"
      "st1 {v24.4s}, [x11], #16 \n"
      "st1 {v25.4s}, [x11], #16 \n"
      "mul v18.4s, v16.4s, v4.4s \n"
      "mul v19.4s, v17.4s, v4.4s \n"
      "mul v20.4s, v16.4s, v5.4s \n"
      "mul v21.4s, v17.4s, v5.4s \n"
      "mul v22.4s, v16.4s, v6.4s \n"
      "mul v23.4s, v17.4s, v6.4s \n"
      "mul v24.4s, v16.4s, v7.4s \n"
      "mul v25.4s, v17.4s, v7.4s \n"
      "st1 {v18.4s}, [x11], #16 \n"
      "st1 {v19.4s}, [x11], #16 \n"
      "st1 {v20.4s}, [x11], #16 \n"
      "st1 {v21.4s}, [x11], #16 \n"
      "st1 {v22.4s}, [x11], #16 \n"
      "st1 {v23.4s}, [x11], #16 \n"
      "st1 {v24.4s}, [x11], #16 \n"
      "st1 {v25.4s}, [x11], #16 \n"
      "17: \n"
      :
      : [ src_ic ] "r"(src_ic), [ pack_ic ] "r"(pack_ic), [ filter_zp ] "r"(filter_zp),
        [ input_sum_oc ] "r"(input_sum_oc), [ input_sum_stride ] "r"(input_sum_stride), [ src_stride ] "r"(src_stride),
        [ ic_4div ] "r"(ic_4div), [ ic_4res ] "r"(ic_4res), [ oc_8div ] "r"(oc_8div), [ oc_8res ] "r"(oc_8res)
      : "x0", "x1", "x4", "x9", "x10", "x11", "x12", "x13", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16",
        "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25");
#else
    /* Scalar equivalent of the assembly above: pack 8 rows while accumulating
     * each row's channel sum, then zero-pad up to ic4 channels. */
    int32_t tmp_sum_value[8] = {0};
    for (int ici = 0; ici < ic_4div; ici += C4NUM) {
      for (int i = 0; i < C8NUM; i++) {
        tmp_sum_value[i] += src_ic[0 + i * input_channel];
        tmp_sum_value[i] += src_ic[1 + i * input_channel];
        tmp_sum_value[i] += src_ic[2 + i * input_channel];
        tmp_sum_value[i] += src_ic[3 + i * input_channel];
        pack_ic[0 + i * C4NUM] = src_ic[0 + i * input_channel];
        pack_ic[1 + i * C4NUM] = src_ic[1 + i * input_channel];
        pack_ic[2 + i * C4NUM] = src_ic[2 + i * input_channel];
        pack_ic[3 + i * C4NUM] = src_ic[3 + i * input_channel];
      }
      src_ic += C4NUM;
      pack_ic += C4NUM * C8NUM;
    }
    for (int ici = ic_4div; ici < input_channel; ici += 1) {
      for (int i = 0; i < C8NUM; i++) {
        tmp_sum_value[i] += src_ic[i * input_channel];
        pack_ic[i * C4NUM] = src_ic[i * input_channel];
      }
      src_ic += 1;
      pack_ic += 1;
    }
    for (int ici = input_channel; ici < ic4; ici += 1) {
      for (int i = 0; i < C8NUM; i++) {
        pack_ic[i * C4NUM] = 0;
      }
      pack_ic += 1;
    }
    /* Scale each row sum by every filter zero point, 8 output channels at a time. */
    for (int oci = 0; oci < oc_8div; oci += C8NUM) {
      for (int ri = 0; ri < C8NUM; ri++) {
        input_sum_oc[ri * C8NUM + 0] = tmp_sum_value[ri] * filter_zp[oci + 0];
        input_sum_oc[ri * C8NUM + 1] = tmp_sum_value[ri] * filter_zp[oci + 1];
        input_sum_oc[ri * C8NUM + 2] = tmp_sum_value[ri] * filter_zp[oci + 2];
        input_sum_oc[ri * C8NUM + 3] = tmp_sum_value[ri] * filter_zp[oci + 3];
        input_sum_oc[ri * C8NUM + 4] = tmp_sum_value[ri] * filter_zp[oci + 4];
        input_sum_oc[ri * C8NUM + 5] = tmp_sum_value[ri] * filter_zp[oci + 5];
        input_sum_oc[ri * C8NUM + 6] = tmp_sum_value[ri] * filter_zp[oci + 6];
        input_sum_oc[ri * C8NUM + 7] = tmp_sum_value[ri] * filter_zp[oci + 7];
      }
      input_sum_oc += inputsum_stride;
    }
    if (oc_8div != output_channel) {
      for (int oci = 0; oci < oc_8res; oci += 1) {
        for (int ri = 0; ri < C8NUM; ri++) {
          input_sum_oc[ri * C8NUM + oci] = tmp_sum_value[ri] * filter_zp[oc_8div + oci];
        }
      }
      for (int oci = oc_8res; oci < C8NUM; oci += 1) {
        for (int ri = 0; ri < C8NUM; ri++) {
          input_sum_oc[ri * C8NUM + oci] = 0;
        }
      }
    } /* oc8 res done */
#endif
    src_r += input_channel * C8NUM;
    pack_r += ic4 * C8NUM;
    input_sum_r += C8NUM * C8NUM;
  }
  /* Tail rows (plane_size % 8): always scalar. */
  if (hw_8div != plane_size) {
    memset(pack_r, 0, C8NUM * ic4);
    for (int hwi = hw_8div; hwi < plane_size; hwi += 1) {
      int32_t *input_sum_oc = input_sum_r;
      int32_t tmp_sum_value = 0;
      const int8_t *src_ic = src_r;
      int8_t *pack_ic = pack_r;
      for (int ici = 0; ici < ic_4div; ici += C4NUM) {
        tmp_sum_value += src_ic[0];
        tmp_sum_value += src_ic[1];
        tmp_sum_value += src_ic[2];
        tmp_sum_value += src_ic[3];
        pack_ic[0] = src_ic[0];
        pack_ic[1] = src_ic[1];
        pack_ic[2] = src_ic[2];
        pack_ic[3] = src_ic[3];
        src_ic += C4NUM;
        pack_ic += C4NUM * C8NUM;
      }
      for (int ici = ic_4div; ici < input_channel; ici += 1) {
        tmp_sum_value += src_ic[0];
        pack_ic[0] = src_ic[0];
        src_ic += 1;
        pack_ic += 1;
      }
      for (int oci = 0; oci < oc_8div; oci += C8NUM) {
        for (int curoi = 0; curoi < C8NUM; curoi++) {
          input_sum_oc[curoi] = tmp_sum_value * filter_zp[oci + curoi];
        }
        input_sum_oc += inputsum_stride;
      }
      if (oc_8div != output_channel) {
        for (int oci = 0; oci < oc_8res; oci += 1) {
          input_sum_oc[oci] = tmp_sum_value * filter_zp[oc_8div + oci];
        }
        for (int oci = oc_8res; oci < C8NUM; oci += 1) {
          input_sum_oc[oci] = 0;
        }
      } /* oc8 res done */
      src_r += input_channel;
      pack_r += C4NUM;
      input_sum_r += C8NUM;
    }
    /* Zero the input-sum padding rows up to the hw8 boundary. */
    for (int hwi = plane_size; hwi < hw8; hwi++) {
      for (int oc = 0; oc < oc8; oc++) {
        int oc8div = oc / C8NUM, oc8res = oc % C8NUM;
        input_sum[oc8div * inputsum_stride + hwi * C8NUM + oc8res] = 0;
      }
    }
  }
  return;
}
// Conv1x1PreOptPert: per-layer-quantized input packing for the optimized
// 1x1 int8 convolution.
//
// Packs `src_input` (plane_size rows x input_channel int8 values) into the
// tiled layout consumed by the matmul kernel (rows grouped by C8NUM, channels
// grouped by C4NUM, channel tail zero-padded up to ic4), and writes into
// `input_sum`, for every spatial row, the sum of that row's input channels
// multiplied by the single per-layer filter zero point.
//
// The ARM64 asm path is an optimized equivalent of the C fallback below it;
// the C fallback defines the reference semantics.
void Conv1x1PreOptPert(const int8_t *src_input, int8_t *packed_input, int32_t *input_sum, size_t input_channel,
                       size_t plane_size, const ConvParameter *conv_param) {
  int ic4 = UP_ROUND(input_channel, C4NUM);        // channel count rounded up to a multiple of C4NUM
  size_t hw_8div = plane_size / C8NUM * C8NUM;     // spatial rows handled in full blocks of C8NUM
  size_t ic_4div = input_channel / C4NUM * C4NUM;  // channels handled in full blocks of C4NUM
  int32_t filter_zp = conv_param->conv_quant_arg_.filter_quant_args_[0].zp_;
  const int8_t *src_r = src_input;
  int8_t *pack_r = packed_input;
  /* per layer */
  // Main loop: full blocks of C8NUM spatial rows.
  for (int hwi = 0; hwi < hw_8div; hwi += C8NUM) {
    const int8_t *src_ic = src_r;
    int8_t *pack_ic = pack_r;
    int32_t *input_sum_r = input_sum + hwi;
#ifdef ENABLE_ARM64
    size_t src_stride = input_channel;
    size_t ic_4res = input_channel - ic_4div;  // 0..3 leftover channels
    // Assembly version of the C fallback: loads 8 rows x 4 channels per
    // iteration, stores them packed, and accumulates per-row sums (v16/v17),
    // which are finally scaled by the filter zero point (v20).
    asm volatile(
      "dup v16.4s, wzr \n"
      "dup v17.4s, wzr \n"
      "mov x14, %[input_sum_r] \n"
      "dup v20.4s, %w[filter_zp] \n"
      "mov x10, %[src_ic] \n"
      "mov x11, %[pack_ic] \n"
      "mov x0, #0 \n"
      "1: \n"
      "cmp x0, %[ic_4div] \n"
      "add x0, x0, #4\n"
      "mov x12, x10 \n"
      "add x10, x10, #4\n"
      "blt 2f \n"
      "cmp %[ic_4res], #0\n"
      "beq 6f \n"
      "cmp %[ic_4res], #1\n"
      "beq 3f \n"
      "cmp %[ic_4res], #2\n"
      "beq 4f \n"
      "cmp %[ic_4res], #3\n"
      "beq 5f \n"
      "2: \n"
      "ld1 {v0.s}[0], [x12], %[src_stride]\n"
      "ld1 {v0.s}[1], [x12], %[src_stride]\n"
      "ld1 {v0.s}[2], [x12], %[src_stride]\n"
      "ld1 {v0.s}[3], [x12], %[src_stride]\n"
      "ld1 {v1.s}[0], [x12], %[src_stride]\n"
      "ld1 {v1.s}[1], [x12], %[src_stride]\n"
      "ld1 {v1.s}[2], [x12], %[src_stride]\n"
      "ld1 {v1.s}[3], [x12], %[src_stride]\n"
      "st1 {v0.16b}, [x11], #16\n"
      "st1 {v1.16b}, [x11], #16\n"
      "saddlp v4.8h, v0.16b \n"
      "saddlp v5.8h, v1.16b \n"
      "saddlp v0.4s, v4.8h \n"
      "saddlp v1.4s, v5.8h \n"
      "add v16.4s, v16.4s, v0.4s \n"
      "add v17.4s, v17.4s, v1.4s \n"
      "b 1b \n"
      "3: \n" /* col res 1 */
      "dup v0.4s, wzr \n"
      "dup v1.4s, wzr \n"
      "ld1 {v0.b}[0], [x12], %[src_stride]\n"
      "ld1 {v0.b}[4], [x12], %[src_stride]\n"
      "ld1 {v0.b}[8], [x12], %[src_stride]\n"
      "ld1 {v0.b}[12], [x12], %[src_stride]\n"
      "ld1 {v1.b}[0], [x12], %[src_stride]\n"
      "ld1 {v1.b}[4], [x12], %[src_stride]\n"
      "ld1 {v1.b}[8], [x12], %[src_stride]\n"
      "ld1 {v1.b}[12], [x12], %[src_stride]\n"
      "st1 {v0.16b}, [x11], #16\n"
      "st1 {v1.16b}, [x11], #16\n"
      "saddlp v4.8h, v0.16b \n"
      "saddlp v5.8h, v1.16b \n"
      "saddlp v0.4s, v4.8h \n"
      "saddlp v1.4s, v5.8h \n"
      "add v16.4s, v16.4s, v0.4s \n"
      "add v17.4s, v17.4s, v1.4s \n"
      "b 6f \n"
      "4: \n" /* col res 2 */
      "dup v0.4s, wzr \n"
      "dup v1.4s, wzr \n"
      "ld1 {v0.h}[0], [x12], %[src_stride]\n"
      "ld1 {v0.h}[2], [x12], %[src_stride]\n"
      "ld1 {v0.h}[4], [x12], %[src_stride]\n"
      "ld1 {v0.h}[6], [x12], %[src_stride]\n"
      "ld1 {v1.h}[0], [x12], %[src_stride]\n"
      "ld1 {v1.h}[2], [x12], %[src_stride]\n"
      "ld1 {v1.h}[4], [x12], %[src_stride]\n"
      "ld1 {v1.h}[6], [x12], %[src_stride]\n"
      "st1 {v0.16b}, [x11], #16\n"
      "st1 {v1.16b}, [x11], #16\n"
      "saddlp v4.8h, v0.16b \n"
      "saddlp v5.8h, v1.16b \n"
      "saddlp v0.4s, v4.8h \n"
      "saddlp v1.4s, v5.8h \n"
      "add v16.4s, v16.4s, v0.4s \n"
      "add v17.4s, v17.4s, v1.4s \n"
      "b 6f \n"
      "5: \n" /* col res 3 */
      "dup v0.4s, wzr \n"
      "dup v1.4s, wzr \n"
      "add x13, x12, #2 \n"
      "ld1 {v0.h}[0], [x12], %[src_stride]\n"
      "ld1 {v0.b}[2], [x13], %[src_stride]\n"
      "ld1 {v0.h}[2], [x12], %[src_stride]\n"
      "ld1 {v0.b}[6], [x13], %[src_stride]\n"
      "ld1 {v0.h}[4], [x12], %[src_stride]\n"
      "ld1 {v0.b}[10], [x13], %[src_stride]\n"
      "ld1 {v0.h}[6], [x12], %[src_stride]\n"
      "ld1 {v0.b}[14], [x13], %[src_stride]\n"
      "ld1 {v1.h}[0], [x12], %[src_stride]\n"
      "ld1 {v1.b}[2], [x13], %[src_stride]\n"
      "ld1 {v1.h}[2], [x12], %[src_stride]\n"
      "ld1 {v1.b}[6], [x13], %[src_stride]\n"
      "ld1 {v1.h}[4], [x12], %[src_stride]\n"
      "ld1 {v1.b}[10], [x13], %[src_stride]\n"
      "ld1 {v1.h}[6], [x12], %[src_stride]\n"
      "ld1 {v1.b}[14], [x13], %[src_stride]\n"
      "st1 {v0.16b}, [x11], #16\n"
      "st1 {v1.16b}, [x11], #16\n"
      "saddlp v4.8h, v0.16b \n"
      "saddlp v5.8h, v1.16b \n"
      "saddlp v0.4s, v4.8h \n"
      "saddlp v1.4s, v5.8h \n"
      "add v16.4s, v16.4s, v0.4s \n"
      "add v17.4s, v17.4s, v1.4s \n"
      "b 6f \n"
      "6: \n"
      "mul v16.4s, v16.4s, v20.4s \n"
      "mul v17.4s, v17.4s, v20.4s \n"
      "st1 {v16.4s}, [x14], #16 \n"
      "st1 {v17.4s}, [x14], #16 \n"
      :
      : [ src_ic ] "r"(src_ic), [ pack_ic ] "r"(pack_ic), [ input_sum_r ] "r"(input_sum_r),
      [ src_stride ] "r"(src_stride), [ ic_4div ] "r"(ic_4div), [ ic_4res ] "r"(ic_4res), [ filter_zp ] "r"(filter_zp)
      : "x0", "x1", "x10", "x11", "x12", "x13", "x14", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17",
      "v20");
#else
    // Reference C path: accumulate per-row channel sums while re-packing
    // C4NUM channels at a time for C8NUM rows.
    int32_t tmp_sum_value[8] = {0};
    for (int ici = 0; ici < ic_4div; ici += C4NUM) {
      for (int i = 0; i < C8NUM; i++) {
        tmp_sum_value[i] += src_ic[0 + i * input_channel];
        tmp_sum_value[i] += src_ic[1 + i * input_channel];
        tmp_sum_value[i] += src_ic[2 + i * input_channel];
        tmp_sum_value[i] += src_ic[3 + i * input_channel];
        pack_ic[0 + i * C4NUM] = src_ic[0 + i * input_channel];
        pack_ic[1 + i * C4NUM] = src_ic[1 + i * input_channel];
        pack_ic[2 + i * C4NUM] = src_ic[2 + i * input_channel];
        pack_ic[3 + i * C4NUM] = src_ic[3 + i * input_channel];
      }
      src_ic += C4NUM;
      pack_ic += C4NUM * C8NUM;
    }
    // Leftover channels (input_channel not a multiple of C4NUM).
    for (int ici = ic_4div; ici < input_channel; ici += 1) {
      for (int i = 0; i < C8NUM; i++) {
        tmp_sum_value[i] += src_ic[i * input_channel];
        pack_ic[i * C4NUM] = src_ic[i * input_channel];
      }
      src_ic += 1;
      pack_ic += 1;
    }
    // Zero-fill the channel padding up to ic4.
    for (int ici = input_channel; ici < ic4; ici += 1) {
      for (int i = 0; i < C8NUM; i++) {
        pack_ic[i * C4NUM] = 0;
      }
      pack_ic += 1;
    }
    // Per-layer: one zero point scales every row sum.
    for (int i = 0; i < C8NUM; i++) {
      input_sum_r[i] = tmp_sum_value[i] * filter_zp;
    }
#endif
    src_r += input_channel * C8NUM;
    pack_r += ic4 * C8NUM;
  }
  // Tail: fewer than C8NUM spatial rows remain.
  if (hw_8div != plane_size) {
    memset(pack_r, 0, C8NUM * ic4);  // pre-zero so untouched rows/channels stay 0
    for (int hwi = hw_8div; hwi < plane_size; hwi += 1) {
      int32_t tmp_sum_value = 0;
      const int8_t *src_ic = src_r;
      int8_t *pack_ic = pack_r;
      for (int ici = 0; ici < ic_4div; ici += C4NUM) {
        tmp_sum_value += src_ic[0];
        tmp_sum_value += src_ic[1];
        tmp_sum_value += src_ic[2];
        tmp_sum_value += src_ic[3];
        pack_ic[0] = src_ic[0];
        pack_ic[1] = src_ic[1];
        pack_ic[2] = src_ic[2];
        pack_ic[3] = src_ic[3];
        src_ic += C4NUM;
        pack_ic += C4NUM * C8NUM;
      }
      for (int ici = ic_4div; ici < input_channel; ici += 1) {
        tmp_sum_value += src_ic[0];
        pack_ic[0] = src_ic[0];
        src_ic += 1;
        pack_ic += 1;
      }
      input_sum[hwi] = tmp_sum_value * filter_zp;
      src_r += input_channel;
      pack_r += C4NUM;
    }
    // Zero the input_sum entries for the padded rows beyond plane_size.
    for (int hwi = plane_size; hwi < UP_ROUND(plane_size, C8NUM); hwi++) {
      input_sum[hwi] = 0;
    }
  }
  return;
}
// Im2ColPackUnitInt8Opt: im2col + packing for one tile of `real_cal_num`
// output positions starting at `block_index`, for int8 convolution.
//
// Step 1 gathers the receptive field of each output position into
// `matmul_input` (rows = output positions, cols = kernel_plane * in_channel;
// out-of-bounds taps are simply skipped, so the caller is presumably expected
// to have zeroed `matmul_input` — confirm at call sites).
// Step 2 re-packs `matmul_input` into `packed_input` and computes the
// zero-point input sums, choosing the layout by `is_optimize`/`per_channel`.
void Im2ColPackUnitInt8Opt(const int8_t *input_data, int8_t *packed_input, int8_t *matmul_input, int real_cal_num,
                           int block_index, const int32_t *filter_zp, int32_t *input_sum,
                           const ConvParameter *conv_param, bool per_channel, bool is_optimize) {
  // input format : nhwc
  int kernel_h = conv_param->kernel_h_;
  int kernel_w = conv_param->kernel_w_;
  int stride_h = conv_param->stride_h_;
  int stride_w = conv_param->stride_w_;
  int pad_h = conv_param->pad_u_;
  int pad_w = conv_param->pad_l_;
  int dilation_h = conv_param->dilation_h_;
  int dilation_w = conv_param->dilation_w_;
  int in_channel = conv_param->input_channel_;
  int in_h = conv_param->input_h_;
  int in_w = conv_param->input_w_;
  int out_w = conv_param->output_w_;
  int kernel_plane = kernel_h * kernel_w;
  NNACL_CHECK_ZERO_RETURN(out_w);
  NNACL_CHECK_ZERO_RETURN(dilation_h);
  NNACL_CHECK_ZERO_RETURN(dilation_w);
  for (int i = 0; i < real_cal_num; i++) {
    int block_start = block_index + i;
    // Top-left input coordinate of this output position (may be negative
    // inside the padding region).
    int input_h = block_start / out_w * stride_h - pad_h;
    int input_w = block_start % out_w * stride_w - pad_w;
    int input_stride = input_h * in_w * in_channel + input_w * in_channel;
    // Clip the kernel window to the valid input area.
    int kh_s = MSMAX(0, UP_DIV(-input_h, dilation_h));
    int kh_e = MSMIN(kernel_h, UP_DIV(in_h - input_h, dilation_h));
    int kw_s = MSMAX(0, UP_DIV(-input_w, dilation_w));
    int kw_e = MSMIN(kernel_w, UP_DIV(in_w - input_w, dilation_w));
    if (dilation_w == 1 && dilation_h == 1) {
      // Fast path: contiguous channels of a whole kernel row can be copied
      // with one memcpy.
      for (int j = kh_s; j < kh_e; j++) {
        int input_y_stride = j * in_w * in_channel + input_stride;
        int input_x_stride = input_y_stride + kw_s * in_channel;
        int input_plane_offset = (j * kernel_w + kw_s) * in_channel + i * in_channel * kernel_plane;
        memcpy(matmul_input + input_plane_offset, input_data + input_x_stride, (kw_e - kw_s) * in_channel);
      }  // kernel_h loop
    } else {
      // Dilated kernel: taps are not contiguous, copy per tap.
      for (int j = kh_s; j < kh_e; j++) {
        int input_y_stride = j * dilation_h * in_w * in_channel + input_stride;
        for (int k = kw_s; k < kw_e; ++k) {
          int input_x_stride = input_y_stride + k * dilation_w * in_channel;
          int input_plane_offset = (j * kernel_w + k) * in_channel + i * in_channel * kernel_plane;
          memcpy(matmul_input + input_plane_offset, input_data + input_x_stride, in_channel);
        }
      }  // kernel_h loop
    }
  }  // tile num loop
  int deep = kernel_plane * in_channel;
  if (is_optimize) {
    // Optimized matmul layout: dedicated pre-pack with fused input-sum.
    if (per_channel) {
      Conv1x1PreOptPeroc(matmul_input, packed_input, input_sum, deep, conv_param->output_channel_, real_cal_num,
                         filter_zp, C8NUM * C8NUM);
    } else {
      Conv1x1PreOptPert(matmul_input, packed_input, input_sum, deep, real_cal_num, conv_param);
    }
  } else {
    // Generic 16x4 layout, then input-sum per channel or per layer.
    RowMajor2Row16x4MajorInt8(matmul_input, packed_input, real_cal_num, deep);
    if (per_channel) {
#ifdef ENABLE_ARM32
      PackInputSum16x4PerChannelArm32(packed_input, input_sum, filter_zp, real_cal_num, deep,
                                      conv_param->output_channel_);
#else
      PackInputSum16x4PerChannel(packed_input, input_sum, filter_zp, real_cal_num, deep, conv_param->output_channel_);
#endif
    } else {
      size_t hw4 = UP_ROUND(real_cal_num, C4NUM);
      size_t ic16 = UP_ROUND(deep, C16NUM);
      PackInputSum16x4PerLayer(packed_input, input_sum, conv_param->conv_quant_arg_.filter_quant_args_[0].zp_, hw4,
                               ic16);
    }
  }
}
// PackInputToC8Int8: converts NHWC int8 input into the NC8HW8 int16 layout
// (channels grouped in blocks of C8NUM; each block stored contiguously per
// spatial position), widening each int8 value to int16 and zero-filling the
// channel padding up to UP_ROUND(in_channel, C8NUM).
void PackInputToC8Int8(const int8_t *input_data, int16_t *packed_input, const ConvParameter *conv_param) {
  int in_batch = conv_param->input_batch_;
  int in_channel = conv_param->input_channel_;
  int in_h = conv_param->input_h_;
  int in_w = conv_param->input_w_;
  int ic8_round = UP_ROUND(in_channel, C8NUM);      // padded channel count
  int ic8 = in_channel / C8NUM * C8NUM;             // channels in full C8 blocks
  int in_plane = in_h * in_w;
  for (int b = 0; b < in_batch; b++) {
    int src_batch_offset = b * in_channel * in_plane;
    int dst_batch_offset = b * ic8_round * in_plane;
    for (int k = 0; k < in_plane; k++) {
      int src_plane_offset = src_batch_offset + k * in_channel;
      int dst_plane_offset = dst_batch_offset + k * C8NUM;
      // Full blocks of C8NUM channels.
      for (int i = 0; i < ic8; i += 8) {
        int src_c_offset = src_plane_offset + i;
        int dst_c_offset = dst_plane_offset + i * in_plane;
#ifdef ENABLE_ARM
        // Load 8 int8 values and sign-extend to 8 int16 lanes in one shot.
        vst1q_s16(packed_input + dst_c_offset, vmovl_s8(vld1_s8(input_data + src_c_offset)));
#else
        for (int j = 0; j < C8NUM; ++j) {
          (packed_input + dst_c_offset)[j] = (int16_t)(input_data + src_c_offset)[j];
        }
#endif
      }  // ic8_minus loop
      // Leftover channels (less than one full C8 block).
      int res_c = in_channel - ic8;
      int tmp_ic_offset = ic8 * in_plane;
      for (int l = 0; l < res_c; ++l) {
        int src_c_offset = src_plane_offset + ic8 + l;
        int dst_c_offset = dst_plane_offset + tmp_ic_offset + l;
        (packed_input + dst_c_offset)[0] = (int16_t)(input_data + src_c_offset)[0];
      }  // res ic loop
      // Zero the padding channels up to ic8_round.
      int res2 = ic8_round - in_channel;
      for (int l = 0; l < res2; ++l) {
        int dst_c_offset = dst_plane_offset + tmp_ic_offset + res_c + l;
        (packed_input + dst_c_offset)[0] = 0;
      }  // res ic loop
    }  // kh * kw loop
  }
}
// PackWeightToC8Int8: converts OHWI int8 weights into the C8-blocked int16
// layout used by the int8 conv kernels, subtracting each filter's zero point
// (per-channel when filter_arg_num_ > 1, otherwise the shared zp) so the
// packed weights are already zero-point-corrected.
void PackWeightToC8Int8(const int8_t *origin_weight_data, int16_t *packed_weight_data,
                        const ConvParameter *conv_param) {
  // origin weight format : ohwi
  int input_channel = conv_param->input_channel_;
  int ic8 = input_channel / C8NUM * C8NUM;          // channels in full C8 blocks
  int ic8_round = UP_ROUND(input_channel, C8NUM);   // padded channel count
  int output_channel = conv_param->output_channel_;
  QuantArg *filter_zp = conv_param->conv_quant_arg_.filter_quant_args_;
  int kernel_plane = conv_param->kernel_h_ * conv_param->kernel_w_;
  for (int k = 0; k < kernel_plane; k++) {
    int src_kernel_offset = k * input_channel;
    int dst_kernel_offset = k * C8NUM;
    for (int o = 0; o < output_channel; o++) {
      // Per-channel quantization uses one zp per output channel.
      int32_t zp;
      if (conv_param->conv_quant_arg_.filter_arg_num_ == 1) {
        zp = filter_zp[0].zp_;
      } else {
        zp = filter_zp[o].zp_;
      }
      int src_oc_offset = src_kernel_offset + o * kernel_plane * input_channel;
      int dst_oc_offset = dst_kernel_offset + o * ic8_round * kernel_plane;
      int i = 0;
      // Full blocks of C8NUM channels.
      for (; i < ic8; i += C8NUM) {
        int src_ic_offset = src_oc_offset + i;
        int dst_ic_offset = dst_oc_offset + i * kernel_plane;
#ifdef ENABLE_ARM64
        // Widen 8 int8 weights to int32, subtract zp, saturate back to int16.
        int8x8_t src_s8 = vld1_s8(origin_weight_data + src_ic_offset);
        int16x8_t src_s16 = vmovl_s8(src_s8);
        int16x4_t src1_s16 = vget_low_s16(src_s16);
        int16x4_t src2_s16 = vget_high_s16(src_s16);
        int32x4_t src1_s32 = vmovl_s16(src1_s16);
        int32x4_t src2_s32 = vmovl_s16(src2_s16);
        int32x4_t zp_s32 = vdupq_n_s32(zp);
        int32x4_t dst1_s32 = vsubq_s32(src1_s32, zp_s32);
        int32x4_t dst2_s32 = vsubq_s32(src2_s32, zp_s32);
        int16x4_t dst1_s16 = vqmovn_s32(dst1_s32);
        int16x4_t dst2_s16 = vqmovn_s32(dst2_s32);
        vst1_s16(packed_weight_data + dst_ic_offset, dst1_s16);
        vst1_s16(packed_weight_data + dst_ic_offset + 4, dst2_s16);
#else
        for (int ci = 0; ci < C8NUM; ++ci) {
          (packed_weight_data + dst_ic_offset + ci)[0] = (int16_t)((origin_weight_data + src_ic_offset + ci)[0] - zp);
        }
#endif
      }
      // Leftover channels go into the final, partially-filled C8 block.
      dst_oc_offset += ic8 * kernel_plane;
      for (; i < input_channel; i++) {
        int c8_block_rem = i % C8NUM;
        int src_ic_offset = src_oc_offset + i;
        int dst_ic_offset = dst_oc_offset + c8_block_rem;
        (packed_weight_data + dst_ic_offset)[0] = (int16_t)((origin_weight_data + src_ic_offset)[0] - zp);
      }
    }
  }
}
// Dispatches the 16x4 input-sum computation to the per-layer or per-channel
// implementation, depending on how many filter quant arguments exist.
void PackInputSum16x4Int8(const int8_t *input, int32_t *input_sum, const int32_t *filter_zp,
                          const ConvParameter *conv_param) {
  const size_t out_plane = conv_param->output_h_ * conv_param->output_w_;
  if (conv_param->conv_quant_arg_.filter_arg_num_ == 1) {
    /* single zero point shared by every output channel */
    const size_t row4 = UP_ROUND(out_plane, C4NUM);
    const size_t col16 = UP_ROUND(conv_param->input_channel_, C16NUM);
    PackInputSum16x4PerLayer(input, input_sum, conv_param->conv_quant_arg_.filter_quant_args_[0].zp_, row4, col16);
    return;
  }
#ifdef ENABLE_ARM32
  PackInputSum16x4PerChannelArm32(input, input_sum, filter_zp, out_plane, conv_param->input_channel_,
                                  conv_param->output_channel_);
#else
  PackInputSum16x4PerChannel(input, input_sum, filter_zp, out_plane, conv_param->input_channel_,
                             conv_param->output_channel_);
#endif
}
// Per-layer input-sum for the 16x4 packed matmul layout: for each row of the
// 4x16 tiles, sums all packed columns and scales the sum by the single
// filter zero point.
void PackInputSum16x4PerLayer(const int8_t *src, int32_t *dst, int32_t filter_zp, size_t row4, size_t col16) {
  /* normal matmul : 4x16 * 16x4 -> 4x4 */
#ifdef ENABLE_ARM
  PreSum4x16Int8Pert(src, dst, row4, col16, filter_zp);
#else
  for (size_t r = 0; r < row4; r++) {
    const size_t tile_row = r / C4NUM;
    const size_t row_in_tile = r % C4NUM;
    int32_t acc = 0;
    for (size_t c = 0; c < col16; c++) {
      const size_t tile_col = c / C16NUM;
      const size_t col_in_tile = c % C16NUM;
      acc += src[tile_row * C4NUM * col16 + tile_col * C16NUM * C4NUM + row_in_tile * C16NUM + col_in_tile];
    }
    dst[r] = acc * filter_zp;
  }
#endif
}
// Widens NHWC int8 depthwise input to int16 (subtracting the input zero
// point) into a destination whose per-pixel stride is rounded up to C4NUM
// channels.
void PackDepthwiseInt8Input(const int8_t *src, int16_t *dst, const ConvParameter *conv_param) {
  const int zp = conv_param->conv_quant_arg_.input_quant_args_[0].zp_;
  const int channel = conv_param->input_channel_;
  const int c4_round = UP_DIV(channel, C4NUM) * C4NUM;
  const int spatial = conv_param->input_h_ * conv_param->input_w_;
  for (int b = 0; b < conv_param->input_batch_; b++) {
    const int8_t *src_batch = src + b * spatial * channel;
    int16_t *dst_batch = dst + b * spatial * c4_round;
    for (int p = 0; p < spatial; p++) {
      const int8_t *src_pixel = src_batch + p * channel;
      int16_t *dst_pixel = dst_batch + p * c4_round;
      /* NOTE(review): the C4 padding lanes are not written here — presumably
         the caller zero-initializes dst; confirm before reusing buffers. */
      for (int c = 0; c < channel; c++) {
        dst_pixel[c] = (int16_t)(src_pixel[c] - zp);
      }
    }
  }
}
// Packs depthwise int8 weights into C8-blocked int16 form, subtracting the
// (possibly per-channel) filter zero point from each value.
void PackDepthwiseInt8Weight(const int8_t *origin_weight, int16_t *packed_weight_, int plane, int channel,
                             const ConvQuantArg *quant_qrg) {
  for (int c = 0; c < channel; c++) {
    const int zp = (quant_qrg->per_channel_ & FILTER_PER_CHANNEL) ? quant_qrg->filter_quant_args_[c].zp_
                                                                  : quant_qrg->filter_quant_args_[0].zp_;
    const int block = c / C8NUM;
    const int lane = c % C8NUM;
    const int8_t *src_c = origin_weight + c * plane;
    int16_t *dst_block = packed_weight_ + block * plane * C8NUM;
    for (int k = 0; k < plane; k++) {
      dst_block[k * C8NUM + lane] = (int16_t)(src_c[k] - zp);
    }
  }
}
// Packs deconv-depthwise int8 weights into C4-blocked int16 form, subtracting
// the (possibly per-channel) filter zero point from each value.
void PackDeconvDepthwiseInt8Weight(const int8_t *origin_weight, int16_t *packed_weight_, int plane, int channel,
                                   const ConvQuantArg *quant_qrg) {
  for (int c = 0; c < channel; c++) {
    const int zp = (quant_qrg->per_channel_ & FILTER_PER_CHANNEL) ? quant_qrg->filter_quant_args_[c].zp_
                                                                  : quant_qrg->filter_quant_args_[0].zp_;
    const int block = c / C4NUM;
    const int lane = c % C4NUM;
    const int8_t *src_c = origin_weight + c * plane;
    int16_t *dst_block = packed_weight_ + block * plane * C4NUM;
    for (int k = 0; k < plane; k++) {
      dst_block[k * C4NUM + lane] = (int16_t)(src_c[k] - zp);
    }
  }
}
// Converts NHWC int8 data to NHWC4 (channel count rounded up to a multiple of
// C4NUM, padding lanes zero-filled). When the channel count is already
// aligned, the layouts coincide and a single bulk copy suffices.
void PackNHWCToNHWC4Int8(const void *src, void *dst, int batch, int plane, int channel) {
  const int c4_channel = UP_DIV(channel, C4NUM) * C4NUM;
  if (channel % C4NUM == 0) {
    memcpy((int8_t *)dst, (const int8_t *)src, (size_t)batch * plane * channel);
    return;
  }
  for (int b = 0; b < batch; b++) {
    const int8_t *src_batch = (const int8_t *)src + b * plane * channel;
    int8_t *dst_batch = (int8_t *)dst + b * plane * c4_channel;
    for (int p = 0; p < plane; p++) {
      int8_t *dst_pixel = dst_batch + p * c4_channel;
      memcpy(dst_pixel, src_batch + p * channel, channel);
      for (int pad = channel; pad < c4_channel; ++pad) {
        dst_pixel[pad] = 0; /* zero the C4 padding lanes */
      }
    }
  }
}
// Converts NHWC4 int8 data (per-pixel stride rounded up to C4NUM channels)
// back to tightly-packed NHWC, dropping the padding lanes.
void PackNHWC4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel) {
  const int c4_channel = UP_DIV(channel, C4NUM) * C4NUM;
  if (channel % C4NUM == 0) {
    /* no padding lanes: layouts are identical */
    memcpy((int8_t *)dst, (const int8_t *)src, (size_t)batch * plane * channel);
    return;
  }
  for (int b = 0; b < batch; b++) {
    const int8_t *src_batch = (const int8_t *)src + b * plane * c4_channel;
    int8_t *dst_batch = (int8_t *)dst + b * plane * channel;
    for (int p = 0; p < plane; p++) {
      memcpy(dst_batch + p * channel, src_batch + p * c4_channel, channel);
    }
  }
}
// Converts NHWC int8 data to NHWC8 (per-pixel stride rounded up to C8NUM
// channels). NOTE(review): unlike the C4 variant, the padding lanes are NOT
// zeroed here — presumably the destination is pre-zeroed by the caller;
// confirm before relying on the pad values.
void PackNHWCToNHWC8Int8(const void *src, void *dst, int batch, int plane, int channel) {
  const int c8_channel = UP_DIV(channel, C8NUM) * C8NUM;
  if (channel % C8NUM == 0) {
    /* already aligned: one bulk copy */
    memcpy((int8_t *)dst, (const int8_t *)src, (size_t)batch * plane * channel);
    return;
  }
  for (int b = 0; b < batch; b++) {
    const int8_t *src_batch = (const int8_t *)src + b * plane * channel;
    int8_t *dst_batch = (int8_t *)dst + b * plane * c8_channel;
    for (int p = 0; p < plane; p++) {
      memcpy(dst_batch + p * c8_channel, src_batch + p * channel, channel);
    }
  }
}
// Converts NHWC8 int8 data (per-pixel stride rounded up to C8NUM channels)
// back to tightly-packed NHWC, dropping the padding lanes.
void PackNHWC8ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel) {
  const int c8_channel = UP_DIV(channel, C8NUM) * C8NUM;
  if (channel % C8NUM == 0) {
    /* no padding lanes: layouts are identical */
    memcpy((int8_t *)dst, (const int8_t *)src, (size_t)batch * plane * channel);
    return;
  }
  for (int b = 0; b < batch; b++) {
    const int8_t *src_batch = (const int8_t *)src + b * plane * c8_channel;
    int8_t *dst_batch = (int8_t *)dst + b * plane * channel;
    for (int p = 0; p < plane; p++) {
      memcpy(dst_batch + p * channel, src_batch + p * c8_channel, channel);
    }
  }
}
// Converts NCHW int8 data to NC8HW8: channels are grouped in blocks of C8NUM,
// and within each block the channel index becomes the innermost dimension.
// Padding lanes of the last block are left untouched.
void PackNCHWToNC8HW8Int8(const void *src, void *dst, int batch, int plane, int channel) {
  const int c8_blocks = UP_DIV(channel, C8NUM);
  const int8_t *in = (const int8_t *)src;
  int8_t *out = (int8_t *)dst;
  for (int b = 0; b < batch; b++) {
    const int8_t *src_batch = in + b * plane * channel;
    int8_t *dst_batch = out + b * plane * c8_blocks * C8NUM;
    for (int c = 0; c < channel; c++) {
      const int block = c / C8NUM;
      const int lane = c % C8NUM;
      const int8_t *src_c = src_batch + c * plane;
      int8_t *dst_block = dst_batch + block * plane * C8NUM;
      for (int k = 0; k < plane; k++) {
        dst_block[k * C8NUM + lane] = src_c[k];
      }
    }
  }
}
// Converts NC4HW4 int8 data back to tightly-packed NHWC. All C4 blocks
// except the last are copied whole; the last block may carry fewer than
// C4NUM real channels.
void PackNC4HW4ToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel) {
  const int c4 = UP_DIV(channel, C4NUM);
  const int8_t *in = (const int8_t *)src;
  int8_t *out = (int8_t *)dst;
  for (int b = 0; b < batch; b++) {
    const int8_t *src_batch = in + b * plane * c4 * C4NUM;
    int8_t *dst_batch = out + b * plane * channel;
    for (int k = 0; k < plane; k++) {
      const int8_t *src_pixel = src_batch + k * C4NUM;
      int8_t *dst_pixel = dst_batch + k * channel;
      /* full C4 blocks (all but the last) */
      for (int c = 0; c + 1 < c4; c++) {
        const int8_t *src_block = src_pixel + c * plane * C4NUM;
        int8_t *dst_block = dst_pixel + c * C4NUM;
        dst_block[0] = src_block[0];
        dst_block[1] = src_block[1];
        dst_block[2] = src_block[2];
        dst_block[3] = src_block[3];
      }
      /* last block: may hold fewer than C4NUM real channels */
      const int tail = channel - (c4 - 1) * C4NUM;
      const int8_t *src_tail = src_pixel + (c4 - 1) * plane * C4NUM;
      int8_t *dst_tail = dst_pixel + (c4 - 1) * C4NUM;
      for (int i = 0; i < tail; i++) {
        dst_tail[i] = src_tail[i];
      }
    }
  }
}
// Converts NHWC int8 data to C8HWN8: channel blocks of C8NUM become the
// outermost dimension, then spatial position, then batch, with the lane
// inside the C8 block innermost.
void PackNHWCToC8HWN8Int8(const void *src, void *dst, int batch, int plane, int channel) {
  const int8_t *in = (const int8_t *)src;
  int8_t *out = (int8_t *)dst;
  for (int n = 0; n < batch; n++) {
    for (int hw = 0; hw < plane; hw++) {
      const int8_t *src_pixel = in + (n * plane + hw) * channel;
      for (int c = 0; c < channel; c++) {
        const int block = c / C8NUM;
        const int lane = c % C8NUM;
        out[((block * plane + hw) * batch + n) * C8NUM + lane] = src_pixel[c];
      }
    }
  }
}
// Transposes int8 data from NCHW to NHWC layout (per-batch channel/plane
// transpose). Assumes `src` and `dst` do not overlap.
void PackNCHWToNHWCInt8(const void *src, void *dst, int batch, int plane, int channel) {
  const int8_t *in = (const int8_t *)src;
  int8_t *out = (int8_t *)dst;
  const int frame = plane * channel;
  for (int n = 0; n < batch; n++) {
    const int8_t *in_batch = in + n * frame;
    int8_t *out_batch = out + n * frame;
    for (int hw = 0; hw < plane; hw++) {
      for (int c = 0; c < channel; c++) {
        out_batch[hw * channel + c] = in_batch[c * plane + hw];
      }
    }
  }
}
// PackNHWCToNCHWInt8: per-batch transpose of int8 data from NHWC to NCHW.
// The interior is processed in 8x8 tiles (ARM paths use trn-based in-register
// byte transposes; the scalar fallback defines the reference semantics), and
// the row/column tails are handled element-wise.
void PackNHWCToNCHWInt8(const void *src, void *dst, int batches, int plane, int channel) {
  int hw8 = plane / C8NUM * C8NUM;   // spatial positions in full 8-tiles
  int c8 = channel / C8NUM * C8NUM;  // channels in full 8-tiles
  int batch = plane * channel;
  for (int n = 0; n < batches; n++) {
    const int8_t *src_batch = (const int8_t *)src + n * batch;
    int8_t *dst_batch = (int8_t *)dst + n * batch;
    int hw = 0;
    for (; hw < hw8; hw += C8NUM) {
      int c = 0;
      // 8x8 tile transpose for the aligned interior.
      for (; c < c8; c += C8NUM) {
        const int8_t *src_ptr = src_batch + hw * channel + c;
        int8_t *dst_ptr = dst_batch + c * plane + hw;
#ifdef ENABLE_ARM64
        size_t srcStride = channel * sizeof(int8_t);
        size_t dstStride = plane * sizeof(int8_t);
        // 8x8 byte transpose via trn1/trn2 at 8-, 16- and 32-bit granularity.
        asm volatile(
          "mov x10, %[src_ptr]\n"
          "mov x11, %[dst_ptr]\n"
          "ld1 {v0.8b}, [x10], %[srcStride]\n"
          "ld1 {v1.8b}, [x10], %[srcStride]\n"
          "ld1 {v2.8b}, [x10], %[srcStride]\n"
          "ld1 {v3.8b}, [x10], %[srcStride]\n"
          "trn1 v4.8b, v0.8b, v1.8b\n"
          "trn2 v5.8b, v0.8b, v1.8b\n"
          "trn1 v6.8b, v2.8b, v3.8b\n"
          "trn2 v7.8b, v2.8b, v3.8b\n"
          "ld1 {v0.8b}, [x10], %[srcStride]\n"
          "ld1 {v1.8b}, [x10], %[srcStride]\n"
          "ld1 {v2.8b}, [x10], %[srcStride]\n"
          "ld1 {v3.8b}, [x10], %[srcStride]\n"
          "trn1 v8.4h, v4.4h, v6.4h\n"
          "trn2 v9.4h, v4.4h, v6.4h\n"
          "trn1 v10.4h, v5.4h, v7.4h\n"
          "trn2 v11.4h, v5.4h, v7.4h\n"
          "trn1 v4.8b, v0.8b, v1.8b\n"
          "trn2 v5.8b, v0.8b, v1.8b\n"
          "trn1 v6.8b, v2.8b, v3.8b\n"
          "trn2 v7.8b, v2.8b, v3.8b\n"
          "trn1 v12.4h, v4.4h, v6.4h\n"
          "trn2 v13.4h, v4.4h, v6.4h\n"
          "trn1 v14.4h, v5.4h, v7.4h\n"
          "trn2 v15.4h, v5.4h, v7.4h\n"
          "trn1 v0.2s, v8.2s, v12.2s\n"
          "trn2 v4.2s, v8.2s, v12.2s\n"
          "trn1 v1.2s, v10.2s, v14.2s\n"
          "trn2 v5.2s, v10.2s, v14.2s\n"
          "trn1 v2.2s, v9.2s, v13.2s\n"
          "trn2 v6.2s, v9.2s, v13.2s\n"
          "trn1 v3.2s, v11.2s, v15.2s\n"
          "trn2 v7.2s, v11.2s, v15.2s\n"
          "st1 {v0.8b}, [x11], %[dstStride]\n"
          "st1 {v1.8b}, [x11], %[dstStride]\n"
          "st1 {v2.8b}, [x11], %[dstStride]\n"
          "st1 {v3.8b}, [x11], %[dstStride]\n"
          "st1 {v4.8b}, [x11], %[dstStride]\n"
          "st1 {v5.8b}, [x11], %[dstStride]\n"
          "st1 {v6.8b}, [x11], %[dstStride]\n"
          "st1 {v7.8b}, [x11], %[dstStride]\n"
          :
          :
          [ dst_ptr ] "r"(dst_ptr), [ src_ptr ] "r"(src_ptr), [ srcStride ] "r"(srcStride), [ dstStride ] "r"(dstStride)
          : "x10", "x11", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14",
          "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
          "v30", "v31");
#elif ENABLE_ARM32
        size_t srcStride = channel * sizeof(int8_t);
        size_t dstStride = plane * sizeof(int8_t);
        // Same 8x8 byte transpose using the vtrn.8/.16/.32 ladder.
        asm volatile(
          "mov r10, %[src_ptr]\n"
          "mov r12, %[dst_ptr]\n"
          "vld1.8 {d0}, [r10], %[srcStride]\n"
          "vld1.8 {d1}, [r10], %[srcStride]\n"
          "vld1.8 {d2}, [r10], %[srcStride]\n"
          "vld1.8 {d3}, [r10], %[srcStride]\n"
          "vld1.8 {d4}, [r10], %[srcStride]\n"
          "vld1.8 {d5}, [r10], %[srcStride]\n"
          "vld1.8 {d6}, [r10], %[srcStride]\n"
          "vld1.8 {d7}, [r10], %[srcStride]\n"
          "vtrn.8 d0, d1\n"
          "vtrn.8 d2, d3\n"
          "vtrn.8 d4, d5\n"
          "vtrn.8 d6, d7\n"
          "vtrn.16 d0, d2\n"
          "vtrn.16 d1, d3\n"
          "vtrn.16 d4, d6\n"
          "vtrn.16 d5, d7\n"
          "vtrn.32 d0, d4\n"
          "vtrn.32 d1, d5\n"
          "vtrn.32 d2, d6\n"
          "vtrn.32 d3, d7\n"
          "vst1.8 {d0}, [r12], %[dstStride]\n"
          "vst1.8 {d1}, [r12], %[dstStride]\n"
          "vst1.8 {d2}, [r12], %[dstStride]\n"
          "vst1.8 {d3}, [r12], %[dstStride]\n"
          "vst1.8 {d4}, [r12], %[dstStride]\n"
          "vst1.8 {d5}, [r12], %[dstStride]\n"
          "vst1.8 {d6}, [r12], %[dstStride]\n"
          "vst1.8 {d7}, [r12], %[dstStride]\n"
          :
          :
          [ dst_ptr ] "r"(dst_ptr), [ src_ptr ] "r"(src_ptr), [ srcStride ] "r"(srcStride), [ dstStride ] "r"(dstStride)
          : "r10", "r12", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14",
          "q15");
#else
        // Scalar reference: plain 8x8 transpose.
        for (int tr = 0; tr < C8NUM; tr++) {
          for (int tc = 0; tc < C8NUM; tc++) {
            dst_ptr[tc * plane + tr] = src_ptr[tr * channel + tc];
          }
        }
#endif
      }
      // Channel tail for this row-block (channel not a multiple of 8).
      for (; c < channel; c++) {
        const int8_t *src_ptr = src_batch + hw * channel + c;
        int8_t *dst_ptr = dst_batch + c * plane + hw;
        for (size_t i = 0; i < C8NUM; i++) {
          dst_ptr[i] = src_ptr[i * channel];
        }
      }
    }
    // Spatial tail (plane not a multiple of 8): element-wise transpose.
    for (; hw < plane; hw++) {
      const int8_t *src_ptr = src_batch + hw * channel;
      int8_t *dst_ptr = dst_batch + hw;
      for (size_t i = 0; i < channel; i++) {
        dst_ptr[i * plane] = src_ptr[i];
      }
    }
  }
  return;
}
|
mindspore-ai/mindspore
|
mindspore/lite/src/common/config_file.h
|
<filename>mindspore/lite/src/common/config_file.h<gh_stars>1000+
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_COMMON_CONFIG_FILE_H_
#define MINDSPORE_LITE_SRC_COMMON_CONFIG_FILE_H_
#include <limits.h>
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <fstream>
#include <string>
#include <map>
#include <vector>
#include <utility>
#include "src/common/utils.h"
#include "src/common/log_adapter.h"
#include "ir/dtype/type_id.h"
namespace mindspore {
namespace lite {
// Size limit applied when reading config-file content (exact usage is in the
// corresponding .cc — confirm there before changing).
constexpr int MAX_CONFIG_FILE_LENGTH = 1024;
// Name of the config-file section that carries the execution plan entries.
#define CONFIG_FILE_EXECUTION_PLAN "execution_plan"
// Parses section `section_name` of config `file` into key/value pairs stored
// in `section_info`; returns an int status code.
int GetSectionInfoFromConfigFile(const std::string &file, const std::string &section_name,
                                 std::map<std::string, std::string> *section_info);
// Converts the raw execution-plan strings in `config_infos` into a
// string -> TypeId mapping written to `data_type_plan`.
void ParserExecutionPlan(const std::map<std::string, std::string> *config_infos,
                         std::map<std::string, TypeId> *data_type_plan);
}  // namespace lite
}  // namespace mindspore
#endif // MINDSPORE_LITE_SRC_COMMON_CONFIG_FILE_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/optimizer/graph_kernel/expanders/expander_factory.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_EXPANDERS_EXPANDER_FACTORY_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_EXPANDERS_EXPANDER_FACTORY_H_
#include <unordered_map>
#include <functional>
#include <string>
#include <memory>
#include "backend/optimizer/graph_kernel/expanders/utils.h"
namespace mindspore::graphkernel::expanders {
// Process-wide registry mapping op names to OpExpander creator callbacks.
class OpExpanderFactory {
 public:
  using RegFunc = std::function<std::shared_ptr<OpExpander>()>;

  // Meyers-singleton accessor.
  static OpExpanderFactory &Instance() {
    static OpExpanderFactory instance;
    return instance;
  }

  // Creates a fresh expander for `op`, or returns nullptr when none is
  // registered under that name.
  std::shared_ptr<OpExpander> GetExpander(const std::string &op) {
    auto iter = creators.find(op);
    if (iter == creators.end()) {
      return nullptr;
    }
    auto expander_ptr = iter->second();
    expander_ptr->op_ = op;
    return expander_ptr;
  }

  // Registers (or replaces) the creator callback for `op`.
  void Register(const std::string &op, const RegFunc &func) { creators[op] = func; }

  ~OpExpanderFactory() = default;

 private:
  std::unordered_map<std::string, RegFunc> creators;
};
// Registration helper: constructing an instance registers `func` as the
// creator for expander `name` in the global OpExpanderFactory. Intended to be
// instantiated at namespace scope via the OP_EXPANDER_REGISTER macro.
class OpExpanderRegister {
 public:
  OpExpanderRegister(const std::string &name, const OpExpanderFactory::RegFunc &func) : func_(func) {
    OpExpanderFactory::Instance().Register(name, func);
  }
  ~OpExpanderRegister() = default;

 private:
  // for pclint-plus
  OpExpanderFactory::RegFunc func_;
};
#define OP_EXPANDER_REGISTER(name, cls) \
static const OpExpanderRegister g_##cls##_expander_reg( \
name, []() -> std::shared_ptr<OpExpander> { return std::make_shared<cls>(); })
} // namespace mindspore::graphkernel::expanders
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_EXPANDERS_EXPANDER_FACTORY_H_
|
mindspore-ai/mindspore
|
mindspore/lite/src/train/train_export.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_TRAIN_TRAIN_EXPORT_H_
#define MINDSPORE_LITE_SRC_TRAIN_TRAIN_EXPORT_H_
#include <string>
#include <vector>
#include <memory>
#include <map>
#include <unordered_map>
#include "schema/inner/model_generated.h"
#include "src/lite_kernel.h"
#include "src/lite_model.h"
#include "include/train/train_cfg.h"
namespace mindspore {
#ifndef _STUB
namespace schema {
struct CNodeT;
struct TensorT;
struct MetaGraphT;
} // namespace schema
#endif
namespace lite {
// Serializes a set of executed kernels/tensors into a flatbuffer MetaGraphT
// and writes it to `file_name_` (used to export trained Lite models).
class TrainExport {
public:
explicit TrainExport(const std::string file_name) : file_name_(file_name) {}
virtual ~TrainExport();
// Build the meta graph from the executed kernels and the source model,
// tagging quantized nodes according to `quant_type`.
int ExportNet(const std::vector<mindspore::kernel::LiteKernel *> &kernels,
const std::vector<mindspore::lite::Tensor *> &tensors, const std::vector<std::string> &output_names,
const Model *model, QuantizationType quant_type);
// Initialize the empty meta graph with the model name and schema version.
int ExportInit(const std::string model_name, std::string version);
// Flush the assembled meta graph to `file_name_`.
int SaveToFile();
// Install the backbone-tensor-id -> head-tensor-id connection map.
void set_connect(const std::unordered_map<size_t, size_t> &map) { connect_ = map; }
// Parse an existing flatbuffer model from `buf` into meta_graph_.
int LoadModel(void *buf, size_t buf_size);
int AddTransformNode();
int TrainModelFusion();
protected:
// Copy/convert a tensor's payload into the serialized byte vector.
virtual std::vector<uint8_t> CreateData(const mindspore::lite::Tensor *tensor);
private:
std::string file_name_;
schema::MetaGraphT *meta_graph_ = nullptr;
std::vector<size_t> out_idx_;
std::map<size_t, size_t> remap_;
std::unordered_map<size_t, size_t> connect_; // connection map (backbone tensor id-> head tensor id)
bool IsNodeNonDepend(const std::unique_ptr<schema::CNodeT> &node, const std::vector<size_t> &sinked_tensor_idxes);
int TopologicalSort();
void PrepareRemap(int offset);
Model::Node *FindNode(const mindspore::kernel::LiteKernel *kernel, const Model *model);
std::unique_ptr<schema::TensorT> CreateTensor(const Tensor *tensor, schema::Tensor *scTensor);
std::unique_ptr<schema::CNodeT> CreateCNode(const mindspore::kernel::LiteKernel *kernel,
std::vector<uint32_t> inputIndex, std::vector<uint32_t> outputIndex,
const Model *model);
int IsInputTensor(const schema::TensorT &t);
int CreateAndAddCNode(const mindspore::kernel::LiteKernel *kernel, std::vector<uint32_t> inputIndex,
std::vector<uint32_t> outputIndex, const Model *model);
std::unique_ptr<schema::CNodeT> CreateTransformNode(std::vector<uint32_t> inputIndex,
std::vector<uint32_t> outputIndex, size_t id);
std::unique_ptr<schema::TensorT> CreateTransformTensor(size_t id);
std::unique_ptr<schema::TensorT> CreateTransformConst(size_t last_id);
int AddTransform();
bool NeedQuantization(const mindspore::lite::Tensor *tensor);
virtual int QuantTensorData(schema::TensorT *dest_tensor, const mindspore::lite::Tensor *src_tensor);
mindspore::schema::QuantType GetNodeQuantType(const mindspore::kernel::LiteKernel *kernel);
void TagQuantizedNodes();
// NOTE(review): quant_type_ has no default initializer and the ctor does not
// set it; it appears to be meaningful only after ExportNet() — confirm.
QuantizationType quant_type_;
};
}; // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_TRAIN_TRAIN_EXPORT_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/runtime/framework/actor/control_flow/stack_actor.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_STACK_ACTOR_H_
#define MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_STACK_ACTOR_H_
#include <vector>
#include <string>
#include <memory>
#include <unordered_map>
#include <stack>
#include "runtime/framework/actor/actor_common.h"
#include "runtime/framework/actor/abstract_actor.h"
namespace mindspore {
namespace runtime {
// Stack actor is used to record those device actors that need additional storage in recursive scenes.
class StackActor : public MemoryAwareActor {
public:
StackActor(const std::string &name, const std::vector<KernelWithIndex> ¶meters)
: AbstractActor(name, KernelTransformType::kStackActor, nullptr), formal_parameters_(parameters) {
device_contexts_.resize(parameters.size());
}
~StackActor() override = default;
void Init() override;
// The stack actor run when receive the real parameter nodes.
void CollectRealParameter(const AnfNodePtr &node, size_t index, size_t position,
OpContext<DeviceTensor> *const context);
private:
friend class GraphScheduler;
// Formal parameters record the input front-end node, these nodes may be parameter, kernel, call node.
std::vector<KernelWithIndex> formal_parameters_;
// The backend parameter is used to save the backend node corresponding to the device tensor in the stack.
// When these device tensors are used as output, they need to be placed in the node of the result arrow,
// so these nodes need to be saved.
std::vector<KernelWithIndex> backend_parameters_;
// Input data.
std::unordered_map<uuids::uuid *, std::unordered_map<size_t, KernelWithIndex>> input_nodes_;
// The input data records that the stack actor is copied from the input nodes and needs to be stored in the
// device tensor in the stack. This part of the device tensor does not belong to any node, and it will be
// cleaned up directly after the stack is popped.
std::unordered_map<uuids::uuid *, std::unordered_map<size_t, std::stack<DeviceTensor *>>> input_data_;
};
using StackActorPtr = std::shared_ptr<StackActor>;
} // namespace runtime
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_STACK_ACTOR_H_
|
mindspore-ai/mindspore
|
include/api/context.h
|
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_CONTEXT_H
#define MINDSPORE_INCLUDE_API_CONTEXT_H
#include <string>
#include <memory>
#include <vector>
#include <map>
#include "include/api/types.h"
#include "include/api/dual_abi_helper.h"
namespace mindspore {
// Hardware backend targeted by a DeviceInfoContext.
enum DeviceType {
kCPU = 0,
kGPU,
kKirinNPU,
kAscend910,
kAscend310,
// add new type here
kInvalidDeviceType = 100,
};
class Allocator;
class Delegate;
class DeviceInfoContext;
/// \brief Context is used to store environment variables during execution.
class MS_API Context {
public:
Context();
~Context() = default;
/// \brief Set the number of threads at runtime. Only valid for Lite.
///
/// \param[in] thread_num the number of threads at runtime.
void SetThreadNum(int32_t thread_num);
/// \brief Get the current thread number setting. Only valid for Lite.
///
/// \return The current thread number setting.
int32_t GetThreadNum() const;
/// \brief Set the thread affinity to CPU cores. Only valid for Lite.
///
/// \param[in] mode: 0: no affinities, 1: big cores first, 2: little cores first
void SetThreadAffinity(int mode);
/// \brief Get the thread affinity of CPU cores. Only valid for Lite.
///
/// \return Thread affinity to CPU cores. 0: no affinities, 1: big cores first, 2: little cores first
int GetThreadAffinityMode() const;
/// \brief Set the thread lists to CPU cores. Only valid for Lite.
///
/// \note If core_list and mode are set by SetThreadAffinity at the same time, the core_list is effective, but the
/// mode is not effective.
///
/// \param[in] core_list: a vector of thread core lists.
void SetThreadAffinity(const std::vector<int> &core_list);
/// \brief Get the thread lists of CPU cores. Only valid for Lite.
///
/// \return core_list: a vector of thread core lists.
std::vector<int32_t> GetThreadAffinityCoreList() const;
/// \brief Set the status whether to perform model inference or training in parallel. Only valid for Lite.
///
/// \param[in] is_parallel: true, parallel; false, not in parallel.
void SetEnableParallel(bool is_parallel);
/// \brief Get the status whether to perform model inference or training in parallel. Only valid for Lite.
///
/// \return Bool value that indicates whether in parallel.
bool GetEnableParallel() const;
/// \brief Set Delegate to access third-party AI framework. Only valid for Lite.
///
/// \param[in] delegate Pointer to the custom delegate.
void SetDelegate(const std::shared_ptr<Delegate> &delegate);
/// \brief Get the delegate of the third-party AI framework. Only valid for Lite.
///
/// \return Pointer to the custom delegate.
std::shared_ptr<Delegate> GetDelegate() const;
/// \brief Get a mutable reference of DeviceInfoContext vector in this context. Only MindSpore Lite supports
/// heterogeneous scenarios with multiple members in the vector.
///
/// \return Mutable reference of DeviceInfoContext vector in this context.
std::vector<std::shared_ptr<DeviceInfoContext>> &MutableDeviceInfo();
private:
// PImpl storage: keeps the public header ABI-stable. Because data_ is a
// shared_ptr and the implicit copy operations are used, copies of a Context
// share the same underlying Data.
struct Data;
std::shared_ptr<Data> data_;
};
/// \brief DeviceInfoContext defines different device contexts.
class MS_API DeviceInfoContext : public std::enable_shared_from_this<DeviceInfoContext> {
public:
struct Data;
DeviceInfoContext();
virtual ~DeviceInfoContext() = default;
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
virtual enum DeviceType GetDeviceType() const = 0;
/// \brief A similar function to RTTI is provided when the -fno-rtti compilation option is turned on, which converts
/// DeviceInfoContext to a shared pointer of type T, and returns nullptr if the conversion fails.
///
/// \note T must be default-constructible: a temporary T() is created solely to query its device type.
///
/// \param T Type
/// \return A pointer of type T after conversion. If the conversion fails, it will be nullptr.
template <class T>
std::shared_ptr<T> Cast() {
static_assert(std::is_base_of<DeviceInfoContext, T>::value, "Wrong cast type.");
if (GetDeviceType() != T().GetDeviceType()) {
return nullptr;
}
// Safe: the type was checked above, and shared_from_this keeps ownership.
return std::static_pointer_cast<T>(shared_from_this());
}
/// \brief obtain provider's name
///
/// \return provider's name.
std::string GetProvider() const;
/// \brief set provider's name.
///
/// \param[in] provider define the provider's name.
void SetProvider(const std::string &provider);
/// \brief obtain provider's device type.
///
/// \return provider's device type.
std::string GetProviderDevice() const;
/// \brief set provider's device type.
///
/// \param[in] device define the provider's device type.EG: CPU.
void SetProviderDevice(const std::string &device);
/// \brief set memory allocator.
///
/// \param[in] allocator define the memory allocator which can be defined by user.
void SetAllocator(const std::shared_ptr<Allocator> &allocator);
/// \brief obtain memory allocator.
///
/// \return memory allocator.
std::shared_ptr<Allocator> GetAllocator() const;
protected:
// PImpl storage holding all device options; accessed by the derived classes'
// setters/getters defined out of line.
std::shared_ptr<Data> data_;
};
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the CPU. This option is only valid
/// for MindSpore Lite.
// CPU backend options: only the float16-inference toggle is configurable here.
class MS_API CPUDeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kCPU; };
/// \brief Set enables to perform the float16 inference
///
/// \param[in] is_fp16 Enable float16 inference or not.
void SetEnableFP16(bool is_fp16);
/// \brief Get enables to perform the float16 inference
///
/// \return Whether enable float16 inference.
bool GetEnableFP16() const;
};
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the NPU. This option is only valid
/// for MindSpore Lite.
// Kirin NPU backend options: only the frequency (power/performance level) is configurable.
class MS_API KirinNPUDeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kKirinNPU; };
/// \brief Set the NPU frequency.
///
/// \param[in] frequency Can be set to 1 (low power consumption), 2 (balanced), 3 (high performance), 4 (extreme
/// performance), default as 3.
void SetFrequency(int frequency);
/// \brief Get the NPU frequency.
///
/// \return NPU frequency
int GetFrequency() const;
};
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the GPU.
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the GPU.
class MS_API GPUDeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kGPU; };
/// \brief Set device id.
///
/// \param[in] device_id The device id.
void SetDeviceID(uint32_t device_id);
/// \brief Get the device id.
///
/// \return The device id.
uint32_t GetDeviceID() const;
/// \brief Set the precision mode.
///
/// \param[in] precision_mode Optional "origin", "fp16". "origin" is set as default.
inline void SetPrecisionMode(const std::string &precision_mode);
/// \brief Get the precision mode.
///
/// \return The precision mode.
inline std::string GetPrecisionMode() const;
/// \brief Set enables to perform the float16 inference
///
/// \param[in] is_fp16 Enable float16 inference or not.
void SetEnableFP16(bool is_fp16);
/// \brief Get enables to perform the float16 inference
///
/// \return Whether enable float16 inference.
bool GetEnableFP16() const;
private:
// Dual-ABI bridge (see dual_abi_helper.h): the public std::string overloads
// above forward to these std::vector<char> variants so the exported symbols
// stay independent of the caller's STL string ABI.
void SetPrecisionMode(const std::vector<char> &precision_mode);
std::vector<char> GetPrecisionModeChar() const;
};
// Inline dual-ABI forwarders: convert std::string <-> std::vector<char> at the
// call site so only ABI-stable symbols cross the library boundary.
void GPUDeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
SetPrecisionMode(StringToChar(precision_mode));
}
std::string GPUDeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the Ascend910. This option is
/// invalid for MindSpore Lite.
// Ascend910 backend options: only the device id is configurable.
class MS_API Ascend910DeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kAscend910; };
/// \brief Set device id.
///
/// \param[in] device_id The device id.
void SetDeviceID(uint32_t device_id);
/// \brief Get the device id.
///
/// \return The device id.
uint32_t GetDeviceID() const;
};
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the Ascend310. This option is
/// invalid for MindSpore Lite.
class MS_API Ascend310DeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kAscend310; };
/// \brief Set device id.
///
/// \param[in] device_id The device id.
void SetDeviceID(uint32_t device_id);
/// \brief Get the device id.
///
/// \return The device id.
uint32_t GetDeviceID() const;
/// \brief Set AIPP configuration file path.
///
/// \param[in] cfg_path AIPP configuration file path.
inline void SetInsertOpConfigPath(const std::string &cfg_path);
/// \brief Get AIPP configuration file path.
///
/// \return AIPP configuration file path.
inline std::string GetInsertOpConfigPath() const;
/// \brief Set format of model inputs.
///
/// \param[in] format Optional "NCHW", "NHWC", etc.
inline void SetInputFormat(const std::string &format);
/// \brief Get format of model inputs.
///
/// \return The format of model inputs.
inline std::string GetInputFormat() const;
/// \brief Set shape of model inputs.
///
/// \param[in] shape e.g. "input_op_name1: 1,2,3,4;input_op_name2: 4,3,2,1".
inline void SetInputShape(const std::string &shape);
/// \brief Get shape of model inputs.
///
/// \return The shape of model inputs.
inline std::string GetInputShape() const;
/// \brief Set shape of model inputs.
///
/// \param[in] shape e.g. {{1, {1,2,3,4}}, {2, {4,3,2,1}}} means the first input shape 1,2,3,4 and the second input
/// shape 4,3,2,1.
void SetInputShapeMap(const std::map<int, std::vector<int>> &shape);
/// \brief Get shape of model inputs.
///
/// \return The shape of model inputs.
std::map<int, std::vector<int>> GetInputShapeMap() const;
/// \brief Set the candidate dynamic batch sizes.
///
/// \param[in] dynamic_batch_size The candidate batch sizes.
void SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size);
/// \brief Get the configured dynamic batch sizes rendered as a string.
// NOTE(review): the exact string format is produced by GetDynamicBatchSizeChar()
// in the implementation — confirm against the .cc before documenting it here.
inline std::string GetDynamicBatchSize() const;
/// \brief Set type of model outputs.
///
/// \param[in] output_type FP32, UINT8 or FP16, default as FP32.
void SetOutputType(enum DataType output_type);
/// \brief Get type of model outputs.
///
/// \return The set type of model outputs.
enum DataType GetOutputType() const;
/// \brief Set precision mode of model.
///
/// \param[in] precision_mode Optional "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" and
/// "allow_mix_precision", "force_fp16" is set as default
inline void SetPrecisionMode(const std::string &precision_mode);
/// \brief Get precision mode of model.
///
/// \return The set type of model outputs
inline std::string GetPrecisionMode() const;
/// \brief Set op select implementation mode.
///
/// \param[in] op_select_impl_mode Optional "high_performance" and "high_precision", "high_performance" is set as
/// default.
inline void SetOpSelectImplMode(const std::string &op_select_impl_mode);
/// \brief Get op select implementation mode.
///
/// \return The set op select implementation mode.
inline std::string GetOpSelectImplMode() const;
/// \brief Set the fusion switch configuration file path.
///
/// \param[in] cfg_path Fusion switch configuration file path.
inline void SetFusionSwitchConfigPath(const std::string &cfg_path);
/// \brief Get the fusion switch configuration file path.
///
/// \return Fusion switch configuration file path.
inline std::string GetFusionSwitchConfigPath() const;
/// \brief Set the buffer optimize mode.
///
/// \param[in] buffer_optimize_mode Optional "l1_optimize", "l2_optimize", "off_optimize" or "l1_and_l2_optimize",
/// default as "l2_optimize"
inline void SetBufferOptimizeMode(const std::string &buffer_optimize_mode);
/// \brief Get the buffer optimize mode.
///
/// \return The buffer optimize mode.
inline std::string GetBufferOptimizeMode() const;
private:
// Dual-ABI bridge (see dual_abi_helper.h): the inline std::string overloads
// above forward to these std::vector<char> variants so the exported symbols
// stay independent of the caller's STL string ABI.
void SetInsertOpConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetInsertOpConfigPathChar() const;
void SetInputFormat(const std::vector<char> &format);
std::vector<char> GetInputFormatChar() const;
void SetInputShape(const std::vector<char> &shape);
std::vector<char> GetInputShapeChar() const;
std::vector<char> GetDynamicBatchSizeChar() const;
void SetPrecisionMode(const std::vector<char> &precision_mode);
std::vector<char> GetPrecisionModeChar() const;
void SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode);
std::vector<char> GetOpSelectImplModeChar() const;
void SetFusionSwitchConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetFusionSwitchConfigPathChar() const;
void SetBufferOptimizeMode(const std::vector<char> &buffer_optimize_mode);
std::vector<char> GetBufferOptimizeModeChar() const;
};
// Inline dual-ABI forwarders for Ascend310DeviceInfo: convert
// std::string <-> std::vector<char> at the call site so only ABI-stable
// symbols cross the library boundary.
void Ascend310DeviceInfo::SetInsertOpConfigPath(const std::string &cfg_path) {
SetInsertOpConfigPath(StringToChar(cfg_path));
}
std::string Ascend310DeviceInfo::GetInsertOpConfigPath() const { return CharToString(GetInsertOpConfigPathChar()); }
void Ascend310DeviceInfo::SetInputFormat(const std::string &format) { SetInputFormat(StringToChar(format)); }
std::string Ascend310DeviceInfo::GetInputFormat() const { return CharToString(GetInputFormatChar()); }
void Ascend310DeviceInfo::SetInputShape(const std::string &shape) { SetInputShape(StringToChar(shape)); }
std::string Ascend310DeviceInfo::GetInputShape() const { return CharToString(GetInputShapeChar()); }
std::string Ascend310DeviceInfo::GetDynamicBatchSize() const { return CharToString(GetDynamicBatchSizeChar()); }
void Ascend310DeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
SetPrecisionMode(StringToChar(precision_mode));
}
std::string Ascend310DeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }
void Ascend310DeviceInfo::SetOpSelectImplMode(const std::string &op_select_impl_mode) {
SetOpSelectImplMode(StringToChar(op_select_impl_mode));
}
std::string Ascend310DeviceInfo::GetOpSelectImplMode() const { return CharToString(GetOpSelectImplModeChar()); }
void Ascend310DeviceInfo::SetFusionSwitchConfigPath(const std::string &cfg_path) {
SetFusionSwitchConfigPath(StringToChar(cfg_path));
}
std::string Ascend310DeviceInfo::GetFusionSwitchConfigPath() const {
return CharToString(GetFusionSwitchConfigPathChar());
}
void Ascend310DeviceInfo::SetBufferOptimizeMode(const std::string &buffer_optimize_mode) {
SetBufferOptimizeMode(StringToChar(buffer_optimize_mode));
}
std::string Ascend310DeviceInfo::GetBufferOptimizeMode() const { return CharToString(GetBufferOptimizeModeChar()); }
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_CONTEXT_H
|
mindspore-ai/mindspore
|
mindspore/ccsrc/frontend/optimizer/irpass/incorporate_call.h
|
<gh_stars>1000+
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_INCORPORATE_CALL_H_
#define MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_INCORPORATE_CALL_H_
#include <vector>
#include <algorithm>
#include <unordered_map>
#include <utility>
#include <memory>
#include "frontend/optimizer/irpass.h"
#include "frontend/optimizer/optimizer.h"
#include "frontend/optimizer/anf_visitor.h"
#include "ir/func_graph.h"
#include "ir/func_graph_cloner.h"
#include "frontend/operator/ops.h"
namespace mindspore {
namespace opt {
namespace irpass {
namespace internal {
// Clones a func graph so its output becomes a tuple of
// (original_output, extra parameters). Results are cached per
// (graph, nargs, xs_first) so repeated transforms reuse the same clone.
class CallOutputTransform {
 public:
  CallOutputTransform() : cache_() {}
  ~CallOutputTransform() = default;
  // Return a clone of `fg` extended with `nargs` extra parameters.
  // xs_first == true appends them at the back; otherwise they are inserted
  // at the front (used when the trailing argument is a monad).
  FuncGraphPtr operator()(const FuncGraphPtr &fg, size_t nargs, bool xs_first) {
    // operator[] default-constructs the inner map on first use; the previous
    // find()-then-insert dance plus repeated cache[key] lookups performed
    // several redundant hash probes for the same keys.
    auto &cache = cache_[fg];
    const auto key = std::make_pair(nargs, xs_first);
    auto iter = cache.find(key);
    if (iter == cache.end()) {
      FuncGraphPtr new_fg = TransformableClone(fg, std::make_shared<TraceTransform>("call"));
      std::vector<AnfNodePtr> new_items;
      new_items.push_back(new_fg->output());
      if (xs_first) {
        for (size_t i = 0; i < nargs; i++) {
          new_items.push_back(new_fg->add_parameter());
        }
      } else {
        for (size_t i = 0; i < nargs; i++) {
          new_items.push_back(new_fg->InsertFrontParameter());
        }
      }
      // The new output tuple is (old_output, new_param_0, ..., new_param_n-1).
      new_fg->set_output(new_fg->NewCNode(new_items));
      iter = cache.emplace(key, new_fg).first;
    }
    return iter->second;
  }

 private:
  std::unordered_map<FuncGraphPtr, std::unordered_map<std::pair<size_t, bool>, FuncGraphPtr, PairHasher>> cache_;
};
} // namespace internal
// {{G, Xs}, Ys}
// Rewrites {{G, Xs}, Ys} into {G', Xs, Ys} where G' is G cloned with Ys
// incorporated as extra parameters (see CallOutputTransform).
class IncorporateCall : public AnfVisitor {
public:
IncorporateCall() : call_output_transform_() {}
~IncorporateCall() override = default;
AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
Reset();
if (!node->isa<CNode>() || node->func_graph() == nullptr) {
return nullptr;
}
auto &inputs = node->cast<CNodePtr>()->inputs();
if (inputs[0] == nullptr || !inputs[0]->isa<CNode>()) {
return nullptr;
}
// Visit the inner call {G, Xs}; fills fg_, fg_call_cnode_ and Xs_.
AnfVisitor::Visit(inputs[0]);
if (fg_ == nullptr) {
return nullptr;
}
auto xs_size = Xs_.size();
auto ys_size = inputs.size() - 1;
// If the last Xs argument is a monad it must stay trailing, so the Ys
// parameters are inserted at the front of the clone instead of appended.
bool xs_first = true;
if ((xs_size > 0) && (Xs_[xs_size - 1]->abstract() != nullptr) &&
(Xs_[xs_size - 1]->abstract()->isa<abstract::AbstractMonad>())) {
xs_first = false;
}
auto new_fg = call_output_transform_(fg_, ys_size, xs_first);
// Rebuild the argument list in the order matching the clone's parameters.
std::vector<AnfNodePtr> args;
args.push_back(NewValueNode(new_fg));
if (xs_first) {
if (xs_size > 0) {
(void)args.insert(args.end(), Xs_.begin(), Xs_.end());
}
if (ys_size > 0) {
(void)args.insert(args.end(), inputs.begin() + 1, inputs.end());
}
} else {
if (ys_size > 0) {
(void)args.insert(args.end(), inputs.begin() + 1, inputs.end());
}
if (xs_size > 0) {
(void)args.insert(args.end(), Xs_.begin(), Xs_.end());
}
}
auto new_node = node->func_graph()->NewCNode(args);
new_node->set_abstract(node->abstract());
// Check if the another only usage of {G, Xs} is UpdateState{s, {G, Xs}}, if yes, replace
// UpdateState{s, {G, Xs}} with UpdateState{s, new_node};
const auto &manager = fg_->manager();
MS_EXCEPTION_IF_NULL(manager);
auto &node_users_map = manager->node_users();
auto it = node_users_map.find(fg_call_cnode_);
if (it != node_users_map.end()) {
AnfNodePtr update_state_node = nullptr;
auto &node_users = it->second;
// Exactly two users: this node plus (possibly) one UpdateState.
if (node_users.size() == 2) {
for (auto &node_user : node_users) {
if (IsPrimitiveCNode(node_user.first, prim::kPrimUpdateState)) {
update_state_node = node_user.first;
}
}
}
if (update_state_node != nullptr) {
auto update_state_cnode = update_state_node->cast<CNodePtr>();
// double check;
const size_t attach_index = 2;
if (update_state_cnode->input(attach_index) == fg_call_cnode_) {
MS_LOG(DEBUG) << "Replace UpdateState node: " << update_state_cnode->DebugString(2)
<< ", input 2 with: " << new_node->DebugString();
manager->SetEdge(update_state_cnode, attach_index, new_node);
}
}
}
return new_node;
}
void Visit(const CNodePtr &cnode) override {
// {G, Xs}: only match when input 0 is a FuncGraph value node.
if (cnode->size() < 1 || !IsValueNode<FuncGraph>(cnode->input(0))) {
return;
}
auto &inputs = cnode->inputs();
fg_ = GetValueNode<FuncGraphPtr>(inputs[0]);
fg_call_cnode_ = cnode;
(void)std::copy(inputs.begin() + 1, inputs.end(), std::back_inserter(Xs_));
}
// Clear per-node state before each match attempt.
void Reset() {
Xs_.clear();
fg_ = nullptr;
fg_call_cnode_ = nullptr;
}
private:
FuncGraphPtr fg_;
CNodePtr fg_call_cnode_{nullptr};
std::vector<AnfNodePtr> Xs_{};
internal::CallOutputTransform call_output_transform_;
};
// {{{prim::kPrimSwitch, X, G1, G2}, Xs}, Ys}
// Rewrites {{{prim::kPrimSwitch, X, G1, G2}, Xs}, Ys} so both switch branches
// are cloned with Ys incorporated as extra parameters.
class IncorporateCallSwitch : public AnfVisitor {
public:
IncorporateCallSwitch() : call_output_transform_() {}
~IncorporateCallSwitch() override = default;
AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override {
Reset();
if (!node->isa<CNode>() || node->func_graph() == nullptr) {
return nullptr;
}
// {{...}, Ys}
auto &inputs = node->cast<CNodePtr>()->inputs();
if (inputs[0] == nullptr || !inputs[0]->isa<CNode>()) {
return nullptr;
}
// {{...}, Xs}
auto &inputs_x = inputs[0]->cast<CNodePtr>()->inputs();
if (inputs_x[0] == nullptr || !inputs_x[0]->isa<CNode>()) {
return nullptr;
}
// {prim::kPrimSwitch, X, G1, G2}: fills x_, g1_, g2_ via the Visit overloads.
AnfVisitor::Match(prim::kPrimSwitch, {IsNode, IsValueNode<FuncGraph>, IsValueNode<FuncGraph>})(inputs_x[0]);
if (g2_ == nullptr) {
return nullptr;
}
auto fg = node->func_graph();
auto xs_size = inputs_x.size() - 1;
auto ys_size = inputs.size() - 1;
// If the last Xs argument is a monad it must stay trailing, so the Ys
// parameters are inserted at the front of both clones instead of appended.
bool xs_first = true;
if ((xs_size > 0) && (inputs_x[xs_size]->abstract() != nullptr) &&
(inputs_x[xs_size]->abstract()->isa<abstract::AbstractMonad>())) {
xs_first = false;
}
// Both branches must be transformed identically so their signatures match.
auto new_g1 = call_output_transform_(g1_, ys_size, xs_first);
auto new_g2 = call_output_transform_(g2_, ys_size, xs_first);
auto sw_node = fg->NewCNode({NewValueNode(prim::kPrimSwitch), x_, NewValueNode(new_g1), NewValueNode(new_g2)});
std::vector<AnfNodePtr> args{sw_node};
if (xs_first) {
if (xs_size > 0) {
(void)args.insert(args.end(), inputs_x.begin() + 1, inputs_x.end());
}
if (ys_size > 0) {
(void)args.insert(args.end(), inputs.begin() + 1, inputs.end());
}
} else {
if (ys_size > 0) {
(void)args.insert(args.end(), inputs.begin() + 1, inputs.end());
}
if (xs_size > 0) {
(void)args.insert(args.end(), inputs_x.begin() + 1, inputs_x.end());
}
}
auto new_node = fg->NewCNode(args);
new_node->set_abstract(node->abstract());
return new_node;
}
// Match callback order: the first visited node is the condition X,
// subsequent value nodes are dispatched to Visit(ValueNodePtr) below.
void Visit(const AnfNodePtr &node) override {
if (x_ == nullptr) {
x_ = node;
return;
}
AnfVisitor::Visit(node);
}
// Captures the two branch graphs in visiting order: first G1, then G2.
void Visit(const ValueNodePtr &vnode) override {
auto g = GetValueNode<FuncGraphPtr>(vnode);
if (g1_ == nullptr) {
g1_ = g;
} else {
g2_ = g;
}
}
// Clear per-node state before each match attempt.
void Reset() {
x_ = nullptr;
g1_ = nullptr;
g2_ = nullptr;
}
private:
AnfNodePtr x_{nullptr};
FuncGraphPtr g1_{nullptr}, g2_{nullptr};
internal::CallOutputTransform call_output_transform_;
};
} // namespace irpass
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_FRONTEND_OPTIMIZER_IRPASS_INCORPORATE_CALL_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/session/kernel_build_client.h
|
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_SESSION_KERNEL_BUILD_CLIENT_H_
#define MINDSPORE_CCSRC_BACKEND_SESSION_KERNEL_BUILD_CLIENT_H_
#include <vector>
#include <string>
#include <cstring>
#include <cstdlib>
#include <memory>
#include <mutex>
#include "common/duplex_pipe.h"
#include "utils/log_adapter.h"
#include "utils/ms_context.h"
namespace mindspore {
namespace kernel {
// Replace every occurrence of `replace` in *dest with the single character new_char.
void ReplaceStr(std::string *dest, const std::string &replace, char new_char);
// Read-buffer size used when harvesting the server script path from popen().
constexpr inline static int kBufferSize = 4096;
// Fallback interpreter command when the context has no python path configured.
constexpr inline static auto kEnv = "python";
// The TAG as prefix of real command from remote.
constexpr inline static auto kTag = "[~]";
// Resolve the Python executable path from the MindSpore context; fall back to
// the plain "python" command when no context or no configured path exists.
static std::string GetPyExe() {
// get real python executable path
auto ms_context = MsContext::GetInstance();
if (ms_context == nullptr) {
return kEnv;
}
auto env = ms_context->get_param<std::string>(MS_CTX_PYTHON_EXE_PATH);
if (env.empty()) {
return kEnv;
}
return env;
}
// Abstract client that talks to an out-of-process kernel-build server over a
// duplex pipe. Derived classes supply the interpreter (GetEnv) and the server
// script (GetScript). All request/response traffic is serialized via mutex_.
class KernelBuildClient {
public:
// Send Finish request to server
constexpr inline static auto kFinish = "FINISH";
constexpr inline static auto kAkgStart = "AKG/START";
constexpr inline static auto kAkgData = "AKG/DATA";
constexpr inline static auto kAkgAttr = "AKG/ATTR";
constexpr inline static auto kAkgWait = "AKG/WAIT";
// Receive the response from server
constexpr inline static auto kAck = "ACK";
constexpr inline static auto kErr = "ERR";
constexpr inline static auto kTrue = "True";
constexpr inline static auto kSuccess = "Success";
// Revert \n, \r, [space].
// NOTE(review): kCR is declared but Response() only reverts kLF and kSP —
// confirm the server never escapes carriage returns.
constexpr inline static auto kLF = "[LF]";
constexpr inline static auto kCR = "[CR]";
constexpr inline static auto kSP = "[SP]";
virtual std::string GetEnv() = 0;
virtual std::string GetScript() = 0;
// Launch the server process over the pipe; idempotent via init_.
void Open() {
if (!init_) {
// Exception's thrown if open failed
if (dp_->Open({GetEnv(), GetScript()}, true) != -1) {
dp_->SetFinalizeCallback(std::make_shared<std::function<void()>>([this]() { Close(); }));
init_ = true;
}
}
}
// Tear down the pipe; idempotent via init_.
void Close() {
if (init_) {
dp_->Close();
init_ = false;
}
}
// Send a request and fetch its response
std::string SendRequest(std::string data) {
// Hold the lock across the request/response pair so concurrent callers
// cannot interleave their traffic on the shared pipe.
std::lock_guard<std::mutex> locker(mutex_);
Request(data);
return Response();
}
void Request(std::string req) {
if (!init_) {
MS_LOG(EXCEPTION) << "Try to send request before Open()";
}
MS_LOG(DEBUG) << "\t[" << req << "]";
*dp_ << req;
}
// Read one response, strip everything up to and including the kTag marker,
// and unescape [LF]/[SP] placeholders (except for the short status words).
std::string Response() {
if (!init_) {
MS_LOG(EXCEPTION) << "Try to get response before Open()";
}
std::string res;
*dp_ >> res;
// Filter out the interference
if (res.empty()) {
MS_LOG(EXCEPTION) << "Response is empty";
}
auto start = res.find(kTag);
if (start == std::string::npos) {
MS_LOG(EXCEPTION) << "Response seems incorrect, res: " << res;
}
auto pos = start + std::strlen(kTag);
if (pos > res.size()) { // Safe check for codedex
MS_LOG(EXCEPTION) << "Response seems incorrect, res(" << res.size() << "): {" << res << "}, start: " << start;
}
res = res.substr(pos);
// Revert the line feed and space
if (res != kSuccess && res != kAck && res != kErr && res != kTrue) {
ReplaceStr(&res, kLF, '\n');
ReplaceStr(&res, kSP, ' ');
}
MS_LOG(DEBUG) << "\t[" << res << "]";
return res;
}
// Run AKG building.
bool AkgStart(int process_num, int wait_time);
bool AkgSendAttr(const std::string &attr);
bool AkgSendData(const std::vector<std::string> &jsons);
bool AkgWait();
protected:
KernelBuildClient() : init_(false), dp_(std::make_shared<DuplexPipe>()) {}
virtual ~KernelBuildClient() = default;
private:
// Support multi-thread.
std::mutex mutex_;
// True once the pipe to the server process is open.
bool init_;
std::shared_ptr<DuplexPipe> dp_;
};
// Resolve the server script path. When MS_CTX_KERNEL_BUILD_SERVER_DIR is set,
// return <server_dir><server_script>; otherwise run "cmd_env cmd_script" and
// parse the "[~]<path>" line it prints (see kTag). Raises via
// MS_LOG(EXCEPTION) on any failure.
static std::string GetScriptFilePath(const std::string &cmd_env, const std::string &cmd_script,
                                     const std::string &server_script) {
  auto ms_context = MsContext::GetInstance();
  MS_EXCEPTION_IF_NULL(ms_context);
  auto server_dir = ms_context->get_param<std::string>(MS_CTX_KERNEL_BUILD_SERVER_DIR);
  if (!server_dir.empty()) {
    return server_dir + server_script;
  }
  std::string cmd = cmd_env;
  (void)cmd.append(1, ' ').append(cmd_script);
  // RAII closer so the pipe is released even when MS_LOG(EXCEPTION) throws
  // below (the original code leaked fpipe on every exception path).
  struct PipeCloser {
    FILE *fp;
    ~PipeCloser() {
      if (fp != nullptr) {
        (void)pclose(fp);
      }
    }
  } closer{popen(cmd.c_str(), "r")};
  FILE *fpipe = closer.fp;
  if (fpipe == nullptr) {
    MS_LOG(EXCEPTION) << "popen failed, errno: " << errno;
  }
  bool start = false;
  std::string result;
  char buf[kBufferSize];
  while (std::fgets(buf, sizeof(buf), fpipe) != nullptr) {
    auto len = std::strlen(buf);
    if (len == 0 || len >= kBufferSize) {
      // Safe check for codedex
      // Should never reach here
      MS_LOG(EXCEPTION) << "fgets() failed, len: " << len << ", errno: " << errno;
    }
    if (std::strncmp(buf, kTag, std::strlen(kTag)) == 0) {
      start = true;
    }
    // Filter with 'kTAG' and '\n': collect from the tagged line up to its newline.
    if (start) {
      bool line_end = buf[len - 1] == '\n';
      result.append(buf, line_end ? len - 1 : len);
      if (line_end) {
        break;
      }
    }
  }
  const std::string py_suffix = ".py";
  if (result.empty() || result.rfind(py_suffix) != (result.length() - py_suffix.length())) {
    MS_LOG(EXCEPTION) << "py file seems incorrect, result: {" << result << "}";
  }
  if (strlen(kTag) > result.size()) {  // Safe check for codedex
    MS_LOG(EXCEPTION) << "result size seems incorrect, result(" << result.size() << "): {" << result << "}";
  }
  result = result.substr(strlen(kTag));
  MS_LOG(DEBUG) << "result: " << result;
  return result;
}
// Singleton build client that talks to the Ascend kernel build server.
class AscendKernelBuildClient : public KernelBuildClient {
 public:
  // Server configure
  // Python one-liner that prints "[~]<path>" of the server module file.
  constexpr inline static auto kGetPathScript =
    "-c "
    "\""
    "import pkgutil;"
    "path = pkgutil"
    ".get_loader(\\\"mindspore._extends.remote.kernel_build_server_ascend\\\")"  // Server module name
    ".get_filename();"
    "print('[~]' + path)"
    "\"";
  constexpr inline static auto kServerScript = "kernel_build_server_ascend.py";
  // Receive the response from server
  constexpr inline static auto kFailed = "-1";
  // Send server info. query to server
  constexpr inline static auto kFormat = "FORMAT";
  constexpr inline static auto kSupport = "SUPPORT";
  // Meyers singleton: opened on first use, closed on program exit.
  static AscendKernelBuildClient &Instance() {
    static AscendKernelBuildClient instance;
    return instance;
  }
  std::string GetEnv() override { return GetPyExe(); }
  std::string GetScript() override {
    auto env = GetPyExe();
    return GetScriptFilePath(env, kGetPathScript, kServerScript);
  }
  // Run TBE building.
  std::string TbeSendJob(const std::string &job_json_str);
  AscendKernelBuildClient(const AscendKernelBuildClient &) = delete;
  AscendKernelBuildClient &operator=(const AscendKernelBuildClient &) = delete;
  AscendKernelBuildClient(AscendKernelBuildClient &&) = delete;
  AscendKernelBuildClient &operator=(AscendKernelBuildClient &&) = delete;
 private:
  AscendKernelBuildClient() { Open(); }
  ~AscendKernelBuildClient() override { Close(); }
};
// Singleton build client that talks to the AKG kernel build server.
class AkgKernelBuildClient : public KernelBuildClient {
 public:
  // Server configure
  // Python one-liner that prints "[~]<path>" of the server module file.
  constexpr inline static auto kGetPathScript =
    "-c "
    "\""
    "import pkgutil;"
    "path = pkgutil"
    ".get_loader(\\\"mindspore._extends.remote.kernel_build_server_akg\\\")"  // Server module name
    ".get_filename();"
    "print('[~]' + path)"
    "\"";
  constexpr inline static auto kServerScript = "kernel_build_server_akg.py";
  // Meyers singleton: opened on first use, closed on program exit.
  static AkgKernelBuildClient &Instance() {
    static AkgKernelBuildClient instance;
    return instance;
  }
  std::string GetEnv() override { return GetPyExe(); }
  std::string GetScript() override {
    auto env = GetPyExe();
    return GetScriptFilePath(env, kGetPathScript, kServerScript);
  }
  AkgKernelBuildClient(const AkgKernelBuildClient &) = delete;
  AkgKernelBuildClient &operator=(const AkgKernelBuildClient &) = delete;
  AkgKernelBuildClient(AkgKernelBuildClient &&) = delete;
  AkgKernelBuildClient &operator=(AkgKernelBuildClient &&) = delete;
 private:
  AkgKernelBuildClient() { Open(); }
  ~AkgKernelBuildClient() override { Close(); }
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_SESSION_KERNEL_BUILD_CLIENT_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/runtime/framework/actor/copy_actor.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_COPY_ACTOR_H_
#define MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_COPY_ACTOR_H_
#include <vector>
#include <string>
#include <memory>
#include <utility>
#include <unordered_map>
#include "runtime/framework/actor/actor_common.h"
#include "runtime/framework/actor/memory_aware_actor.h"
#include "runtime/hardware/device_context.h"
#include "runtime/framework/device_tensor_store.h"
namespace mindspore {
namespace runtime {
using mindspore::device::DeviceContext;
// The copy actor is used to receive the device tensors and control info to copy data between input device tensor and
// output device tensor. The processing flow is RunOpData/RunOpControl -> CheckRunningCondition -> SendMemoryAllocReq
// -> OnMemoryAllocFinish -> Copy -> SendMemoryFreeReq -> SendOutput.
// Actor that copies data from an input device tensor to an output device
// tensor once its running conditions are met (see file-level comment above).
class CopyActor : public MemoryAwareActor {
 public:
  CopyActor(const std::string &name, const AID &memory_manager_aid)
      : MemoryAwareActor(name, KernelTransformType::kCopyActor, nullptr, memory_manager_aid), output_(nullptr) {}
  ~CopyActor() override = default;
  void Init() override;
  // The memory related operation interface.
  void SendMemoryAllocReq(OpContext<DeviceTensor> *const context) override;
  void SendMemoryFreeReq(OpContext<DeviceTensor> *const context) override;
  // The copy processing after memory alloc finished.
  void OnMemoryAllocFinish(OpContext<DeviceTensor> *const context) override;
  const DeviceTensorPtr &output() const { return output_; }
 protected:
  void Run(OpContext<DeviceTensor> *const context) override;
  void UpdateOutputData(OpData<DeviceTensor> *const output_data, const DataArrow *data_arrow,
                        OpContext<DeviceTensor> *const context) override;
 private:
  friend class GraphScheduler;
  // Fetch the device tensor for copy.
  void FetchDeviceTensor(OpContext<DeviceTensor> *const context);
  // The input device tensor is saved from the input data or fetched by device_tensor_store_keys_.
  std::vector<DeviceTensor *> input_device_tensor_;
  // The output device tensor is saved from the output or fetched by device_tensor_store_keys_.
  std::vector<DeviceTensor *> output_device_tensor_;
  // The output is created in the copy actor build, so can't be the raw pointer.
  DeviceTensorPtr output_;
};
using CopyActorPtr = std::shared_ptr<CopyActor>;
} // namespace runtime
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_COPY_ACTOR_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/optimizer/graph_kernel/arithmetic_simplify.h
|
<gh_stars>1000+
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ARITHMETIC_SIMPLIFY_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ARITHMETIC_SIMPLIFY_H_
#include <memory>
#include <vector>
#include <unordered_map>
#include <string>
#include "backend/optimizer/common/optimizer.h"
#include "ir/func_graph.h"
#include "backend/optimizer/graph_kernel/model/lite_graph.h"
namespace mindspore::graphkernel {
class PatternTree;
using PatternTreePtr = std::shared_ptr<PatternTree>;
// Graph-kernel optimization pass: simplifies arithmetic expressions in lite
// graphs via pattern rewriting and constant folding.
class ArithmeticSimplify : public opt::Pass {
 public:
  ArithmeticSimplify() : Pass("arithmetic_simplify") {}
  ~ArithmeticSimplify() override = default;
  // Pass entry point (opt::Pass interface).
  bool Run(const FuncGraphPtr &func_graph) override;
 private:
  // Apply pattern-based algebraic rewrites to the lite graph.
  bool DoArithmeticTrans(const inner::LiteGraphPtr &litegraph);
  // Fold constant subexpressions in the lite graph.
  bool DoConstantFold(const inner::LiteGraphPtr &litegraph);
  // Op name -> candidate rewrite patterns.
  std::unordered_map<std::string, std::vector<PatternTreePtr>> expressions_map_;
};
using ArithmeticSimplifyPtr = std::shared_ptr<ArithmeticSimplify>;
} // namespace mindspore::graphkernel
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ARITHMETIC_SIMPLIFY_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_build.h
|
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_TBE_TBE_KERNEL_BUILD_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_TBE_TBE_KERNEL_BUILD_H_
#include <string>
#include <unordered_map>
#include <memory>
#include <map>
#include <utility>
#include <vector>
#include <nlohmann/json.hpp>
#include "ir/dtype.h"
#include "backend/kernel_compiler/kernel.h"
#include "backend/kernel_compiler/oplib/oplib.h"
#include "backend/kernel_compiler/tbe/tbe_adapter.h"
namespace mindspore {
namespace kernel {
// Static helpers that compute input/output byte sizes from TBE kernel json
// descriptions (single op and fused-op variants). Not instantiable.
class TbeKernelBuild {
 public:
  // Single-op variant: fills the size lists from kernel_json for anf_node.
  static bool GetIOSize(const nlohmann::json &kernel_json, std::vector<size_t> *input_size_list,
                        std::vector<size_t> *output_size_list, const AnfNodePtr &anf_node);
  // Fused-op variant: fills the size lists from the fusion op list and its output nodes.
  static bool GetIOSize(const nlohmann::json &fusion_op_list, const std::vector<AnfNodePtr> &output_nodes,
                        std::vector<size_t> *input_size_list, std::vector<size_t> *output_size_list);
  static void CalInputSize(const nlohmann::json &fusion_op_list, std::vector<size_t> *input_size_list);
  static bool CalOutputSize(const nlohmann::json &fusion_op_list,
                            const std::vector<mindspore::AnfNodePtr> &output_nodes,
                            std::vector<size_t> *output_size_list);
 private:
  TbeKernelBuild() = default;
  ~TbeKernelBuild() = default;
  // Compute the byte size of one tensor described by desc.
  static size_t GetIOSizeImpl(const nlohmann::json &desc);
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_TBE_TBE_KERNEL_BUILD_H_
|
mindspore-ai/mindspore
|
mindspore/lite/src/delegate/tensorrt/tensorrt_subgraph.h
|
<filename>mindspore/lite/src/delegate/tensorrt/tensorrt_subgraph.h
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_TENSORRT_SUB_GTAPH_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_TENSORRT_SUB_GTAPH_
#include <utility>
#include <set>
#include <string>
#include <vector>
#include <memory>
#include "include/api/kernel.h"
#include "src/delegate/tensorrt/tensorrt_runtime.h"
#include "src/delegate/tensorrt/tensorrt_utils.h"
#include "include/api/context.h"
namespace mindspore::lite {
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
// A delegate subgraph executed by TensorRT: builds an engine from the wrapped
// ops, then runs inference through an execution context.
class TensorRTSubGraph : public kernel::Kernel {
 public:
  TensorRTSubGraph(std::vector<TensorRTOp *> ops, const std::vector<mindspore::MSTensor> &inputs,
                   const std::vector<mindspore::MSTensor> &outputs, const mindspore::Context *ctx,
                   std::shared_ptr<GPUDeviceInfo> device_info, TensorRTRuntime *runtime, bool support_hw_resize)
      : kernel::Kernel(inputs, outputs, nullptr, ctx),
        all_ops_(std::move(ops)),
        device_info_(device_info),
        runtime_(runtime) {
    // Op types whose weight inputs get TensorRT-specific handling.
    // (PowFusion appeared twice in the original initializer; std::set ignores
    // duplicates, so the redundant entry was removed — behavior unchanged.)
    trt_specific_weight_nodes_ = {
      schema::PrimitiveType_Conv2DFusion, schema::PrimitiveType_ReduceFusion, schema::PrimitiveType_Transpose,
      schema::PrimitiveType_Gather,       schema::PrimitiveType_Reshape,      schema::PrimitiveType_PowFusion,
      schema::PrimitiveType_AddFusion,    schema::PrimitiveType_DivFusion,    schema::PrimitiveType_SubFusion,
      schema::PrimitiveType_MatMul,       schema::PrimitiveType_Eltwise,      schema::PrimitiveType_ScaleFusion,
      schema::PrimitiveType_MulFusion,    schema::PrimitiveType_StridedSlice, schema::PrimitiveType_PadFusion,
      schema::PrimitiveType_FullConnection};
    if (!support_hw_resize) {
      // -1 disables dynamic H/W resize (see input_hw_index_ below).
      input_hw_index_ = -1;
    }
  }
  ~TensorRTSubGraph() override;
  int Prepare() override;
  int Execute() override;
  int ReSize();
  int BuildTensorRTGraph();
  int Init();
 private:
  int BuildEngine();
  int SetDeviceConfig();
  bool SupportFP16();
  nvinfer1::ITensor *SetTensorRTNetworkInput(const mindspore::MSTensor &in_tensor);
  ITensorHelper FindTensorRTInputs(TensorRTOp *cur_op, const mindspore::MSTensor &in_tensor);
  int MarkOutputs();
  std::vector<TensorRTOp *> all_ops_{};
  // subgraph input nodes.
  std::vector<TensorRTOp *> in_ops_{};
  // subgraph output nodes.
  std::vector<TensorRTOp *> out_ops_{};
  void **tensor_bindings_{nullptr};
  std::shared_ptr<GPUDeviceInfo> device_info_{nullptr};
  TensorRTRuntime *runtime_{nullptr};  // all subgraph in one delegate share a runtime_
  std::set<mindspore::schema::PrimitiveType> trt_specific_weight_nodes_;
  // save in/out tensor name for subgraph isolate.
  std::vector<std::string> trt_in_tensor_name_;
  std::vector<std::string> trt_out_tensor_name_;
  nvinfer1::INetworkDefinition *network_{nullptr};
  nvinfer1::IBuilderConfig *config_{nullptr};
  nvinfer1::ICudaEngine *engine_{nullptr};
  nvinfer1::IExecutionContext *trt_context_{nullptr};
  nvinfer1::IOptimizationProfile *profile_{nullptr};
  int input_batchsize_index_{0};
  int output_batchsize_index_{0};
  // -1 means don't support hw resize
  int input_hw_index_{0};
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_TENSORRT_SUB_GTAPH_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/kernel_compiler/cpu/rl/buffer_get_cpu_kernel.h
|
<gh_stars>1000+
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_BUFFER_GET_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_BUFFER_GET_CPU_KERNEL_H_
#include <memory>
#include <string>
#include <vector>
#include <algorithm>
#include "backend/kernel_compiler/cpu/cpu_kernel.h"
#include "backend/kernel_compiler/cpu/cpu_kernel_factory.h"
namespace mindspore {
namespace kernel {
// CPU kernel that reads one experience (element tuple) out of a ring-buffer
// replay buffer at a (possibly negative, python-style) index.
class BufferCPUGetKernel : public CPUKernel {
 public:
  BufferCPUGetKernel() : element_nums_(0), capacity_(0) {}
  ~BufferCPUGetKernel() override = default;
  // Read buffer layout from node attributes and size the in/out lists.
  void Init(const CNodePtr &kernel_node) {
    auto shapes = AnfAlgo::GetNodeAttr<std::vector<int64_t>>(kernel_node, "buffer_elements");
    auto types = AnfAlgo::GetNodeAttr<std::vector<TypePtr>>(kernel_node, "buffer_dtype");
    capacity_ = AnfAlgo::GetNodeAttr<int64_t>(kernel_node, "capacity");
    element_nums_ = shapes.size();
    // Bytes per element of each buffer component.
    for (size_t i = 0; i < element_nums_; i++) {
      exp_element_list.push_back(shapes[i] * UnitSizeInBytes(types[i]->type_id()));
    }
    // buffer size
    for (auto i : exp_element_list) {
      input_size_list_.push_back(i * capacity_);
      output_size_list_.push_back(i);
    }
    // count, head, index
    input_size_list_.push_back(sizeof(int));
    input_size_list_.push_back(sizeof(int));
    input_size_list_.push_back(sizeof(int));
  }
  // Copy the element at the requested index to the outputs.
  // Returns false (after logging) when the index is out of range.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs) {
    auto count_addr = GetDeviceAddress<int>(inputs, element_nums_);
    auto head_addr = GetDeviceAddress<int>(inputs, element_nums_ + 1);
    auto index_addr = GetDeviceAddress<int>(inputs, element_nums_ + 2);
    int index = index_addr[0];
    // Negative index counts from the end, like python indexing.
    if (index_addr[0] < 0) index += count_addr[0];
    if (!(index >= 0 && index < count_addr[0])) {
      MS_LOG(ERROR) << "The index " << index_addr[0] << " is out of range:[ " << -1 * count_addr[0] << ", "
                    << count_addr[0] << ").";
      // Abort instead of falling through: the original continued and could
      // memcpy from an out-of-range slot.
      return false;
    }
    // Translate the logical index into a physical ring-buffer slot.
    int t = count_addr[0] - head_addr[0];
    if (index < t) {
      index += head_addr[0];
    } else {
      index -= t;
    }
    auto task = [&](size_t start, size_t end) {
      for (size_t i = start; i < end; i++) {
        auto buffer_addr = GetDeviceAddress<unsigned char>(inputs, i);
        auto item_addr = GetDeviceAddress<unsigned char>(outputs, i);
        size_t one_exp_len = output_size_list_[i];
        size_t dist_len = one_exp_len;
        if (memcpy_s(item_addr, one_exp_len, buffer_addr + IntToSize(index) * one_exp_len, dist_len) != EOK) {
          MS_LOG(EXCEPTION) << "Launch kernel error: memcpy failed";
        }
      }
    };
    CPUKernelUtils::ParallelFor(task, element_nums_);
    return true;
  }
  void InitKernel(const CNodePtr &kernel_node) { return; }
 protected:
  void InitSizeLists() { return; }
 private:
  size_t element_nums_;  // number of components in one experience tuple
  int64_t capacity_;     // ring-buffer capacity in experiences
  std::vector<size_t> exp_element_list;  // bytes per component
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_RL_BUFFER_GET_CPU_KERNEL_H_
|
mindspore-ai/mindspore
|
mindspore/lite/tools/converter/parser/parser_utils.h
|
<reponame>mindspore-ai/mindspore<gh_stars>1000+
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_PARSER_UTILS_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_PARSER_UTILS_H
#include <set>
#include <vector>
#include "include/registry/model_parser.h"
#include "ir/anf.h"
#include "ir/func_graph.h"
#include "src/common/log_adapter.h"
#include "schema/inner/model_generated.h"
namespace mindspore {
namespace lite {
void GetAllFuncGraph(const FuncGraphPtr &func_graph, std::set<FuncGraphPtr> *all_func_graphs);
int CommonAnfAdjust(const std::set<FuncGraphPtr> &all_func_graphs);
int GetTransposePerm(schema::Format src_format, schema::Format dst_format, std::vector<int> *perm);
int GetTransposePermSharing(schema::Format src_format, schema::Format dst_format, std::vector<int> *perm);
AnfNodePtr GetRealConvWeightNode(const FuncGraphPtr &graph, const CNodePtr &cnode, size_t index);
int UnifyConvWeightFormat(const FuncGraphPtr &graph, const CNodePtr &cnode, schema::Format src_format,
schema::Format dst_format, std::set<AnfNodePtr> *has_visited);
int UnifyVariableConvWeight(const FuncGraphPtr &graph, const AnfNodePtr &weight_node, schema::Format src_format,
schema::Format dst_format, std::set<AnfNodePtr> *has_visited);
int UnifyConstConvWeight(const FuncGraphPtr &graph, const AnfNodePtr &weight_node, schema::Format src_format,
schema::Format dst_format, std::set<AnfNodePtr> *has_visited);
int HandleConstConvWeightShared(const FuncGraphPtr &graph, const AnfNodePtr &weight_node, schema::Format src_format,
schema::Format dst_format, std::set<AnfNodePtr> *has_visited);
// Factory helper: allocates a model parser of type T with new(std::nothrow).
// Returns nullptr (after logging) when allocation fails; the caller takes
// ownership of the returned pointer — presumably the registry frees it, confirm.
template <class T>
converter::ModelParser *LiteModelParserCreator() {
  auto *parser = new (std::nothrow) T();
  if (parser == nullptr) {
    MS_LOG(ERROR) << "new model parser failed";
    return nullptr;
  }
  return parser;
}
} // namespace lite
} // namespace mindspore
#endif
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/optimizer/pass/communication_op_fusion.h
|
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_COMMUNICATION_OP_FUSION_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_COMMUNICATION_OP_FUSION_H_
#include <utility>
#include <vector>
#include <string>
#include "backend/optimizer/common/pass.h"
#include "ir/func_graph.h"
#include "ir/anf.h"
#include "utils/utils.h"
namespace mindspore {
namespace opt {
struct CommunicationOpInfo {
std::vector<CNodePtr> communication_op_nodes;
std::vector<float> input_grad_size;
std::vector<float> input_grad_time;
};
// Base pass that fuses consecutive communication ops (of a single op type)
// into at most `groups` fused segments.
class CommunicationOpFusion : public Pass {
 public:
  explicit CommunicationOpFusion(const std::string &name, std::string op_name, size_t groups = 1)
      : Pass(name), op_name_(std::move(op_name)), groups_(groups) {}
  ~CommunicationOpFusion() override = default;
  bool Run(const FuncGraphPtr &graph) override;
 private:
  // Replace the ops of one segment split with their fused equivalents.
  bool DoFusion(const FuncGraphPtr &func_graph, const CommunicationOpInfo &communication_op_info, size_t segment_num,
                const std::vector<size_t> &segment_index) const;
  // Build a single fused op covering nodes [start_index, end_index].
  AnfNodePtr CreateFusedCommunicationOp(const FuncGraphPtr &func_graph,
                                        const CommunicationOpInfo &communication_op_info, size_t start_index,
                                        size_t end_index) const;
  // Decide segment boundaries; outputs segment_num and per-segment end indices.
  bool GetSplitSegments(const CommunicationOpInfo &communication_op_info, size_t *segment_num,
                        std::vector<size_t> *segment_index, const std::string &group) const;
  std::string op_name_;  // the communication op type this pass fuses
  size_t groups_ = 1;    // maximum number of fused segments
};
// Fuses HCOM Send ops.
class SendFusion : public CommunicationOpFusion {
 public:
  explicit SendFusion(size_t groups = 1) : CommunicationOpFusion("send_fusion", kHcomSendOpName, groups) {}
  ~SendFusion() override = default;
};
// Fuses Receive ops.
class RecvFusion : public CommunicationOpFusion {
 public:
  explicit RecvFusion(size_t groups = 1) : CommunicationOpFusion("recv_fusion", kReceiveOpName, groups) {}
  ~RecvFusion() override = default;
};
// Fuses AllReduce ops.
class AllReduceFusion : public CommunicationOpFusion {
 public:
  explicit AllReduceFusion(size_t groups = 1) : CommunicationOpFusion("all_reduce_fusion", kAllReduceOpName, groups) {}
  ~AllReduceFusion() override = default;
};
// Fuses AllGather ops.
class AllGatherFusion : public CommunicationOpFusion {
 public:
  explicit AllGatherFusion(size_t groups = 1) : CommunicationOpFusion("all_gather_fusion", kAllGatherOpName, groups) {}
  ~AllGatherFusion() override = default;
};
// Fuses Broadcast ops.
class BroadcastFusion : public CommunicationOpFusion {
 public:
  explicit BroadcastFusion(size_t groups = 1) : CommunicationOpFusion("broadcast_fusion", kBroadcastOpName, groups) {}
  ~BroadcastFusion() override = default;
};
// Fuses ReduceScatter ops.
class ReduceScatterFusion : public CommunicationOpFusion {
 public:
  explicit ReduceScatterFusion(size_t groups = 1)
      : CommunicationOpFusion("reduce_scatter_fusion", kReduceScatterOpName, groups) {}
  ~ReduceScatterFusion() override = default;
};
} // namespace opt
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_PASS_COMMUNICATION_OP_FUSION_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/runtime/device/gpu/gpu_kernel_runtime.h
|
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_GPU_GPU_KERNEL_RUNTIME_H_
#define MINDSPORE_CCSRC_RUNTIME_DEVICE_GPU_GPU_KERNEL_RUNTIME_H_
#include <string>
#include <map>
#include <memory>
#include <vector>
#include <set>
#include <utility>
#include <unordered_map>
#include <unordered_set>
#include "backend/session/anf_runtime_algorithm.h"
#include "runtime/device/kernel_runtime.h"
#include "runtime/device/kernel_runtime_manager.h"
#include "backend/optimizer/mem_reuse/mem_swap_manager.h"
#include "backend/optimizer/mem_reuse/mem_reuse.h"
namespace mindspore {
namespace device {
namespace gpu {
using mindspore::device::memswap::MemSwapManagerPtr;
using mindspore::memreuse::MemReuseUtilPtr;
// GPU implementation of KernelRuntime: memory assignment, dynamic memory pool
// with optional memory swap, and kernel launch for GPU kernel graphs.
class GPUKernelRuntime : public KernelRuntime {
 public:
  GPUKernelRuntime() = default;
  ~GPUKernelRuntime() override = default;
  bool Init() override;
  void ReleaseDeviceRes() override;
  void ClearGraphRuntimeResource(uint32_t graph_id) override;
  void AssignMemory(const session::KernelGraph &graph) override;
  bool Run(const session::KernelGraph &graph, bool is_task_sink) override;
  // Dynamic-kernel hooks are no-ops on GPU (handled by LaunchKernelDynamic).
  bool GenDynamicKernel(const session::KernelGraph &graph) override { return true; }
  bool RunDynamicKernelAsync(const session::KernelGraph &graph) override { return true; }
  DeviceAddressType GetTargetDeviceAddressType() const override { return DeviceAddressType::kGPU; }
  std::shared_ptr<DeviceEvent> CreateDeviceEvent() override;
  void *compute_stream() const override { return stream_; }
  void *communication_stream() const override { return communication_stream_; }
  // Mark an address as invalid so cached lookups are bypassed for it.
  void SetAddrInvalid(const DeviceAddressPtr &addr) { addr_state_.insert(addr); }
 protected:
  DeviceAddressPtr CreateDeviceAddress(void *device_ptr, size_t device_size, const string &format,
                                       TypeId type_id) const override;
  DeviceAddressPtr CreateDeviceAddress(void *device_ptr, size_t device_size, const string &format, TypeId type_id,
                                       const KernelWithIndex &node_index) const override;
  bool SyncStream() override;
  bool MemcpyAsync(void *dst, const void *src, uint64_t size, int32_t kind) override;
 private:
  GPUKernelRuntime(const GPUKernelRuntime &);
  GPUKernelRuntime &operator=(const GPUKernelRuntime &);
  bool InitDevice();
  bool device_init_{false};
  // The related functions and members for using dynamic memory pool.
  void InitKernelRefCount(const session::KernelGraph *graph);
  void InitKernelOutputAddress(const session::KernelGraph *graph);
  void InitKernelWorkspaceAddress(const session::KernelGraph *graph);
  void InitMemorySwapInfo(const session::KernelGraph *graph);
  void SaveGraphOutputNode(const session::KernelGraph *graph);
  bool IsGraphOutput(const session::KernelGraph *graph, const mindspore::AnfNodePtr &kernel) const;
  void ClearKernelOutputAddress(const session::KernelGraph *graph);
  void ClearKernelWorkspaceAddress(const session::KernelGraph *graph);
  void ClearKernelOldOutputAndWorkspace(const session::KernelGraph *graph);
  // Step execution and memory-swap scheme search/refinement.
  bool RunOneStep(const session::KernelGraph *graph);
  bool SearchMemSwapScheme(const session::KernelGraph *graph);
  bool RefineMemSwapScheme(const session::KernelGraph *graph);
  bool LaunchKernelDynamic(const session::KernelGraph *graph, bool mock = false, bool profiling = false);
  bool RunOpLaunchKernelDynamic(const session::KernelGraph *graph);
  void LaunchKernelWithTimeProfiling(const AnfNodePtr &kernel, const AddressPtrList &inputs,
                                    const AddressPtrList &workspace, const AddressPtrList &outputs);
  // Dynamic resource allocation for a kernel's inputs/outputs/workspaces.
  bool AttemptMallocMem(const DeviceAddressPtr &device_address, size_t size, bool mock);
  bool AllocKernelDynamicRes(const mindspore::kernel::KernelMod &kernel_mod, const mindspore::AnfNodePtr &kernel,
                             AddressPtrList *kernel_inputs, AddressPtrList *kernel_workspaces,
                             AddressPtrList *kernel_outputs, bool mock);
  bool AllocKernelInputDynamicRes(const mindspore::AnfNodePtr &kernel, AddressPtrList *kernel_inputs, bool mock);
  bool AllocKernelOutputDynamicRes(const mindspore::kernel::KernelMod &kernel_mod, const mindspore::AnfNodePtr &kernel,
                                   AddressPtrList *kernel_outputs, bool mock);
  bool AllocKernelWorkspaceDynamicRes(const mindspore::kernel::KernelMod &kernel_mod,
                                      const mindspore::AnfNodePtr &kernel, AddressPtrList *kernel_workspaces,
                                      bool mock);
  void AllocCommunicationOpDynamicRes(const session::KernelGraph *graph);
  void AllocCommunicationOpInputDynamicRes(const mindspore::AnfNodePtr &kernel);
  void AllocCommunicationOpOutputDynamicRes(const mindspore::AnfNodePtr &kernel);
  void AllocCommunicationOpMemory(bool is_need_alloc_memory, bool is_need_free_memory,
                                  const DeviceAddressPtrList addr_list, size_t total_size,
                                  std::vector<size_t> size_list);
  void FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel);
  // Memory swap (host <-> device) task management.
  bool UpdateMemorySwapTask(const AnfNodePtr &kernel, bool mock, bool profiling);
  bool AddMemorySwapTask(const AnfNodePtr &kernel, bool mock, bool profiling);
  void UpdateHostSwapInQueue(const DeviceAddressPtr device_address, bool mock);
  void UpdateHostSwapOutQueue(bool mock);
  void ClearSwapInfo(bool mock);
  void AllocInplaceNodeMemory(const session::KernelGraph *graph);
  bool IsDistributedTraining(const session::KernelGraph *graph);
  void FetchMemUnitSize(const session::KernelGraph *graph);
  // Cached address lookups (see *_cache_ members below).
  DeviceAddressPtr GetPrevNodeMutableOutputAddr(const AnfNodePtr &node, size_t i, bool visit_nop_node);
  DeviceAddressPtr GetMutableOutputAddr(const AnfNodePtr &node, size_t i, bool visit_nop_node);
  session::KernelWithIndex GetPrevNodeOutput(const AnfNodePtr &node, size_t i);
  void LaunchKernelWithoutMock(const session::KernelGraph *graph, const AnfNodePtr &kernel,
                               const AddressPtrList &inputs, const AddressPtrList &workspaces,
                               const AddressPtrList &outputs, bool profiling);
  // Per-graph state, keyed by graph id.
  std::unordered_map<uint32_t, MemReuseUtilPtr> mem_reuse_util_map_;
  std::unordered_map<uint32_t, MemSwapManagerPtr> mem_swap_map_;
  std::unordered_map<uint32_t, bool> is_first_step_map_;
  std::unordered_map<uint32_t, std::set<AnfNodePtr>> graph_output_map_;
  std::unordered_map<uint32_t, bool> is_alloc_communication_res_;
  std::unordered_map<uint32_t, bool> is_alloc_inplace_res_;
  // State for the graph currently being executed.
  MemReuseUtilPtr mem_reuse_util_{nullptr};
  MemSwapManagerPtr mem_swap_manager_{nullptr};
  bool enable_relation_cache_{false};
  std::unordered_set<DeviceAddressPtr> addr_state_;
  // Lookup caches used when enable_relation_cache_ is set.
  std::unordered_map<AnfNodePtr, std::vector<session::KernelWithIndex>> prev_node_mut_output_addr_cache_;
  std::unordered_map<AnfNodePtr, std::vector<session::KernelWithIndex>> prev_node_mut_output_addr_skip_nop_node_cache_;
  std::unordered_map<AnfNodePtr, std::vector<DeviceAddressPtr>> mut_output_addr_cache_;
  std::unordered_map<AnfNodePtr, std::vector<DeviceAddressPtr>> mut_output_addr_skip_nop_node_cache_;
  std::unordered_map<AnfNodePtr, std::vector<session::KernelWithIndex>> prev_node_output_cache_;
};
MS_REG_KERNEL_RUNTIME(kGPUDevice, GPUKernelRuntime);
} // namespace gpu
} // namespace device
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_DEVICE_GPU_GPU_KERNEL_RUNTIME_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/slice_gpu_kernel.h
|
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_ARRAYS_SLICE_GPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_ARRAYS_SLICE_GPU_KERNEL_H_
#include <vector>
#include <utility>
#include <algorithm>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "backend/kernel_compiler/gpu/cuda_impl/slice_impl.cuh"
namespace mindspore {
namespace kernel {
template <typename T>
class SliceGpuFwdKernel : public GpuKernel {
public:
SliceGpuFwdKernel() : is_null_input_(false), input_size_(0), output_size_(0), workspace_size_(0) {}
~SliceGpuFwdKernel() override = default;
const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }
bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
if (is_null_input_) {
return true;
}
T *input = GetDeviceAddress<T>(inputs, 0);
T *output = GetDeviceAddress<T>(outputs, 0);
size_t input_rank = input_shape_.size();
switch (input_rank) {
case 1:
Slice1DKernel(begin_[0], size_[0], input_shape_[0], input, output, reinterpret_cast<cudaStream_t>(stream_ptr));
break;
case 2:
Slice2DKernel(begin_[0], begin_[1], size_[0], size_[1], input_shape_[0], input_shape_[1], input, output,
reinterpret_cast<cudaStream_t>(stream_ptr));
break;
case 3:
Slice3DKernel(begin_[0], begin_[1], begin_[2], size_[0], size_[1], size_[2], input_shape_[0], input_shape_[1],
input_shape_[2], input, output, reinterpret_cast<cudaStream_t>(stream_ptr));
break;
case 4:
Slice4DKernel(begin_[0], begin_[1], begin_[2], begin_[3], size_[0], size_[1], size_[2], size_[3],
input_shape_[0], input_shape_[1], input_shape_[2], input_shape_[3], input, output,
reinterpret_cast<cudaStream_t>(stream_ptr));
break;
case 5:
Slice5DKernel(begin_[0], begin_[1], begin_[2], begin_[3], begin_[4], size_[0], size_[1], size_[2], size_[3],
size_[4], input_shape_[0], input_shape_[1], input_shape_[2], input_shape_[3], input_shape_[4],
input, output, reinterpret_cast<cudaStream_t>(stream_ptr));
break;
case 6:
Slice6DKernel(begin_[0], begin_[1], begin_[2], begin_[3], begin_[4], begin_[5], size_[0], size_[1], size_[2],
size_[3], size_[4], size_[5], input_shape_[0], input_shape_[1], input_shape_[2], input_shape_[3],
input_shape_[4], input_shape_[5], input, output, reinterpret_cast<cudaStream_t>(stream_ptr));
break;
case 7:
Slice7DKernel(begin_[0], begin_[1], begin_[2], begin_[3], begin_[4], begin_[5], begin_[6], size_[0], size_[1],
size_[2], size_[3], size_[4], size_[5], size_[6], input_shape_[0], input_shape_[1],
input_shape_[2], input_shape_[3], input_shape_[4], input_shape_[5], input_shape_[6], input,
output, reinterpret_cast<cudaStream_t>(stream_ptr));
break;
default:
MS_LOG(EXCEPTION) << "gpu Slice operator does not support inputs with rank >= " << input_rank << ".";
}
return true;
}
bool Init(const CNodePtr &kernel_node) override {
if (!CheckParam(kernel_node)) {
return false;
}
auto input_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
auto out_shape = AnfAlgo::GetOutputDeviceShape(kernel_node, 0);
is_null_input_ = CHECK_NULL_INPUT(input_shape) || CHECK_NULL_INPUT(out_shape);
if (is_null_input_) {
MS_LOG(WARNING) << "For 'SliceGpuKernel', input or output is null";
InitSizeLists();
return true;
}
(void)std::transform(input_shape.begin(), input_shape.end(), std::back_inserter(input_shape_),
[](const int64_t &e) { return static_cast<int32_t>(e); });
input_size_ = sizeof(T);
for (size_t x : input_shape) {
input_size_ *= x;
}
output_size_ = sizeof(T);
for (size_t x : out_shape) {
output_size_ *= x;
}
// transpose begin and size for NHWC data
auto data_format = AnfAlgo::GetInputFormat(kernel_node, 0);
if (data_format == "NHWC") {
std::swap(begin_[1], begin_[3]);
std::swap(begin_[1], begin_[2]);
std::swap(size_[1], size_[3]);
std::swap(size_[1], size_[2]);
} else if (data_format == "NDHWC") {
std::swap(begin_[1], begin_[4]);
std::swap(begin_[1], begin_[3]);
std::swap(begin_[1], begin_[2]);
std::swap(size_[1], size_[4]);
std::swap(size_[1], size_[3]);
std::swap(size_[1], size_[2]);
}
InitSizeLists();
return true;
}
protected:
void InitSizeLists() override {
input_size_list_.push_back(input_size_);
output_size_list_.push_back(output_size_);
}
private:
bool CheckParam(const CNodePtr &kernel_node) {
size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
if (input_num != 1) {
MS_LOG(ERROR) << "Input number is " << input_num << ", but SliceGpuFwdKernel needs 1 inputs.";
return false;
}
size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
if (output_num != 1) {
MS_LOG(ERROR) << "Output number is " << output_num << ", but SliceGpuFwdKernel needs 1 output.";
return false;
}
auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
if (input_shape.size() > 7) {
MS_LOG(ERROR) << "Input dims is " << input_shape.size() << ", but SliceGpuFwdKernel olny support 7d or lower.";
return false;
}
if (input_shape.size() == 0) {
MS_LOG(ERROR) << "Input dims is " << input_shape.size() << ", scalar is not supported.";
return false;
}
auto size = GetAttr<std::vector<int64_t>>(kernel_node, "size");
auto begin = GetAttr<std::vector<int64_t>>(kernel_node, "begin");
for (size_t i = 0; i < input_shape.size(); i++) {
if (i >= size.size() || input_shape[i] <= 0 || size[i] <= 0) {
MS_LOG(WARNING) << "Slice output is null.";
is_null_input_ = true;
}
}
(void)std::transform(size.begin(), size.end(), std::back_inserter(size_),
[](const int64_t &e) { return static_cast<int32_t>(e); });
(void)std::transform(begin.begin(), begin.end(), std::back_inserter(begin_),
[](const int64_t &e) { return static_cast<int32_t>(e); });
return true;
}
// use int32_t, a smaller type than the typical size_t, so that we can add higher
// dimension later on. cuda kernel arguments' total size cannot exceed 256 bytes
std::vector<int32_t> begin_;
std::vector<int32_t> size_;
std::vector<int32_t> input_shape_;
std::vector<size_t> input_size_list_;
std::vector<size_t> output_size_list_;
std::vector<size_t> workspace_size_list_;
bool is_null_input_;
size_t input_size_;
size_t output_size_;
size_t workspace_size_;
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_ARRAYS_SLICE_GPU_KERNEL_H_
|
mindspore-ai/mindspore
|
mindspore/lite/src/lite_kernel_util.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_LITE_KERNEL_UTIL_H_
#define MINDSPORE_LITE_SRC_LITE_KERNEL_UTIL_H_
#include <vector>
#include <set>
#include "src/lite_kernel.h"
namespace mindspore::kernel {
// Static helper collection for analyzing a subgraph's kernel topology:
// boundary nodes/tensors, topological ordering, and in/out kernel linkage.
class LiteKernelUtil {
public:
// Kernels of the subgraph that consume tensors produced outside the subgraph.
static std::vector<kernel::LiteKernel *> SubgraphInputNodes(const std::vector<kernel::LiteKernel *> &kernels);
// Kernels whose outputs are consumed outside the subgraph.
static std::vector<kernel::LiteKernel *> SubgraphOutputNodes(const std::vector<kernel::LiteKernel *> &kernels);
// Boundary tensors flowing into / out of the subgraph.
static std::vector<lite::Tensor *> SubgraphInputTensors(const std::vector<kernel::LiteKernel *> &kernels);
static std::vector<lite::Tensor *> SubgraphOutputTensors(const std::vector<kernel::LiteKernel *> &kernels);
// Reorders *kernels in execution (topological) order; returns a status code.
static int TopologicalSortKernels(std::vector<kernel::LiteKernel *> *kernels);
static void InitTensorInitRefCount(const std::vector<kernel::LiteKernel *> &kernels);
// Binds `inputs` to `kernelMod`'s input tensors; returns a status code.
static int SetInput(const LiteKernel &kernelMod, const std::vector<lite::Tensor *> &inputs);
#ifndef CONTROLFLOW_TENSORLIST_CLIP
static bool IsSwitchCall(kernel::LiteKernel *kernel);
#endif
// Returns the producer of `kernel`'s inputs matching `primitive_type`, or null — TODO confirm null-on-miss.
static kernel::LiteKernel *GetInputsSpecificNode(const kernel::LiteKernel *kernel,
const schema::PrimitiveType &primitive_type);
static bool InputsContainsSpecificNode(const kernel::LiteKernel *kernel, const schema::PrimitiveType &primitive_type);
// find in_kernels_ and out_kernels of kernel, sub_graph and nodes_ in sub_graph
static void FindAllInoutKernels(const std::vector<kernel::LiteKernel *> &kernels);
private:
// All tensors produced by any kernel in `kernels` (used to classify boundary tensors).
static std::set<lite::Tensor *> AllOutTensor(const std::vector<kernel::LiteKernel *> &kernels);
};
} // namespace mindspore::kernel
#endif // MINDSPORE_LITE_SRC_LITE_KERNEL_UTIL_H_
|
mindspore-ai/mindspore
|
mindspore/lite/tools/common/meta_graph_utils.h
|
<gh_stars>1000+
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_COMMON_META_GRAPH_UTIL_H
#define MINDSPORE_LITE_TOOLS_COMMON_META_GRAPH_UTIL_H
#include <vector>
#include "inner/model_generated.h"
#include "include/errorcode.h"
namespace mindspore::lite {
// Graph-surgery helpers for the flatbuffer MetaGraphT representation:
// indices returned are positions into graphT.nodes / graphT.allTensors.
// Nodes producing / consuming the tensor at `tensorIdx`.
std::vector<size_t> GetLinkedPreIdx(const schema::MetaGraphT &graphT, const size_t &tensorIdx);
std::vector<size_t> GetLinkedPostIdx(const schema::MetaGraphT &graphT, const size_t &tensorIdx);
// Upstream nodes of `node`; `inputIndexIdx` restricts to one input slot, -1 means all.
std::vector<size_t> GetInputNodeIdx(const schema::MetaGraphT &graphT, const schema::CNodeT &node,
int inputIndexIdx = -1);
std::vector<size_t> GetInputNodeIdx(const schema::MetaGraphT &graphT, const size_t &nodeIdx, int inputIndexIdx = -1);
// Downstream nodes of `node`; `outputIndexIdx` restricts to one output slot, -1 means all.
std::vector<size_t> GetOutputNodeIdx(const schema::MetaGraphT &graphT, const schema::CNodeT &node,
int outputIndexIdx = -1);
std::vector<size_t> GetOutputNodeIdx(const schema::MetaGraphT &graphT, const size_t &nodeIdx, int outputIndexIdx = -1);
// Detaches `node` from the graph, reconnecting its neighbors around it.
STATUS IsolateNode(schema::MetaGraphT *subGraph, schema::CNodeT *node);
// Removes the listed tensors; `forceDelete` presumably bypasses in-use checks — confirm.
STATUS RemoveTensor(schema::MetaGraphT *graphT, std::vector<uint32_t> toDeleteTensorIdxes, bool forceDelete = false);
void ReplaceOutput(const uint32_t &old_index, const uint32_t &new_index, schema::MetaGraphT *graphT);
STATUS UpdateNodeIndex(schema::CNodeT *node, uint32_t deleteIdx);
// Overloads for removing a single-input single-output node, optionally keeping its tensor.
STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, size_t subGraphIdx, size_t nodeIdx, bool removeTensor = true);
STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, schema::CNodeT *node, bool removeTensor = true);
STATUS IsolateOneWayNode(schema::MetaGraphT *graphT, size_t nodeIdx, bool removeTensor = true);
STATUS SetSubgraphTensorIndices(schema::MetaGraphT *meta_graphT);
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_TOOLS_COMMON_META_GRAPH_UTIL_H
|
mindspore-ai/mindspore
|
mindspore/lite/src/cxx_api/converters.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_CXX_API_CONVERTERS_H_
#define MINDSPORE_LITE_SRC_CXX_API_CONVERTERS_H_
#include <limits.h>
#include "include/api/status.h"
#include "include/api/types.h"
#include "include/lite_types.h"
#include "src/inner_context.h"
namespace mindspore {
namespace lite {
// Internal runtime types; the converters below translate the public API types into these.
struct Context;
class TrainCfg;
} // namespace lite
// Public C++ API types (declared in include/api/) — forward-declared to avoid heavy includes.
class Context;
class TrainCfg;
// Maps the public API quantization type onto the lite runtime's enum.
// Unrecognized values fall back to QT_DEFAULT.
inline lite::QuantizationType A2L_ConvertQT(mindspore::QuantizationType qt) {
  const bool is_no_quant = (qt == kNoQuant);
  const bool is_weight_quant = (qt == kWeightQuant);
  return is_no_quant ? lite::QT_NONE : (is_weight_quant ? lite::QT_WEIGHT : lite::QT_DEFAULT);
}
// Maps the integer affinity mode from the public API onto the lite CPU bind mode.
// 1 -> HIGHER_CPU, 2 -> MID_CPU; 0 and any unknown value -> NO_BIND.
inline lite::CpuBindMode A2L_ConvertAffinityMode(int affinity_mode) {
  if (affinity_mode == 1) {
    return lite::HIGHER_CPU;
  }
  if (affinity_mode == 2) {
    return lite::MID_CPU;
  }
  return lite::NO_BIND;
}
// A mode is valid iff it lies in the closed range [NO_BIND, MID_CPU].
inline bool IsAffinityModeValid(int affinity_mode) {
  const bool too_low = affinity_mode < lite::NO_BIND;
  const bool too_high = affinity_mode > lite::MID_CPU;
  return !too_low && !too_high;
}
// Copies settings from the public API Context into the runtime's InnerContext.
Status A2L_ConvertContext(Context *a_context, lite::InnerContext *l_context);
// Copies settings from the public API TrainCfg into the runtime's TrainCfg.
Status A2L_ConvertConfig(const TrainCfg *a_train_cfg, lite::TrainCfg *l_train_cfg);
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_CXX_API_CONVERTERS_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/profiler/device/ascend/ascend_profiling.h
|
<filename>mindspore/ccsrc/profiler/device/ascend/ascend_profiling.h<gh_stars>1000+
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PROFILER_DEVICE_ASCEND_PROFILING_H
#define MINDSPORE_CCSRC_PROFILER_DEVICE_ASCEND_PROFILING_H
#include <string>
#include <memory>
#include "profiler/device/profiling.h"
namespace mindspore {
namespace profiler {
namespace ascend {
// Ascend-device profiler singleton. Most Profiler hooks are intentional no-ops
// here; actual profiling is driven by Start()/Stop() with an options string.
class AscendProfiler : public Profiler {
public:
// Returns the process-wide singleton instance.
static std::shared_ptr<AscendProfiler> &GetInstance();
AscendProfiler() : profiling_options_("") {}
~AscendProfiler() = default;
// Non-copyable singleton.
AscendProfiler(const AscendProfiler &) = delete;
AscendProfiler &operator=(const AscendProfiler &) = delete;
// No-op: initialization happens in Start() for the Ascend backend.
void Init(const std::string &profileDataPath) { return; }
void Stop();
void StepProfilingEnable(const bool enable_flag) override;
// No-op on Ascend; op timing is collected by the device profiler itself.
void OpDataProducerEnd() { return; }
// Begins profiling with the given backend-specific options string.
void Start(const std::string &profiling_options);
// enable_flag_ is presumably inherited from Profiler — confirm in base class.
bool GetProfilingEnableFlag() const { return enable_flag_; }
std::string GetProfilingOptions() const { return profiling_options_; }
// No-op: data is saved by the device-side profiling infrastructure.
void SaveProfileData() { return; }
void ClearInst() { return; }
private:
static std::shared_ptr<AscendProfiler> ascend_profiler_;
std::string profiling_options_;
};
} // namespace ascend
} // namespace profiler
} // namespace mindspore
#endif
|
mindspore-ai/mindspore
|
mindspore/lite/tools/converter/quantizer/full_quant_quantizer.h
|
/**
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_QUANTIZER_FULL_QUANT_QUANTIZER_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_QUANTIZER_FULL_QUANT_QUANTIZER_H
#include <algorithm>
#include <cfloat>
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "ops/primitive_c.h"
#include "schema/inner/model_generated.h"
#include "src/lite_session.h"
#include "tools/converter/quantizer/quantizer.h"
#include "tools/converter/converter.h"
#include "include/ms_tensor.h"
#include "tools/converter/quantizer/quantize_util.h"
#include "tools/converter/quantizer/quant_params.h"
#include "tools/converter/preprocess/preprocess_param.h"
namespace mindspore::lite::quant {
class Calibrator;
constexpr int kDefaultBinNumber = 2048;
// Per-tensor activation statistics gathered during calibration. Holds the
// running min/max and a histogram of observed values, from which a
// quantization threshold (best_T) and scale/zero-point are derived.
struct DivergInfo {
std::vector<float> histogram;
// Node whose activation this record describes.
CNodePtr cnode;
int bin_num = 0;
// Histogram bin width, derived from the observed value range.
float interval = 0;
float max = 0.0f;
float min = 0.0f;
// Chosen clipping threshold (e.g. from KL-divergence search).
float best_T = 0.0f;
size_t bit_num = 0;
int quant_max = 255;
int quant_min = 0;
ActivationQuantizedMethod activation_quant_method = MAX_MIN;
// Per-batch min/max samples, used by the percentile-style methods.
std::vector<float> min_datas;
std::vector<float> max_datas;
std::pair<float, float> percent_result{0.0, 0.0};
float scale_tmp = 0;
DivergInfo() = default;
DivergInfo(CNodePtr cnode, int bins, size_t bits, int quant_max, int quant_min,
ActivationQuantizedMethod activation_quant_method) {
this->activation_quant_method = activation_quant_method;
this->cnode = std::move(cnode);
this->bin_num = bins;
this->bit_num = bits;
histogram.resize(bin_num);
max = -FLT_MAX;
min = FLT_MAX;
this->quant_max = quant_max;
this->quant_min = quant_min;
// Seed bins with a tiny mass so later KL computations never divide by zero.
std::fill(histogram.begin(), histogram.end(), 1.0e-7);
}
STATUS RecordMaxMinValue(const std::vector<float> &data);
STATUS RecordMaxMinValueArray(const std::vector<float> &data);
void UpdateInterval();
STATUS UpdateHistogram(const std::vector<float> &data);
void DumpHistogram();
void HandleBinForKL(int quant_bint_nums, int bin_index, std::vector<float> *quantized_histogram,
std::vector<float> *expanded_histogram);
STATUS ComputeThreshold();
// Scale / zero-point computed from best_T, paired with the owning node.
std::pair<CNodePtr, float> GetScale();
std::pair<CNodePtr, int32_t> GetZeropoint();
};
// Post-training full (activation + weight) quantizer: runs an fp32 model over
// calibration data, collects activation statistics via Calibrator, quantizes
// the graph, and optionally applies bias correction using an int8 re-run.
class FullQuantQuantizer : public Quantizer {
public:
FullQuantQuantizer(FuncGraphPtr graph, int bit_num, TypeId target_type = kNumberTypeInt8, bool per_channel = true);
~FullQuantQuantizer() override;
// Entry point: performs the whole calibrate -> quantize -> correct pipeline.
STATUS DoQuantize(FuncGraphPtr func_graph) override;
size_t bit_num;
int quant_max{INT8_MAX};
int quant_min{INT8_MIN};
private:
bool per_channel_{true};
TypeId target_type_{kNumberTypeInt8};
std::unique_ptr<Calibrator> calibrator_{nullptr};
// fp32 model/session: reference run for calibration and bias correction.
session::LiteSession *fp32_session_{nullptr};
Model *fp32_model_{nullptr};
// int8 model/session: used only by the bias-correction pass.
session::LiteSession *int8_session_{nullptr};
Model *int8_model_{nullptr};
std::map<std::string, std::vector<float>> fp32_op_input_map; // concurrency
std::map<std::string, std::vector<float>> fp32_op_output_ch_mean_map; // concurrency
std::map<std::string, std::vector<float>> op_bias_diff_map; // only use by int8 model
// Guard the two concurrency-shared maps above.
std::mutex mutex_op_input;
std::mutex mutex_op_output;
// Access mode for the Op*DataHandle helpers below.
enum OperationType {
STORE,
FETCH,
};
bool OpInputDataHandle(OperationType type, const string &op_name, std::vector<float> *data);
bool OpOutputChMeanDataHandle(OperationType type, const string &op_name, std::vector<float> *data);
const std::string kTypeConv2D = schema::EnumNamePrimitiveType(schema::PrimitiveType_Conv2DFusion);
// NOTE(review): intentionally the same name as kTypeConv2D (depthwise is folded
// into Conv2DFusion in the schema) — confirm against schema definition.
const std::string kTypeDepthwiseConv2D = schema::EnumNamePrimitiveType(schema::PrimitiveType_Conv2DFusion);
const std::string kTypeConcat = schema::EnumNamePrimitiveType(schema::PrimitiveType_Concat);
STATUS PreProcess();
static STATUS CheckFp32TensorVec(const std::string &node_name,
const std::vector<mindspore::tensor::MSTensor *> &tensor_vec);
// Calibration passes over the fp32 session.
STATUS DoInference();
STATUS UpdateDivergeInterval();
STATUS CollectDataFrequency();
STATUS ComputeThreshold();
// Graph rewriting: attach quant params / quantized weights to nodes.
STATUS QuantNodeSimpleOp(const CNodePtr &cnode);
STATUS QuantNode();
STATUS SetInOutQuantParam(const AnfNodePtr &input_node, const std::unique_ptr<DivergInfo> &info,
const PrimitivePtr &primitive, bool is_input, size_t index) const;
STATUS DoWeightQuant(const std::string &op_name, const AnfNodePtr &weight, const PrimitivePtr &primitive,
bool per_channel, int input_index) const;
STATUS DoParameterNodeQuant(const CNodePtr &cnode, const AnfNodePtr &input_node, size_t input_index);
static STATUS DoBiasQuant(const AnfNodePtr &bias, const PrimitivePtr &primitive);
// Bias-correction pass comparing int8 output statistics against fp32.
STATUS Int8Inference();
STATUS BiasCorrection(const FuncGraphPtr &func_graph);
STATUS BiasCorrection(const FuncGraphPtr &func_graph, const CNodePtr &cnode);
// Session callbacks used to intercept per-op tensors during inference.
KernelCallBack GetBeforeCallBack(bool int8_op);
KernelCallBack GetAfterCallBack(bool int8_op);
KernelCallBack GetInt8AfterCallBack();
KernelCallBack GetFloatAfterCallBack();
};
// Owns the calibration configuration and the per-node DivergInfo records for
// inputs and outputs, and feeds calibration data into the fp32 session.
class Calibrator {
public:
explicit Calibrator(size_t bit_num, int quant_max, int quant_min)
: bit_num_(bit_num), quant_max_(quant_max), quant_min_(quant_min) {}
~Calibrator() = default;
// Fills `tensor` with the image_index-th sample for the named model input.
STATUS GenerateInputData(const std::string &input_name, size_t image_index,
mindspore::tensor::MSTensor *tensor) const;
size_t GetBatchNum() const { return data_pre_process_param_.calibrate_size; }
uint32_t GetThreadNum() const { return full_quant_param_.thread_num; }
bool GetBiasCorrection() const { return full_quant_param_.bias_correction; }
size_t GetInputNum() const { return data_pre_process_param_.calibrate_path_vector.size(); }
// Registers a node so its input/output activations are tracked.
STATUS AddQuantizedOp(const CNodePtr &cnode);
static STATUS RecordMaxMinValue(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
static STATUS UpdateDivergInterval(
std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> *diverg_info);
static STATUS UpdateDataFrequency(const std::vector<float> &data, const std::unique_ptr<DivergInfo> &diverg_info);
STATUS ComputeThreshold();
// Accessors for the node-name -> per-tensor statistics maps below.
std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> *GetInputDivergInfo();
std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> *GetOutputDivergInfo();
FullQuantParam full_quant_param_;
preprocess::DataPreProcessParam data_pre_process_param_;
private:
std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> inputs_diverg_info_;
std::unordered_map<std::string, std::vector<std::unique_ptr<DivergInfo>>> outputs_diverg_info_;
size_t bit_num_;
int quant_max_;
int quant_min_;
};
} // namespace mindspore::lite::quant
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_QUANTIZER_FULL_QUANT_QUANTIZER_H
|
mindspore-ai/mindspore
|
mindspore/ccsrc/debug/tensor_load.h
|
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_DEBUG_TENSOR_LOAD_H_
#define MINDSPORE_CCSRC_DEBUG_TENSOR_LOAD_H_
#include <algorithm>
#include <condition_variable>
#include <deque>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "debug/tensor_data.h"
#ifdef ONLINE_DBG_MODE
#include "debug/data_dump/dump_json_parser.h"
namespace mindspore {
#endif
// Debugger-side cache of tensors for the current and previous training step.
// Keys are tensor names (offline mode appends device/graph/slot qualifiers);
// parameters are additionally kept one step back under a ":prev" suffix.
// tensor_list_map_ / prev_tensor_list_map_ are guarded by lock_; the memory
// accounting (mem_usage_, cache_evict_queue_) is guarded by mem_lock_.
class TensorLoader {
 public:
  TensorLoader() : iter_num_(-1), mem_total_(0), mem_usage_(0) {}

  ~TensorLoader() { EmptyTensor(); }

  // Moves one tensor (by node-handle extract, no copy) from the current-step
  // map into the previous-step map. No-op if the name is absent.
  void MoveTensorCurrentToPrev(std::string tensor_name) {
    auto handle = tensor_list_map_.extract(tensor_name);
    if (!handle.empty()) {
      MS_LOG(INFO) << "Moving " << tensor_name << " from current map to previous map";
      prev_tensor_list_map_.insert(std::move(handle));
    }
  }

  void SwapCurrentPrev() { tensor_list_map_.swap(prev_tensor_list_map_); }

  bool TensorExistsInCurrent(std::string tensor_name) const {
    return tensor_list_map_.find(tensor_name) != tensor_list_map_.end();
  }

  // only parameters will return true
  bool PrevTensorExistsInCurrent(std::string tensor_name) const { return TensorExistsInCurrent(tensor_name + ":prev"); }

  // Moves every parameter (identified by having a ":prev" twin) from the
  // current map to the previous map at the end of a step.
  void MoveParametersCurrentToPrev() {
    MS_LOG(INFO) << "Moving parameters from current map to previous map";
    auto iter = tensor_list_map_.begin();
    while (iter != tensor_list_map_.end()) {
      auto key = iter->first;
      if (PrevTensorExistsInCurrent(key)) {
        // :prev tensor only exists for parameter. Move it to prev.
        // Advance first: MoveTensorCurrentToPrev() extracts the node and would
        // invalidate the current iterator.
        ++iter;
        MoveTensorCurrentToPrev(key);
      } else {
        ++iter;
      }
    }
  }

  // True if the name ends with the ":prev" suffix.
  bool IsPrevTensor(std::string tensor_name) const {
    const std::string suffix = ":prev";
    if (tensor_name.length() <= suffix.length()) return false;
    return std::equal(suffix.rbegin(), suffix.rend(), tensor_name.rbegin());
  }

  // Inserts (or overwrites) a tensor in the current-step map. When keep_prev
  // is set, the previous-step copy of the same name is kept under ":prev".
  bool LoadNewTensor(std::shared_ptr<TensorData> tensor, bool keep_prev) {
    // RAII guard instead of manual lock()/unlock(): releases on every path,
    // including exceptions thrown by the string/map operations below.
    std::lock_guard<std::mutex> lock(lock_);
    auto tensor_name = tensor->GetName();
    if (keep_prev) {
      // add prev step tensor into current step map with ":prev" suffix
      auto handle = prev_tensor_list_map_.extract(tensor_name);
      if (!handle.empty()) {
        handle.key() = tensor_name + ":prev";
        tensor_list_map_.insert(std::move(handle));
      }
    }
    std::string key_name = tensor_name;
#ifdef OFFLINE_DBG_MODE
    key_name += (":" + std::to_string(tensor->GetDeviceId()) + ":" + std::to_string(tensor->GetRootGraphId()) + ":" +
                 std::to_string(tensor->GetIsOutput()) + ":" + std::to_string(tensor->GetSlot()));
    if (tensor_list_map_.find(key_name) != tensor_list_map_.end() &&
        tensor->GetIteration() == tensor_list_map_[key_name]->GetIteration() - 1) {
      key_name += ":prev";
    }
    auto iter = tensor_list_map_.find(key_name);
    if (iter != tensor_list_map_.end()) {
      iter->second->DeleteDataPtr();
    }
#endif
    tensor_list_map_[key_name] = tensor;  // use [] instead of insert to ensure latest value
    return true;
  }

  // All current-step tensors, excluding the ":prev" copies.
  std::vector<std::shared_ptr<TensorData>> GetTensor() {
    std::vector<std::shared_ptr<TensorData>> tensor_list;
    for (auto &it : tensor_list_map_) {
      if (!IsPrevTensor(it.first)) tensor_list.push_back(it.second);
    }
    return tensor_list;
  }

  std::shared_ptr<TensorData> GetTensor(const std::string &tensor_name) const {
    auto iter = tensor_list_map_.find(tensor_name);
    if (iter != tensor_list_map_.end()) return iter->second;
    return nullptr;
  }

  std::shared_ptr<TensorData> GetPrevTensor(const std::string &tensor_name) {
    if (tensor_list_map_.find(tensor_name + ":prev") != tensor_list_map_.end()) {
      return tensor_list_map_[tensor_name + ":prev"];
    }
    return nullptr;
  }

  // For each requested name, appends (name, tensor-or-null) to *result_list.
  void SearchTensors(const std::vector<std::string> &search_list,
                     std::vector<std::tuple<std::string, std::shared_ptr<TensorData>>> *result_list) {
    for (auto i : search_list) {
      std::map<std::string, std::shared_ptr<TensorData>>::iterator iter;
      iter = tensor_list_map_.find(i);
      if (iter != tensor_list_map_.end()) {
        result_list->push_back(std::make_tuple(i, iter->second));
      } else {
        result_list->push_back(std::make_tuple(i, nullptr));
      }
    }
  }

  // Drops the previous-step map and retires the current map into it.
  void EmptyTensor() {
    std::lock_guard<std::mutex> lg(lock_);
    prev_tensor_list_map_.clear();
    tensor_list_map_.swap(prev_tensor_list_map_);
  }

  void EmptyCurrentTensor() { tensor_list_map_.clear(); }

  bool EnableMemoryControl() { return mem_total_ > 0; }

  // Marks a tensor as evictable; wakes any reader blocked on cache space.
  void AppendToCacheEvictQueue(const std::string &tensor_name) {
    std::lock_guard<std::mutex> lk(mem_lock_);
    if (std::find(cache_evict_queue_.begin(), cache_evict_queue_.end(), tensor_name) == cache_evict_queue_.end()) {
      cache_evict_queue_.push_back(tensor_name);
      evict_cond.notify_one();
    }
  }

  // Returns false (with an error log) if the tensor can never fit; otherwise
  // evicts cached tensors until data_size bytes are available.
  bool CheckMemoryAvailable(const std::string &backend_name, const uint64_t data_size) {
    // 1. Check if the tensor can fit in the entire limit. If not, don't attempt any read or evictions and generate
    // warning.
    if (data_size > mem_total_) {
      MS_LOG(ERROR) << "Failed to load data of tensor " << backend_name << " because its data size (" << data_size
                    << ") exceeds the maximum memory limit (" << mem_total_ << ").";
      return false;
    }
    // 2. Check if there's is enough cache space available for current tensor. If not, try evict cache.
    bool ret = CheckAndEvictTensorCache(data_size);
    return ret;
  }

  // Blocks until enough cache space is free for data_size bytes, evicting
  // queued candidates one at a time, then reserves the space.
  bool CheckAndEvictTensorCache(const uint64_t data_size) {
    std::string candidate_name;
    uint64_t candidates_size;
    std::unique_lock<std::mutex> lk(mem_lock_);
    while (data_size > mem_total_ - mem_usage_) {
      // wait until there is any not-in-use candidate to be evicted from cache
      evict_cond.wait(lk, [&] { return !cache_evict_queue_.empty(); });
      candidate_name = cache_evict_queue_.front();
      cache_evict_queue_.pop_front();
      // Evict the candidate. Look it up under lock_ (the map is shared with
      // LoadNewTensor) and skip stale queue entries instead of dereferencing a
      // default-inserted null shared_ptr via operator[].
      candidates_size = 0;
      {
        std::lock_guard<std::mutex> lg(lock_);
        auto iter = tensor_list_map_.find(candidate_name);
        if (iter != tensor_list_map_.end()) {
          candidates_size = iter->second->GetByteSize();
          iter->second->DeleteDataPtr();
          tensor_list_map_.erase(iter);
        }
      }
      // Clamp explicitly: the old std::max(uint64_t(0), a - b) was a no-op
      // because unsigned subtraction wraps before max() ever sees it.
      mem_usage_ = (candidates_size > mem_usage_) ? 0 : mem_usage_ - candidates_size;
      MS_LOG(INFO) << "Evict tensor: " << candidate_name;
    }
    // Reserve space for the current target tensor.
    mem_usage_ = std::min(mem_total_, mem_usage_ + data_size);
    return true;
  }

  void SetMemTotal(uint64_t total_mem_size) { this->mem_total_ = total_mem_size; }

#ifdef ONLINE_DBG_MODE
  // Dumps one cached tensor to <filepath>.<format>; returns false if the path
  // is empty or the tensor is not cached.
  bool DumpTensorToFile(const std::string &tensor_name, bool trans_flag, const std::string &filepath,
                        const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type,
                        TypeId device_type, const std::string &addr_format, size_t slot) {
    if (filepath.empty()) {
      MS_LOG(ERROR) << "Dump file path is null!";
      return false;
    }
    std::string path = "";
    if (trans_flag) {
      path = filepath + '.' + host_fmt;
    } else {
      path = filepath + '.' + addr_format;
    }
    MS_LOG(INFO) << "Dump path is " << path;
    std::string tensor_loader_name = tensor_name + ":" + std::to_string(slot);
    auto iter = tensor_list_map_.find(tensor_loader_name);
    if (iter != tensor_list_map_.end()) {
      std::shared_ptr<TensorData> node = iter->second;
      size_t host_size = node->GetByteSize();
      return DumpJsonParser::DumpToFile(path, node->GetDataPtr(), host_size, host_shape, host_type);
    }
    MS_LOG(INFO) << "Tensor name:" << tensor_name << " not found in tensor_list_map_";
    return false;
  }
#endif

 private:
  // the pair is (device_id, iteration)
  std::map<std::string, std::shared_ptr<TensorData>> tensor_list_map_;       // current-step tensors
  std::map<std::string, std::shared_ptr<TensorData>> prev_tensor_list_map_;  // previous-step tensors
  uint32_t iter_num_;   // initialized to -1 (max value) as "no iteration yet" sentinel
  std::mutex lock_;     // guards the two maps above
  std::mutex mem_lock_; // guards mem_usage_ / cache_evict_queue_
  uint64_t mem_total_;  // 0 disables memory control
  uint64_t mem_usage_;
  std::deque<std::string> cache_evict_queue_;
  std::condition_variable evict_cond;
};
#ifdef ONLINE_DBG_MODE
} // namespace mindspore
#endif
#endif // MINDSPORE_CCSRC_DEBUG_TENSOR_LOAD_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/ps/core/communicator/tcp_communicator.h
|
<reponame>mindspore-ai/mindspore<gh_stars>1000+
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_PS_CORE_COMMUNICATOR_TCP_COMMUNICATOR_H_
#define MINDSPORE_CCSRC_PS_CORE_COMMUNICATOR_TCP_COMMUNICATOR_H_
#include <map>
#include <vector>
#include <string>
#include <memory>
#include <unordered_map>
#include "proto/ps.pb.h"
#include "ps/core/server_node.h"
#include "ps/core/cluster_metadata.h"
#include "ps/core/cluster_config.h"
#include "ps/ps_context.h"
#include "ps/core/communicator/task_executor.h"
#include "ps/core/communicator/communicator_base.h"
#include "ps/core/communicator/tcp_msg_handler.h"
#include "ps/core/comm_util.h"
#include "ps/constants.h"
namespace mindspore {
namespace ps {
namespace core {
// Lookup table translating a TcpUserCommand enum value into the textual
// message type string used on the wire / for message-callback registration.
// NOTE(review): a non-inline namespace-scope const map in a header gives every
// including translation unit its own copy; `inline` (C++17) would avoid that —
// confirm against the project's language-standard settings before changing.
const std::unordered_map<TcpUserCommand, std::string> kUserCommandToMsgType = {
  {TcpUserCommand::kPush, "push"},
  {TcpUserCommand::kPull, "pull"},
  {TcpUserCommand::kCount, "count"},
  {TcpUserCommand::kReachThreshold, "countReachThreshold"},
  {TcpUserCommand::kResetCount, "resetCnt"},
  {TcpUserCommand::kGetMetadata, "getMetadata"},
  {TcpUserCommand::kUpdateMetadata, "updateMetadata"},
  {TcpUserCommand::kCounterEvent, "counterEvent"},
  {TcpUserCommand::kPullWeight, "pullWeight"},
  {TcpUserCommand::kPushWeight, "pushWeight"},
  {TcpUserCommand::kSyncIteration, "syncIteration"},
  {TcpUserCommand::kNotifyLeaderToNextIter, "notifyLeaderToNextIter"},
  {TcpUserCommand::kPrepareForNextIter, "prepareForNextIter"},
  {TcpUserCommand::kProceedToNextIter, "proceedToNextIter"},
  {TcpUserCommand::kEndLastIter, "endLastIter"},
  {TcpUserCommand::kStartFLJob, "startFLJob"},
  {TcpUserCommand::kUpdateModel, "updateModel"},
  {TcpUserCommand::kGetModel, "getModel"},
  {TcpUserCommand::kPushMetrics, "pushMetrics"},
  {TcpUserCommand::kNewInstance, "newInstance"},
  {TcpUserCommand::kQueryInstance, "queryInstance"},
  {TcpUserCommand::kEnableFLS, "enableFLS"},
  {TcpUserCommand::kDisableFLS, "disableFLS"}};
// TCP-based communicator: serializes protobuf requests and forwards them to
// server nodes through the underlying AbstractNode.
class TcpCommunicator : public CommunicatorBase {
 public:
  // `node` is held as a raw non-owning pointer; the caller must keep it alive
  // for the lifetime of this communicator.
  explicit TcpCommunicator(const std::shared_ptr<TaskExecutor> &task_executor, AbstractNode *node)
      : task_executor_(task_executor),
        server_num_(0),
        worker_num_(0),
        scheduler_ip_(""),
        scheduler_port_(0),
        abstrace_node_(node) {}
  // NOTE(review): non-virtual `= default` here relies on CommunicatorBase
  // declaring a virtual destructor for safe delete-through-base — confirm.
  ~TcpCommunicator() = default;
  bool Start() override;
  bool Stop() override;
  void RegisterMsgCallBack(const std::string &msg_type, const MessageCallback &cb) override;
  void RegisterEventCallback(const core::ClusterEvent &event, const EventCallback &event_cb);
  // Serializes `pb_msg`, copies it into a fresh byte buffer and sends it to
  // server `rank_id` tagged with `command`. When `output` is non-null, the
  // overload of AbstractNode::Send taking an output buffer is used —
  // presumably a request/response variant; confirm in AbstractNode.
  // Returns false if the send fails; throws (MS_LOG(EXCEPTION)) on memcpy_s
  // failure.
  template <class T>
  bool SendPbRequest(const T &pb_msg, const uint32_t &rank_id, TcpUserCommand command,
                     std::shared_ptr<std::vector<unsigned char>> *output = nullptr) {
    const std::string &msg_str = pb_msg.SerializeAsString();
    std::shared_ptr<unsigned char[]> msg(new unsigned char[msg_str.size()]);
    MS_ERROR_IF_NULL_W_RET_VAL(msg, false);
    size_t dest_size = msg_str.size();
    size_t src_size = msg_str.size();
    if (memcpy_s(msg.get(), dest_size, msg_str.c_str(), src_size) != EOK) {
      MS_LOG(EXCEPTION) << "Memcpy_s error";
    }
    if (output != nullptr) {
      if (!abstrace_node_->Send(NodeRole::SERVER, rank_id, msg, msg_str.size(), static_cast<int>(command), output)) {
        MS_LOG(ERROR) << "Sending protobuffer message to server " << rank_id << " failed.";
        return false;
      }
    } else {
      if (!abstrace_node_->Send(NodeRole::SERVER, rank_id, msg, msg_str.size(), static_cast<int>(command))) {
        MS_LOG(ERROR) << "Sending protobuffer message to server " << rank_id << " failed.";
        return false;
      }
    }
    return true;
  }

 private:
  std::shared_ptr<TaskExecutor> task_executor_;
  TcpMsgCallback tcp_msg_callback_;
  OnNodeEventCallback event_callback_;
  uint32_t server_num_;
  uint32_t worker_num_;
  std::string scheduler_ip_;
  uint16_t scheduler_port_;
  // NOTE(review): "abstrace_node_" looks like a typo for "abstract_node_";
  // renaming would also touch the .cc defining Start()/Stop(), so left as-is.
  AbstractNode *abstrace_node_;
};
} // namespace core
} // namespace ps
} // namespace mindspore
#endif // MINDSPORE_CCSRC_PS_CORE_COMMUNICATOR_TCP_COMMUNICATOR_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/runtime/framework/actor/control_flow/switch_actor.h
|
<reponame>mindspore-ai/mindspore
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_SWITCH_ACTOR_H_
#define MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_SWITCH_ACTOR_H_
#include <vector>
#include <string>
#include <unordered_map>
#include <memory>
#include <utility>
#include "runtime/framework/actor/actor_common.h"
#include "runtime/framework/actor/abstract_actor.h"
namespace mindspore {
namespace runtime {
using mindspore::device::DeviceContext;
using mindspore::session::KernelWithIndex;
constexpr size_t kSwitchCondPos = 1;
constexpr size_t kMaxSwitchCondSize = 8;
// Switch actor is used to execute the branch according to the input condition.
// Switch and SwitchLayer node will be converted to switch actor.
class SwitchActor : public AbstractActor {
 public:
  SwitchActor(const std::string &name, const std::vector<KernelWithIndex> &parameters)
      : AbstractActor(name, KernelTransformType::kSwitchActor, nullptr), formal_parameters_(parameters) {
    // The actor expects one result per formal parameter before it can fire.
    input_result_num_ = formal_parameters_.size();
  }
  ~SwitchActor() override = default;
  void Init() override;
  // The switch actor collects single node when receive the result of kernel actor.
  void CollectRealParameter(const AnfNodePtr &node, size_t index, size_t position,
                            OpContext<DeviceTensor> *const context);
  // The switch actor collects all real parameters when receive the output of gather actor.
  void CollectRealParameters(const std::vector<KernelWithIndex> &real_parameters, size_t position,
                             OpContext<DeviceTensor> *const context);

 private:
  friend class GraphScheduler;
  // Computes which output branch to take for the given context (defined in .cc).
  size_t GetIndex(const OpContext<DeviceTensor> *const context);
  // Formal parameters of actor, which is the front node.
  std::vector<KernelWithIndex> formal_parameters_;
  // Input data, keyed first by an int id, then by input position.
  // NOTE(review): outer key semantics (presumably a sequential/step id) are
  // established by the .cc — confirm there.
  std::unordered_map<int, std::unordered_map<size_t, std::vector<KernelWithIndex>>> input_nodes_;
  // The store node records the value node input of the switch actor.
  std::vector<std::pair<size_t, AnfNodePtr>> store_nodes_;
  // Output arrows, one vector per branch.
  std::vector<std::vector<DataArrowPtr>> output_branch_data_arrows_;
  std::vector<std::vector<DataArrowPtr>> output_branch_result_arrows_;
  std::vector<AID> output_branch_real_parameter_arrows_;
  // The output_data_ corresponds to the output_data_arrows_ one by one.
  std::vector<std::vector<OpDataUniquePtr<DeviceTensor>>> output_data_;
  // Number of inputs required before the actor can run (== formal parameter count).
  size_t input_result_num_;
};
using SwitchActorPtr = std::shared_ptr<SwitchActor>;
} // namespace runtime
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_SWITCH_ACTOR_H_
|
mindspore-ai/mindspore
|
mindspore/ccsrc/runtime/framework/actor/control_flow/gather_actor.h
|
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_GATHER_ACTOR_H_
#define MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_GATHER_ACTOR_H_
#include <vector>
#include <string>
#include <memory>
#include <unordered_map>
#include <stack>
#include <utility>
#include <algorithm>
#include "runtime/framework/actor/actor_common.h"
#include "runtime/framework/actor/abstract_actor.h"
namespace mindspore {
namespace runtime {
// Gather actor will be used in the control flow. When the subgraph is called, the real parameters need to be put
// together and sent to the subgraph.
class GatherActor : public AbstractActor {
 public:
  GatherActor(const std::string &name, const std::vector<KernelWithIndex> &parameters)
      : AbstractActor(name, KernelTransformType::kGatherActor, nullptr), formal_parameters_(parameters) {}
  ~GatherActor() override = default;
  // The gather actor collects single node when receive the result of kernel actor.
  void CollectRealParameter(const AnfNodePtr &node, size_t index, size_t position,
                            OpContext<DeviceTensor> *const context);
  // The gather actor collects all real parameters when receive the output of switch actor.
  void CollectRealParameters(const std::vector<KernelWithIndex> &real_parameters, size_t position,
                             OpContext<DeviceTensor> *const context);

 private:
  friend class GraphScheduler;
  // Formal parameters of actor, which is the front node.
  std::vector<KernelWithIndex> formal_parameters_;
  // Input data keyed by a uuid pointer, then by input position.
  // NOTE(review): the uuid key is a non-owning pointer — presumably identifies
  // the originating context/sequence; verify lifetime against the .cc.
  std::unordered_map<uuids::uuid *, std::unordered_map<size_t, std::vector<KernelWithIndex>>> input_nodes_;
  // The store node records the value node input of the gather actor.
  std::vector<std::pair<size_t, KernelWithIndex>> store_nodes_;
  // Output arrow: destination actor id and position, keyed by front node.
  std::unordered_map<AnfNodePtr, std::pair<AID, size_t>> output_branch_arrows_;
};
using GatherActorPtr = std::shared_ptr<GatherActor>;
} // namespace runtime
} // namespace mindspore
#endif // MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_CONTROLFLOW_GATHER_ACTOR_H_
|
mindspore-ai/mindspore
|
mindspore/core/utils/info.h
|
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_UTILS_INFO_H_
#define MINDSPORE_CORE_UTILS_INFO_H_
#include <iostream>
#include <string>
#include <memory>
#include <stack>
#include <utility>
#include <vector>
#include "base/base.h"
#include "utils/trace_info.h"
namespace mindspore {
// namespace to support intermediate representation definition
// Controls how the source line is rendered by Location::ToString.
enum SourceLineTip { kSourceLineTipDiscard = 0, kSourceLineTipNextLine = 1, kSourceLineTipInLine = 2 };
// Location records a region [line:column, line_end:column_end] in a source
// file, used to point diagnostics back at user code.
class Location {
 public:
  Location(const std::string &file_name, int line, int column, int line_end, int column_end)
      : file_name_(file_name), line_(line), column_(column), line_end_(line_end), column_end_(column_end) {}
  // The previous hand-written copy constructor was member-wise identical to
  // the compiler-generated one; defaulting it keeps behavior with less code.
  Location(const Location &loc) = default;
  // Renders the location (implementation in the .cc).
  std::string ToString(SourceLineTip tip = kSourceLineTipNextLine) const;
  std::string file_name() const { return file_name_; }
  int line() const { return line_; }
  void set_line(int line) { line_ = line; }
  int line_end() const { return line_end_; }
  void set_line_end(int line) { line_end_ = line; }
  int column() const { return column_; }
  void set_column(int column) { column_ = column; }
  int column_end() const { return column_end_; }
  void set_column_end(int column) { column_end_ = column; }
  ~Location() = default;

 private:
  std::string file_name_;
  int line_;
  int column_;
  int line_end_;
  int column_end_;
};
class TraceContext;
using TraceContextPtr = std::shared_ptr<TraceContext>;
/// \brief TraceManager defines interface for debug trace management.
class MS_CORE_API TraceManager {
 public:
  /// \brief Constructor of TraceManager.
  TraceManager() = default;
  /// \brief Destructor of TraceManager.
  ~TraceManager() = default;
  /// \brief Get current trace context.
  ///
  /// \return The current trace context.
  static TraceContextPtr CurrentContextInfo();
  /// \brief Debug trace with the given function name and location.
  ///
  /// \param[in] func_name The function name for debug trace.
  /// \param[in] location The source code location for debug trace.
  static void DebugTrace(const std::string &func_name, const LocationPtr &location);
  /// \brief Debug trace with the given location.
  ///
  /// \param[in] location The source code location for debug trace.
  static void DebugTrace(const LocationPtr &location);
  /// \brief Debug trace with the given trace info.
  ///
  /// \param[in] trace_info The trace info for debug.
  static void DebugTrace(const TraceInfoPtr &trace_info);
  /// \brief Debug trace with a cloned trace info and debug info.
  ///
  /// \param[in] debug_info The debug info for debug trace.
  /// \param[in] trace_info The trace info for debug trace.
  static void DebugTrace(const DebugInfoPtr &debug_info, const TraceInfoPtr &trace_info);
  /// \brief End current debug trace.
  ///
  /// NOTE(review): DebugTrace/EndTrace calls must be balanced; TraceGuard in
  /// this header pairs them via RAII.
  static void EndTrace();
  /// \brief Clear debug info for parse or resolve.
  static void ClearParseOrResolveDebugInfo();
  /// \brief Get debug info for parse or resolve.
  ///
  /// \return The debug info for parse or resolve.
  static DebugInfoPtr GetParseOrResolveDebugInfo();
  /// \brief Trace context stack for current thread.
  thread_local static std::stack<TraceContextPtr> trace_context_stack_;
  /// \brief Debug info for parse or resolve for current thread.
  thread_local static DebugInfoPtr parse_or_resolve_debug_info_;
};
// RAII guard: installs a debug-trace context on construction and removes it
// (TraceManager::EndTrace) on destruction, so traces stay balanced even when
// exceptions propagate.
class TraceGuard {
 public:
  // Takes the function name by const reference; the previous by-value const
  // parameter forced a useless std::string copy on every guard construction.
  TraceGuard(const std::string &func_name, const LocationPtr &location) {
    TraceManager::DebugTrace(func_name, location);
  }
  explicit TraceGuard(const LocationPtr &location) { TraceManager::DebugTrace(location); }
  explicit TraceGuard(const TraceInfoPtr &trace_info) { TraceManager::DebugTrace(trace_info); }
  TraceGuard(const DebugInfoPtr &debug_info, const TraceInfoPtr &trace_info) {
    TraceManager::DebugTrace(debug_info, trace_info);
  }
  ~TraceGuard() { TraceManager::EndTrace(); }
};
// Per-scope debug trace context: carries the source location, trace info and
// python function name captured when a trace scope is entered.
class TraceContext {
 public:
  ~TraceContext() = default;
  // Each constructor first inherits attributes from the enclosing context
  // (ProcessAttributeFromContext, defined in the .cc) and then overrides the
  // single attribute it was given.
  explicit TraceContext(const LocationPtr &loc) {
    ProcessAttributeFromContext();
    location_ = loc;
  }
  explicit TraceContext(const std::string &func_name) {
    ProcessAttributeFromContext();
    func_name_ = func_name;
  }
  explicit TraceContext(const TraceInfoPtr &trace_info) {
    ProcessAttributeFromContext();
    trace_info_ = trace_info;
  }
  void set_location(const LocationPtr &loc) { location_ = loc; }
  // Const-qualified for consistency with trace_info(); these accessors do not
  // modify the context (backward compatible for all existing callers).
  LocationPtr location() const { return location_; }
  void set_trace_info(const TraceInfoPtr &trace_info) { trace_info_ = trace_info; }
  TraceInfoPtr trace_info() const { return trace_info_; }
  void set_func_name(const std::string &func_name) { func_name_ = func_name; }
  std::string func_name() const { return func_name_; }

 protected:
  // Copies attributes from the current top-of-stack context, if any (.cc).
  void ProcessAttributeFromContext();

 private:
  LocationPtr location_;
  TraceInfoPtr trace_info_;
  std::string func_name_;
};
/// \brief DebugInfo defines information for debug trace.
class MS_CORE_API DebugInfo : public Base {
 public:
  /// \brief Construct a default DebugInfo.
  DebugInfo();
  /// \brief Construct DebugInfo with the given name.
  ///
  /// \param[in] name The DebugInfo name.
  explicit DebugInfo(const std::string &name);
  /// \brief Construct DebugInfo with the given location.
  ///
  /// \param[in] loc The location for DebugInfo.
  explicit DebugInfo(const LocationPtr &loc);
  /// \brief Destructor of DebugInfo.
  ~DebugInfo() override = default;
  MS_DECLARE_PARENT(DebugInfo, Base);
  /// \brief Get the debug id.
  ///
  /// \return The debug id.
  int64_t debug_id();
  /// \brief Get the unique id.
  ///
  /// \return The unique id.
  int64_t unique_id() const { return unique_id_; }
  /// \brief Get the unique id through copy.
  ///
  /// \return The unique id through copy.
  int64_t unique_id_through_copy() const;
  /// \brief Get the id as a string.
  ///
  /// \return The string id.
  std::string get_id() { return std::to_string(debug_id()); }
  /// \brief Set the trace info.
  ///
  /// \param[in] trace_info The trace info to be set.
  void set_trace_info(const TraceInfoPtr &trace_info) { trace_info_ = trace_info; }
  /// \brief Get the trace info.
  ///
  /// \return The trace info.
  TraceInfoPtr trace_info() const { return trace_info_; }
  /// \brief Set the location.
  ///
  /// \param[in] loc The location to be set.
  void set_location(const LocationPtr &loc) { location_ = loc; }
  /// \brief Get the location.
  ///
  /// \return The location.
  virtual LocationPtr location() { return location_; }
  /// \brief Get the name.
  ///
  /// \return The name of the DebugInfo.
  std::string name() { return name_; }
  /// \brief Set the name.
  ///
  /// \param[in] name The name to be set.
  void set_name(const std::string &name) { name_ = name; }
  /// \brief Get the debug name.
  ///
  /// \return The debug name of the DebugInfo.
  virtual std::string debug_name();
  /// \brief Get the python function name that this DebugInfo belongs to.
  ///
  /// \return The python function name that this DebugInfo belongs to.
  virtual std::string get_python_func_belonged() { return ""; }

 protected:
  // Helper for derived classes to obtain a shared_ptr of their own type.
  template <typename Derived>
  std::shared_ptr<Derived> shared_from_base() {
    return std::static_pointer_cast<Derived>(shared_from_this());
  }

 private:
  // Seeds trace_info_/location_ from the current trace context, if one exists.
  void InitValueFromContext() {
    if (TraceManager::CurrentContextInfo() != nullptr) {
      auto context_info = TraceManager::CurrentContextInfo();
      trace_info_ = context_info->trace_info();
      location_ = context_info->location();
    }
  }
  // NOTE(review): plain int64_t increment — not thread-safe; confirm all
  // DebugInfo construction happens on one thread or make this atomic.
  static int64_t gen_unique_id() {
    static int64_t cur_unique_id = 0;
    return cur_unique_id++;
  }

 protected:
  int64_t unique_id_;
  int64_t debug_id_;
  TraceInfoPtr trace_info_;
  LocationPtr location_;
  std::string name_;
};
/// \brief NodeDebugInfo defines debug information for a node.
class MS_CORE_API NodeDebugInfo : public DebugInfo {
 public:
  /// \brief Construct a default NodeDebugInfo.
  ///
  /// Captures the python function name from the current trace context, if any.
  NodeDebugInfo() {
    if (TraceManager::CurrentContextInfo() != nullptr) {
      auto context_info = TraceManager::CurrentContextInfo();
      py_func_belonged_ = context_info->func_name();
    }
  }
  /// \brief Construct NodeDebugInfo with a given name.
  ///
  /// \param[in] name the name of the NodeDebugInfo.
  ///
  /// NOTE(review): duplicates the context capture of the default constructor;
  /// constructor delegation could remove the duplication.
  explicit NodeDebugInfo(const std::string &name) : DebugInfo(name) {
    if (TraceManager::CurrentContextInfo() != nullptr) {
      auto context_info = TraceManager::CurrentContextInfo();
      py_func_belonged_ = context_info->func_name();
    }
  }
  /// \brief Destructor of the NodeDebugInfo.
  ~NodeDebugInfo() override = default;
  std::string debug_name() override;
  /// \brief Set the node.
  ///
  /// \param[in] node The node to be set. Stored as a weak pointer, so this
  /// does not extend the node's lifetime.
  void set_node(const std::shared_ptr<AnfNode> &node) { node_ = AnfNodeWeakPtr(node); }
  /// \brief Get the node.
  ///
  /// \return The node, or nullptr if it has already been destroyed.
  std::shared_ptr<AnfNode> get_node() const { return node_.lock(); }
  /// \brief Set python function name that this NodeDebugInfo belongs to.
  ///
  /// \param[in] name The python function name to be set.
  void set_py_func_belonged(const std::string &name) { py_func_belonged_ = name; }
  std::string get_python_func_belonged() override { return py_func_belonged_; }

 private:
  // Weak reference back to the owning node.
  AnfNodeWeakPtr node_;
  std::string py_func_belonged_;
};
using NodeDebugInfoPtr = std::shared_ptr<NodeDebugInfo>;
// Debug information attached to a whole function graph.
class GraphDebugInfo : public DebugInfo {
 public:
  // Captures the python function name from the current trace context, if any.
  GraphDebugInfo() {
    if (TraceManager::CurrentContextInfo() != nullptr) {
      auto context_info = TraceManager::CurrentContextInfo();
      py_func_name_ = context_info->func_name();
      // Redundant with the member's default-constructed null state, kept for
      // explicitness.
      deco_loc_ = nullptr;
    }
  }
  // NOTE(review): duplicates the default constructor's context capture;
  // constructor delegation could remove the duplication.
  explicit GraphDebugInfo(const std::string &name) : DebugInfo(name) {
    if (TraceManager::CurrentContextInfo() != nullptr) {
      auto context_info = TraceManager::CurrentContextInfo();
      py_func_name_ = context_info->func_name();
      deco_loc_ = nullptr;
    }
  }
  ~GraphDebugInfo() override = default;
  std::string debug_name() override;
  LocationPtr location() override;
  // Location of the decorator list, if any (see set_deco_location in the .cc).
  LocationPtr deco_location() { return deco_loc_; }
  // Stored as a weak pointer, so this does not extend the graph's lifetime.
  void set_graph(const FuncGraphPtr &func_graph) { func_graph_ = FuncGraphWeakPtr(func_graph); }
  // Returns nullptr if the graph has already been destroyed.
  FuncGraphPtr get_graph() const { return func_graph_.lock(); }
  void set_full_name(const std::string &name) { full_name_ = name; }
  std::string get_full_name() { return full_name_; }
  void set_deco_location(const LocationPtr &deco_list_loc);
  std::string get_python_func_belonged() override { return py_func_name_; }

 private:
  FuncGraphWeakPtr func_graph_;
  LocationPtr deco_loc_;
  std::string py_func_name_;
  std::string full_name_;
};
using GraphDebugInfoPtr = std::shared_ptr<GraphDebugInfo>;
} // namespace mindspore
#endif // MINDSPORE_CORE_UTILS_INFO_H_
|
mindspore-ai/mindspore
|
mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h
|
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MODEL_PARSER_H
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MODEL_PARSER_H
#include <string>
#include <unordered_map>
#include <memory>
#include <vector>
#include <set>
#include <map>
#include <utility>
#include "include/registry/model_parser.h"
#include "include/registry/model_parser_registry.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
#include "tools/common/tensor_util.h"
namespace mindspore {
namespace lite {
// Parses a TFLite flatbuffer model into an AnfNode-based func graph.
class TfliteModelParser : public converter::ModelParser {
 public:
  TfliteModelParser() = default;
  // Frees the raw model buffer read from disk (allocated with new[] in
  // ReadTfliteModel — confirm in the .cc).
  // NOTE(review): the class owns a raw buffer but copy construction/assignment
  // are not deleted (Rule of Five); copying would double-free. Verify the
  // base class already forbids copying.
  ~TfliteModelParser() override {
    if (tflite_model_buf_ != nullptr) {
      delete[] tflite_model_buf_;
      tflite_model_buf_ = nullptr;
    }
  }
  // Entry point: parse the model referenced by `flag` into a FuncGraph.
  api::FuncGraphPtr Parse(const converter::ConverterParameters &flag) override;
  // Post-parse adjustment pass over all produced func graphs.
  static int Tflite2AnfAdjust(const std::set<FuncGraphPtr> &all_func_graphs);

 private:
  std::unique_ptr<tflite::ModelT> ReadTfliteModel(const std::string &model_path);
  STATUS ConvertConstTensor(const std::unique_ptr<tflite::TensorT> &tensor, const ParameterPtr &parameter,
                            const std::string &tensor_name, bool is_uint8_weight_quant);
  STATUS ConvertOps(const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, const FuncGraphPtr &func_graph,
                    std::unordered_map<int, AnfNodePtr> *anf_node_map);
  STATUS ConvertGraphInputs(const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, const FuncGraphPtr &func_graph,
                            std::unordered_map<int, AnfNodePtr> *anf_node_map);
  STATUS ConvertGraphOutputs(const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph, const FuncGraphPtr &func_graph,
                             std::unordered_map<int, AnfNodePtr> *anf_node_map);
  STATUS ConvertTfliteGraph();
  STATUS ProcessControlFlowOp(const std::unique_ptr<tflite::OperatorT> &op, const CNodePtr &anf_node,
                              const std::string &op_type);
  STATUS BuildSubFuncGraphMap(size_t subgraph_idx, const FuncGraphPtr &sub_func_graph,
                              const std::string &subgraph_name);
  STATUS ControlFlowNodePostProcess();
  static STATUS ConvertOutputTensor(const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph,
                                    const FuncGraphPtr &func_graph, const std::unique_ptr<tflite::OperatorT> &op,
                                    const CNodePtr &dst_cnode, std::unordered_map<int, AnfNodePtr> *anf_node_map);
  static STATUS ConvertOpQuantParams(const std::unique_ptr<tflite::OperatorT> &op,
                                     const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph,
                                     ops::PrimitiveC *primitive_c);
  static STATUS SetTensorQuantParam(const std::unique_ptr<tflite::TensorT> &tflite_tensor,
                                    std::vector<QuantParamT> *quant_params, int round_type = 1);
  STATUS TfliteModelVerify();

 private:
  // Deserialized model, owned by this parser.
  std::unique_ptr<tflite::ModelT> tflite_model_;
  // Control-flow cnodes keyed by op index; filled during ConvertOps.
  std::map<int, CNodePtr> control_flow_nodes_;
  std::map<CNodePtr, std::pair<FuncGraphPtr, FuncGraphPtr>> control_flow_map_;
  // Raw flatbuffer bytes backing tflite_model_; freed in the destructor.
  char *tflite_model_buf_ = nullptr;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MODEL_PARSER_H
|
mindspore-ai/mindspore
|
mindspore/core/ir/manager.h
|
<reponame>mindspore-ai/mindspore
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2019-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_IR_MANAGER_H_
#define MINDSPORE_CORE_IR_MANAGER_H_
#include <unordered_set>
#include <unordered_map>
#include <set>
#include <map>
#include <list>
#include <string>
#include <vector>
#include <utility>
#include <memory>
#include <functional>
#include "utils/any.h"
#include "utils/misc.h"
#include "utils/signal.h"
#include "utils/ordered_set.h"
#include "utils/ordered_map.h"
#include "ir/anf.h"
#include "ir/graph_utils.h"
#include "utils/hashing.h"
#include "base/base_ref.h"
#include "api/ir/func_graph_manager.h"
namespace mindspore {
namespace change {
struct ChangeCounter;
// Abstract base for one pending graph modification; Apply executes it.
// Presumably consumed by FuncGraphManager::CommitChanges — ChangeCounter is
// opaque here (defined elsewhere).
struct Change {
  virtual ~Change() = default;
  virtual void Apply(ChangeCounter *counter) = 0;
};
using ChangePtr = std::unique_ptr<Change>;
} // namespace change
class FuncGraphTransaction;
class FuncGraphManager;
using FuncGraphManagerPtr = std::shared_ptr<FuncGraphManager>;
using AnfNodeIndexSet = api::AnfNodeIndexSet;
// NodeUsersMap, for node B input i use node A, it will be one item in map with key: A, and value: (B, i)
using NodeUsersMap = api::NodeUsersMap;
using FuncGraphSetPair = std::pair<FuncGraphPtr, FuncGraphSet>;
using FuncGraphSetPtr = std::shared_ptr<FuncGraphSet>;
// manage the func graphs.
// if no manager exist, just create one and associate it to all func graphs; else reuse simply.
// func_graph, be managed graph
// manage: if true, created manager will be set in func_graph
// FuncGraphManagerPtr: return created manager
FuncGraphManagerPtr Manage(FuncGraphPtr func_graph, bool manage = true);
FuncGraphManagerPtr Manage(const std::vector<FuncGraphPtr> &func_graphs, bool manage = true);
FuncGraphManagerPtr MakeManager(const std::vector<FuncGraphPtr> &func_graphs = {}, bool manage = true);
// Signals emitted by the manager; InvalidateComputer presumably triggers
// DepComputer::OnInvalidateComputer to drop cached analysis results — confirm
// where the signal is connected.
struct Signals {
  Signal<void()> InvalidateComputer;
};
using CNodeIndexPair = std::pair<AnfNodePtr, int>;
using CNodeIndexPairPtr = std::shared_ptr<CNodeIndexPair>;
using FuncGraphToFuncGraphSetMap = OrderedMap<FuncGraphPtr, FuncGraphSet>;
// Analysis base class: graph analyses that are recomputed on demand each time
// they are read, with per-graph and global validity flags.
class DepComputer {
 public:
  explicit DepComputer(const FuncGraphManager *manager);
  // Non-owning pointer; just drop the reference on destruction.
  virtual ~DepComputer() { manager_ = nullptr; }
  virtual size_t size() const { return 0; }
  // Invalidate all cached results (global and per-graph).
  void Reset() {
    ExtraReset();
    validate_ = false;
    func_graphs_validate_.clear();
  }
  void OnInvalidateComputer() { Reset(); }
  // Recompute the whole analysis / the analysis for a single graph (.cc).
  void Recompute();
  void Recompute(const FuncGraphPtr &fg);
  bool IsValidate() const { return validate_; }
  // Whether the cached per-graph result for `fg` is up to date. Uses find()
  // instead of operator[]: the previous implementation default-inserted a
  // `false` entry for every graph queried, mutating the map from what should
  // be a read-only query.
  bool IsValidate(const FuncGraphPtr &fg) {
    auto iter = func_graphs_validate_.find(fg);
    return iter != func_graphs_validate_.end() && iter->second;
  }

 protected:
  // Subclasses clear their own cached containers here.
  virtual void ExtraReset() {}
  // Subclasses perform the actual computation here.
  virtual void RealRecompute() {}
  virtual void RealRecompute(FuncGraphPtr) {}
  const FuncGraphManager *manager_;
  bool validate_;
  OrderedMap<FuncGraphPtr, bool> func_graphs_validate_;

 private:
  friend FuncGraphManager;
};
// graph g's all direct or proxy parents
class FuncGraphParentsTotalComputer final : public DepComputer {
 public:
  explicit FuncGraphParentsTotalComputer(const FuncGraphManager *m) : DepComputer(m) {}
  ~FuncGraphParentsTotalComputer() override = default;
  // Cached result: func graph -> set of all direct or proxy parent graphs.
  FuncGraphToFuncGraphSetMap &func_graph_parents_total_analysis() { return func_graph_parents_total_analysis_; }
  size_t size() const override { return func_graph_parents_total_analysis_.size(); }
  FuncGraphToFuncGraphSetMap func_graph_parents_total_analysis_;

 protected:
  void ExtraReset() override { func_graph_parents_total_analysis_.clear(); }
  void RealRecompute(FuncGraphPtr fg) override;

 private:
  // Recursive search; `seen_fgs` memoizes results for graphs already visited.
  FuncGraphSetPtr SeekParents(const FuncGraphPtr &fg, std::unordered_map<FuncGraphPtr, FuncGraphSetPtr> *seen_fgs);
};
using FuncGraphToFuncGraphMap = OrderedMap<FuncGraphPtr, FuncGraphPtr>;
// graph's nearest parent in parents total
class ParentComputer final : public DepComputer {
 public:
  explicit ParentComputer(const FuncGraphManager *m) : DepComputer(m) {}
  ~ParentComputer() override = default;
  // Cached result: func graph -> its nearest parent graph.
  FuncGraphToFuncGraphMap &parent_analysis() { return parent_analysis_; }
  size_t size() const override { return parent_analysis_.size(); }
  FuncGraphToFuncGraphMap parent_analysis_;

 protected:
  void ExtraReset() override { parent_analysis_.clear(); }
  void RealRecompute(FuncGraphPtr fg) override;
};
// graph's children graph except self
class ChildrenComputer final : public DepComputer {
 public:
  explicit ChildrenComputer(const FuncGraphManager *m) : DepComputer(m) {}
  ~ChildrenComputer() override = default;
  // Cached result: func graph -> set of child graphs (excluding itself).
  FuncGraphToFuncGraphSetMap &children_analysis() { return children_analysis_; }
  size_t size() const override { return children_analysis_.size(); }
  FuncGraphToFuncGraphSetMap children_analysis_;

 protected:
  void ExtraReset() override { children_analysis_.clear(); }
  void RealRecompute(FuncGraphPtr fg) override;
};
// graph's children graph include self
class ScopeComputer final : public DepComputer {
 public:
  explicit ScopeComputer(const FuncGraphManager *m) : DepComputer(m) {}
  ~ScopeComputer() override = default;
  // Cached result: func graph -> set of child graphs (including itself).
  FuncGraphToFuncGraphSetMap &scope_analysis() { return scope_analysis_; }
  size_t size() const override { return scope_analysis_.size(); }
  FuncGraphToFuncGraphSetMap scope_analysis_;

 protected:
  void ExtraReset() override { scope_analysis_.clear(); }
  void RealRecompute(FuncGraphPtr fg) override;
};
using FVTotalMap = OrderedMap<FuncGraphPtr, OrderedMap<BaseRef, int, BaseRefHash>>;
// Free-variable totals per graph. Note: recomputed globally (RealRecompute
// with no argument), unlike the per-graph computers above.
class FVTotalComputer final : public DepComputer {
 public:
  explicit FVTotalComputer(const FuncGraphManager *m) : DepComputer(m) {}
  ~FVTotalComputer() override = default;
  // Cached result: func graph -> (free variable -> use count).
  FVTotalMap &fv_total_analysis() { return fv_total_analysis_; }
  size_t size() const override { return fv_total_analysis_.size(); }
  FVTotalMap fv_total_analysis_;

 protected:
  void ExtraReset() override { fv_total_analysis_.clear(); }
  void RealRecompute() override;
};
// Transitive closure of the graphs used by each graph.
class FuncGraphsUsedTotalComputer final : public DepComputer {
 public:
  explicit FuncGraphsUsedTotalComputer(const FuncGraphManager *m) : DepComputer(m) {}
  ~FuncGraphsUsedTotalComputer() override = default;
  // Cached result: func graph -> set of all graphs it (transitively) uses.
  FuncGraphToFuncGraphSetMap &func_graph_used_total_analysis() { return func_graph_used_total_analysis_; }
  size_t size() const override { return func_graph_used_total_analysis_.size(); }
  FuncGraphToFuncGraphSetMap func_graph_used_total_analysis_;

 protected:
  void ExtraReset() override { func_graph_used_total_analysis_.clear(); }
  void RealRecompute(FuncGraphPtr fg) override;
};
using FuncGraphToBoolMap = OrderedMap<FuncGraphPtr, bool>;
using RecursiveMap = OrderedMap<FuncGraphPtr, std::shared_ptr<std::list<FuncGraphPtr>>>;
// Detects recursion: whether a graph participates in a call cycle, and the
// cycle members themselves.
class RecursiveComputer final : public DepComputer {
 public:
  explicit RecursiveComputer(const FuncGraphManager *m) : DepComputer(m) {}
  ~RecursiveComputer() override = default;
  // Cached result: func graph -> list of graphs forming its recursion cycle.
  RecursiveMap &recursive_map() { return recursive_map_; }
  // Cached result: func graph -> whether it is recursive at all.
  FuncGraphToBoolMap &recursive_analysis() { return recursive_analysis_; }
  void CheckRecursiveGraphs(const FuncGraphPtr &fg, std::list<FuncGraphPtr> *trace);
  size_t size() const override { return recursive_analysis_.size(); }
  RecursiveMap recursive_map_;
  FuncGraphToBoolMap recursive_analysis_;

 protected:
  void ExtraReset() override {
    recursive_analysis_.clear();
    recursive_map_.clear();
  }
  void RealRecompute(FuncGraphPtr fg) override;
};
// Whether each graph (transitively) involves a J transform — presumably the
// autodiff J primitive; confirm in SeekJ's implementation.
class FuncGraphJTotalComputer final : public DepComputer {
 public:
  explicit FuncGraphJTotalComputer(const FuncGraphManager *m) : DepComputer(m) {}
  ~FuncGraphJTotalComputer() override = default;
  // Cached result: func graph -> whether it involves J.
  FuncGraphToBoolMap &j_total_analysis() { return j_total_analysis_; }
  size_t size() const override { return j_total_analysis_.size(); }
  FuncGraphToBoolMap j_total_analysis_;

 protected:
  void ExtraReset() override { j_total_analysis_.clear(); }
  void RealRecompute(FuncGraphPtr fg) override;
  bool SeekJ(const FuncGraphPtr &fg, size_t seen_num);
};
// Central owner of a set of func graphs: tracks managed graphs/nodes, the
// node-user (reverse edge) index, and lazily-computed analyses (parents,
// children, free variables, recursion, J usage).
// Fix: three parameter declarations contained mojibake "¶meter(s)" — an
// HTML-entity-eaten "&para..." — restored to proper reference parameters.
class FuncGraphManager : public std::enable_shared_from_this<FuncGraphManager>, public api::FuncGraphManager {
public:
explicit FuncGraphManager(const std::vector<FuncGraphPtr> &roots, bool manage = true);
~FuncGraphManager() {
if (is_manage_) {
RemoveRoots();
}
Clear();
}
void Reset();
void Init();
void Clear();
// Register a graph (and, when is_root, add it to roots_).
void AddFuncGraph(const FuncGraphPtr &func_graph, bool is_root = false);
void KeepRoots(const std::vector<FuncGraphPtr> &roots = {});
void RemoveRoots();
// Parameter mutation entry points; keep node_users_ consistent.
void SetParameters(const FuncGraphPtr &fg, const std::vector<AnfNodePtr> &parameters);
void AddParameter(const FuncGraphPtr &fg, const AnfNodePtr &parameter);
void InsertFrontParameter(const FuncGraphPtr &fg, const AnfNodePtr &parameter);
// Drop graphs that became unreachable (unless still referenced by users).
void MaybeDropFuncGraphs(const FuncGraphSet &func_graphs, bool ignore_users = false);
bool Replace(const AnfNodePtr &old_node, const AnfNodePtr &new_node) final;
void SetEdge(const AnfNodePtr &node, int index, const AnfNodePtr &value) final;
void AddEdge(const AnfNodePtr &node, const AnfNodePtr &value) final;
void MoveAllCNodeDropGraph(const FuncGraphPtr &source, const FuncGraphPtr &target, const ScopePtr &scope);
// Batch-edit API: collect changes in a transaction, then CommitChanges.
FuncGraphTransaction Transact();
void CommitChanges(std::vector<change::ChangePtr> &&changes);
bool IsManaged() const { return is_manage_; }
const FuncGraphSet &roots() const { return roots_; }
const FuncGraphSet &func_graphs() const { return func_graphs_; }
AnfNodeSet &all_nodes() { return all_nodes_; }
NodeUsersMap &node_users() { return node_users_; }
const NodeUsersMap &node_users() const final { return node_users_; }
// Lazily computed analyses (forwarded to the computers below).
FVTotalMap &free_variables_total() const;
FuncGraphSet &func_graph_parents_total(const FuncGraphPtr &fg) const;
FuncGraphSet &scopes(const FuncGraphPtr &fg) const;
FuncGraphPtr parent(const FuncGraphPtr &fg) const;
FuncGraphSet &children(const FuncGraphPtr &fg) const;
FuncGraphSet &func_graphs_used_total(const FuncGraphPtr &fg) const;
bool recursive(const FuncGraphPtr &fg) const;
std::shared_ptr<std::list<FuncGraphPtr>> recursive_graphs(const FuncGraphPtr &fg) const;
bool func_graph_j_total(const FuncGraphPtr &fg) const;
std::shared_ptr<Signals> signals() const { return signals_; }
// Static Analysis
NodeUsersMap node_users_;
AnfNodeSet all_nodes_; // managed nodes
// Dynamic Analysis
std::shared_ptr<ParentComputer> func_graph_parent_;
private:
// Erase OneGraph From Manager
void EraseOneGraph(const FuncGraphPtr &fg);
void AddIntoManaged(const FuncGraphPtr &fg);
// Edge bookkeeping: keep node_users_ in sync on input add/remove.
void ProcessEdgeAdd(const AnfNodePtr &node, int index, const AnfNodePtr &input);
void ProcessEdgeRemove(const AnfNodePtr &node, int index, const AnfNodePtr &input);
void ProcessInputsEdgeAdd(const CNodePtr &cnode);
void ProcessInputsEdgeRemove(const CNodePtr &cnode);
void AcquireNodes(std::vector<AnfNodePtr> &&nodes);
FuncGraphSet MaybeDropNodes(std::vector<AnfNodePtr> &&nodes);
void OnEdgeAdded(const AnfNodePtr &node, int index, const AnfNodePtr &input);
void OnEdgeRemoved(const AnfNodePtr &node, int index, const AnfNodePtr &input);
void MoveAllNodes(const FuncGraphPtr &source, const FuncGraphPtr &target);
FuncGraphSet roots_; // Managed roots.
FuncGraphSet func_graphs_; // Managed func graphs.
std::shared_ptr<Signals> signals_;
// Dynamic Analysis
std::shared_ptr<FuncGraphParentsTotalComputer> func_graph_parents_total_;
std::shared_ptr<ChildrenComputer> children_;
std::shared_ptr<ScopeComputer> scopes_;
std::shared_ptr<FVTotalComputer> free_variables_total_;
std::shared_ptr<FuncGraphsUsedTotalComputer> func_graphs_used_total_;
std::shared_ptr<RecursiveComputer> recursive_;
std::shared_ptr<FuncGraphJTotalComputer> j_total_;
bool is_manage_;
};
// Collects graph edits (parameter changes, replacements, edge edits) and
// applies them atomically through FuncGraphManager::CommitChanges on Commit().
// Move-only: a transaction has single ownership of its pending change list.
// Fix: three parameter declarations contained mojibake "¶m(s)" — an
// HTML-entity-eaten "&para..." — restored to proper reference parameters.
class FuncGraphTransaction {
public:
explicit FuncGraphTransaction(FuncGraphManager *manager) : manager_(manager) {}
FuncGraphTransaction() : manager_(nullptr) {}
~FuncGraphTransaction() = default;
FuncGraphTransaction(const FuncGraphTransaction &other) = delete;
FuncGraphTransaction &operator=(const FuncGraphTransaction &other) = delete;
FuncGraphTransaction(FuncGraphTransaction &&other) = default;
FuncGraphTransaction &operator=(FuncGraphTransaction &&other) = default;
// set parameters of a func graph
void SetParameters(FuncGraphPtr fg, const std::vector<AnfNodePtr> &params);
void AddParameter(FuncGraphPtr fg, const AnfNodePtr &param);
void InsertFrontParameter(FuncGraphPtr fg, const AnfNodePtr &param);
// replace old_node with new_node
bool Replace(const AnfNodePtr &old_node, const AnfNodePtr &new_node);
// set edge, i.e., declare setting node.inputs[key] to value.
void SetEdge(const AnfNodePtr &src_node, int k, const AnfNodePtr &v);
// Add edge, i.e., append value to node.inputs.
void AddEdge(const AnfNodePtr &src_node, const AnfNodePtr &v);
// commit all changes
void Commit();
private:
FuncGraphManager *manager_;  // not owned; outlives the transaction
std::vector<change::ChangePtr> changes_;
};
// Defined after FuncGraphTransaction is complete.
inline FuncGraphTransaction FuncGraphManager::Transact() { return FuncGraphTransaction(this); }
} // namespace mindspore
#endif // MINDSPORE_CORE_IR_MANAGER_H_
|
funny-falcon/bin_utils
|
ext/bin_utils/native.c
|
<filename>ext/bin_utils/native.c
#include <ruby.h>
/* Fixed-width integer types: prefer <stdint.h>, fall back to MSVC's __intN
 * types, or best-effort typedefs driven by SIZEOF_LONG for old toolchains. */
#if HAVE_STDINT_H
#include "stdint.h"
#elif defined(_MSC_VER)
typedef __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
#else
#ifndef __int8_t_defined
/* NOTE(review): plain `char` may be unsigned on some ABIs while int8_t must
 * be signed -- confirm for the legacy platforms this branch targets. */
typedef char int8_t;
typedef short int16_t;
typedef int int32_t;
#endif
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#if SIZEOF_LONG==8
typedef long int64_t;
typedef unsigned long uint64_t;
#else
typedef long long int64_t;
typedef unsigned long long uint64_t;
#endif
#endif
/* Compiler-specific "always inline" hint. */
#ifdef __GNUC__
#define FORCE_INLINE __attribute__((always_inline))
#elif defined(_MSC_VER)
#define FORCE_INLINE __forceinline
#else
#define FORCE_INLINE
#endif
/* Portable 64-bit literal suffixes (MSVC accepts bare literals). */
#if defined(_MSC_VER)
#define LL(x) (x)
#define LLU(x) (x)
#else
#define LL(x) (x##LL)
#define LLU(x) (x##LLU)
#endif
/* 64-bit <-> Ruby Integer conversion macros; use the plain long path when
 * long is already 64 bits wide. */
#if SIZEOF_LONG == 8
#define I642NUM(v) LONG2NUM(v)
#define U642NUM(v) ULONG2NUM(v)
#define NUM2I64(v) NUM2LONG(v)
#define NUM2U64(v) NUM2ULONG(v)
#else
#define I642NUM(v) LL2NUM(v)
#define U642NUM(v) ULL2NUM(v)
#define NUM2I64(v) NUM2LL(v)
#define NUM2U64(v) NUM2ULL(v)
#endif
/* Older Ruby versions lack RARRAY_CONST_PTR. */
#ifndef RARRAY_CONST_PTR
# define RARRAY_CONST_PTR(ar) RARRAY_PTR(ar)
#endif
/* Interned method IDs (:>> and :&) used by safe_int64_t for Bignum math;
 * presumably initialized in the extension's Init_ function (outside view). */
ID rshft;
ID band;
#ifndef HAVE_RB_STR_DROP_BYTES
/* rubinius has no rb_str_drop_bytes */
ID aslice;
/* Fallback: emulate rb_str_drop_bytes via str.slice!(start, bytes).
 * NOTE(review): args[0] is the bare value 0, not INT2FIX(0) -- on MRI that is
 * Qfalse, so presumably this relies on the alternate implementation's VALUE
 * encoding; confirm the intended first argument. */
static VALUE
rb_str_drop_bytes(VALUE str, long bytes)
{
VALUE args[2] = {0, INT2FIX(bytes)};
rb_funcall2(str, aslice, 2, args);
return str;
}
#endif
/* Convert a Ruby Integer to int64_t.  Fixnums go through NUM2I64 directly;
 * larger integers are split into two 32-bit halves via the Ruby-level
 * `& 0xffffffff` and `>> 32` methods and reassembled, so values that overflow
 * a plain conversion are reduced modulo 2**64 instead of raising. */
static int64_t
safe_int64_t(VALUE i)
{
if (FIXNUM_P(i)) {
return NUM2I64(i);
}
else {
VALUE argm = UINT2NUM(0xffffffff);
VALUE arg32 = INT2FIX(32);
uint64_t i0 = NUM2I64(rb_funcall2(i, band, 1, &argm));
i = rb_funcall2(i, rshft, 1, &arg32);
return i0 + (NUM2I64(rb_funcall2(i, band, 1, &argm)) << 32);
}
}
/* Normalize and validate a byte index for reading `ilen` bytes out of a
 * string of length `len` (renamed from `strlen`, which shadowed libc).
 * Negative indexes count from the end.  Raises ArgumentError when fewer than
 * `ilen` bytes are available; returns the normalized non-negative index.
 * Fix: the message's second range passed `-ilen` into a "-%ld" format, which
 * printed "--N"; pass `ilen` so the intended "-N" lower bound is shown. */
static long
check_size(long i, long len, long ilen)
{
    if (i < 0) { i += len; }
    if (i > len - ilen || i < 0) {
        rb_raise(rb_eArgError, "index %ld should be in range 0..%ld or in range -%ld..-%ld for string of size %ld", i, len-ilen, len, ilen, len);
    }
    return i;
}
/* Shared arity check for the get_*/
/* readers: (string[, offset=0]).  Returns the offset argument (default 0). */
static VALUE
check_argc(int argc, VALUE *argv)
{
    if (argc == 0 || argc > 2) {
        rb_raise(rb_eArgError, "accepts 1 or 2 arguments: (string[, offset=0])");
    }
    return argc == 2 ? argv[1] : INT2FIX(0);
}
/* Parsed arguments for the append_* entry points: target string plus the
 * integers to append (either splatted or as one array argument). */
typedef struct append_args {
VALUE str;
int argc;
VALUE *argv;
} append_args;
/* Variant carrying one leading fixed integer argument (int0). */
typedef struct append_args2 {
VALUE str;
int argc;
VALUE *argv;
VALUE int0;
} append_args2;
/* Parse (str|nil, *ints) or (str|nil, [ints]); a falsy str allocates a fresh
 * empty string.  `bits` is only used in the error message. */
static void
check_argc_append(int argc, VALUE *argv, append_args *args, int bits)
{
if (argc < 1) {
rb_raise(rb_eArgError, "accepts at least 1 argument: (string[, *int%ds])", bits);
}
args->str = RTEST(argv[0]) ? argv[0] : rb_str_new(0, 0);
if (argc == 2 && TYPE(argv[1]) == T_ARRAY) {
args->argc = RARRAY_LEN(argv[1]);
args->argv = RARRAY_CONST_PTR(argv[1]);
}
else {
args->argc = argc-1;
args->argv = argv+1;
}
}
/* As above but with a mandatory first integer: (str|nil, int0, *ints). */
static void
check_argc_append_2(int argc, VALUE *argv, append_args2 *args, int bits, int bits1)
{
if (argc < 2) {
rb_raise(rb_eArgError, "accepts at least 2 arguments: (string, int%d[, *int%ds])", bits, bits1);
}
args->str = RTEST(argv[0]) ? argv[0] : rb_str_new(0, 0);
args->int0 = argv[1];
if (argc == 3 && TYPE(argv[2]) == T_ARRAY) {
args->argc = RARRAY_LEN(argv[2]);
args->argv = RARRAY_CONST_PTR(argv[2]);
}
else {
args->argc = argc-2;
args->argv = argv+2;
}
}
/* Read an unsigned 8-bit value from rstr at index ri. */
static uint32_t
get_int8(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 1);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return
(((uint32_t)ptr[i + 0]) << 0 ) |
(uint32_t)0;
}
/* Signed variant: subtracting (res>>7)<<8 sign-extends bit 7 (i.e. -256 when
 * the sign bit is set). */
static int32_t
get_sint8(VALUE rstr, VALUE ri)
{
int32_t res = (int32_t)get_int8(rstr, ri);
return res - ((res >> 7) << 8);
}
/* Ruby: BinUtils.get_int8(string[, offset]). */
static VALUE
rb_get_int8(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_int8(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint8(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_sint8(argv[0], check_argc(argc, argv)));
}
/* Slice variants read at offset 0 and destructively drop the consumed byte. */
static VALUE
rb_slice_int8(VALUE self, VALUE rstr)
{
uint32_t res = get_int8(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 1);
return INT2FIX(res);
}
static VALUE
rb_slice_sint8(VALUE self, VALUE rstr)
{
int32_t res = get_sint8(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 1);
return INT2FIX(res);
}
/* Append one byte; the trailing 0 only pads the array, rb_str_cat copies 1. */
static void
append_int8(VALUE rstr, int32_t v)
{
char a[] = {
(v >> 0) & 0xff,
0
};
rb_str_cat(rstr, a, 1);
}
/* Append every argument as one byte each. */
static VALUE
append_var_int8(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int8(str, NUM2INT(argv[i]));
}
return str;
}
/* Ruby: BinUtils.append_int8!(str|nil, *ints). */
static VALUE
rb_append_int8(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 8);
return append_var_int8(args.argc, args.argv, args.str);
}
/* Byte order is irrelevant for a single byte. */
#define append_var_int8_le append_var_int8
#define append_var_int8_be append_var_int8
/* Read an unsigned 16-bit little-endian value from rstr at index ri. */
static uint32_t
get_int16_le(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 2);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return
(((uint32_t)ptr[i + 0]) << 0 ) |
(((uint32_t)ptr[i + 1]) << 8 ) |
(uint32_t)0;
}
/* Signed variant: manual sign extension of bit 15. */
static int32_t
get_sint16_le(VALUE rstr, VALUE ri)
{
int32_t res = (int32_t)get_int16_le(rstr, ri);
return res - ((res >> 15) << 16);
}
/* Ruby: get_int16_le(string[, offset]). */
static VALUE
rb_get_int16_le(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_int16_le(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint16_le(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_sint16_le(argv[0], check_argc(argc, argv)));
}
/* Slice variants read at offset 0 and destructively drop 2 bytes. */
static VALUE
rb_slice_int16_le(VALUE self, VALUE rstr)
{
uint32_t res = get_int16_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 2);
return INT2FIX(res);
}
static VALUE
rb_slice_sint16_le(VALUE self, VALUE rstr)
{
int32_t res = get_sint16_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 2);
return INT2FIX(res);
}
/* Append v as 2 little-endian bytes (trailing 0 is padding only). */
static void
append_int16_le(VALUE rstr, int32_t v)
{
char a[] = {
(v >> 0) & 0xff,
(v >> 8) & 0xff,
0
};
rb_str_cat(rstr, a, 2);
}
static VALUE
append_var_int16_le(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int16_le(str, NUM2INT(argv[i]));
}
return str;
}
/* Ruby: append_int16_le!(str|nil, *ints). */
static VALUE
rb_append_int16_le(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 16);
return append_var_int16_le(args.argc, args.argv, args.str);
}
/* Read an unsigned 24-bit little-endian value from rstr at index ri. */
static uint32_t
get_int24_le(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 3);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return
(((uint32_t)ptr[i + 0]) << 0 ) |
(((uint32_t)ptr[i + 1]) << 8 ) |
(((uint32_t)ptr[i + 2]) << 16 ) |
(uint32_t)0;
}
/* Signed variant: manual sign extension of bit 23. */
static int32_t
get_sint24_le(VALUE rstr, VALUE ri)
{
int32_t res = (int32_t)get_int24_le(rstr, ri);
return res - ((res >> 23) << 24);
}
/* Ruby: get_int24_le(string[, offset]). */
static VALUE
rb_get_int24_le(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_int24_le(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint24_le(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_sint24_le(argv[0], check_argc(argc, argv)));
}
/* Slice variants read at offset 0 and destructively drop 3 bytes. */
static VALUE
rb_slice_int24_le(VALUE self, VALUE rstr)
{
uint32_t res = get_int24_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 3);
return INT2FIX(res);
}
static VALUE
rb_slice_sint24_le(VALUE self, VALUE rstr)
{
int32_t res = get_sint24_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 3);
return INT2FIX(res);
}
/* Append v as 3 little-endian bytes (trailing 0 is padding only). */
static void
append_int24_le(VALUE rstr, int32_t v)
{
char a[] = {
(v >> 0) & 0xff,
(v >> 8) & 0xff,
(v >> 16) & 0xff,
0
};
rb_str_cat(rstr, a, 3);
}
static VALUE
append_var_int24_le(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int24_le(str, NUM2INT(argv[i]));
}
return str;
}
/* Ruby: append_int24_le!(str|nil, *ints). */
static VALUE
rb_append_int24_le(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 24);
return append_var_int24_le(args.argc, args.argv, args.str);
}
/* Read an unsigned 32-bit little-endian value from rstr at index ri. */
static uint32_t
get_int32_le(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 4);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return
(((uint32_t)ptr[i + 0]) << 0 ) |
(((uint32_t)ptr[i + 1]) << 8 ) |
(((uint32_t)ptr[i + 2]) << 16 ) |
(((uint32_t)ptr[i + 3]) << 24 ) |
(uint32_t)0;
}
/* Signed variant: all 32 bits are used, so the cast alone reinterprets. */
static int32_t
get_sint32_le(VALUE rstr, VALUE ri)
{
int32_t res = (int32_t)get_int32_le(rstr, ri);
return res;
}
/* Ruby: get_int32_le(string[, offset]); UINT2NUM since it may exceed Fixnum. */
static VALUE
rb_get_int32_le(int argc, VALUE *argv, VALUE self)
{
return UINT2NUM(get_int32_le(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint32_le(int argc, VALUE *argv, VALUE self)
{
return INT2NUM(get_sint32_le(argv[0], check_argc(argc, argv)));
}
/* Slice variants read at offset 0 and destructively drop 4 bytes. */
static VALUE
rb_slice_int32_le(VALUE self, VALUE rstr)
{
uint32_t res = get_int32_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 4);
return UINT2NUM(res);
}
static VALUE
rb_slice_sint32_le(VALUE self, VALUE rstr)
{
int32_t res = get_sint32_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 4);
return INT2NUM(res);
}
/* Append v as 4 little-endian bytes (trailing 0 is padding only). */
static void
append_int32_le(VALUE rstr, int32_t v)
{
char a[] = {
(v >> 0) & 0xff,
(v >> 8) & 0xff,
(v >> 16) & 0xff,
(v >> 24) & 0xff,
0
};
rb_str_cat(rstr, a, 4);
}
/* NUM2I64 then truncate so values in 2**31..2**32-1 are accepted. */
static VALUE
append_var_int32_le(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int32_le(str, (int32_t)NUM2I64(argv[i]));
}
return str;
}
/* Ruby: append_int32_le!(str|nil, *ints). */
static VALUE
rb_append_int32_le(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 32);
return append_var_int32_le(args.argc, args.argv, args.str);
}
/* Read an unsigned 16-bit big-endian value (byte order reversed vs _le). */
static uint32_t
get_int16_be(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 2);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return
(((uint32_t)ptr[i + 1]) << 0 ) |
(((uint32_t)ptr[i + 0]) << 8 ) |
(uint32_t)0;
}
/* Signed variant: manual sign extension of bit 15. */
static int32_t
get_sint16_be(VALUE rstr, VALUE ri)
{
int32_t res = (int32_t)get_int16_be(rstr, ri);
return res - ((res >> 15) << 16);
}
/* Ruby: get_int16_be(string[, offset]). */
static VALUE
rb_get_int16_be(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_int16_be(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint16_be(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_sint16_be(argv[0], check_argc(argc, argv)));
}
/* Slice variants read at offset 0 and destructively drop 2 bytes. */
static VALUE
rb_slice_int16_be(VALUE self, VALUE rstr)
{
uint32_t res = get_int16_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 2);
return INT2FIX(res);
}
static VALUE
rb_slice_sint16_be(VALUE self, VALUE rstr)
{
int32_t res = get_sint16_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 2);
return INT2FIX(res);
}
/* Append v as 2 big-endian bytes (trailing 0 is padding only). */
static void
append_int16_be(VALUE rstr, int32_t v)
{
char a[] = {
(v >> 8) & 0xff,
(v >> 0) & 0xff,
0
};
rb_str_cat(rstr, a, 2);
}
static VALUE
append_var_int16_be(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int16_be(str, NUM2INT(argv[i]));
}
return str;
}
/* Ruby: append_int16_be!(str|nil, *ints). */
static VALUE
rb_append_int16_be(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 16);
return append_var_int16_be(args.argc, args.argv, args.str);
}
/* Read an unsigned 24-bit big-endian value (byte order reversed vs _le). */
static uint32_t
get_int24_be(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 3);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return
(((uint32_t)ptr[i + 2]) << 0 ) |
(((uint32_t)ptr[i + 1]) << 8 ) |
(((uint32_t)ptr[i + 0]) << 16 ) |
(uint32_t)0;
}
/* Signed variant: manual sign extension of bit 23. */
static int32_t
get_sint24_be(VALUE rstr, VALUE ri)
{
int32_t res = (int32_t)get_int24_be(rstr, ri);
return res - ((res >> 23) << 24);
}
/* Ruby: get_int24_be(string[, offset]). */
static VALUE
rb_get_int24_be(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_int24_be(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint24_be(int argc, VALUE *argv, VALUE self)
{
return INT2FIX(get_sint24_be(argv[0], check_argc(argc, argv)));
}
/* Slice variants read at offset 0 and destructively drop 3 bytes. */
static VALUE
rb_slice_int24_be(VALUE self, VALUE rstr)
{
uint32_t res = get_int24_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 3);
return INT2FIX(res);
}
static VALUE
rb_slice_sint24_be(VALUE self, VALUE rstr)
{
int32_t res = get_sint24_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 3);
return INT2FIX(res);
}
/* Append v as 3 big-endian bytes (trailing 0 is padding only). */
static void
append_int24_be(VALUE rstr, int32_t v)
{
char a[] = {
(v >> 16) & 0xff,
(v >> 8) & 0xff,
(v >> 0) & 0xff,
0
};
rb_str_cat(rstr, a, 3);
}
static VALUE
append_var_int24_be(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int24_be(str, NUM2INT(argv[i]));
}
return str;
}
/* Ruby: append_int24_be!(str|nil, *ints). */
static VALUE
rb_append_int24_be(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 24);
return append_var_int24_be(args.argc, args.argv, args.str);
}
/* Read an unsigned 32-bit big-endian value (byte order reversed vs _le). */
static uint32_t
get_int32_be(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 4);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return
(((uint32_t)ptr[i + 3]) << 0 ) |
(((uint32_t)ptr[i + 2]) << 8 ) |
(((uint32_t)ptr[i + 1]) << 16 ) |
(((uint32_t)ptr[i + 0]) << 24 ) |
(uint32_t)0;
}
/* Signed variant: all 32 bits are used, so the cast alone reinterprets. */
static int32_t
get_sint32_be(VALUE rstr, VALUE ri)
{
int32_t res = (int32_t)get_int32_be(rstr, ri);
return res;
}
/* Ruby: get_int32_be(string[, offset]); UINT2NUM since it may exceed Fixnum. */
static VALUE
rb_get_int32_be(int argc, VALUE *argv, VALUE self)
{
return UINT2NUM(get_int32_be(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint32_be(int argc, VALUE *argv, VALUE self)
{
return INT2NUM(get_sint32_be(argv[0], check_argc(argc, argv)));
}
/* Slice variants read at offset 0 and destructively drop 4 bytes. */
static VALUE
rb_slice_int32_be(VALUE self, VALUE rstr)
{
uint32_t res = get_int32_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 4);
return UINT2NUM(res);
}
static VALUE
rb_slice_sint32_be(VALUE self, VALUE rstr)
{
int32_t res = get_sint32_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 4);
return INT2NUM(res);
}
/* Append v as 4 big-endian bytes (trailing 0 is padding only). */
static void
append_int32_be(VALUE rstr, int32_t v)
{
char a[] = {
(v >> 24) & 0xff,
(v >> 16) & 0xff,
(v >> 8) & 0xff,
(v >> 0) & 0xff,
0
};
rb_str_cat(rstr, a, 4);
}
/* NUM2I64 then truncate so values in 2**31..2**32-1 are accepted. */
static VALUE
append_var_int32_be(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int32_be(str, (int32_t)NUM2I64(argv[i]));
}
return str;
}
/* Ruby: append_int32_be!(str|nil, *ints). */
static VALUE
rb_append_int32_be(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 32);
return append_var_int32_be(args.argc, args.argv, args.str);
}
/* Read an unsigned 40-bit little-endian value.  The low and high halves are
 * assembled as separate 32-bit words, then combined into a uint64_t. */
static uint64_t
get_int40_le(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 5);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return (uint64_t)(
(((uint32_t)ptr[i + 0]) << 0) |
(((uint32_t)ptr[i + 1]) << 8) |
(((uint32_t)ptr[i + 2]) << 16) |
(((uint32_t)ptr[i + 3]) << 24) |
0) | ((uint64_t)(
(((uint32_t)ptr[i + 4]) << 0) |
0) << 32);
}
/* Signed variant: manual sign extension of bit 39. */
static int64_t
get_sint40_le(VALUE rstr, VALUE ri)
{
int64_t res = (int64_t)get_int40_le(rstr, ri);
return res - ((res >> 39) << 40);
}
/* Ruby: get_int40_le(string[, offset]). */
static VALUE
rb_get_int40_le(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_int40_le(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint40_le(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_sint40_le(argv[0], check_argc(argc, argv)));
}
/* Slice variant: read at offset 0, destructively drop 5 bytes. */
static VALUE
rb_slice_int40_le(VALUE self, VALUE rstr)
{
uint64_t res = get_int40_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 5);
return I642NUM(res);
}
/* Slice a signed 40-bit little-endian integer off the head of rstr,
 * destructively dropping the 5 consumed bytes.
 * Fix: hold the signed get_sint40_le() result in int64_t; storing a negative
 * value in uint64_t relied on implementation-defined conversion at I642NUM. */
static VALUE
rb_slice_sint40_le(VALUE self, VALUE rstr)
{
int64_t res = get_sint40_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 5);
return I642NUM(res);
}
/* Append v as 5 little-endian bytes (trailing 0 is padding only). */
static void
append_int40_le(VALUE rstr, int64_t v)
{
char a[] = {
(v >> 0) & 0xff,
(v >> 8) & 0xff,
(v >> 16) & 0xff,
(v >> 24) & 0xff,
(v >> 32) & 0xff,
0
};
rb_str_cat(rstr, a, 5);
}
static VALUE
append_var_int40_le(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int40_le(str, NUM2I64(argv[i]));
}
return str;
}
/* Ruby: append_int40_le!(str|nil, *ints). */
static VALUE
rb_append_int40_le(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 40);
return append_var_int40_le(args.argc, args.argv, args.str);
}
/* Read an unsigned 48-bit little-endian value (two 32-bit halves combined). */
static uint64_t
get_int48_le(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 6);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return (uint64_t)(
(((uint32_t)ptr[i + 0]) << 0) |
(((uint32_t)ptr[i + 1]) << 8) |
(((uint32_t)ptr[i + 2]) << 16) |
(((uint32_t)ptr[i + 3]) << 24) |
0) | ((uint64_t)(
(((uint32_t)ptr[i + 4]) << 0) |
(((uint32_t)ptr[i + 5]) << 8) |
0) << 32);
}
/* Signed variant: manual sign extension of bit 47. */
static int64_t
get_sint48_le(VALUE rstr, VALUE ri)
{
int64_t res = (int64_t)get_int48_le(rstr, ri);
return res - ((res >> 47) << 48);
}
/* Ruby: get_int48_le(string[, offset]). */
static VALUE
rb_get_int48_le(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_int48_le(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint48_le(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_sint48_le(argv[0], check_argc(argc, argv)));
}
/* Slice variant: read at offset 0, destructively drop 6 bytes. */
static VALUE
rb_slice_int48_le(VALUE self, VALUE rstr)
{
uint64_t res = get_int48_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 6);
return I642NUM(res);
}
/* Slice a signed 48-bit little-endian integer off the head of rstr,
 * destructively dropping the 6 consumed bytes.
 * Fix: hold the signed get_sint48_le() result in int64_t (was uint64_t). */
static VALUE
rb_slice_sint48_le(VALUE self, VALUE rstr)
{
int64_t res = get_sint48_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 6);
return I642NUM(res);
}
/* Append v as 6 little-endian bytes (trailing 0 is padding only). */
static void
append_int48_le(VALUE rstr, int64_t v)
{
char a[] = {
(v >> 0) & 0xff,
(v >> 8) & 0xff,
(v >> 16) & 0xff,
(v >> 24) & 0xff,
(v >> 32) & 0xff,
(v >> 40) & 0xff,
0
};
rb_str_cat(rstr, a, 6);
}
static VALUE
append_var_int48_le(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int48_le(str, NUM2I64(argv[i]));
}
return str;
}
/* Ruby: append_int48_le!(str|nil, *ints). */
static VALUE
rb_append_int48_le(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 48);
return append_var_int48_le(args.argc, args.argv, args.str);
}
/* Read an unsigned 56-bit little-endian value (two 32-bit halves combined). */
static uint64_t
get_int56_le(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 7);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return (uint64_t)(
(((uint32_t)ptr[i + 0]) << 0) |
(((uint32_t)ptr[i + 1]) << 8) |
(((uint32_t)ptr[i + 2]) << 16) |
(((uint32_t)ptr[i + 3]) << 24) |
0) | ((uint64_t)(
(((uint32_t)ptr[i + 4]) << 0) |
(((uint32_t)ptr[i + 5]) << 8) |
(((uint32_t)ptr[i + 6]) << 16) |
0) << 32);
}
/* Signed variant: manual sign extension of bit 55. */
static int64_t
get_sint56_le(VALUE rstr, VALUE ri)
{
int64_t res = (int64_t)get_int56_le(rstr, ri);
return res - ((res >> 55) << 56);
}
/* Ruby: get_int56_le(string[, offset]). */
static VALUE
rb_get_int56_le(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_int56_le(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint56_le(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_sint56_le(argv[0], check_argc(argc, argv)));
}
/* Slice variant: read at offset 0, destructively drop 7 bytes. */
static VALUE
rb_slice_int56_le(VALUE self, VALUE rstr)
{
uint64_t res = get_int56_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 7);
return I642NUM(res);
}
/* Slice a signed 56-bit little-endian integer off the head of rstr,
 * destructively dropping the 7 consumed bytes.
 * Fix: hold the signed get_sint56_le() result in int64_t (was uint64_t). */
static VALUE
rb_slice_sint56_le(VALUE self, VALUE rstr)
{
int64_t res = get_sint56_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 7);
return I642NUM(res);
}
/* Append v as 7 little-endian bytes (trailing 0 is padding only). */
static void
append_int56_le(VALUE rstr, int64_t v)
{
char a[] = {
(v >> 0) & 0xff,
(v >> 8) & 0xff,
(v >> 16) & 0xff,
(v >> 24) & 0xff,
(v >> 32) & 0xff,
(v >> 40) & 0xff,
(v >> 48) & 0xff,
0
};
rb_str_cat(rstr, a, 7);
}
static VALUE
append_var_int56_le(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int56_le(str, NUM2I64(argv[i]));
}
return str;
}
/* Ruby: append_int56_le!(str|nil, *ints). */
static VALUE
rb_append_int56_le(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 56);
return append_var_int56_le(args.argc, args.argv, args.str);
}
/* Read an unsigned 64-bit little-endian value (two 32-bit halves combined). */
static uint64_t
get_int64_le(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 8);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return (uint64_t)(
(((uint32_t)ptr[i + 0]) << 0) |
(((uint32_t)ptr[i + 1]) << 8) |
(((uint32_t)ptr[i + 2]) << 16) |
(((uint32_t)ptr[i + 3]) << 24) |
0) | ((uint64_t)(
(((uint32_t)ptr[i + 4]) << 0) |
(((uint32_t)ptr[i + 5]) << 8) |
(((uint32_t)ptr[i + 6]) << 16) |
(((uint32_t)ptr[i + 7]) << 24) |
0) << 32);
}
/* Signed variant: all 64 bits are used, so the cast alone reinterprets. */
static int64_t
get_sint64_le(VALUE rstr, VALUE ri)
{
int64_t res = (int64_t)get_int64_le(rstr, ri);
return res;
}
/* Ruby: get_int64_le(string[, offset]); U642NUM for the full unsigned range. */
static VALUE
rb_get_int64_le(int argc, VALUE *argv, VALUE self)
{
return U642NUM(get_int64_le(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint64_le(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_sint64_le(argv[0], check_argc(argc, argv)));
}
/* Slice variant: read at offset 0, destructively drop 8 bytes. */
static VALUE
rb_slice_int64_le(VALUE self, VALUE rstr)
{
uint64_t res = get_int64_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 8);
return U642NUM(res);
}
/* Slice a signed 64-bit little-endian integer off the head of rstr,
 * destructively dropping the 8 consumed bytes.
 * Fix: hold the signed get_sint64_le() result in int64_t (was uint64_t). */
static VALUE
rb_slice_sint64_le(VALUE self, VALUE rstr)
{
int64_t res = get_sint64_le(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 8);
return I642NUM(res);
}
/* Append v as 8 little-endian bytes (trailing 0 is padding only). */
static void
append_int64_le(VALUE rstr, int64_t v)
{
char a[] = {
(v >> 0) & 0xff,
(v >> 8) & 0xff,
(v >> 16) & 0xff,
(v >> 24) & 0xff,
(v >> 32) & 0xff,
(v >> 40) & 0xff,
(v >> 48) & 0xff,
(v >> 56) & 0xff,
0
};
rb_str_cat(rstr, a, 8);
}
/* safe_int64_t accepts values in the full unsigned 64-bit range too. */
static VALUE
append_var_int64_le(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int64_le(str, safe_int64_t(argv[i]));
}
return str;
}
/* Ruby: append_int64_le!(str|nil, *ints). */
static VALUE
rb_append_int64_le(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 64);
return append_var_int64_le(args.argc, args.argv, args.str);
}
/* Read an unsigned 40-bit big-endian value (byte order reversed vs _le). */
static uint64_t
get_int40_be(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 5);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return (uint64_t)(
(((uint32_t)ptr[i + 4]) << 0) |
(((uint32_t)ptr[i + 3]) << 8) |
(((uint32_t)ptr[i + 2]) << 16) |
(((uint32_t)ptr[i + 1]) << 24) |
0) | ((uint64_t)(
(((uint32_t)ptr[i + 0]) << 0) |
0) << 32);
}
/* Signed variant: manual sign extension of bit 39. */
static int64_t
get_sint40_be(VALUE rstr, VALUE ri)
{
int64_t res = (int64_t)get_int40_be(rstr, ri);
return res - ((res >> 39) << 40);
}
/* Ruby: get_int40_be(string[, offset]). */
static VALUE
rb_get_int40_be(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_int40_be(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint40_be(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_sint40_be(argv[0], check_argc(argc, argv)));
}
/* Slice variant: read at offset 0, destructively drop 5 bytes. */
static VALUE
rb_slice_int40_be(VALUE self, VALUE rstr)
{
uint64_t res = get_int40_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 5);
return I642NUM(res);
}
/* Slice a signed 40-bit big-endian integer off the head of rstr,
 * destructively dropping the 5 consumed bytes.
 * Fix: hold the signed get_sint40_be() result in int64_t (was uint64_t). */
static VALUE
rb_slice_sint40_be(VALUE self, VALUE rstr)
{
int64_t res = get_sint40_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 5);
return I642NUM(res);
}
/* Append v as 5 big-endian bytes (trailing 0 is padding only). */
static void
append_int40_be(VALUE rstr, int64_t v)
{
char a[] = {
(v >> 32) & 0xff,
(v >> 24) & 0xff,
(v >> 16) & 0xff,
(v >> 8) & 0xff,
(v >> 0) & 0xff,
0
};
rb_str_cat(rstr, a, 5);
}
static VALUE
append_var_int40_be(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int40_be(str, NUM2I64(argv[i]));
}
return str;
}
/* Ruby: append_int40_be!(str|nil, *ints). */
static VALUE
rb_append_int40_be(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 40);
return append_var_int40_be(args.argc, args.argv, args.str);
}
/* Read an unsigned 48-bit big-endian value (byte order reversed vs _le). */
static uint64_t
get_int48_be(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 6);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return (uint64_t)(
(((uint32_t)ptr[i + 5]) << 0) |
(((uint32_t)ptr[i + 4]) << 8) |
(((uint32_t)ptr[i + 3]) << 16) |
(((uint32_t)ptr[i + 2]) << 24) |
0) | ((uint64_t)(
(((uint32_t)ptr[i + 1]) << 0) |
(((uint32_t)ptr[i + 0]) << 8) |
0) << 32);
}
/* Signed variant: manual sign extension of bit 47. */
static int64_t
get_sint48_be(VALUE rstr, VALUE ri)
{
int64_t res = (int64_t)get_int48_be(rstr, ri);
return res - ((res >> 47) << 48);
}
/* Ruby: get_int48_be(string[, offset]). */
static VALUE
rb_get_int48_be(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_int48_be(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint48_be(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_sint48_be(argv[0], check_argc(argc, argv)));
}
/* Slice variant: read at offset 0, destructively drop 6 bytes. */
static VALUE
rb_slice_int48_be(VALUE self, VALUE rstr)
{
uint64_t res = get_int48_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 6);
return I642NUM(res);
}
/* Slice a signed 48-bit big-endian integer off the head of rstr,
 * destructively dropping the 6 consumed bytes.
 * Fix: hold the signed get_sint48_be() result in int64_t (was uint64_t). */
static VALUE
rb_slice_sint48_be(VALUE self, VALUE rstr)
{
int64_t res = get_sint48_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 6);
return I642NUM(res);
}
/* Append v as 6 big-endian bytes (trailing 0 is padding only). */
static void
append_int48_be(VALUE rstr, int64_t v)
{
char a[] = {
(v >> 40) & 0xff,
(v >> 32) & 0xff,
(v >> 24) & 0xff,
(v >> 16) & 0xff,
(v >> 8) & 0xff,
(v >> 0) & 0xff,
0
};
rb_str_cat(rstr, a, 6);
}
static VALUE
append_var_int48_be(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int48_be(str, NUM2I64(argv[i]));
}
return str;
}
/* Ruby: append_int48_be!(str|nil, *ints). */
static VALUE
rb_append_int48_be(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 48);
return append_var_int48_be(args.argc, args.argv, args.str);
}
/* Read an unsigned 56-bit big-endian value (byte order reversed vs _le). */
static uint64_t
get_int56_be(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 7);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return (uint64_t)(
(((uint32_t)ptr[i + 6]) << 0) |
(((uint32_t)ptr[i + 5]) << 8) |
(((uint32_t)ptr[i + 4]) << 16) |
(((uint32_t)ptr[i + 3]) << 24) |
0) | ((uint64_t)(
(((uint32_t)ptr[i + 2]) << 0) |
(((uint32_t)ptr[i + 1]) << 8) |
(((uint32_t)ptr[i + 0]) << 16) |
0) << 32);
}
/* Signed variant: manual sign extension of bit 55. */
static int64_t
get_sint56_be(VALUE rstr, VALUE ri)
{
int64_t res = (int64_t)get_int56_be(rstr, ri);
return res - ((res >> 55) << 56);
}
/* Ruby: get_int56_be(string[, offset]). */
static VALUE
rb_get_int56_be(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_int56_be(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint56_be(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_sint56_be(argv[0], check_argc(argc, argv)));
}
/* Slice variant: read at offset 0, destructively drop 7 bytes. */
static VALUE
rb_slice_int56_be(VALUE self, VALUE rstr)
{
uint64_t res = get_int56_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 7);
return I642NUM(res);
}
/* Slice a signed 56-bit big-endian integer off the head of rstr,
 * destructively dropping the 7 consumed bytes.
 * Fix: hold the signed get_sint56_be() result in int64_t (was uint64_t). */
static VALUE
rb_slice_sint56_be(VALUE self, VALUE rstr)
{
int64_t res = get_sint56_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 7);
return I642NUM(res);
}
/* Append v as 7 big-endian bytes (trailing 0 is padding only). */
static void
append_int56_be(VALUE rstr, int64_t v)
{
char a[] = {
(v >> 48) & 0xff,
(v >> 40) & 0xff,
(v >> 32) & 0xff,
(v >> 24) & 0xff,
(v >> 16) & 0xff,
(v >> 8) & 0xff,
(v >> 0) & 0xff,
0
};
rb_str_cat(rstr, a, 7);
}
static VALUE
append_var_int56_be(int argc, VALUE* argv, VALUE str)
{
int i;
for(i = 0; i < argc; i++) {
append_int56_be(str, NUM2I64(argv[i]));
}
return str;
}
/* Ruby: append_int56_be!(str|nil, *ints). */
static VALUE
rb_append_int56_be(int argc, VALUE* argv, VALUE self)
{
append_args args;
check_argc_append(argc, argv, &args, 56);
return append_var_int56_be(args.argc, args.argv, args.str);
}
/* Read an unsigned 64-bit big-endian value (byte order reversed vs _le). */
static uint64_t
get_int64_be(VALUE rstr, VALUE ri)
{
long i = NUM2LONG(ri);
const uint8_t *ptr;
StringValue(rstr);
i = check_size(i, RSTRING_LEN(rstr), 8);
ptr = (const uint8_t*)RSTRING_PTR(rstr);
return (uint64_t)(
(((uint32_t)ptr[i + 7]) << 0) |
(((uint32_t)ptr[i + 6]) << 8) |
(((uint32_t)ptr[i + 5]) << 16) |
(((uint32_t)ptr[i + 4]) << 24) |
0) | ((uint64_t)(
(((uint32_t)ptr[i + 3]) << 0) |
(((uint32_t)ptr[i + 2]) << 8) |
(((uint32_t)ptr[i + 1]) << 16) |
(((uint32_t)ptr[i + 0]) << 24) |
0) << 32);
}
/* Signed variant: all 64 bits are used, so the cast alone reinterprets. */
static int64_t
get_sint64_be(VALUE rstr, VALUE ri)
{
int64_t res = (int64_t)get_int64_be(rstr, ri);
return res;
}
/* Ruby: get_int64_be(string[, offset]); U642NUM for the full unsigned range. */
static VALUE
rb_get_int64_be(int argc, VALUE *argv, VALUE self)
{
return U642NUM(get_int64_be(argv[0], check_argc(argc, argv)));
}
static VALUE
rb_get_sint64_be(int argc, VALUE *argv, VALUE self)
{
return I642NUM(get_sint64_be(argv[0], check_argc(argc, argv)));
}
/* Slice variant: read at offset 0, destructively drop 8 bytes. */
static VALUE
rb_slice_int64_be(VALUE self, VALUE rstr)
{
uint64_t res = get_int64_be(rstr, INT2FIX(0));
rb_str_drop_bytes(rstr, 8);
return U642NUM(res);
}
/* Destructively reads a signed 64-bit big-endian integer from the head of
 * rstr, drops the consumed 8 bytes, and returns it as a Ruby Integer.
 * Fix: hold the result in int64_t (not uint64_t) so the signed value from
 * get_sint64_be reaches I642NUM without an implementation-defined
 * unsigned->signed conversion for negative values. */
static VALUE
rb_slice_sint64_be(VALUE self, VALUE rstr)
{
    int64_t res = get_sint64_be(rstr, INT2FIX(0));
    rb_str_drop_bytes(rstr, 8);
    return I642NUM(res);
}
/* Appends v to rstr as 8 big-endian bytes.
 * Fix: build the bytes in a uint8_t buffer. The original initialized a
 * plain char array with values that can exceed 127, which is
 * implementation-defined when char is signed; unsigned storage makes the
 * byte values well-defined. The unused trailing 0 sentinel is dropped
 * (rb_str_cat takes an explicit length of 8). */
static void
append_int64_be(VALUE rstr, int64_t v)
{
    uint8_t a[8] = {
        (uint8_t)((v >> 56) & 0xff),
        (uint8_t)((v >> 48) & 0xff),
        (uint8_t)((v >> 40) & 0xff),
        (uint8_t)((v >> 32) & 0xff),
        (uint8_t)((v >> 24) & 0xff),
        (uint8_t)((v >> 16) & 0xff),
        (uint8_t)((v >> 8) & 0xff),
        (uint8_t)((v >> 0) & 0xff)
    };
    rb_str_cat(rstr, (const char *)a, 8);
}
/* Appends every element of argv as an 8-byte big-endian integer; returns
 * str. safe_int64_t presumably performs a checked Ruby->int64 conversion
 * — confirm against its definition. */
static VALUE
append_var_int64_be(int argc, VALUE* argv, VALUE str)
{
    int i;
    for(i = 0; i < argc; i++) {
        append_int64_be(str, safe_int64_t(argv[i]));
    }
    return str;
}
/* Ruby entry point for append_int64_be!(str, int, ...). */
static VALUE
rb_append_int64_be(int argc, VALUE* argv, VALUE self)
{
    append_args args;
    check_argc_append(argc, argv, &args, 64);
    return append_var_int64_be(args.argc, args.argv, args.str);
}
/* BER */
/* Parses a BER (base-128, high-bit-continuation) compressed integer.
 * ptr points at the first byte to read; max is the total buffer length
 * and *i holds the absolute index of *ptr within that buffer, so the
 * bounds check works both for get_ber (offset start) and slice_ber
 * (start at 0). On return *i is the index of the terminating byte
 * (the first byte < 128).
 * Raises ArgumentError when the value would overflow 64 bits or the
 * string ends mid-sequence.
 * Fix: corrected the typo "greater then" -> "greater than" in the
 * overflow message. */
static uint64_t
parse_ber(const uint8_t *ptr, long max, long *i)
{
    uint64_t res = 0;
    while (1) {
        /* a byte < 128 has no continuation bit and terminates the value */
        if (*ptr < 128) {
            res += *ptr;
            break;
        }
        /* guard the upcoming *128 against 64-bit overflow */
        if (res > LLU(0xFFFFFFFFFFFFFFFF) / 128) {
            rb_raise(rb_eArgError, "BER integer is greater than 2**64, could not parse such big");
        }
        res = (res + ((*ptr) - 128)) * 128;
        ptr++;
        if (++(*i) >= max) {
            rb_raise(rb_eArgError, "String unexpectedly finished while parsing BER integer");
        }
    }
    return res;
}
/* Reads a BER integer from rstr starting at byte offset ri.
 * check_size validates that at least one byte is available at i; parse_ber
 * is then handed the absolute index so it can bounds-check continuation
 * bytes itself. */
static uint64_t
get_ber(VALUE rstr, VALUE ri)
{
    long i = NUM2LONG(ri), len;
    const uint8_t *ptr;
    StringValue(rstr);
    len = RSTRING_LEN(rstr);
    i = check_size(i, len, 1);
    ptr = (const uint8_t*)RSTRING_PTR(rstr) + i;
    return parse_ber(ptr, len, &i);
}
/* Ruby entry point for get_ber(str[, offset]). */
static VALUE
rb_get_ber(int argc, VALUE *argv, VALUE self)
{
    return U642NUM(get_ber(argv[0], check_argc(argc, argv)));
}
/* Reads a BER integer that starts at the head of rstr; *i receives the
 * index of the terminating byte.
 * NOTE(review): unlike get_ber this performs no minimum-length check
 * before parsing — behavior on an empty string depends on Ruby's
 * NUL-terminated string storage; confirm and consider a guard. */
static uint64_t
slice_ber(VALUE rstr, long *i)
{
    long len;
    const uint8_t *ptr;
    StringValue(rstr);
    len = RSTRING_LEN(rstr);
    ptr = (const uint8_t*)RSTRING_PTR(rstr);
    return parse_ber(ptr, len, i);
}
/* Destructively reads a BER integer from the head of rstr, drops the
 * consumed bytes (i is the terminating byte's index, hence i+1 bytes),
 * and returns the value.
 * Fix: hold the result in uint64_t (not int64_t) — BER values are
 * unsigned and converted with U642NUM, so the signed intermediate invited
 * an implementation-defined conversion for values >= 2**63. */
static VALUE
rb_slice_ber(VALUE self, VALUE rstr)
{
    long i = 0;
    uint64_t res = slice_ber(rstr, &i);
    rb_str_drop_bytes(rstr, i+1);
    return U642NUM(res);
}
/* Appends ber to rstr in BER (base-128) encoding and returns the number
 * of bytes written (1..10). The buffer is pre-filled with the
 * continuation marker 128; digits are accumulated right-to-left starting
 * at the terminating byte a[10] (initialized to 0, i.e. no continuation
 * bit), so every earlier byte automatically carries the high bit.
 * Fix: use a uint8_t buffer — the original stored 128 and sums up to 255
 * into plain char, which is implementation-defined when char is signed. */
static int
append_ber(VALUE rstr, uint64_t ber)
{
    int i = 10;
    uint8_t a[11] = {128, 128, 128, 128,
                     128, 128, 128, 128,
                     128, 128, 0};
    do {
        a[i] += ber % 128;
        ber /= 128;
        i--;
    } while (ber);
    i++;
    rb_str_cat(rstr, (const char *)a + i, 11 - i);
    return 11 - i;
}
/** APPEND BERSIZE **/
/* Generates rb_append_bersize_<type>: first appends the total payload
 * size in bytes (argc * bytes) as a BER integer, then the fixed-width
 * integers themselves via the matching append_var_<type> helper. */
#define append_bersize_func(type, bytes) \
static VALUE \
rb_append_bersize_##type(int argc, VALUE* argv, VALUE self) \
{ \
    append_args args; \
    check_argc_append(argc, argv, &args, bytes * 8); \
    append_ber(args.str, args.argc * bytes); \
    return append_var_##type(args.argc, args.argv, args.str);\
}
append_bersize_func(int8, 1)
append_bersize_func(int16_le, 2)
append_bersize_func(int24_le, 3)
append_bersize_func(int32_le, 4)
append_bersize_func(int40_le, 5)
append_bersize_func(int48_le, 6)
append_bersize_func(int56_le, 7)
append_bersize_func(int64_le, 8)
append_bersize_func(int16_be, 2)
append_bersize_func(int24_be, 3)
append_bersize_func(int32_be, 4)
append_bersize_func(int40_be, 5)
append_bersize_func(int48_be, 6)
append_bersize_func(int56_be, 7)
append_bersize_func(int64_be, 8)
/* Generates rb_append_int32size_<type>_<end>: first appends the total
 * payload size in bytes (argc * bytes) as a fixed 32-bit integer in the
 * given endianness, then the integers themselves. */
#define append_int32size_func(type, end, bytes) \
static VALUE \
rb_append_int32size_##type##_##end(int argc, VALUE* argv, VALUE self) \
{ \
    append_args args; \
    check_argc_append(argc, argv, &args, bytes * 8); \
    append_int32_##end(args.str, args.argc * bytes); \
    append_var_##type##_##end(args.argc, args.argv, args.str); \
    return args.str; \
}
append_int32size_func(int8, le, 1)
append_int32size_func(int16, le, 2)
append_int32size_func(int24, le, 3)
append_int32size_func(int32, le, 4)
append_int32size_func(int40, le, 5)
append_int32size_func(int48, le, 6)
append_int32size_func(int56, le, 7)
append_int32size_func(int64, le, 8)
append_int32size_func(int8, be, 1)
append_int32size_func(int16, be, 2)
append_int32size_func(int24, be, 3)
append_int32size_func(int32, be, 4)
append_int32size_func(int40, be, 5)
append_int32size_func(int48, be, 6)
append_int32size_func(int56, be, 7)
append_int32size_func(int64, be, 8)
/** APPEND BER **/
/* Appends every element of argv in BER encoding; returns the total number
 * of bytes written (used by the *size* wrappers below). */
static long
append_var_ber(int argc, VALUE* argv, VALUE str)
{
    long i, bs = 0;
    for(i = 0; i < argc; i++) {
        bs += append_ber(str, safe_int64_t(argv[i]));
    }
    return bs;
}
/* Ruby entry point for append_ber!(str, int, ...). Bit width 0 is passed
 * to check_argc_append — presumably "no fixed-width range check". */
static VALUE
rb_append_ber(int argc, VALUE* argv, VALUE self)
{
    append_args args;
    check_argc_append(argc, argv, &args, 0);
    append_var_ber(args.argc, args.argv, args.str);
    return args.str;
}
static VALUE rb_append_bersize_string(VALUE self, VALUE str, VALUE add);
/* Four zero bytes used to reserve room for a 32-bit size prefix that is
 * patched in after the payload is written. */
static const char zeros[4] = {0, 0, 0, 0};
/* append_bersize_ber!: the BER payload is built in a temporary string
 * first because its encoded length is variable; the temp's byte length is
 * then BER-prefixed and the whole thing appended to the target. */
static VALUE
rb_append_bersize_ber(int argc, VALUE* argv, VALUE self)
{
    append_args args;
    VALUE add_str = rb_str_new(0, 0);
    check_argc_append(argc, argv, &args, 0);
    append_var_ber(args.argc, args.argv, add_str);
    return rb_append_bersize_string(self, args.str, add_str);
}
/* append_int32size_ber_le!: reserves 4 zero bytes, appends the BER
 * payload, then patches the payload byte count into the reserved slot as
 * little-endian. The pointer is (re)computed after the appends because
 * rb_str_cat may reallocate the string buffer. */
static VALUE
rb_append_int32size_ber_le(int argc, VALUE* argv, VALUE self)
{
    append_args args;
    long ss, bs;
    uint8_t *ptr;
    check_argc_append(argc, argv, &args, 0);
    rb_str_cat(args.str, zeros, 4);
    ss = RSTRING_LEN(args.str) - 4;
    bs = append_var_ber(args.argc, args.argv, args.str);
    ptr = ((uint8_t*)RSTRING_PTR(args.str)) + ss;
    ptr[0] = bs & 255;
    ptr[1] = (bs >> 8) & 255;
    ptr[2] = (bs >> 16) & 255;
    ptr[3] = (bs >> 24) & 255;
    return args.str;
}
/* Big-endian twin of rb_append_int32size_ber_le: identical layout, byte
 * order of the patched size prefix reversed. */
static VALUE
rb_append_int32size_ber_be(int argc, VALUE* argv, VALUE self)
{
    append_args args;
    long ss, bs;
    uint8_t *ptr;
    check_argc_append(argc, argv, &args, 0);
    rb_str_cat(args.str, zeros, 4);
    ss = RSTRING_LEN(args.str) - 4;
    bs = append_var_ber(args.argc, args.argv, args.str);
    ptr = ((uint8_t*)RSTRING_PTR(args.str)) + ss;
    ptr[3] = bs & 255;
    ptr[2] = (bs >> 8) & 255;
    ptr[1] = (bs >> 16) & 255;
    ptr[0] = (bs >> 24) & 255;
    return args.str;
}
/** APPEND BER END **/
/** APPEND STRING **/
/* append_string!(str, add): concatenates add onto str. A falsy str is
 * replaced by a fresh empty string, which is returned. */
static VALUE
rb_append_string(VALUE self, VALUE str, VALUE add)
{
    if (!RTEST(str)) str = rb_str_new(0, 0);
    StringValue(add);
    rb_str_cat(str, RSTRING_PTR(add), RSTRING_LEN(add));
    RB_GC_GUARD(add);
    return str;
}
/* append_bersize_string!: appends add's byte length as a BER integer,
 * then add itself. */
static VALUE
rb_append_bersize_string(VALUE self, VALUE str, VALUE add)
{
    if (!RTEST(str)) str = rb_str_new(0, 0);
    StringValue(add);
    append_ber(str, RSTRING_LEN(add));
    rb_str_cat(str, RSTRING_PTR(add), RSTRING_LEN(add));
    RB_GC_GUARD(add);
    return str;
}
/* append_int32size_string_le!: appends add's byte length as a 32-bit
 * little-endian integer, then add itself. */
static VALUE
rb_append_int32size_string_le(VALUE self, VALUE str, VALUE add)
{
    if (!RTEST(str)) str = rb_str_new(0, 0);
    StringValue(add);
    append_int32_le(str, RSTRING_LEN(add));
    rb_str_cat(str, RSTRING_PTR(add), RSTRING_LEN(add));
    RB_GC_GUARD(add);
    return str;
}
/* Big-endian twin of rb_append_int32size_string_le. */
static VALUE
rb_append_int32size_string_be(VALUE self, VALUE str, VALUE add)
{
    if (!RTEST(str)) str = rb_str_new(0, 0);
    StringValue(add);
    append_int32_be(str, RSTRING_LEN(add));
    rb_str_cat(str, RSTRING_PTR(add), RSTRING_LEN(add));
    RB_GC_GUARD(add);
    return str;
}
/** APPEND STRING END **/
/** APPEND COMPLEX **/
/* append_int8_ber!: appends the first argument as an int8, the remaining
 * arguments as BER integers. check_argc_append_2's width arguments are
 * (first_bits, rest_bits); 0 means no fixed-width check. */
static VALUE
rb_append_int8_ber(int argc, VALUE *argv, VALUE self)
{
    append_args2 args;
    check_argc_append_2(argc, argv, &args, 8, 0);
    append_var_int8(1, &args.int0, args.str);
    append_var_ber(args.argc, args.argv, args.str);
    return args.str;
}
/* append_ber_int8!: appends the first argument as a BER integer, the
 * remaining arguments as int8.
 * Fix: pass (0, 8) to check_argc_append_2 — the first argument is the
 * unbounded BER value and the rest are 8-bit; the original passed (8, 0),
 * the reverse of the convention used by the macro-generated
 * rb_append_ber_int16/24/32 variants below. */
static VALUE
rb_append_ber_int8(int argc, VALUE *argv, VALUE self)
{
    append_args2 args;
    check_argc_append_2(argc, argv, &args, 0, 8);
    append_var_ber(1, &args.int0, args.str);
    return append_var_int8(args.argc, args.argv, args.str);
}
/* Generates the mixed int/BER appenders for a given width and endianness:
 * rb_append_int<bits>_ber_<end> (fixed-width first value, BER rest) and
 * rb_append_ber_int<bits>_<end> (BER first value, fixed-width rest).
 * check_argc_append_2 receives (first_bits, rest_bits); 0 = unbounded. */
#define append_int_ber(bits, end) \
static VALUE \
rb_append_int##bits##_ber_##end(int argc, VALUE *argv, VALUE self) \
{ \
    append_args2 args; \
    check_argc_append_2(argc, argv, &args, bits, 0); \
    append_var_int##bits##_##end(1, &args.int0, args.str); \
    append_var_ber(args.argc, args.argv, args.str); \
    return args.str; \
} \
static VALUE \
rb_append_ber_int##bits##_##end(int argc, VALUE *argv, VALUE self) \
{ \
    append_args2 args; \
    check_argc_append_2(argc, argv, &args, 0, bits); \
    append_var_ber(1, &args.int0, args.str); \
    return append_var_int##bits##_##end(args.argc, args.argv, args.str); \
}
append_int_ber(16, le)
append_int_ber(24, le)
append_int_ber(32, le)
append_int_ber(16, be)
append_int_ber(24, be)
append_int_ber(32, be)
/* Generates rb_append_int<bit1>_int<bit2>_<end>: appends the first
 * argument at bit1 width and the remaining arguments at bit2 width, both
 * in the given endianness. */
#define append_int_int(bit1, bit2, end) \
static VALUE \
rb_append_int##bit1##_int##bit2##_##end(int argc, VALUE *argv, VALUE self) \
{ \
    append_args2 args; \
    check_argc_append_2(argc, argv, &args, bit1, bit2); \
    append_var_int##bit1##_##end(1, &args.int0, args.str); \
    return append_var_int##bit2##_##end(args.argc, args.argv, args.str); \
}
append_int_int(8, 16, le)
append_int_int(8, 24, le)
append_int_int(8, 32, le)
append_int_int(16, 8, le)
append_int_int(16, 24, le)
append_int_int(16, 32, le)
append_int_int(24, 8, le)
append_int_int(24, 16, le)
append_int_int(24, 32, le)
append_int_int(32, 8, le)
append_int_int(32, 16, le)
append_int_int(32, 24, le)
append_int_int(8, 16, be)
append_int_int(8, 24, be)
append_int_int(8, 32, be)
append_int_int(16, 8, be)
append_int_int(16, 24, be)
append_int_int(16, 32, be)
append_int_int(24, 8, be)
append_int_int(24, 16, be)
append_int_int(24, 32, be)
append_int_int(32, 8, be)
append_int_int(32, 16, be)
append_int_int(32, 24, be)
/** APPEND COMPLEX END **/
/* Extension entry point: registers every reader/slicer/appender defined
 * above on BinUtils::Native and caches the interned symbols used for the
 * big-integer fallback paths. */
void
Init_bin_utils()
{
    VALUE mod_bin_utils = rb_define_module("BinUtils");
    VALUE mod_native = rb_define_module_under(mod_bin_utils, "Native");
    rshft = rb_intern(">>");
    band = rb_intern("&");
#ifndef HAVE_RB_STR_DROP_BYTES
    /* older Rubies lack rb_str_drop_bytes; fall back to String#slice! */
    aslice = rb_intern("slice!");
#endif
    /* non-destructive readers */
    rb_define_method(mod_native, "get_ber", rb_get_ber, -1);
    rb_define_method(mod_native, "get_int8", rb_get_int8, -1);
    rb_define_method(mod_native, "get_sint8", rb_get_sint8, -1);
    rb_define_method(mod_native, "get_int16_le", rb_get_int16_le, -1);
    rb_define_method(mod_native, "get_sint16_le", rb_get_sint16_le, -1);
    rb_define_method(mod_native, "get_int16_be", rb_get_int16_be, -1);
    rb_define_method(mod_native, "get_sint16_be", rb_get_sint16_be, -1);
    rb_define_method(mod_native, "get_int24_le", rb_get_int24_le, -1);
    rb_define_method(mod_native, "get_sint24_le", rb_get_sint24_le, -1);
    rb_define_method(mod_native, "get_int24_be", rb_get_int24_be, -1);
    rb_define_method(mod_native, "get_sint24_be", rb_get_sint24_be, -1);
    rb_define_method(mod_native, "get_int32_le", rb_get_int32_le, -1);
    rb_define_method(mod_native, "get_sint32_le", rb_get_sint32_le, -1);
    rb_define_method(mod_native, "get_int32_be", rb_get_int32_be, -1);
    rb_define_method(mod_native, "get_sint32_be", rb_get_sint32_be, -1);
    rb_define_method(mod_native, "get_int40_le", rb_get_int40_le, -1);
    rb_define_method(mod_native, "get_sint40_le", rb_get_sint40_le, -1);
    rb_define_method(mod_native, "get_int40_be", rb_get_int40_be, -1);
    rb_define_method(mod_native, "get_sint40_be", rb_get_sint40_be, -1);
    rb_define_method(mod_native, "get_int48_le", rb_get_int48_le, -1);
    rb_define_method(mod_native, "get_sint48_le", rb_get_sint48_le, -1);
    rb_define_method(mod_native, "get_int48_be", rb_get_int48_be, -1);
    rb_define_method(mod_native, "get_sint48_be", rb_get_sint48_be, -1);
    rb_define_method(mod_native, "get_int56_le", rb_get_int56_le, -1);
    rb_define_method(mod_native, "get_sint56_le", rb_get_sint56_le, -1);
    rb_define_method(mod_native, "get_int56_be", rb_get_int56_be, -1);
    rb_define_method(mod_native, "get_sint56_be", rb_get_sint56_be, -1);
    rb_define_method(mod_native, "get_int64_le", rb_get_int64_le, -1);
    rb_define_method(mod_native, "get_sint64_le", rb_get_sint64_le, -1);
    rb_define_method(mod_native, "get_int64_be", rb_get_int64_be, -1);
    rb_define_method(mod_native, "get_sint64_be", rb_get_sint64_be, -1);
    /* destructive readers (consume bytes from the head of the string) */
    rb_define_method(mod_native, "slice_ber!", rb_slice_ber, 1);
    rb_define_method(mod_native, "slice_int8!", rb_slice_int8, 1);
    rb_define_method(mod_native, "slice_sint8!", rb_slice_sint8, 1);
    rb_define_method(mod_native, "slice_int16_le!", rb_slice_int16_le, 1);
    rb_define_method(mod_native, "slice_sint16_le!", rb_slice_sint16_le, 1);
    rb_define_method(mod_native, "slice_int16_be!", rb_slice_int16_be, 1);
    rb_define_method(mod_native, "slice_sint16_be!", rb_slice_sint16_be, 1);
    rb_define_method(mod_native, "slice_int24_le!", rb_slice_int24_le, 1);
    rb_define_method(mod_native, "slice_sint24_le!", rb_slice_sint24_le, 1);
    rb_define_method(mod_native, "slice_int24_be!", rb_slice_int24_be, 1);
    rb_define_method(mod_native, "slice_sint24_be!", rb_slice_sint24_be, 1);
    rb_define_method(mod_native, "slice_int32_le!", rb_slice_int32_le, 1);
    rb_define_method(mod_native, "slice_sint32_le!", rb_slice_sint32_le, 1);
    rb_define_method(mod_native, "slice_int32_be!", rb_slice_int32_be, 1);
    rb_define_method(mod_native, "slice_sint32_be!", rb_slice_sint32_be, 1);
    rb_define_method(mod_native, "slice_int40_le!", rb_slice_int40_le, 1);
    rb_define_method(mod_native, "slice_sint40_le!", rb_slice_sint40_le, 1);
    rb_define_method(mod_native, "slice_int40_be!", rb_slice_int40_be, 1);
    rb_define_method(mod_native, "slice_sint40_be!", rb_slice_sint40_be, 1);
    rb_define_method(mod_native, "slice_int48_le!", rb_slice_int48_le, 1);
    rb_define_method(mod_native, "slice_sint48_le!", rb_slice_sint48_le, 1);
    rb_define_method(mod_native, "slice_int48_be!", rb_slice_int48_be, 1);
    rb_define_method(mod_native, "slice_sint48_be!", rb_slice_sint48_be, 1);
    rb_define_method(mod_native, "slice_int56_le!", rb_slice_int56_le, 1);
    rb_define_method(mod_native, "slice_sint56_le!", rb_slice_sint56_le, 1);
    rb_define_method(mod_native, "slice_int56_be!", rb_slice_int56_be, 1);
    rb_define_method(mod_native, "slice_sint56_be!", rb_slice_sint56_be, 1);
    rb_define_method(mod_native, "slice_int64_le!", rb_slice_int64_le, 1);
    rb_define_method(mod_native, "slice_sint64_le!", rb_slice_sint64_le, 1);
    rb_define_method(mod_native, "slice_int64_be!", rb_slice_int64_be, 1);
    rb_define_method(mod_native, "slice_sint64_be!", rb_slice_sint64_be, 1);
    /* plain appenders */
    rb_define_method(mod_native, "append_ber!", rb_append_ber, -1);
    rb_define_method(mod_native, "append_int8!", rb_append_int8, -1);
    rb_define_method(mod_native, "append_int16_le!", rb_append_int16_le, -1);
    rb_define_method(mod_native, "append_int16_be!", rb_append_int16_be, -1);
    rb_define_method(mod_native, "append_int24_le!", rb_append_int24_le, -1);
    rb_define_method(mod_native, "append_int24_be!", rb_append_int24_be, -1);
    rb_define_method(mod_native, "append_int32_le!", rb_append_int32_le, -1);
    rb_define_method(mod_native, "append_int32_be!", rb_append_int32_be, -1);
    rb_define_method(mod_native, "append_int40_le!", rb_append_int40_le, -1);
    rb_define_method(mod_native, "append_int40_be!", rb_append_int40_be, -1);
    rb_define_method(mod_native, "append_int48_le!", rb_append_int48_le, -1);
    rb_define_method(mod_native, "append_int48_be!", rb_append_int48_be, -1);
    rb_define_method(mod_native, "append_int56_le!", rb_append_int56_le, -1);
    rb_define_method(mod_native, "append_int56_be!", rb_append_int56_be, -1);
    rb_define_method(mod_native, "append_int64_le!", rb_append_int64_le, -1);
    rb_define_method(mod_native, "append_int64_be!", rb_append_int64_be, -1);
    /* appenders prefixed with a BER-encoded payload size */
    rb_define_method(mod_native, "append_bersize_ber!", rb_append_bersize_ber, -1);
    rb_define_method(mod_native, "append_bersize_int8!", rb_append_bersize_int8, -1);
    rb_define_method(mod_native, "append_bersize_int16_le!", rb_append_bersize_int16_le, -1);
    rb_define_method(mod_native, "append_bersize_int16_be!", rb_append_bersize_int16_be, -1);
    rb_define_method(mod_native, "append_bersize_int24_le!", rb_append_bersize_int24_le, -1);
    rb_define_method(mod_native, "append_bersize_int24_be!", rb_append_bersize_int24_be, -1);
    rb_define_method(mod_native, "append_bersize_int32_le!", rb_append_bersize_int32_le, -1);
    rb_define_method(mod_native, "append_bersize_int32_be!", rb_append_bersize_int32_be, -1);
    rb_define_method(mod_native, "append_bersize_int40_le!", rb_append_bersize_int40_le, -1);
    rb_define_method(mod_native, "append_bersize_int40_be!", rb_append_bersize_int40_be, -1);
    rb_define_method(mod_native, "append_bersize_int48_le!", rb_append_bersize_int48_le, -1);
    rb_define_method(mod_native, "append_bersize_int48_be!", rb_append_bersize_int48_be, -1);
    rb_define_method(mod_native, "append_bersize_int56_le!", rb_append_bersize_int56_le, -1);
    rb_define_method(mod_native, "append_bersize_int56_be!", rb_append_bersize_int56_be, -1);
    rb_define_method(mod_native, "append_bersize_int64_le!", rb_append_bersize_int64_le, -1);
    rb_define_method(mod_native, "append_bersize_int64_be!", rb_append_bersize_int64_be, -1);
    /* appenders prefixed with a fixed 32-bit payload size */
    rb_define_method(mod_native, "append_int32size_ber_le!", rb_append_int32size_ber_le, -1);
    rb_define_method(mod_native, "append_int32size_int8_le!", rb_append_int32size_int8_le, -1);
    rb_define_method(mod_native, "append_int32size_int16_le!", rb_append_int32size_int16_le, -1);
    rb_define_method(mod_native, "append_int32size_int24_le!", rb_append_int32size_int24_le, -1);
    rb_define_method(mod_native, "append_int32size_int32_le!", rb_append_int32size_int32_le, -1);
    rb_define_method(mod_native, "append_int32size_int40_le!", rb_append_int32size_int40_le, -1);
    rb_define_method(mod_native, "append_int32size_int48_le!", rb_append_int32size_int48_le, -1);
    rb_define_method(mod_native, "append_int32size_int56_le!", rb_append_int32size_int56_le, -1);
    rb_define_method(mod_native, "append_int32size_int64_le!", rb_append_int32size_int64_le, -1);
    rb_define_method(mod_native, "append_int32size_ber_be!", rb_append_int32size_ber_be, -1);
    rb_define_method(mod_native, "append_int32size_int8_be!", rb_append_int32size_int8_be, -1);
    rb_define_method(mod_native, "append_int32size_int16_be!", rb_append_int32size_int16_be, -1);
    rb_define_method(mod_native, "append_int32size_int24_be!", rb_append_int32size_int24_be, -1);
    rb_define_method(mod_native, "append_int32size_int32_be!", rb_append_int32size_int32_be, -1);
    rb_define_method(mod_native, "append_int32size_int40_be!", rb_append_int32size_int40_be, -1);
    rb_define_method(mod_native, "append_int32size_int48_be!", rb_append_int32size_int48_be, -1);
    rb_define_method(mod_native, "append_int32size_int56_be!", rb_append_int32size_int56_be, -1);
    rb_define_method(mod_native, "append_int32size_int64_be!", rb_append_int32size_int64_be, -1);
    /* string appenders */
    rb_define_method(mod_native, "append_string!", rb_append_string, 2);
    rb_define_method(mod_native, "append_bersize_string!", rb_append_bersize_string, 2);
    rb_define_method(mod_native, "append_int32size_string_le!", rb_append_int32size_string_le, 2);
    rb_define_method(mod_native, "append_int32size_string_be!", rb_append_int32size_string_be, 2);
    /* mixed-width appenders */
    rb_define_method(mod_native, "append_int8_ber!", rb_append_int8_ber, -1);
    rb_define_method(mod_native, "append_ber_int8!", rb_append_ber_int8, -1);
    rb_define_method(mod_native, "append_int8_int16_le!", rb_append_int8_int16_le, -1);
    rb_define_method(mod_native, "append_int8_int24_le!", rb_append_int8_int24_le, -1);
    rb_define_method(mod_native, "append_int8_int32_le!", rb_append_int8_int32_le, -1);
    rb_define_method(mod_native, "append_int8_int16_be!", rb_append_int8_int16_be, -1);
    rb_define_method(mod_native, "append_int8_int24_be!", rb_append_int8_int24_be, -1);
    rb_define_method(mod_native, "append_int8_int32_be!", rb_append_int8_int32_be, -1);
    rb_define_method(mod_native, "append_int16_int8_le!", rb_append_int16_int8_le, -1);
    rb_define_method(mod_native, "append_int16_int24_le!", rb_append_int16_int24_le, -1);
    rb_define_method(mod_native, "append_int16_int32_le!", rb_append_int16_int32_le, -1);
    rb_define_method(mod_native, "append_int16_int8_be!", rb_append_int16_int8_be, -1);
    rb_define_method(mod_native, "append_int16_int24_be!", rb_append_int16_int24_be, -1);
    rb_define_method(mod_native, "append_int16_int32_be!", rb_append_int16_int32_be, -1);
    rb_define_method(mod_native, "append_int24_int16_le!", rb_append_int24_int16_le, -1);
    rb_define_method(mod_native, "append_int24_int8_le!", rb_append_int24_int8_le, -1);
    rb_define_method(mod_native, "append_int24_int32_le!", rb_append_int24_int32_le, -1);
    rb_define_method(mod_native, "append_int24_int16_be!", rb_append_int24_int16_be, -1);
    rb_define_method(mod_native, "append_int24_int8_be!", rb_append_int24_int8_be, -1);
    rb_define_method(mod_native, "append_int24_int32_be!", rb_append_int24_int32_be, -1);
    rb_define_method(mod_native, "append_int32_int16_le!", rb_append_int32_int16_le, -1);
    rb_define_method(mod_native, "append_int32_int24_le!", rb_append_int32_int24_le, -1);
    rb_define_method(mod_native, "append_int32_int8_le!", rb_append_int32_int8_le, -1);
    rb_define_method(mod_native, "append_int32_int16_be!", rb_append_int32_int16_be, -1);
    rb_define_method(mod_native, "append_int32_int24_be!", rb_append_int32_int24_be, -1);
    rb_define_method(mod_native, "append_int32_int8_be!", rb_append_int32_int8_be, -1);
    rb_define_method(mod_native, "append_ber_int16_le!", rb_append_ber_int16_le, -1);
    rb_define_method(mod_native, "append_ber_int24_le!", rb_append_ber_int24_le, -1);
    rb_define_method(mod_native, "append_ber_int32_le!", rb_append_ber_int32_le, -1);
    rb_define_method(mod_native, "append_ber_int16_be!", rb_append_ber_int16_be, -1);
    rb_define_method(mod_native, "append_ber_int24_be!", rb_append_ber_int24_be, -1);
    rb_define_method(mod_native, "append_ber_int32_be!", rb_append_ber_int32_be, -1);
    rb_define_method(mod_native, "append_int16_ber_le!", rb_append_int16_ber_le, -1);
    rb_define_method(mod_native, "append_int24_ber_le!", rb_append_int24_ber_le, -1);
    rb_define_method(mod_native, "append_int32_ber_le!", rb_append_int32_ber_le, -1);
    rb_define_method(mod_native, "append_int16_ber_be!", rb_append_int16_ber_be, -1);
    rb_define_method(mod_native, "append_int24_ber_be!", rb_append_int24_ber_be, -1);
    rb_define_method(mod_native, "append_int32_ber_be!", rb_append_int32_ber_be, -1);
    /* make the methods callable as module functions too */
    rb_extend_object(mod_native, mod_native);
}
|
rburgst/AutotypeURL
|
AutotypeURL/ATUAutotypeURL.h
|
//
// ATUAutotypeURL.h
// AutotypeURL
//
// Created by <NAME> on 07.05.19.
// Copyright © 2019 HicknHack Software GmbH. All rights reserved.
//
#import <Cocoa/Cocoa.h>
#import "MPPlugin.h"
NS_ASSUME_NONNULL_BEGIN
/// Settings key exported for use elsewhere — presumably toggles full
/// window-title matching; confirm against the implementation.
FOUNDATION_EXPORT NSString *const kMPASettingsKeyFullMatch;
/// MacPass plugin conforming to the settings and window-title-resolver
/// plugin protocols.
/// Fix: removed the stray `;` that followed the @interface declaration.
@interface ATUAutotypeURL : MPPlugin <MPPluginSettings, MPAutotypeWindowTitleResolverPlugin>
@end
NS_ASSUME_NONNULL_END
|
rburgst/AutotypeURL
|
AutotypeURL/Settings/ATUSettingsViewController.h
|
<gh_stars>10-100
//
// ATUSettingsViewController.h
// AutotypeURL
//
// Created by <NAME> on 7/10/19.
// Copyright © 2019 <NAME> All rights reserved.
//
#import <Cocoa/Cocoa.h>
@class ATUAutotypeURL;
/// View controller for the plugin's settings UI.
/// NOTE(review): ATUAutotypeURL is forward-declared but not referenced in
/// this header — presumably used by the implementation file; confirm.
@interface ATUSettingsViewController : NSViewController
@end
|
rburgst/AutotypeURL
|
AutotypeURL/ATUChromeExtractor.h
|
//
// ATUChromeExtractor.h
// AutotypeURL
//
// Created by <NAME> on 10.05.19.
// Copyright © 2019 HicknHack Software GmbH. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "ATUURLExtraction.h"
NS_ASSUME_NONNULL_BEGIN
/// ATUURLExtraction conformer — presumably extracts the active tab URL
/// from Google Chrome (inferred from the name; confirm in the .m file).
@interface ATUChromeExtractor : NSObject <ATUURLExtraction>
@end
NS_ASSUME_NONNULL_END
|
rburgst/AutotypeURL
|
AutotypeURL/ATUSafariExtractor.h
|
//
// ATUSafariExtractor.h
// AutotypeURL
//
// Created by <NAME> on 10.05.19.
// Copyright © 2019 HicknHack Software GmbH. All rights reserved.
//
#import <Foundation/Foundation.h>
#import "ATUURLExtraction.h"
NS_ASSUME_NONNULL_BEGIN
/// ATUURLExtraction conformer — presumably extracts the active tab URL
/// from Safari (inferred from the name; confirm in the .m file).
@interface ATUSafariExtractor : NSObject <ATUURLExtraction>
@end
NS_ASSUME_NONNULL_END
|
rburgst/AutotypeURL
|
AutotypeURL/ATUURLExtraction.h
|
<reponame>rburgst/AutotypeURL
//
// ATUURLExtraction.h
// AutotypeURL
//
// Created by <NAME> on 10.05.19.
// Copyright © 2019 HicknHack Software GmbH. All rights reserved.
//
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
@class NSRunningApplication;
/// Contract implemented by the per-browser URL extractors.
@protocol ATUURLExtraction <NSObject>
@required
/// Bundle identifiers of the applications this extractor supports.
@property (readonly, nonatomic) NSArray<NSString *> *supportedBundleIdentifiers;
/// Returns a URL string for the given running application.
- (NSString *)URLForRunningApplication:(NSRunningApplication *)runningApplication;
@end
NS_ASSUME_NONNULL_END
|
CS-131-Repo/cs-131-project-AlbertBoll
|
Asymmetric encryption.h
|
#pragma once
#include <iostream>
#include <vector>
#include<optional>
// 8 hash initial message digest, each element represents HEX representation of small fraction of square root of 2,3,5,7,11,13,17,19(first 8 prime numbers)
//uint32_t H[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
//
//
//
//constexpr uint32_t K[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
// 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
// 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
// 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
// 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
// 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
// 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
// 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 };
// Static SHA-256 implementation: preprocessing, chunking, message-schedule
// construction and compression, exposing encrypt() to hash a byte vector.
class Sha256
{
    // 8 initial hash values; each is the hex representation of the fractional
    // part of the square root of the first 8 primes (2,3,5,7,11,13,17,19).
    inline static std::vector<uint32_t> H = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
    // 64 round constants; each is the hex representation of the fractional
    // part of the cube root of the first 64 primes.
    inline static const uint32_t K[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
                    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
                    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
                    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
                    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
                    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
                    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
                    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 };
public:
    // Hashes original_message and writes the 32-byte digest into
    // *encrypted_digest; returns success. (Hashing, despite the name.)
    static bool encrypt(const std::vector<uint8_t>& original_message,
        std::vector<uint8_t>* encrypted_digest);
private:
    // Pads the message to meet the SHA-256 layout: content plus padding to
    // 448 bits mod 512, then a 64-bit length field, so the total is a
    // multiple of 64 bytes (512 bits).
    static bool preprocessing(std::vector<uint8_t>* message);
    //*************************** Calculate the message digest ***********************************
    // Splits the padded message into 64-byte chunks.
    static bool decomposeTextInto64BytesChunks(const std::vector<uint8_t>& message,
        std::vector<std::vector<uint8_t>>* chunks);
    // Builds the 64-word (32-bit each) message schedule for one chunk; the
    // first 16 words merge chunk[4*i..4*i+3] in big-endian order.
    static bool constructWords(const std::vector<uint8_t>& chunk,
        std::vector<uint32_t>* words);
    // Runs the 64-round compression over one schedule, updating the 8-word
    // digest per the SHA-256 iteration formula.
    static bool transform(const std::vector<uint32_t>& words, std::vector<uint32_t>* message_digest);
    // Convenience: hash a std::string and return its hex digest, if any.
    static std::optional<std::string> getHexEncryptedDigest(const std::string& message);
    //friend std::ostream& operator<<(std::ostream& os, const Sha256& other);
private:
    // Rotates value x right (circular shift) by n positions.
    // NOTE(review): undefined for n == 0 or n == 32; callers below only use
    // n in 2..25.
    static uint32_t circular_right_shift(int n, uint32_t x) {
        return (x >> n) | (x << (32 - n));
    }
    // The six bitwise functions required by the SHA-256 specification.
    static uint32_t Sigma_0(uint32_t x) { return circular_right_shift(2, x) ^ circular_right_shift(13, x) ^ circular_right_shift(22, x); }
    static uint32_t Sigma_1(uint32_t x) { return circular_right_shift(6, x) ^ circular_right_shift(11, x) ^ circular_right_shift(25, x); }
    static uint32_t sigma_0(uint32_t x) { return circular_right_shift(7, x) ^ circular_right_shift(18, x) ^ (x >> 3); }
    static uint32_t sigma_1(uint32_t x) { return circular_right_shift(17, x) ^ circular_right_shift(19, x) ^ (x >> 10); }
    static uint32_t Ch(uint32_t x, uint32_t y, uint32_t z) { return (x & y) ^ (~x & z); }
    static uint32_t Ma(uint32_t x, uint32_t y, uint32_t z) { return (x & y) ^ (x & z) ^ (y & z); }
    // Serializes the 8-word digest into bytes.
    static bool HashValue(const std::vector<uint32_t>& input, std::vector<uint8_t>* outHashVal);
};
|
gosaliajigar/Languages
|
C/roundrobin.c
|
#include<stdio.h>
/* Seeds the four demo processes: arrival times 0..3, burst times
 * {9,5,3,4}; remaining time starts equal to the burst time. */
void init(int arrival_time[], int burst_time[], int remaining_time[]) {
    static const int arrivals[4] = {0, 1, 2, 3};
    static const int bursts[4]   = {9, 5, 3, 4};
    int p;
    for (p = 0; p < 4; p++) {
        arrival_time[p]   = arrivals[p];
        burst_time[p]     = bursts[p];
        remaining_time[p] = bursts[p];
    }
}
/* Prompts until a positive integer time quantum is entered and returns it.
 * Fix: the original ignored scanf's return value, so a non-numeric token
 * stayed in stdin and the loop spun forever; invalid tokens are now
 * discarded. On EOF there is nothing left to read, so a quantum of 1 is
 * returned as a documented fallback instead of looping forever. */
int user_input() {
    int quantum;
    while (1) {
        printf("\nEnter Time Quantum(>0) : ");
        if (scanf("%d", &quantum) != 1) {
            int c;
            /* discard the offending token up to end of line (or EOF) */
            while ((c = getchar()) != '\n' && c != EOF)
                ;
            if (c == EOF) {
                printf("ERR: No numeric input available, defaulting Time Quantum to 1\n");
                return 1;
            }
            printf("ERR: Invalid Time Quantum");
            continue;
        }
        if (quantum > 0) {
            return quantum;
        }
        printf("ERR: Invalid Time Quantum : %d", quantum);
    }
}
/* Marks execution[start .. start+end-1] with process_number, i.e. records
 * which process occupies each time slot of the Gantt trace.
 * (`end` is a slot count relative to `start`, not an absolute end time.) */
void record_execution(int start, int end, int process_number, int execution[]) {
    int slot;
    for (slot = 0; slot < end; slot++) {
        execution[start + slot] = process_number;
    }
}
/* Prints the per-slot Gantt trace: one line per time unit with the process
 * that occupied it. */
void display_execution(int total, int execution[]) {
    int counter;
    printf("---------------------------------\n");
    printf("  TIME\t|  PROCESS EXECUTING\n");
    printf("---------------------------------\n");
    for(counter=0;counter<total;counter++){
        printf("  %d\t|\tP[%d]\n", counter, execution[counter]);
    }
    printf("---------------------------------\n");
}
/* Prints the summary block: the quantum, the order in which quanta were
 * granted, and average waiting/turnaround times (totals divided by the
 * process count; *1.0 forces floating-point division). */
void display_statistics(int time_quantum, int time, int wait_time, int turnaround_time, int no_of_processes, int sequence[], int sequence_count) {
    int counter;
    printf("---------------------------------------------------------------------\n");
    printf("                            STATISTICS                               \n");
    printf("---------------------------------------------------------------------\n");
    printf("Time Quantum for Round-Robin    : %d\n", time_quantum);
    printf("Sequence of processes execution : ");
    for(counter=0;counter<sequence_count;counter++){
        printf("P[%d] ", sequence[counter]);
    }
    printf("\n");
    printf("Total Exceution Time            : %d\n",time);
    printf("Average Waiting Time            : %.2f\n",wait_time*1.0/no_of_processes);
    printf("Average Turnaround Time         : %.2f\n",turnaround_time*1.0/no_of_processes);
    printf("\n---------------------------------------------------------------------\n");
    printf("\n");
}
/* Prints the column header for the per-process completion table. */
void display_header() {
    printf("\n");
    printf("---------------------------------------------------------------------\n");
    printf("                  SEQUENCE OF PROCESS COMPLETION                     \n");
    printf("---------------------------------------------------------------------\n");
    printf("Process\t| Arrival Time  |  Burst Time   |Turnaround Time|Waiting Time\n");
    printf("--------|---------------|---------------|---------------|------------\n");
}
/* Round-robin scheduling demo over 4 hard-coded processes: runs the
 * simulation, prints completion order, the Gantt trace and averages. */
int main() {
    // default fields
    int no_of_processes=4, isProcessComplete=0, wait_time=0, turnaround_time=0;
    // processed fields
    int process_number, time, pending_process, time_quantum, sequence_count;
    // process arrays
    int arrival_time[no_of_processes], burst_time[no_of_processes], remaining_time[no_of_processes];
    // sequence of processes
    int sequence[1000], execution[1000];
    pending_process=no_of_processes;
    init(arrival_time, burst_time, remaining_time);
    time_quantum = user_input();
    display_header();
    for(time=0,process_number=0,sequence_count=0;pending_process!=0;) {
        if(remaining_time[process_number]>0) {
            /* NOTE(review): this records the process's FULL remaining time
             * into execution[], even though only one quantum may run now;
             * slots beyond time+quantum are overwritten by later turns, so
             * the displayed trace (limited to `time` slots) stays
             * consistent — confirm this is intentional. */
            record_execution(time, remaining_time[process_number], process_number+1, execution);
            if(remaining_time[process_number]<=time_quantum) {
                /* final (possibly partial) quantum for this process */
                time+=remaining_time[process_number];
                remaining_time[process_number]=0;
                isProcessComplete=1;
            } else {
                remaining_time[process_number]-=time_quantum;
                time+=time_quantum;
            }
            sequence[sequence_count++]=process_number + 1;
        }
        if(remaining_time[process_number]==0 && isProcessComplete==1) {
            /* process just finished: print its row and accumulate totals.
             * turnaround = completion - arrival; waiting = turnaround - burst */
            pending_process--;
            printf("P[%d]\t|\t%3d\t|\t%3d\t|\t%3d\t|\t%3d\t\n",process_number+1,arrival_time[process_number],burst_time[process_number],time-arrival_time[process_number],time-arrival_time[process_number]-burst_time[process_number]);
            wait_time+=time-arrival_time[process_number]-burst_time[process_number];
            turnaround_time+=time-arrival_time[process_number];
            isProcessComplete=0;
        }
        /* rotation: wrap after the last process; otherwise advance only if
         * the next process has already arrived, else restart from 0 */
        if(process_number==no_of_processes-1)
            process_number=0;
        else if(arrival_time[process_number+1]<=time)
            process_number++;
        else
            process_number=0;
    }
    printf("---------------------------------------------------------------------\n\n");
    display_execution(time, execution);
    display_statistics(time_quantum, time, wait_time, turnaround_time, no_of_processes, sequence, sequence_count);
    return 0;
}
|
10088/mars
|
mars/log/appender.h
|
// Tencent is pleased to support the open source community by making Mars available.
// Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the MIT License (the "License"); you may not use this file except in
// compliance with the License. You may obtain a copy of the License at
// http://opensource.org/licenses/MIT
// Unless required by applicable law or agreed to in writing, software distributed under the License is
// distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions and
// limitations under the License.
/*
* appender.h
*
* Created on: 2013-3-7
* Author: yerungui
*/
#ifndef APPENDER_H_
#define APPENDER_H_
#include <string>
#include <vector>
#include <stdint.h>
namespace mars {
namespace xlog {
enum TAppenderMode
{
kAppenderAsync,
kAppenderSync,
};
enum TCompressMode{
kZlib,
kZstd,
};
// Configuration for opening the xlog appender (passed to appender_open()).
struct XLogConfig{
    TAppenderMode mode_ = kAppenderAsync;  // asynchronous writing by default
    std::string logdir_;                   // directory the log files are written to
    std::string nameprefix_;               // log file name prefix
    std::string pub_key_;                  // public key for log encryption; presumably empty = unencrypted — confirm
    TCompressMode compress_mode_ = kZlib;  // zlib or zstd
    int compress_level_ = 6;               // level handed to the chosen compressor
    std::string cachedir_;                 // optional cache directory (assumption: staging area before logdir_ — confirm)
    int cache_days_ = 0;                   // days to keep cached files; 0 presumably disables caching — confirm
};
#ifdef __APPLE__
enum TConsoleFun {
kConsolePrintf,
kConsoleNSLog,
};
#endif
void appender_open(const XLogConfig& _config);
void appender_flush();
void appender_flush_sync();
void appender_close();
void appender_setmode(TAppenderMode _mode);
bool appender_getfilepath_from_timespan(int _timespan, const char* _prefix, std::vector<std::string>& _filepath_vec);
bool appender_make_logfile_name(int _timespan, const char* _prefix, std::vector<std::string>& _filepath_vec);
bool appender_get_current_log_path(char* _log_path, unsigned int _len);
bool appender_get_current_log_cache_path(char* _logPath, unsigned int _len);
void appender_set_console_log(bool _is_open);
#ifdef __APPLE__
void appender_set_console_fun(TConsoleFun _fun);
#endif
/*
* By default, all logs will write to one file everyday. You can split logs to multi-file by changing max_file_size.
*
* @param _max_byte_size Max byte size of single log file, default is 0, meaning do not split.
*/
void appender_set_max_file_size(uint64_t _max_byte_size);
/*
* By default, all logs lives 10 days at most.
*
* @param _max_time Max alive duration of a single log file in seconds, default is 10 days
*/
void appender_set_max_alive_duration(long _max_time);
}
}
#endif /* APPENDER_H_ */
|
10088/mars
|
mars/sdt/src/checkimpl/pingquery.h
|
// Tencent is pleased to support the open source community by making Mars available.
// Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the MIT License (the "License"); you may not use this file except in
// compliance with the License. You may obtain a copy of the License at
// http://opensource.org/licenses/MIT
// Unless required by applicable law or agreed to in writing, software distributed under the License is
// distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions and
// limitations under the License.
/*
* pingquery.h
*
* Created on: 2014/06/18
* Author: wutianqiang
*/
#ifndef SDT_SRC_CHECKIMPL_PINGQUERY_H_
#define SDT_SRC_CHECKIMPL_PINGQUERY_H_
#include <string>
#include <vector>
#include "boost/bind.hpp"
#include "mars/comm/socket/unix_socket.h"
#ifdef __APPLE__
#include "mars/comm/alarm.h"
#include "mars/comm/socket/socketselect.h"
#endif
class NetCheckTrafficMonitor;
#define DISALLOW_COPY_AND_ASSIGN(cls) \
private:\
cls(const cls&); \
cls& operator=(const cls&);
namespace mars {
namespace sdt {
// Aggregated result of one ping run, filled in by PingQuery::GetPingStatus().
struct PingStatus {
    std::string res;     // textual result/output of the run (assumption — confirm with GetPingStatus impl)
    double loss_rate;    // packet loss ratio
    double minrtt;       // ms
    double avgrtt;       // ms
    double maxrtt;       // ms
    char ip[16];         // target IP as text; 16 bytes fits "255.255.255.255\0" (IPv4 only)
};
// Sends ICMP echo ("ping") probes and aggregates the results into PingStatus.
// The in-class probe machinery exists only on Apple platforms (__APPLE__);
// elsewhere only the public entry points are declared (defined elsewhere).
class PingQuery {
  public:
    PingQuery(NetCheckTrafficMonitor* trafficMonitor = NULL): pingresult_("")
#ifdef __APPLE__
        , nsent_(0),
        sockfd_(-1),
        sendtimes_(0),
        sendcount_(0),
        readcount_(0),
        interval_(0),
        timeout_(0),
        readwrite_breaker_(),
        alarm_(boost::bind(&PingQuery::__onAlarm, this), false)
#endif
        , traffic_monitor_(trafficMonitor)
    {}
    ~PingQuery() {
    }

  public:
    /**
     * Fill pingStatus with the outcome of the last/current query.
     * return value:
     * 0---->success
     * -1--->error
     */
    int GetPingStatus(struct PingStatus& pingStatus);
    /**
     * Run queryCount pings against dest, one every `interval` seconds,
     * giving up after `timeout` seconds. The second overload additionally
     * reports the measured rtt through *rtt.
     * return value:
     * 0---->success
     * -1--->error
     */
    int RunPingQuery(int queryCount, int interval/*S*/, int timeout/*S*/,
                     const char* dest, unsigned int packetSize = 0);
    int RunPingQuery(int _querycount, int interval/*S*/, int timeout/*S*/,
                     const char* dest, unsigned int packetSize, int* rtt);

#ifdef __APPLE__
  private:
    // Raw-socket ICMP implementation details (Apple only).
    void proc_v4(char* ptr, ssize_t len, struct msghdr* msg, struct timeval* tvrecv);
    int __prepareSendAddr(const char* dest);
    int __runReadWrite(int& errCode);
    void __onAlarm();
    void __preparePacket(char* sendbuffer, int& len);
    int __send();
    int __recv();
    int __initialize(const char* dest);
    void __deinitialize();
#endif
    DISALLOW_COPY_AND_ASSIGN(PingQuery);

  private:
    std::string pingresult_;
#ifdef __APPLE__
    int nsent_;    /* add 1 for each sendto() */
    int sockfd_;   // raw ICMP socket, -1 when closed
    std::vector<double> vecrtts_;   // per-probe round-trip times
    int sendtimes_;
    int sendcount_;
    int readcount_;
    int interval_;   // seconds between probes
    int timeout_;    // overall timeout in seconds
    struct sockaddr sendaddr_;
    struct sockaddr recvaddr_;
    mars::comm::SocketBreaker readwrite_breaker_;  // lets __onAlarm abort a blocking select
    mars::comm::Alarm alarm_;
#endif
    NetCheckTrafficMonitor* traffic_monitor_;  // not owned; may be NULL
};
}}
#endif /* SDT_SRC_CHECKIMPL_PINGQUERY_H_ */
|
10088/mars
|
mars/comm/windows/thread/thread.h
|
<reponame>10088/mars
// Tencent is pleased to support the open source community by making Mars available.
// Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the MIT License (the "License"); you may not use this file except in
// compliance with the License. You may obtain a copy of the License at
// http://opensource.org/licenses/MIT
// Unless required by applicable law or agreed to in writing, software distributed under the License is
// distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions and
// limitations under the License.
#ifndef THREAD_H_
#define THREAD_H_
#include <errno.h>
#include <stdlib.h>
#include <xtimec.h>
#include "assert/__assert.h"
#include "condition.h"
#include "thread/runnable.h"
#include "mars/openssl/include/openssl/crypto.h"
typedef HANDLE thread_handler;
#define thrd_success (0)
namespace mars {
namespace comm {
typedef DWORD thread_tid;
typedef void* (*THREAD_START_PROC)(void* arg);
class ThreadUtil {
public:
static void yield() {
::SwitchToThread();
}
static void sleep(unsigned int _sec) {
::Sleep(_sec * 1000);
}
static void usleep(unsigned int _usec) {
::Sleep(_usec / 1000);
}
static thread_tid currentthreadid() {
return ::GetCurrentThreadId();
}
static bool isruning(thread_tid _id) {
if (_id == currentthreadid())
return true;
HANDLE handle = OpenThread(THREAD_ALL_ACCESS, FALSE, _id);
if (handle == NULL) return false;
return ::WaitForSingleObject(handle, 0) == WAIT_OBJECT_0;
}
static int createThread(thread_handler& pth, THREAD_START_PROC proc, void* args) {
HANDLE handle = ::CreateThread(nullptr, 0, (LPTHREAD_START_ROUTINE)proc, args, 0, nullptr);
if (handle == nullptr) return -1;
pth = handle;
return thrd_success;
}
static void join(thread_handler& pth) {
if (pth == NULL)
return;
if (getThreadId(pth) == currentthreadid())
return;
::WaitForSingleObject(pth, INFINITE);
}
static void join (thread_tid _tid) {
ASSERT(_tid != currentthreadid());
if (_tid == currentthreadid())
return;
HANDLE handler = OpenThread(THREAD_ALL_ACCESS, FALSE, _tid);
if (NULL == handler) {
return;
}
::WaitForSingleObject(handler, INFINITE);
}
static void detach(thread_handler& pth) {
if (pth == NULL)
return ;
::CloseHandle(pth);
pth = NULL;
}
static thread_tid getThreadId(thread_handler& pth) {
if (pth == NULL)
return 0;
return ::GetThreadId(pth);
}
};
// Worker-thread wrapper around a Runnable target (Windows implementation).
// The heap-allocated RunnableReference is shared between this object and the
// spawned OS thread and is manually ref-counted so it outlives whichever of
// the two is destroyed first.
class Thread {
 private:
    // Ref-counted shared state: the Runnable target plus scheduling flags.
    // One reference is held by the owning Thread object and one by each
    // currently running start_routine*.
    class RunnableReference {
     public:
        RunnableReference(Runnable* _target)
            : target(_target), count(0), isjoined(false), isended(true) ,
            aftertime(UINT_MAX), periodictime(UINT_MAX), iscanceldelaystart(false)
            , isinthread(false), killsig(0) {
            // tid._Hnd = 0;
            // tid._Id = 0;
            m_th = NULL;
            // ASSERT(target);
        }
        ~RunnableReference() {
            delete target;
            ASSERT(0 == count);
            ASSERT(isended);
            if (m_th != NULL) {
                ::CloseHandle(m_th);
                m_th = NULL;
            }
        }
        // Caller must hold splock.
        void AddRef() { count++;}
        // Caller passes in the held lock; it is released *before* a possible
        // `delete this`, so splock is never destroyed while still locked.
        void RemoveRef(ScopedSpinLock& _lock) {
            ASSERT(0 < count);
            ASSERT(_lock.islocked());
            bool willdel = false;
            count--;
            if (0 == count) willdel = true;
            _lock.unlock();
            if (willdel) delete this;
        }
     private:
        RunnableReference(const RunnableReference&);
        RunnableReference& operator=(const RunnableReference&);
     public:
        Runnable* target;            // the job to run; owned (deleted in dtor)
        int count;                   // manual refcount, guarded by splock
        // thrd_t tid;
        thread_handler m_th = NULL;  // OS thread handle, NULL when not started
        bool isjoined;               // someone will (or did) join; skip auto-detach
        bool isended;                // thread function has finished (or never ran)
        unsigned int aftertime;      // delay before first run, ms (UINT_MAX = unset)
        unsigned int periodictime;   // period between runs, ms (UINT_MAX = unset)
        bool iscanceldelaystart;     // request to abort a delayed/periodic start
        Condition condtime;          // interruptible wait used for the delays
        SpinLock splock;             // guards all mutable state above
        bool isinthread;             // true while the thread function is running
        int killsig;                 // pending signal for kill(); unused on Windows
    };

 public:
    // Wrap any callable `op` (converted to a Runnable via detail::transform).
    // _outside_join = true means the creator promises to call join() itself,
    // so the thread is not auto-detached on exit.
    template<class T>
    explicit Thread(const T& op, const char* _thread_name = NULL, bool _outside_join = false)
        : m_runableref(NULL), outside_join_(_outside_join) {
        m_runableref = new RunnableReference(detail::transform(op));
        ScopedSpinLock lock(m_runableref->splock);
        m_runableref->AddRef();
    }
    // Create with no target yet; supply one via start(op).
    Thread(const char* _thread_name = NULL, bool _outside_join = false)
        : m_runableref(NULL), outside_join_(_outside_join) {
        m_runableref = new RunnableReference(NULL);
        ScopedSpinLock lock(m_runableref->splock);
        m_runableref->AddRef();
    }
    virtual ~Thread() {
        ScopedSpinLock lock(m_runableref->splock);
        m_runableref->RemoveRef(lock);
    }
    // Start the stored target. *_newone reports whether a thread was actually
    // spawned (false if one was already running). Returns thrd_success or the
    // createThread error.
    int start(bool* _newone = NULL) {
        ScopedSpinLock lock(m_runableref->splock);
        if (_newone) *_newone = false;
        if (isruning())return 0;
        m_runableref->isended = false;
        m_runableref->isjoined = outside_join_;
        m_runableref->AddRef();  // reference owned by the new thread; dropped in cleanup()
        // int ret = thrd_create(&m_runableref->tid, (thrd_start_t)&start_routine, (void*)m_runableref);
        int ret = ThreadUtil::createThread(m_runableref->m_th, &start_routine, (void*)m_runableref);
        ASSERT(thrd_success == ret);
        if (_newone) *_newone = true;
        if (thrd_success != ret) {
            // Spawn failed: roll back state and the thread's reference.
            m_runableref->isended = true;
            m_runableref->RemoveRef(lock);
        }
        return ret;
    }
    // Replace the target with `op`, then start it (same contract as start()).
    template <typename T>
    int start(const T& op, bool* _newone = NULL) {
        ScopedSpinLock lock(m_runableref->splock);
        if (_newone) *_newone = false;
        if (isruning())return 0;
        delete m_runableref->target;
        m_runableref->target = detail::transform(op);
        m_runableref->isended = false;
        m_runableref->isjoined = outside_join_;
        m_runableref->AddRef();
        // int ret = thrd_create(&m_runableref->tid, (thrd_start_t)&start_routine, (void*)m_runableref);
        int ret = ThreadUtil::createThread(m_runableref->m_th, &start_routine, (void*)m_runableref);
        ASSERT(thrd_success == ret);
        if (_newone) *_newone = true;
        if (thrd_success != ret) {
            m_runableref->isended = true;
            m_runableref->RemoveRef(lock);
        }
        return ret;
    }
    // Start the target after `after` ms (cancelable via cancel_after()).
    int start_after(unsigned int after) {
        ScopedSpinLock lock(m_runableref->splock);
        if (isruning())return 0;
        m_runableref->condtime.cancelAnyWayNotify();
        m_runableref->iscanceldelaystart = false;
        m_runableref->isended = false;
        m_runableref->isjoined = outside_join_;
        m_runableref->aftertime = after;
        m_runableref->AddRef();
        // int ret = thrd_create(&m_runableref->tid, (thrd_start_t)&start_routine_after, (void*)m_runableref);
        int ret = ThreadUtil::createThread(m_runableref->m_th, &start_routine_after, (void*)m_runableref);
        ASSERT(thrd_success == ret);
        if (thrd_success != ret) {
            m_runableref->isended = true;
            m_runableref->aftertime = UINT_MAX;
            m_runableref->RemoveRef(lock);
        }
        return ret;
    }
    // Abort a pending start_after() delay (no-op if nothing is running).
    void cancel_after() {
        ScopedSpinLock lock(m_runableref->splock);
        if (!isruning()) return;
        m_runableref->iscanceldelaystart = true;
        m_runableref->condtime.notifyAll(true);
    }
    // Run the target every `periodic` ms, first run after `after` ms.
    int start_periodic(unsigned int after, unsigned int periodic) {   // ms
        ScopedSpinLock lock(m_runableref->splock);
        if (isruning()) return 0;
        m_runableref->condtime.cancelAnyWayNotify();
        m_runableref->iscanceldelaystart = false;
        m_runableref->isended = false;
        m_runableref->isjoined = outside_join_;
        m_runableref->aftertime = after;
        m_runableref->periodictime = periodic;
        m_runableref->AddRef();
        // int ret = thrd_create(&m_runableref->tid, (thrd_start_t)&start_routine_periodic, (void*)m_runableref);
        int ret = ThreadUtil::createThread(m_runableref->m_th, &start_routine_periodic, (void*)m_runableref);
        ASSERT(thrd_success == ret);
        if (thrd_success != ret) {
            m_runableref->isended = true;
            m_runableref->aftertime = UINT_MAX;
            m_runableref->periodictime = UINT_MAX;
            m_runableref->RemoveRef(lock);
        }
        return ret;
    }
    // Stop a periodic run after the current iteration (shares the cancel flag
    // with cancel_after()).
    void cancel_periodic() {
        ScopedSpinLock lock(m_runableref->splock);
        if (!isruning()) return;
        m_runableref->iscanceldelaystart = true;
        m_runableref->condtime.notifyAll(true);
    }
    // Wait for the running thread to finish. Must not already be joined;
    // the lock is dropped before the blocking OS join.
    void join() const {
        ScopedSpinLock lock(m_runableref->splock);
        ASSERT(!m_runableref->isjoined);
        if (isruning()) {
            m_runableref->isjoined = true;
            lock.unlock();
            ThreadUtil::join(m_runableref->m_th);
            // thrd_join(m_runableref->tid, 0);
        }
    }
    int kill(int /*sig*/) const;
    //    {
    //        ASSERT(false);
    //    }
    thread_tid tid() const {
        return ThreadUtil::getThreadId(m_runableref->m_th);
        // return m_runableref->tid._Id;
    }
    bool isruning() const {
        return !m_runableref->isended;
    }

 private:
    // Runs at thread entry: marks the shared state as "in thread".
    // NOTE(review): killsig is 0 here, so the early return below is always
    // taken and the trailing lock.unlock() is dead code (the ScopedSpinLock
    // destructor releases the lock anyway) — confirm intent.
    static void init(void* arg) {
        volatile RunnableReference* runableref = static_cast<RunnableReference*>(arg);
        ScopedSpinLock lock((const_cast<RunnableReference*>(runableref))->splock);
        ASSERT(runableref != 0);
        ASSERT(runableref->target != 0);
        ASSERT(!runableref->isinthread);
        runableref->isinthread = true;
        if (!(0 < runableref->killsig && runableref->killsig <= 32))
            return;
        lock.unlock();
    }
    // Runs at thread exit: resets flags, auto-detaches unless someone joins,
    // and drops the thread's reference on the shared state.
    static void cleanup(void* arg) {
        // cleanup tls alloctions for openssl.
        OPENSSL_thread_stop();
        volatile RunnableReference* runableref = static_cast<RunnableReference*>(arg);
        ScopedSpinLock lock((const_cast<RunnableReference*>(runableref))->splock);
        ASSERT(runableref != 0);
        ASSERT(runableref->target != 0);
        ASSERT(runableref->isinthread);
        runableref->isinthread = false;
        runableref->killsig = 0;
        runableref->isended = true;
        if (!runableref->isjoined) {
            // thrd_detach(const_cast<RunnableReference*>(runableref)->tid);
            ThreadUtil::detach(const_cast<RunnableReference*>(runableref)->m_th);
        }
        runableref->isjoined = false;
        (const_cast<RunnableReference*>(runableref))->RemoveRef(lock);
    }
    // Entry point for start(): run the target once.
    static void* start_routine(void* arg) {
        init(arg);
        volatile RunnableReference* runableref = static_cast<RunnableReference*>(arg);
        runableref->target->run();
        cleanup(arg);
        return 0;
    }
    // Entry point for start_after(): wait aftertime ms (interruptible), then
    // run once unless canceled.
    static void* start_routine_after(void* arg) {
        init(arg);
        volatile RunnableReference* runableref = static_cast<RunnableReference*>(arg);
        if (!runableref->iscanceldelaystart) {
            (const_cast<RunnableReference*>(runableref))->condtime.wait(runableref->aftertime);
            if (!runableref->iscanceldelaystart)
                runableref->target->run();
        }
        cleanup(arg);
        return 0;
    }
    // Entry point for start_periodic(): initial delay, then run/wait loop
    // until canceled.
    static void* start_routine_periodic(void* arg) {
        init(arg);
        volatile RunnableReference* runableref = static_cast<RunnableReference*>(arg);
        if (!runableref->iscanceldelaystart) {
            (const_cast<RunnableReference*>(runableref))->condtime.wait(runableref->aftertime);
            while (!runableref->iscanceldelaystart) {
                runableref->target->run();
                if (!runableref->iscanceldelaystart)
                    (const_cast<RunnableReference*>(runableref))->condtime.wait(runableref->periodictime);
            }
        }
        cleanup(arg);
        return 0;
    }

 private:
    Thread(const Thread&);
    Thread& operator=(const Thread&);

 private:
    RunnableReference* m_runableref;  // shared, ref-counted; never NULL after ctor
    bool outside_join_;               // creator promises to call join()
};
}
}
// inline bool operator==(const thread_t& lhs, const thread_t& rhs)
//{
// return lhs== rhs;
//}
#endif /* THREAD_H_ */
|
10088/mars
|
mars/stn/src/socket_operator.h
|
<filename>mars/stn/src/socket_operator.h
//
// Created by <NAME> on 2020/7/30.
//
#ifndef MMNET_SOCKET_OPERATER_H
#define MMNET_SOCKET_OPERATER_H
#include "comm/autobuffer.h"
#include "comm/socket/unix_socket.h"
#include "comm/socket/socket_address.h"
#include "comm/comm_data.h"
#include <memory>
#include <vector>
#include <string>
namespace mars {
namespace stn {
typedef int(*SocketCloseFunc)(SOCKET);
typedef SOCKET(*CreateStreamFunc)(SOCKET);
typedef bool (*IsSubStreamFunc)(SOCKET);
// Per-connection metrics collected by a SocketOperator implementation.
struct SocketProfile{
    uint32_t rtt = 0;         // measured round-trip time (presumably ms — confirm)
    int index = 0;            // index of the address actually connected (assumption — confirm with implementations)
    int errorCode = 0;        // last socket error code, 0 when no error
    uint32_t totalCost = 0;   // total connect cost (presumably ms — confirm)
    int is0rtt = 0;           // nonzero if the connection used 0-RTT handshake data — confirm
};
// Interrupt hook: lets another thread abort a blocking socket operation.
class OPBreaker {
 public:
    virtual ~OPBreaker() {}
    virtual bool IsBreak() = 0;  // has a break been requested?
    virtual bool Break() = 0;    // request the in-flight operation to abort
};
// Abstract transport used by the STN layer: connect/send/recv/close over a
// concrete socket implementation. Implementations fill profile_ with
// per-connection metrics and supply breaker_ to allow aborting blocking calls.
class SocketOperator {
 public:
    SocketOperator(){}
    virtual ~SocketOperator() {}
    // Connect to the first reachable address in _vecaddr, optionally through
    // a proxy (with credentials). Returns the connected socket on success.
    virtual SOCKET Connect(const std::vector<socket_address>& _vecaddr, mars::comm::ProxyType _proxy_type = mars::comm::kProxyNone, const socket_address* _proxy_addr = NULL,
                           const std::string& _proxy_username = "", const std::string& _proxy_pwd = "") = 0;
    // Send/receive with timeout in ms (-1 presumably means "no timeout" — confirm);
    // _errcode receives the socket error on failure.
    virtual int Send(SOCKET _sock, const void* _buffer, size_t _len, int &_errcode, int _timeout = -1) = 0;
    virtual int Recv(SOCKET _sock, AutoBuffer& _buffer, size_t _max_size, int &_errcode, int _timeout, bool _wait_full_size=false) = 0;
    virtual void Close(SOCKET _sock) = 0;
    // Function-pointer accessors so callers can close/create streams without
    // holding the operator itself.
    virtual SocketCloseFunc GetCloseFunction() const = 0;
    virtual CreateStreamFunc GetCreateStreamFunc() const = 0;
    virtual IsSubStreamFunc GetIsSubStreamFunc() const = 0;
    virtual std::string ErrorDesc(int _errcode) = 0;
    virtual const SocketProfile& Profile() { return profile_; }
    virtual OPBreaker& Breaker() { return *breaker_.get(); }
    virtual std::string Identify(SOCKET _sock) const = 0;
    virtual int Protocol() const = 0;   //return Task::kTransportProtocol xxx
    // Create a sub-stream on a multiplexed connection (e.g. QUIC) — confirm semantics.
    virtual SOCKET CreateStream(SOCKET _sock) = 0;
    virtual void SetIpConnectionTimeout(uint32_t _v4_timeout, uint32_t _v6_timeout) {}

 protected:
    SocketProfile profile_;              // metrics for the last connection
    std::unique_ptr<OPBreaker> breaker_; // must be set by the implementation before Breaker() is called
};
}
}
#endif //MMNET_SOCKET_OPERATER_H
|
10088/mars
|
mars/openssl/include/openssl/opensslconf-windows.h
|
/*
* Building OpenSSL for the different architectures of all iOS and tvOS devices requires different settings.
* In order to be able to use assembly code on all devices, the choice was made to keep optimal settings for all
* devices and use this intermediate header file to use the proper opensslconf.h file for each architecture.
* See also https://github.com/x2on/OpenSSL-for-iPhone/issues/126 and referenced pull requests
*/
// see https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros?view=msvc-160
#if defined(_WIN64)
#include <openssl/opensslconf_windows-x86_64.h>
#else
#include <openssl/opensslconf_windows-x86.h>
#endif
|
10088/mars
|
mars/app/app.h
|
// Tencent is pleased to support the open source community by making Mars available.
// Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the MIT License (the "License"); you may not use this file except in
// compliance with the License. You may obtain a copy of the License at
// http://opensource.org/licenses/MIT
// Unless required by applicable law or agreed to in writing, software distributed under the License is
// distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions and
// limitations under the License.
/*
* appcomm.h
*
* Created on: 2016年3月3日
* Author: caoshaokun
*/
#ifndef APPCOMM_INTERFACE_APPCOMM_H_
#define APPCOMM_INTERFACE_APPCOMM_H_
#include <string>
#include <stdint.h>
#include <memory>
#include "mars/comm/comm_data.h"
namespace mars {
namespace app {
// Snapshot of the currently signed-in account.
struct AccountInfo {
    AccountInfo() = default;

    int64_t     uin = 0;           // numeric account id; 0 when unset
    std::string username;          // empty when nobody is logged in
    bool        is_logoned = false;
};
// Basic description of the device the app runs on (values supplied by the host app).
struct DeviceInfo {
    std::string devicename;  // device name string (exact format decided by the GetDeviceInfo provider)
    std::string devicetype;  // device type/model string — confirm expected values with callers
};
extern mars::comm::ProxyInfo GetProxyInfo(const std::string& _host);
extern std::string GetAppFilePath();
extern AccountInfo GetAccountInfo();
extern std::string GetUserName();
extern std::string GetRecentUserName();
extern unsigned int GetClientVersion();
extern DeviceInfo GetDeviceInfo();
extern double GetOsVersion();
#ifdef NATIVE_CALLBACK
// Native (C++) replacement for the app-layer callbacks declared above: when
// NATIVE_CALLBACK is enabled, the host app subclasses this instead of
// implementing the platform glue. Every default implementation is an inert
// stub returning an empty/zero value.
class AppLogicNativeCallback {
 public:
    AppLogicNativeCallback() = default;
    virtual ~AppLogicNativeCallback() = default;
    virtual mars::comm::ProxyInfo GetProxyInfo(const std::string& _host) {return mars::comm::ProxyInfo();}
    virtual std::string GetAppFilePath() {return "";}
    virtual AccountInfo GetAccountInfo() {return AccountInfo();}
    virtual std::string GetUserName() {return "";}
    virtual std::string GetRecentUserName() {return "";}
    virtual unsigned int GetClientVersion() {return 0;}
    virtual DeviceInfo GetDeviceInfo() {return DeviceInfo();}
};
extern void SetAppLogicNativeCallback(std::shared_ptr<AppLogicNativeCallback> _cb);
#endif //NATIVE_CALLBACK
}}
#endif /* APPCOMM_INTERFACE_APPCOMM_H_ */
|
10088/mars
|
mars/comm/unique_resource.h
|
#pragma once
#include <utility>
#include "mars/comm/xlogger/xlogger.h"
#include "mars/comm/socket/unix_socket.h"
#ifndef NDEBUG
#include "mars/comm/fd_info.h"
#endif
// Traits consumed by UniqueResource below: each supplies the sentinel
// "invalid" value and the matching release function for one resource kind.
// The debug asserts sanity-check that the descriptor really is of the
// expected kind before closing it.
namespace internal{
struct SocketTraits{
    static SOCKET InvalidValue(){
        return INVALID_SOCKET;
    }
    static void Free(SOCKET sock){
#ifndef NDEBUG
        xassert2(mars::comm::FDInfo::QueryFD(sock).IsSocket());
#endif
        socket_close(sock);
    }
};
struct FDTraits{
    static int InvalidValue(){
        return -1;
    }
    static void Free(int fd){
#ifndef NDEBUG
        xassert2(mars::comm::FDInfo::QueryFD(fd).IsFile());
#endif
        close(fd);
    }
};
struct FileTraits{
    static FILE* InvalidValue(){
        return nullptr;
    }
    static void Free(FILE* fp){
#ifndef NDEBUG
        xassert2(fp != nullptr);
#endif
        fclose(fp);
    }
};
};  // NOTE(review): stray ';' after namespace — harmless, but unconventional.
// Move-only RAII owner for a handle-like resource (socket, fd, FILE*), in the
// spirit of unique_ptr but for types with a sentinel "invalid" value instead
// of null. Traits supplies InvalidValue() and Free() (see internal:: above).
template<typename T, typename Traits>
class UniqueResource{
public:
    // Holds the raw value; derives from Traits so an empty traits class adds
    // no size (empty-base optimization).
    struct Data : public Traits{
        explicit Data(const T& t):v(t){
            xdebug2_if(v != Traits::InvalidValue(), TSF"%_ resource %_ acquired.", this, v);
        }
        T v;
    }data_;
public:
    typedef T element_type;
    typedef Traits traits_type;
    UniqueResource():data_(traits_type::InvalidValue()){}
    explicit UniqueResource(const element_type& v):data_(v){}
    // Move: steal rhs's value, leaving rhs holding the invalid sentinel.
    UniqueResource(UniqueResource&& rhs):data_(rhs.release()){}
    UniqueResource& operator=(UniqueResource&& rhs){
        // rhs.release() runs first, so even self-move leaves a valid state.
        reset(rhs.release());
        return *this;
    }
    ~UniqueResource(){
        _Free();
    }
    const element_type& get() const{
        return data_.v;
    }
    element_type invalid_value() const{
        return traits_type::InvalidValue();
    }
    // Free the current resource (if any) and take ownership of v.
    // Resetting to the value already held is a programming error.
    void reset(const element_type& v = traits_type::InvalidValue()){
        if (data_.v != traits_type::InvalidValue() && data_.v == v){
            xassert2(false, "can't reset self!!!!");
            return;
        }
        _Free();
        data_.v = v;
    }
    // Give up ownership without freeing; returns the raw value.
    element_type release(){
        element_type old = data_.v;
        data_.v = traits_type::InvalidValue();
        return old;
    }
    bool is_valid() const{
        return data_.v != traits_type::InvalidValue();
    }
    void swap(UniqueResource& rhs){
        std::swap(static_cast<Traits&>(data_), static_cast<Traits&>(rhs.data_));
        std::swap(data_.v, rhs.data_.v);
    }
    bool operator==(const UniqueResource& rhs) const{
        return data_.v == rhs.data_.v;
    }
    bool operator!=(const UniqueResource& rhs) const{
        return data_.v != rhs.data_.v;
    }
private:
    // Release the held resource via Traits::Free and mark this as empty.
    void _Free(){
        if (data_.v != traits_type::InvalidValue()){
            data_.Free(data_.v);
            xdebug2(TSF"%_ resource %_ released.", this, data_.v);
            data_.v = traits_type::InvalidValue();
        }
    }
private:
    UniqueResource(const UniqueResource& rhs) = delete;
    UniqueResource& operator=(const UniqueResource& rhs) = delete;
};
using UniqueSocketResource = UniqueResource<SOCKET, internal::SocketTraits>;
using UniqueFDResource = UniqueResource<int, internal::FDTraits>;
using UniqueFileResource = UniqueResource<FILE*, internal::FileTraits>;
|
10088/mars
|
mars/stn/jni/stn_logic_C2Java.h
|
<reponame>10088/mars
// Tencent is pleased to support the open source community by making Mars available.
// Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
// Licensed under the MIT License (the "License"); you may not use this file except in
// compliance with the License. You may obtain a copy of the License at
// http://opensource.org/licenses/MIT
// Unless required by applicable law or agreed to in writing, software distributed under the License is
// distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions and
// limitations under the License.
#ifndef STN_JNI_STN_LOGIC_C2JAVA_H_
#define STN_JNI_STN_LOGIC_C2JAVA_H_
#include <vector>
#include <memory>
#include "mars/comm/autobuffer.h"
#include "mars/stn/task_profile.h"
namespace mars {
namespace stn {
int C2Java_OnTaskEnd(uint32_t _taskid,
void* const _user_context,
const std::string& _user_id,
int _error_type,
int _error_code,
const ConnectProfile& _profile);
void C2Java_OnPush(const std::string& _channel_id,
uint32_t _cmdid,
uint32_t _taskid,
const AutoBuffer& _body,
const AutoBuffer& _extend);
std::vector<std::string> C2Java_OnNewDns(const std::string& _host);
bool C2Java_Req2Buf(uint32_t _taskid,
void* const _user_context,
const std::string& _user_id,
AutoBuffer& _outbuffer,
AutoBuffer& _extend,
int& _error_code,
const int _channel_select,
const std::string& _host);
int C2Java_Buf2Resp(uint32_t _taskid,
void* const _user_context,
const std::string& _user_id,
const AutoBuffer& _inbuffer,
const AutoBuffer& _extend,
int& _error_code,
const int _channel_select);
bool C2Java_MakesureAuthed(const std::string& _host, const std::string& _user_id);
int C2Java_GetLonglinkIdentifyCheckBuffer(const std::string& _channel_id, AutoBuffer& _identify_buffer, AutoBuffer& _buffer_hash, int32_t& _cmdid);
bool C2Java_OnLonglinkIdentifyResponse(const std::string& _channel_id, const AutoBuffer& _response_buffer, const AutoBuffer& _identify_buffer_hash);
void C2Java_TrafficData(ssize_t _send, ssize_t _recv);
void C2Java_ReportConnectStatus(int _all_connstatus, int _longlink_connstatus);
void C2Java_RequestSync();
void C2Java_RequestNetCheckShortLinkHosts(std::vector<std::string>& _hostlist);
void C2Java_ReportTaskProfile(const TaskProfile& _task_profile);
#ifdef NATIVE_CALLBACK
// Native (C++) replacement for the C2Java_* JNI bridge above: when
// NATIVE_CALLBACK is enabled, clients subclass this instead of implementing
// the Java-side callbacks. Every default implementation is an inert stub
// (returns failure / empty / does nothing).
class StnNativeCallback {
 public:
    StnNativeCallback() = default;
    virtual ~StnNativeCallback() = default;
    virtual int OnTaskEnd(uint32_t _taskid,
                          void* const _user_context,
                          const std::string& _user_id,
                          int _error_type,
                          int _error_code) {return -1;}
    virtual void OnPush(const std::string& _channel_id,
                        uint32_t _cmdid,
                        uint32_t _taskid,
                        const AutoBuffer& _body,
                        const AutoBuffer& _extend) {}
    virtual std::vector<std::string> OnNewDns(const std::string& _host) {return std::vector<std::string>();}
    virtual bool Req2Buf(uint32_t _taskid,
                         void* const _user_context,
                         const std::string& _user_id,
                         AutoBuffer& _outbuffer,
                         AutoBuffer& _extend,
                         int& _error_code,
                         const int _channel_select,
                         const std::string& _host) {return false;}
    virtual int Buf2Resp(uint32_t _taskid,
                         void* const _user_context,
                         const std::string& _user_id,
                         const AutoBuffer& _inbuffer,
                         const AutoBuffer& _extend,
                         int& _error_code,
                         const int _channel_select) {return -1;}
    virtual bool MakesureAuthed(const std::string& _host, const std::string& _user_id) {return false;}
    virtual int GetLonglinkIdentifyCheckBuffer(const std::string& _channel_id, AutoBuffer& _identify_buffer, AutoBuffer& _buffer_hash, int32_t& _cmdid) {return -1;}
    virtual bool OnLonglinkIdentifyResponse(const std::string& _channel_id, const AutoBuffer& _response_buffer, const AutoBuffer& _identify_buffer_hash) {return false;}
    virtual void TrafficData(ssize_t _send, ssize_t _recv) {}
    virtual void ReportConnectStatus(int _all_connstatus, int _longlink_connstatus) {}
    virtual void RequestSync() {}
    virtual void RequestNetCheckShortLinkHosts(std::vector<std::string>& _hostlist) {}
    virtual void ReportTaskProfile(const TaskProfile& _task_profile) {}
};
extern void SetStnNativeCallback(std::shared_ptr<StnNativeCallback> _cb) ;
#endif
}
}
#endif //STN_JNI_STN_LOGIC_C2JAVA_H_
|
10088/mars
|
mars/comm/verinfo.h
|
#ifndef Mars_verinfo_h
#define Mars_verinfo_h
#define MARS_REVISION "1cfaf5c2"
#define MARS_PATH "master"
#define MARS_URL ""
#define MARS_BUILD_TIME "2022-05-27 15:31:05"
#define MARS_TAG ""
#endif
|
humeniuc/vim9-nox11
|
native/app/vim9-nox11.c
|
#include "json_msg_handler.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <uv.h>
/* libuv alloc callback: hands back a single static buffer, zeroed on each
 * call. NOTE(review): a static buffer means only one read can be in flight
 * at a time — fine for this single-threaded, one-connection server, but the
 * callback is not reentrant. */
static void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
    (void)handle;
    (void)suggested_size;
    static char buf_base[MAX_VIM_INPUT];
    memset(buf_base, 0, MAX_VIM_INPUT);
    *buf = uv_buf_init(buf_base, MAX_VIM_INPUT);
}
/* libuv read callback: splits the received bytes on '\n' and passes each
 * complete line to handle_msg(), then closes the stream (one read per
 * connection, matching the original single-shot design). */
static void read_pipe_in(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
    static char line[MAX_VIM_INPUT] = {0};
    memset(line, 0, MAX_VIM_INPUT);
    if (nread <= 0) {
        /* EOF (UV_EOF) or read error: nothing to parse, just tear down.
         * The original ignored nread and ran strlen() over the buffer,
         * which read stale/unterminated data on error. */
        uv_close((uv_handle_t *)stream, NULL);
        return;
    }
    int counter = 0;
    /* Iterate over exactly the bytes libuv delivered (nread), not
     * strlen(buf->base): the buffer is not guaranteed NUL-terminated. */
    for (ssize_t i = 0; i < nread; i++) {
        if (buf->base[i] == '\n') {
            handle_msg(line);
            memset(line, 0, MAX_VIM_INPUT);
            counter = 0;
        } else if (counter < MAX_VIM_INPUT - 1) {
            /* Bound the copy so an overlong line cannot overflow `line`
             * (excess bytes are dropped; the terminating NUL is preserved). */
            line[counter] = buf->base[i];
            counter++;
        }
    }
    uv_close((uv_handle_t *)stream, NULL);
}
/* libuv connection callback: accepts the incoming pipe client and starts
 * reading from it. NOTE(review): `stream` is static, so only one client can
 * be serviced at a time; a second concurrent connection would reuse (and
 * clobber) the same handle. */
static void on_connection(uv_stream_t *server, int status) {
    (void)status;
    static uv_pipe_t stream = {0};
    int ret = uv_pipe_init(server->loop, &stream, 0);
    stream.data = server;  /* back-pointer to the listening server */
    if (ret == 0) {
        ret = uv_accept(server, (uv_stream_t *)&stream);
    }
    if (ret == 0) {
        ret = uv_read_start((uv_stream_t *)&stream, alloc_buffer, read_pipe_in);
    }
}
/* Signal callback: a SIGINT stops the event loop so main() can clean up
 * (unlink the socket) before exiting; other signals are ignored. */
static void on_signal(uv_signal_t *handle, int signum) {
    if (signum != SIGINT) {
        return;
    }
    uv_stop(handle->loop);
}
/* Entry point: argv[1] is the path of the Unix pipe/socket to listen on.
 * Sets up a libuv pipe server whose incoming lines are forwarded to
 * handle_msg(); runs until SIGINT, then removes the socket file.
 * Returns 0 on success, -1 on bad arguments, -2 if listening fails. */
int main(int argc, char *argv[]) {
    static uv_loop_t loop;
    static uv_signal_t sig_handle;
    static uv_pipe_t pipe_handle;
    int ret;
    static char sock_path[PATH_MAX] = {0};
    if (argc == 2) {
        /* Bounds-checked copy: a blind strcpy() would overflow sock_path
         * for an argument longer than PATH_MAX-1 bytes. */
        if (strlen(argv[1]) >= sizeof(sock_path)) {
            fprintf(stderr, "%s %i socket path too long\n", __func__, __LINE__);
            return -1;
        }
        strcpy(sock_path, argv[1]);
    } else {
        fprintf(stderr,"%s %i %i\n", __func__, __LINE__, argc);
        return -1;
    }
    uv_loop_init(&loop);
    ret = uv_pipe_init(&loop, &pipe_handle, 0);
    int b_ret = uv_pipe_bind(&pipe_handle, sock_path);
    ret = uv_listen((uv_stream_t *)&pipe_handle, 0, on_connection);
    if (ret) {
        fprintf(stderr, "%s %i %i\n", __func__, __LINE__, ret);
        /* Only remove the socket file if we actually created it (bind ok). */
        if (b_ret == 0) {
            unlink(sock_path);
        }
        return -2;
    }
    uv_signal_init(&loop, &sig_handle);
    uv_signal_start(&sig_handle, on_signal, SIGINT);
    uv_run(&loop, UV_RUN_DEFAULT);
    unlink(sock_path);
    return 0;
}
|
humeniuc/vim9-nox11
|
native/app/json_msg_handler.c
|
<reponame>humeniuc/vim9-nox11<filename>native/app/json_msg_handler.c
#include "json_msg_handler.h"
#include <string.h>
/* Emit a remote_tab command for the given file path as a JSON object on
 * stdout. Backslashes and double quotes in the path are escaped so the
 * output stays valid JSON (the original printf interpolated the raw path,
 * producing broken JSON for paths containing '"' or '\'). */
void handle_msg(const char *msg_str) {
    fputs("{\"cmd\": \"remote_tab\", \"file_path\": \"", stdout);
    for (const char *p = msg_str; *p != '\0'; p++) {
        if (*p == '"' || *p == '\\') {
            fputc('\\', stdout);
        }
        fputc(*p, stdout);
    }
    fputs("\"}", stdout);
    fflush(stdout);
}
|
humeniuc/vim9-nox11
|
native/app/json_msg_handler.h
|
<gh_stars>0
#ifndef JSON_MSG_HANDLER_H
#define JSON_MSG_HANDLER_H
#include <uv.h>
#define MAX_VIM_INPUT (PATH_MAX + 10)
#define MAX_REAL_RESPONSE_SIZE (PATH_MAX + 100)
#ifdef __cplusplus
extern "C" {
#endif
void handle_msg(const char *msg_str);
#ifdef __cplusplus
}
#endif
#endif /* JSON_MSG_HANDLER_H */
|
VP-Projects/adc-reader
|
include/AdcReader.h
|
/**
* @file AdcReader.h
* @author <NAME> (<EMAIL>)
* @brief Declaration of the AdcReader class.
* @version 0.1.0
* @date 17-12-2020
*
* Copyright (c) 2020 <NAME>
*
*/
#ifndef AdcReader_h
#define AdcReader_h
#include <Arduino.h>
/**
* @brief Class to represent a ADC reader on Arduino. Includes functionality for
* convertion to desired range, check for invalid values and optional voltage
* calibration (values must be provided by the user).
*/
class AdcReader
{
/////////////////////////////// PUBLIC ////////////////////////////////////////
public:
  // Supported ADC resolutions, expressed as the number of counts
  // (2^8, 2^10, 2^12).
  enum AdcRange
  {
    ADC_8_BITS = 256,
    ADC_10_BITS = 1024,
    ADC_12_BITS = 4096
  };
  // Number of user-supplied voltage-calibration parameters.
  enum
  {
    CAL_N_PARAMS = 4
  };
  //============================= LIFECYCLE ====================================
  AdcReader();
  //============================= OPERATIONS ====================================
  // Sample the configured pin and update the cached value/voltage.
  // Setters return false on invalid arguments (assumption — confirm with the
  // implementation).
  bool ReadValue();
  bool SetAdcRange(AdcRange adcRange);
  bool SetAnalogPin(uint8_t analogPin);
  bool SetCalParams(float (&calParams) [CAL_N_PARAMS]);
  bool SetInOutRange(float inMin, float inMax, float outMin, float outMax);
  bool SetVcc(float vcc);
  bool UseAdcCal(bool useAdcCal);
  // ============================= ACCESS ====================================
  float GetValue() const;    // last reading converted to the output range
  float GetVoltage() const;  // last reading as a voltage
  //============================= INQUIRY ====================================
/////////////////////////////// PRIVATE ///////////////////////////////////////
private:
  uint8_t _analogPin;   // Arduino analog input pin to sample
  AdcRange _adcRange;   // ADC resolution in counts (see AdcRange)
  float _inMin;         // input range (raw domain) mapped to...
  float _inMax;
  float _outMin;        // ...this output range
  float _outMax;
  bool _useAdcCal;      // apply the calibration parameters when true
  float _val;           // cached converted value
  float _vcc;           // supply voltage used for the count->volt conversion
  float _voltage;       // cached voltage of the last reading
  float _calParams[CAL_N_PARAMS];  // user-supplied calibration coefficients
  bool Init();
  bool ReadVoltage();
};
#endif // AdcReader_h
|
Kamigami55/time-gloves
|
timegloves-openstm32-project/inc/gpio.h
|
/**
* @author <NAME>
* @email <EMAIL>
* @website http://stm32f4-discovery.com
* @link http://stm32f4-discovery.com/2015/07/hal-library-1-5-gpio-library-for-stm32fxxx/
* @version v1.0
* @ide Keil uVision
* @license MIT
* @brief GPIO Library for STM32F4xx and STM32F7xx devices
*
\verbatim
----------------------------------------------------------------------
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------
\endverbatim
*/
#ifndef GPIO_H_
#define GPIO_H_
#include "stm32l476xx.h"
/* Single-pin helpers. NOTE: GPIO_Pin is a pin NUMBER (0..15), not a bit
 * mask — each macro builds the (1 << pin) mask itself. SetPinLow uses the
 * bit-reset register (BRR), SetPinHigh the bit-set half of BSRR. */
#define TM_GPIO_SetPinLow(GPIOx, GPIO_Pin) ((GPIOx)->BRR = (1<<(uint32_t)(GPIO_Pin)))
#define TM_GPIO_SetPinHigh(GPIOx, GPIO_Pin) ((GPIOx)->BSRR = (1<<(uint32_t)(GPIO_Pin)))
#define TM_GPIO_GetInputPinValue(GPIOx, GPIO_Pin) (((GPIOx)->IDR & (1<<(uint32_t)(GPIO_Pin))) == 0 ? 0 : 1)
/*********************GPIO Define*********************/
/* Register layout kept for reference only; the real GPIO_TypeDef comes
 * from stm32l476xx.h. */
//typedef struct
//{
// __IO uint32_t MODER; /*!< GPIO port mode register, Address offset: 0x00 */
// __IO uint32_t OTYPER; /*!< GPIO port output type register, Address offset: 0x04 */
// __IO uint32_t OSPEEDR; /*!< GPIO port output speed register, Address offset: 0x08 */
// __IO uint32_t PUPDR; /*!< GPIO port pull-up/pull-down register, Address offset: 0x0C */
// __IO uint32_t IDR; /*!< GPIO port input data register, Address offset: 0x10 */
// __IO uint32_t ODR; /*!< GPIO port output data register, Address offset: 0x14 */
// __IO uint32_t BSRR; /*!< GPIO port bit set/reset register, Address offset: 0x18 */
// __IO uint32_t LCKR; /*!< GPIO port configuration lock register, Address offset: 0x1C */
// __IO uint32_t AFR[2]; /*!< GPIO alternate function registers, Address offset: 0x20-0x24 */
// __IO uint32_t BRR; /*!< GPIO Bit Reset register, Address offset: 0x28 */
// __IO uint32_t ASCR; /*!< GPIO analog switch control register, Address offset: 0x2C */
//} GPIO_TypeDef;
/*********************GPIO Enum*********************/
/* Values below are the 2-bit field encodings written into MODER. */
typedef enum {
	TM_GPIO_Mode_IN = 0x00, /*!< GPIO Pin as General Purpose Input */
	TM_GPIO_Mode_OUT = 0x01, /*!< GPIO Pin as General Purpose Output */
	TM_GPIO_Mode_AF = 0x02, /*!< GPIO Pin as Alternate Function */
	TM_GPIO_Mode_AN = 0x03, /*!< GPIO Pin as Analog input/output */
} TM_GPIO_Mode_t;
/* OTYPER encoding. */
typedef enum {
	TM_GPIO_OType_PP = 0x00, /*!< GPIO Output Type Push-Pull */
	TM_GPIO_OType_OD = 0x01 /*!< GPIO Output Type Open-Drain */
} TM_GPIO_OType_t;
/* OSPEEDR encoding. */
typedef enum {
	TM_GPIO_Speed_Low = 0x00, /*!< GPIO Speed Low */
	TM_GPIO_Speed_Medium = 0x01, /*!< GPIO Speed Medium */
	TM_GPIO_Speed_Fast = 0x02, /*!< GPIO Speed Fast, not available on STM32F0xx devices */
	TM_GPIO_Speed_High = 0x03 /*!< GPIO Speed High */
} TM_GPIO_Speed_t;
/* PUPDR encoding. */
typedef enum {
	TM_GPIO_PuPd_NOPULL = 0x00, /*!< No pull resistor */
	TM_GPIO_PuPd_UP = 0x01, /*!< Pull up resistor enabled */
	TM_GPIO_PuPd_DOWN = 0x02 /*!< Pull down resistor enabled */
} TM_GPIO_PuPd_t;
/* Change only the pull resistor of an already-configured pin. */
void TM_GPIO_SetPullResistor(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin, TM_GPIO_PuPd_t GPIO_PuPd);
/* Switch an already-configured pin to input (00) mode. */
void TM_GPIO_SetPinAsInput(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin);
/* Switch an already-configured pin to output (01) mode. */
void TM_GPIO_SetPinAsOutput(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin);
/* Full pin setup: port clock + mode/otype/pull/speed. See gpio.c. */
void TM_GPIO_Init(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin, TM_GPIO_Mode_t GPIO_Mode, TM_GPIO_OType_t GPIO_OType, TM_GPIO_PuPd_t GPIO_PuPd, TM_GPIO_Speed_t GPIO_Speed);
#endif /* GPIO_H_ */
|
Kamigami55/time-gloves
|
timegloves-openstm32-project/src/main.c
|
/**
******************************************************************************
* @file main.c
* @author Ac6
* @version V1.0
* @date 01-December-2013
* @brief Default main function.
******************************************************************************
*/
#include "stm32l4xx.h"
#include "stm32l4xx_nucleo.h"
#include "gpio.h"
#include "common.h"
#include "lcd.h"
#include "rotary.h"
/* Strobe LED output (PC1) and a currently unused button input (PA1). */
#define LIGHT_PORT GPIOC
#define LIGHT_PIN 1
#define BUTTON_PORT GPIOA
#define BUTTON_PIN 1
int newdark = 20000; // dark period between flashes, in us (updated from the pot)
uint32_t oldAdcVal = 0; // last ADC reading that triggered a display update
uint32_t adcVal; // most recent ADC reading
/* Program SysTick to fire roughly every `us` microseconds.  The reload is
 * us/2 because, per the inline comment, the counter ticks at ~500 kHz
 * (4 MHz core with the /8 SysTick prescaler), i.e. one tick per 2 us. */
void SystemClock_Config(int us){
	// Enable SysTick Interrupt
	//SET_REG(SysTick->CTRL, 1, 0); // Disable SysTick
	SET_REG(SysTick->LOAD, 0x00FFFFFF, us/2) // Set reload value (4M freq / 8tic per count * us / 1M)
	//SET_REG(SysTick->VAL, 0x00FFFFFF, 0) // Clear counter value
	// NOTE(review): 11 is DECIMAL (0b1011), which sets ENABLE, TICKINT and a
	// reserved bit — if binary 011 (ENABLE|TICKINT) was intended this should
	// be 3. Confirm against the Cortex-M SysTick CTRL layout.
	SET_REG(SysTick->CTRL, 11, 11); // Enable SysTick
}
// note: I've comment out the original version of SysTick_Handler() in /inc/stm32l4xx_it.c
/* SysTick ISR: emit one strobe flash — LIGHT us on, then off.  The dark
 * period is the remainder of the SysTick interval set by
 * SystemClock_Config(); the busy-wait keeps the pulse width fixed. */
void SysTick_Handler(void) {
	TM_GPIO_SetPinHigh(LIGHT_PORT, LIGHT_PIN);
	delay_us(LIGHT);
	TM_GPIO_SetPinLow(LIGHT_PORT, LIGHT_PIN);
	//delay_us(newdark);
}
/* Configure the strobe LED pin (PC1) as a push-pull output.  The button
 * input setup is kept below, commented out, for reference. */
void LED_GPIO_Init(){
	// open GPIO clock for ports A, B and C
	RCC->AHB2ENR |= (RCC_AHB2ENR_GPIOAEN | RCC_AHB2ENR_GPIOBEN | RCC_AHB2ENR_GPIOCEN);
	// set GPIO
	TM_GPIO_Init(LIGHT_PORT, LIGHT_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_PP, TM_GPIO_PuPd_NOPULL, TM_GPIO_Speed_Fast);
	// // button
	// TM_GPIO_Init(BUTTON_PORT, BUTTON_PIN, TM_GPIO_Mode_IN, TM_GPIO_OType_OD, TM_GPIO_PuPd_DOWN, TM_GPIO_Speed_Fast);
}
/* One-time ADC1 setup: PC0 as analog input, ADC clock, voltage regulator,
 * then continuous conversion of a single channel.  The register sequence is
 * order-sensitive (DEEPPWD must be cleared before enabling the regulator). */
void ConfigureADC() {
	RCC->AHB2ENR |= (RCC_AHB2ENR_GPIOAEN | RCC_AHB2ENR_GPIOBEN | RCC_AHB2ENR_GPIOCEN);
	// ADC Pin: PC0 in analog mode
	TM_GPIO_Init(GPIOC, 0, TM_GPIO_Mode_AN, TM_GPIO_OType_PP, TM_GPIO_PuPd_NOPULL, TM_GPIO_Speed_Medium);
	MODIFY_REG(GPIOC->ASCR,GPIO_ASCR_ASC0,GPIO_ASCR_ASC0);//close the PC0 analog switch (earlier comment said "pa0"; the code targets GPIOC bit 0)
	__HAL_RCC_ADC_CLK_ENABLE();
	// Clear Deep Sleep (prerequisite for enabling the regulator)
	CLEAR_BIT(ADC1->CR, ADC_CR_DEEPPWD);
	// Turn on Voltage Regulator, then wait for its startup time
	SET_BIT(ADC1->CR, ADC_CR_ADVREGEN);
	delay_us(200);
	// Prescaler / clock mode selection
	MODIFY_REG(ADC123_COMMON->CCR, ADC_CCR_PRESC|ADC_CCR_CKMODE, ADC_CCR_CKMODE_0);
	// Continuous conversion mode
	MODIFY_REG(ADC1->CFGR, ADC_CFGR_CONT, ADC_CFGR_CONT);
	// Oversampling disabled
	CLEAR_BIT(ADC1->CFGR2, ADC_CFGR2_ROVSE);
	// Regular sequence of length 1
	CLEAR_BIT(ADC1->SQR1, ADC_SQR1_L);
	MODIFY_REG(ADC1->SQR1, (0xF)|(0x1F<<24)|(0x1F<<18)|(0x1F<<12)|(0x1F<<6), (0x1<<6)); // Channel 1, Rank 1
	MODIFY_REG(ADC1->SMPR1, (0x3FFFFFFFFF), (0x6<<3)); // Channel 1, Sampling Time: 247.5 ADC cycles
}
/* Enable ADC1 and start (continuous) conversions.  ADEN is written
 * repeatedly until the hardware reports ready via ADRDY, then stale
 * conversion/overrun flags are cleared before starting. */
void startADC() {
	while (!(ADC1->ISR & ADC_ISR_ADRDY)) ADC1->CR |= ADC_CR_ADEN; // TURN ON
	ADC1->ISR = ADC_ISR_EOC | ADC_ISR_EOS | ADC_ISR_OVR; // Clear flags
	SET_BIT(ADC1->CR, ADC_CR_ADSTART); // START CONV
}
/*
 * Poll ADC1 for a potentiometer sample and, when the reading moved by more
 * than 10 LSB, derive a new dark period, reprogram SysTick and refresh the
 * LCD with the resulting frequency/period.
 *
 * Fix: the old trigger condition `adcVal < oldAdcVal - 10` operates on
 * unsigned values, so it wrapped around whenever oldAdcVal < 10 (e.g. on
 * the very first call, where oldAdcVal == 0).  The absolute difference is
 * now computed without underflow.
 */
void read_rotary() {
	startADC();
	while (!(ADC1->ISR & ADC_ISR_EOC)); // busy-wait for end of conversion
	adcVal = ADC1->DR;
	// |adcVal - oldAdcVal| computed branch-wise to avoid unsigned wrap-around.
	uint32_t diff = (adcVal > oldAdcVal) ? (adcVal - oldAdcVal) : (oldAdcVal - adcVal);
	if (diff > 10) { // ignore jitter of up to 10 LSB
		// Map the 12-bit reading onto ~5000..105000 us of dark time.
		// (The previous comment claimed 10000..30000, which did not match
		// the arithmetic: 4095 * 100000 / 4096 + 5000 ~= 104975.)
		newdark = adcVal * 100000 / 4096 + 5000;
		SystemClock_Config(newdark);
		DURATION = newdark + LIGHT; // full strobe period in us
		FREQUENCY = 1000000/DURATION; // Hz
		char buff1[16];
		char buff2[16];
		sprintf(buff1, "FREQ:%dHz", FREQUENCY);
		sprintf(buff2, "DURA:%dus", DURATION);
		LCD_Display_Two_Line(buff1, buff2);
		oldAdcVal = adcVal;
	}
}
/* Entry point: bring up the LCD, the strobe output and the ADC, show a
 * splash screen for ~1 s, then poll the potentiometer forever.  The strobe
 * itself is driven from SysTick_Handler() once read_rotary() has programmed
 * SysTick. */
int main(void)
{
	LCD_Init();
	LED_GPIO_Init();
	ConfigureADC();
	LCD_Display_Two_Line(" += Time =+", " += Gloves =+");
	delay_us(1000000); // splash-screen pause (~1 s)
	//ROTARY_Init();
	while(1){
		read_rotary();
	}
}
|
Kamigami55/time-gloves
|
timegloves-openstm32-project/inc/rotary.h
|
#ifndef ROTARY_H_
#define ROTARY_H_
#include "gpio.h"
#include "stm32l476xx.h"
#include "common.h"
#include "lcd.h"
/* NOTE(review): the three ints below are tentative DEFINITIONS in a header,
 * so every translation unit including rotary.h emits its own copy; the build
 * relies on the linker merging common symbols (-fcommon).  Consider `extern`
 * declarations here plus one definition in a .c file. */
int DARK; // dark duration
#define LIGHT 200 // light duration, should be a constant
int DURATION;  // full strobe period in us (DARK + LIGHT)
int FREQUENCY; // strobe frequency in Hz, derived from DURATION
void ROTARY_Init(void);
void pinChangeISR();
void ROTARY_EXIT_Setup();
#endif
|
Kamigami55/time-gloves
|
timegloves-openstm32-project/inc/gpio.c
|
/**
* |----------------------------------------------------------------------
* | Copyright (c) 2016 <NAME>
* |
* | Permission is hereby granted, free of charge, to any person
* | obtaining a copy of this software and associated documentation
* | files (the "Software"), to deal in the Software without restriction,
* | including without limitation the rights to use, copy, modify, merge,
* | publish, distribute, sublicense, and/or sell copies of the Software,
* | and to permit persons to whom the Software is furnished to do so,
* | subject to the following conditions:
* |
* | The above copyright notice and this permission notice shall be
* | included in all copies or substantial portions of the Software.
* |
* | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
* | AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* | OTHER DEALINGS IN THE SOFTWARE.
* |----------------------------------------------------------------------
*/
#include "gpio.h"
/* Internal helpers implemented further below. */
void TM_GPIO_INT_EnableClock(GPIO_TypeDef* GPIOx);
void TM_GPIO_INT_Init(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin, TM_GPIO_Mode_t GPIO_Mode, TM_GPIO_OType_t GPIO_OType, TM_GPIO_PuPd_t GPIO_PuPd, TM_GPIO_Speed_t GPIO_Speed);
/**
 * Configure one GPIO pin: enable the port clock, then program mode, output
 * type, pull resistor and speed.
 *
 * @param GPIOx     port base (GPIOA, GPIOB, ...)
 * @param GPIO_Pin  pin NUMBER 0..15 (not a bit mask)
 *
 * Fix: the previous guard `if (GPIO_Pin == 0x00) return;` silently rejected
 * pin 0, which is a valid pin number in this port of the library (e.g.
 * main.c initializes PC0 for the ADC).  That check stems from the original
 * TM library where GPIO_Pin was a bit mask; the guard now rejects only
 * out-of-range pin numbers.
 */
void TM_GPIO_Init(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin, TM_GPIO_Mode_t GPIO_Mode, TM_GPIO_OType_t GPIO_OType, TM_GPIO_PuPd_t GPIO_PuPd, TM_GPIO_Speed_t GPIO_Speed) {
	/* Check input: pins are numbered 0..15 */
	if (GPIO_Pin > 15) {
		return;
	}
	/* Enable clock for GPIO */
	TM_GPIO_INT_EnableClock(GPIOx);
	/* Do initialization */
	TM_GPIO_INT_Init(GPIOx, GPIO_Pin, GPIO_Mode, GPIO_OType, GPIO_PuPd, GPIO_Speed);
}
/* Select the pull resistor for one pin (pin number 0..15); only that pin's
 * 2-bit PUPDR field is touched. */
void TM_GPIO_SetPullResistor(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin, TM_GPIO_PuPd_t GPIO_PuPd) {
	uint32_t shift = 2 * GPIO_Pin;          /* each pin owns a 2-bit field */
	uint32_t reg = GPIOx->PUPDR;
	reg &= ~(0x03 << shift);                /* clear the pin's field */
	reg |= (uint32_t)(GPIO_PuPd << shift);  /* write the new setting */
	GPIOx->PUPDR = reg;
}
/* Put one pin (pin number 0..15) into input mode.  Input is the 00 bit
 * pattern in MODER, so clearing the field is sufficient. */
void TM_GPIO_SetPinAsInput(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin) {
	uint32_t field_mask = 0x03 << (2 * GPIO_Pin);
	GPIOx->MODER = GPIOx->MODER & ~field_mask;
}
/* Put one pin (pin number 0..15) into general-purpose output mode
 * (MODER field pattern 01). */
void TM_GPIO_SetPinAsOutput(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin) {
	uint32_t shift = 2 * GPIO_Pin;
	uint32_t moder = GPIOx->MODER;
	moder &= ~(0x03 << shift);
	moder |= 0x01 << shift; /* 01 = output */
	GPIOx->MODER = moder;
}
/* Port index (A=0, B=1, ...) derived from the port's base address: ports sit
 * at evenly spaced addresses, so the index is the offset from GPIOA divided
 * by the per-port stride. */
uint32_t TM_GPIO_GetPortSource(GPIO_TypeDef* GPIOx) {
	uint32_t offset = (uint32_t)GPIOx - (GPIOA_BASE);
	uint32_t stride = (GPIOB_BASE) - (GPIOA_BASE);
	return offset / stride;
}
/* Enable the AHB2 peripheral clock of the given port; there is one enable
 * bit per port, ordered A, B, C, ... */
void TM_GPIO_INT_EnableClock(GPIO_TypeDef* GPIOx) {
	uint32_t port_index = TM_GPIO_GetPortSource(GPIOx);
	RCC->AHB2ENR |= (1 << port_index);
}
/**
 * Program MODER/PUPDR (always) and OTYPER/OSPEEDR (output and alternate-
 * function pins only) for a single pin.  GPIO_Pin is a pin number 0..15;
 * MODER/PUPDR/OSPEEDR use one 2-bit field per pin.
 *
 * Fixes: removed the unused local `pinpos`, and forced the shifted masks to
 * uint32_t — `0x03 << 30` on a signed int (pin 15) shifts into the sign
 * bit, which is undefined behaviour in C.
 */
void TM_GPIO_INT_Init(GPIO_TypeDef* GPIOx, uint32_t GPIO_Pin, TM_GPIO_Mode_t GPIO_Mode, TM_GPIO_OType_t GPIO_OType, TM_GPIO_PuPd_t GPIO_PuPd, TM_GPIO_Speed_t GPIO_Speed) {
	uint32_t pos2 = 2 * GPIO_Pin; /* bit offset of this pin's 2-bit fields */
	/* Set GPIO PUPD register */
	GPIOx->PUPDR = (GPIOx->PUPDR & ~((uint32_t)0x03 << pos2)) | ((uint32_t)GPIO_PuPd << pos2);
	/* Set GPIO MODE register */
	GPIOx->MODER = (GPIOx->MODER & ~((uint32_t)0x03 << pos2)) | ((uint32_t)GPIO_Mode << pos2);
	/* Output type and speed apply only to output / alternate-function pins */
	if (GPIO_Mode == TM_GPIO_Mode_OUT || GPIO_Mode == TM_GPIO_Mode_AF) {
		/* Set GPIO OTYPE register */
		GPIOx->OTYPER = (GPIOx->OTYPER & ~((uint32_t)0x01 << GPIO_Pin)) | ((uint32_t)GPIO_OType << GPIO_Pin);
		/* Set GPIO OSPEED register */
		GPIOx->OSPEEDR = (GPIOx->OSPEEDR & ~((uint32_t)0x03 << pos2)) | ((uint32_t)GPIO_Speed << pos2);
	}
}
|
Kamigami55/time-gloves
|
timegloves-openstm32-project/inc/common.c
|
<reponame>Kamigami55/time-gloves
#include "common.h"
void delay_us(int n)
{
asm("push {r0}\r\n"
"mov r0, r0\r\n"
"LOOP_US:\r\n"
"nop\r\n"
"subs r0, #1\r\n"
"BGT LOOP_US\r\n"
"POP {r0}\r\n"
:: "r" (n));
}
|
Kamigami55/time-gloves
|
timegloves-openstm32-project/inc/rotary.c
|
#include "rotary.h"
/* Rotary-encoder pin assignment: CLK on PC2, DT on PC3.  Older candidate
 * mappings are kept below, commented out, for reference. */
//#define ROTARY_CLK_PORT GPIOA
//#define ROTARY_CLK_PIN 1
////#define ROTARY_CLK_PORT GPIOB
////#define ROTARY_CLK_PIN 0
//#define ROTARY_DT_PORT GPIOA
//#define ROTARY_DT_PIN 4
#define ROTARY_CLK_PORT GPIOC
#define ROTARY_CLK_PIN 2
//#define ROTARY_CLK_PORT GPIOB
//#define ROTARY_CLK_PIN 0
#define ROTARY_DT_PORT GPIOC
#define ROTARY_DT_PIN 3
/* Limits, step size and start value for the encoder-controlled DARK
 * period (us). */
#define ROTARY_UPPER 50000
#define ROTARY_LOWER 0
#define ROTARY_STEP 10
#define ROTARY_DEFAULT 20000
int abOld; // Initialize state (last 2-bit CLK/DT sample of the decoder)
int old_count; // old rotary count
/* Configure the encoder pins as inputs, hook up their EXTI interrupts and
 * seed the decoder state.  NOTE(review): currently unused — the call in
 * main() is commented out in favour of ADC polling (read_rotary()). */
void ROTARY_Init(void){
	// open GPIO clock
	RCC->AHB2ENR |= (RCC_AHB2ENR_GPIOAEN | RCC_AHB2ENR_GPIOBEN | RCC_AHB2ENR_GPIOCEN);
	// set GPIO
	TM_GPIO_Init(ROTARY_CLK_PORT, ROTARY_CLK_PIN, TM_GPIO_Mode_IN, TM_GPIO_OType_PP, TM_GPIO_PuPd_NOPULL, TM_GPIO_Speed_Fast);
	TM_GPIO_Init(ROTARY_DT_PORT, ROTARY_DT_PIN, TM_GPIO_Mode_IN, TM_GPIO_OType_PP, TM_GPIO_PuPd_NOPULL, TM_GPIO_Speed_Fast);
	// set INTERRUPT
	ROTARY_EXIT_Setup();
	abOld = DARK = old_count = ROTARY_DEFAULT;
}
/* Quadrature-decode routine shared by the CLK and DT EXTI handlers.  The
 * whole body is currently commented out: the encoder path was replaced by
 * ADC polling (read_rotary() in main.c), so this is a stub kept for
 * reference. */
void pinChangeISR() {
	// enum { upMask = 0x66, downMask = 0x99 };
	// int abNew = (TM_GPIO_GetInputPinValue(ROTARY_CLK_PORT, ROTARY_CLK_PIN) << 1) | TM_GPIO_GetInputPinValue(ROTARY_DT_PORT, ROTARY_DT_PIN);
	// int criterion = abNew^abOld;
	// if (criterion==1 || criterion==2) {
	// if (upMask & (1 << (2*abOld + abNew/2))){
	// if(DARK > ROTARY_LOWER + ROTARY_STEP)
	// DARK -= ROTARY_STEP;
	// }
	// else {
	// if(DARK < ROTARY_UPPER - ROTARY_STEP)
	// DARK += ROTARY_STEP; // upMask = ~downMask
	// }
	// }
	// abOld = abNew; // Save new state
	// DURATION = DARK + LIGHT; // us
	// FREQUENCY = 1000000/DURATION; // Hz
	//
	// char buff1[16];
	// char buff2[16];
	// sprintf(buff1, "FREQ: %d", FREQUENCY);
	// sprintf(buff2, "DURA: %d", DURATION);
	// LCD_Display_Two_Line(buff1, buff2);
}
/* Route PC2/PC3 onto EXTI lines 2/3, trigger on both edges, unmask the
 * lines and enable their NVIC interrupts at priority 3. */
void ROTARY_EXIT_Setup(){
	//ENABLE PERIPHERAL CLOCK
	SET_REG(RCC->APB2ENR,RCC_APB2ENR_SYSCFGEN,1);
	//SYSCFG MODULE
	SET_REG(SYSCFG->EXTICR[0],SYSCFG_EXTICR1_EXTI2,SYSCFG_EXTICR1_EXTI2_PC);//EXTI line 2 <- port C (PC2, CLK); old comment said PC0
	SET_REG(SYSCFG->EXTICR[0],SYSCFG_EXTICR1_EXTI3,SYSCFG_EXTICR1_EXTI3_PC);//EXTI line 3 <- port C (PC3, DT); old comment said PC1
	//EXTI module: rising + falling edge, unmask, clear stale pending flag
	SET_REG(EXTI->RTSR1,EXTI_RTSR1_RT2,EXTI_RTSR1_RT2);
	SET_REG(EXTI->FTSR1,EXTI_FTSR1_FT2,EXTI_FTSR1_FT2);
	SET_REG(EXTI->IMR1,EXTI_IMR1_IM2,EXTI_IMR1_IM2);
	SET_REG(EXTI->PR1,EXTI_PR1_PIF2,EXTI_PR1_PIF2);
	SET_REG(EXTI->RTSR1,EXTI_RTSR1_RT3,EXTI_RTSR1_RT3);
	SET_REG(EXTI->FTSR1,EXTI_FTSR1_FT3,EXTI_FTSR1_FT3);
	SET_REG(EXTI->IMR1,EXTI_IMR1_IM3,EXTI_IMR1_IM3);
	SET_REG(EXTI->PR1,EXTI_PR1_PIF3,EXTI_PR1_PIF3);
	//NVIC
	NVIC_EnableIRQ(EXTI2_IRQn);
	NVIC_EnableIRQ(EXTI3_IRQn);
	//clear NVIC pending
	NVIC_ClearPendingIRQ(EXTI2_IRQn);
	NVIC_ClearPendingIRQ(EXTI3_IRQn);
	//set priority
	NVIC_SetPriority(EXTI2_IRQn, 3);
	NVIC_SetPriority(EXTI3_IRQn, 3);
}
/* EXTI line 2 ISR (rotary CLK edge): run the decoder, then clear the
 * pending state.  NOTE(review): the NVIC pending bit is cleared before the
 * peripheral flag (PR1); the usual order is PR1 first to avoid an immediate
 * re-entry — verify on hardware. */
void EXTI2_IRQHandler(void){
	pinChangeISR();
	NVIC_ClearPendingIRQ(EXTI2_IRQn);
	//clear pending interrupt flag of line 2
	SET_REG(EXTI->PR1,EXTI_PR1_PIF2,EXTI_PR1_PIF2);
}
/* EXTI line 3 ISR (rotary DT edge): same structure as EXTI2_IRQHandler,
 * including the NVIC-before-PR1 clearing order noted there. */
void EXTI3_IRQHandler(void){
	pinChangeISR();
	NVIC_ClearPendingIRQ(EXTI3_IRQn);
	//clear pending interrupt flag of line 3
	SET_REG(EXTI->PR1,EXTI_PR1_PIF3,EXTI_PR1_PIF3);
}
//
//void ROTARY_EXIT_Setup(){
// //ENABLE PERIPHERAL CLOCK
// SET_REG(RCC->APB2ENR,RCC_APB2ENR_SYSCFGEN,1);
//
// //SYSCFG MODULE
// SET_REG(SYSCFG->EXTICR[0],SYSCFG_EXTICR1_EXTI1,SYSCFG_EXTICR1_EXTI1_PA);//PB0
// SET_REG(SYSCFG->EXTICR[1],SYSCFG_EXTICR2_EXTI4,SYSCFG_EXTICR2_EXTI4_PA);//PA4
//
//
// //EXTI module
// SET_REG(EXTI->RTSR1,EXTI_RTSR1_RT1,EXTI_RTSR1_RT1);
// SET_REG(EXTI->FTSR1,EXTI_FTSR1_FT1,EXTI_FTSR1_FT1);
// SET_REG(EXTI->IMR1,EXTI_IMR1_IM1,EXTI_IMR1_IM1);
// SET_REG(EXTI->PR1,EXTI_PR1_PIF1,EXTI_PR1_PIF1);
//
// SET_REG(EXTI->RTSR1,EXTI_RTSR1_RT4,EXTI_RTSR1_RT4);
// SET_REG(EXTI->FTSR1,EXTI_FTSR1_FT4,EXTI_FTSR1_FT4);
// SET_REG(EXTI->IMR1,EXTI_IMR1_IM4,EXTI_IMR1_IM4);
// SET_REG(EXTI->PR1,EXTI_PR1_PIF4,EXTI_PR1_PIF4);
//
// //NVIC
// NVIC_EnableIRQ(EXTI1_IRQn);
// NVIC_EnableIRQ(EXTI4_IRQn);
//
// //clear NVIC pending
// NVIC_ClearPendingIRQ(EXTI1_IRQn);
// NVIC_ClearPendingIRQ(EXTI4_IRQn);
//
// //set priority
// NVIC_SetPriority(EXTI1_IRQn, 3);
// NVIC_SetPriority(EXTI4_IRQn, 3);
//}
//
//
//void EXTI1_IRQHandler(void){
// pinChangeISR();
// NVIC_ClearPendingIRQ(EXTI1_IRQn);
// //clear pending interrupt flag of line 2
// SET_REG(EXTI->PR1,EXTI_PR1_PIF1,EXTI_PR1_PIF1);
//}
//
//
//void EXTI4_IRQHandler(void){
// pinChangeISR();
// NVIC_ClearPendingIRQ(EXTI4_IRQn);
// //clear pending interrupt flag of line 3
// SET_REG(EXTI->PR1,EXTI_PR1_PIF4,EXTI_PR1_PIF4);
//}
//
//
//void ROTARY_EXIT_Setup(){
// //ENABLE PERIPHERAL CLOCK
// SET_REG(RCC->APB2ENR,RCC_APB2ENR_SYSCFGEN,1);
//
// //SYSCFG MODULE
// SET_REG(SYSCFG->EXTICR[0],SYSCFG_EXTICR1_EXTI0,SYSCFG_EXTICR1_EXTI0_PB);//PB0
// SET_REG(SYSCFG->EXTICR[1],SYSCFG_EXTICR2_EXTI4,SYSCFG_EXTICR2_EXTI4_PA);//PA4
//
//
// //EXTI module
// SET_REG(EXTI->RTSR1,EXTI_RTSR1_RT0,EXTI_RTSR1_RT0);
// SET_REG(EXTI->FTSR1,EXTI_FTSR1_FT0,EXTI_FTSR1_FT0);
// SET_REG(EXTI->IMR1,EXTI_IMR1_IM0,EXTI_IMR1_IM0);
// SET_REG(EXTI->PR1,EXTI_PR1_PIF0,EXTI_PR1_PIF0);
//
// SET_REG(EXTI->RTSR1,EXTI_RTSR1_RT4,EXTI_RTSR1_RT4);
// SET_REG(EXTI->FTSR1,EXTI_FTSR1_FT4,EXTI_FTSR1_FT4);
// SET_REG(EXTI->IMR1,EXTI_IMR1_IM4,EXTI_IMR1_IM4);
// SET_REG(EXTI->PR1,EXTI_PR1_PIF4,EXTI_PR1_PIF4);
//
// //NVIC
// NVIC_EnableIRQ(EXTI0_IRQn);
// NVIC_EnableIRQ(EXTI4_IRQn);
//
// //clear NVIC pending
// NVIC_ClearPendingIRQ(EXTI0_IRQn);
// NVIC_ClearPendingIRQ(EXTI4_IRQn);
//
// //set priority
// NVIC_SetPriority(EXTI0_IRQn, 3);
// NVIC_SetPriority(EXTI4_IRQn, 3);
//}
//
//
//void EXTI0_IRQHandler(void){
// pinChangeISR();
// NVIC_ClearPendingIRQ(EXTI0_IRQn);
// //clear pending interrupt flag of line 2
// SET_REG(EXTI->PR1,EXTI_PR1_PIF0,EXTI_PR1_PIF0);
//}
//
//
//void EXTI4_IRQHandler(void){
// pinChangeISR();
// NVIC_ClearPendingIRQ(EXTI4_IRQn);
// //clear pending interrupt flag of line 3
// SET_REG(EXTI->PR1,EXTI_PR1_PIF4,EXTI_PR1_PIF4);
//}
|
Kamigami55/time-gloves
|
timegloves-openstm32-project/inc/common.h
|
<filename>timegloves-openstm32-project/inc/common.h
#ifndef COMMON_H_
#define COMMON_H_
/* Masked register write: REG = (REG & ~SELECT) | VAL.
 * NOTE(review): the expansion is a braced statement plus a stray trailing
 * ';'. Some call sites (e.g. SystemClock_Config in main.c) omit their own
 * semicolon and rely on it, so it cannot be converted to the usual
 * do { } while (0) form without touching those callers; be aware the macro
 * is unsafe inside an unbraced if/else. */
#define SET_REG(REG,SELECT,VAL) {((REG)=((REG)&(~(SELECT))) | (VAL));};
/* Software busy-wait; the time unit is approximate and clock-dependent
 * (see the asm loop in common.c). */
void delay_us(int n);
#endif
|
Kamigami55/time-gloves
|
timegloves-openstm32-project/inc/lcd.c
|
<gh_stars>1-10
#include "lcd.h"
/* LCD 1602A pins
*
* 1: VSS GND
* 2: VDD 5V
* 3: VS GND
* 4: RS PB3
 * 5: R/W PB4
 * 6: E PB5
 *    NOTE(review): the RW_PIN/E_PIN macros below map R/W -> PB5 and
 *    E -> PB4, contradicting the two lines above — one of the two is
 *    stale; confirm against the actual wiring.
* 7: DB0 PB10
* 8: DB1 PA8
* 9: DB2 PA9
* 10: DB3 PC7
* 11: DB4 PB6
* 12: DB5 PA7
* 13: DB6 PA6
* 14: DB7 PA5
* 15: 5V
* 16: GND
*/
/* Control/data pin map for the 1602A module.  These macros are what the
 * code actually drives; NOTE(review): the wiring table above lists R/W=PB4
 * and E=PB5, while RW_PIN/E_PIN below use PB5/PB4 — verify which is right. */
#define RS_PORT GPIOB
#define RS_PIN 3
#define RW_PORT GPIOB
#define RW_PIN 5
#define E_PORT GPIOB
#define E_PIN 4
/* Data bus DB0..DB7, one (port, pin) pair per bit. */
#define DB0_PORT GPIOB
#define DB0_PIN 10
#define DB1_PORT GPIOA
#define DB1_PIN 8
#define DB2_PORT GPIOA
#define DB2_PIN 9
#define DB3_PORT GPIOC
#define DB3_PIN 7
#define DB4_PORT GPIOB
#define DB4_PIN 6
#define DB5_PORT GPIOA
#define DB5_PIN 7
#define DB6_PORT GPIOA
#define DB6_PIN 6
#define DB7_PORT GPIOA
#define DB7_PIN 5
/* Configure all LCD control/data pins as open-drain outputs with pull-ups,
 * then send the HD44780-style init sequence. */
void LCD_Init(void) {
	// open GPIO clock
	RCC->AHB2ENR |= (RCC_AHB2ENR_GPIOAEN | RCC_AHB2ENR_GPIOBEN | RCC_AHB2ENR_GPIOCEN);
	// set GPIO
	TM_GPIO_Init(RS_PORT, RS_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(RW_PORT, RW_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(E_PORT, E_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(DB0_PORT, DB0_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(DB1_PORT, DB1_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(DB2_PORT, DB2_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(DB3_PORT, DB3_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(DB4_PORT, DB4_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(DB5_PORT, DB5_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(DB6_PORT, DB6_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	TM_GPIO_Init(DB7_PORT, DB7_PIN, TM_GPIO_Mode_OUT, TM_GPIO_OType_OD, TM_GPIO_PuPd_UP, TM_GPIO_Speed_Medium);
	// set LCD
	LCD_Write_Command(0, 0, 0x38); // set LCD function (8-bit bus, 2 lines per HD44780 convention — confirm against datasheet)
	LCD_Write_Command(0, 0, 0x06); // set LCD mode (entry mode: cursor increments)
	LCD_Write_Command(0, 0, 0x0C); // open LCD (display on, cursor off)
}
void LCD_Write_Command(uint8_t RS, uint8_t RW, uint8_t DATA){
TM_GPIO_SetPinLow(E_PORT, E_PIN);
if(RS){
TM_GPIO_SetPinHigh(RS_PORT, RS_PIN);
} else {
TM_GPIO_SetPinLow(RS_PORT, RS_PIN);
}
if(RW){
TM_GPIO_SetPinHigh(RW_PORT, RW_PIN);
} else {
TM_GPIO_SetPinLow(RW_PORT, RW_PIN);
}
if((DATA & (1<<7)) != 0){
TM_GPIO_SetPinHigh(DB7_PORT, DB7_PIN);
} else {
TM_GPIO_SetPinLow(DB7_PORT, DB7_PIN);
}
if((DATA & (1<<6)) != 0){
TM_GPIO_SetPinHigh(DB6_PORT, DB6_PIN);
} else {
TM_GPIO_SetPinLow(DB6_PORT, DB6_PIN);
}
if((DATA & (1<<5)) != 0){
TM_GPIO_SetPinHigh(DB5_PORT, DB5_PIN);
} else {
TM_GPIO_SetPinLow(DB5_PORT, DB5_PIN);
}
if((DATA & (1<<4)) != 0){
TM_GPIO_SetPinHigh(DB4_PORT, DB4_PIN);
} else {
TM_GPIO_SetPinLow(DB4_PORT, DB4_PIN);
}
if((DATA & (1<<3)) != 0){
TM_GPIO_SetPinHigh(DB3_PORT, DB3_PIN);
} else {
TM_GPIO_SetPinLow(DB3_PORT, DB3_PIN);
}
if((DATA & (1<<2)) != 0){
TM_GPIO_SetPinHigh(DB2_PORT, DB2_PIN);
} else {
TM_GPIO_SetPinLow(DB2_PORT, DB2_PIN);
}
if((DATA & (1<<1)) != 0){
TM_GPIO_SetPinHigh(DB1_PORT, DB1_PIN);
} else {
TM_GPIO_SetPinLow(DB1_PORT, DB1_PIN);
}
if((DATA & (1<<0)) != 0){
TM_GPIO_SetPinHigh(DB0_PORT, DB0_PIN);
} else {
TM_GPIO_SetPinLow(DB0_PORT, DB0_PIN);
}
TM_GPIO_SetPinHigh(E_PORT, E_PIN);
delay_us(10);
TM_GPIO_SetPinLow(E_PORT, E_PIN);
delay_us(10);
if(RS == 0 && RW == 0 && DATA < 4){
delay_us(1640);
} else {
delay_us(40);
}
}
/* Write one character at the current cursor position (RS=1 selects the
 * LCD's data register). */
void LCD_Append_Char(char ch){
	LCD_Write_Command(1, 0, ch);
}
/* Clear the display and print one NUL-terminated line, truncated to the
 * 16-character width of the module. */
void LCD_Display_One_Line(uint8_t* line){
	LCD_Clear();
	uint8_t col = 0;
	while (col < 16 && line[col] != '\0') {
		LCD_Append_Char(line[col]);
		++col;
	}
}
/* Print up to one 16-character row starting at the current cursor. */
static void lcd_emit_row(const uint8_t* text){
	for (uint8_t col = 0; col < 16 && text[col] != '\0'; ++col) {
		LCD_Append_Char(text[col]);
	}
}
/* Clear the display and write two NUL-terminated rows, each truncated to
 * 16 characters. */
void LCD_Display_Two_Line(uint8_t* line1, uint8_t* line2){
	LCD_Clear();
	lcd_emit_row(line1);
	LCD_Write_Command(0, 0, 0xC0); // move LCD cursor to next line
	lcd_emit_row(line2);
}
/* Blank the display and home the cursor (command 0x01). */
void LCD_Clear(){
	LCD_Write_Command(0, 0, 0x01); // clear screen
}
|
Kamigami55/time-gloves
|
timegloves-openstm32-project/inc/lcd.h
|
#ifndef LCD_H_
#define LCD_H_
#include "gpio.h"
#include "stm32l476xx.h"
#include "common.h"
/* Pin setup + LCD init sequence; call once before any other LCD_* call. */
void LCD_Init(void);
/* Low-level byte transfer: RS selects command(0)/data(1), RW write(0)/read(1). */
void LCD_Write_Command(uint8_t RS, uint8_t RW, uint8_t DATA);
/* Write one character at the current cursor position. */
void LCD_Append_Char(char ch);
/* Clear, then show one / two 16-character lines. */
void LCD_Display_One_Line(uint8_t* line);
void LCD_Display_Two_Line(uint8_t* line1, uint8_t* line2);
void LCD_Clear();
#endif
|
flhofer/I4S_LoRaWanTest
|
MKRWAN.h
|
<gh_stars>0
/*
This file is part of the MKRWAN library.
Copyright (C) 2017 Arduino AG (http://www.arduino.cc/)
Based on the TinyGSM library https://github.com/vshymanskyy/TinyGSM
Copyright (c) 2016 <NAME>
MKRWAN library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MKRWAN library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with MKRWAN library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "Arduino.h"
/* On the Portenta carrier the LoRa module's control lines differ from the
 * MKR WAN defaults, so re-point them. */
#ifdef PORTENTA_CARRIER
#undef LORA_RESET
#define LORA_RESET (PD_5)
#undef LORA_BOOT0
#define LORA_BOOT0 (PJ_11)
#endif
/* Default network-join timeout, in milliseconds. */
#define DEFAULT_JOIN_TIMEOUT 60000L
/*
 * Fixed-size ring buffer holding up to N-1 elements of T, used as the
 * modem's RX/TX FIFO.  Structured as a single-producer / single-consumer
 * queue: put() only advances _w, get() only advances _r.
 * NOTE(review): the indices are plain non-atomic ints with no barriers;
 * concurrent use across contexts (e.g. ISR vs. loop) relies on aligned int
 * stores being atomic on the target — verify for new platforms.
 * Bulk put/get use memcpy, so T must be trivially copyable.
 */
template <class T, unsigned N>
class SerialFifo
{
public:
  SerialFifo()
  {
    clear();
  }
  // Drop all buffered data (not safe concurrently with put/get).
  void clear()
  {
    _r = 0;
    _w = 0;
  }
  // writing thread/context API
  //-------------------------------------------------------------
  // True when at least one element can be stored.
  bool writeable(void)
  {
    return free() > 0;
  }
  // Number of free slots; one slot is sacrificed to distinguish full
  // from empty.
  int free(void)
  {
    int s = _r - _w;
    if (s <= 0)
      s += N;
    return s - 1;
  }
  // Store one element; returns false when the fifo is full.
  bool put(const T& c)
  {
    int i = _w;
    int j = i;
    i = _inc(i);
    if (i == _r) // !writeable()
      return false;
    _b[j] = c;   // write the slot before publishing the new write index
    _w = i;
    return true;
  }
  // Bulk store of n elements from p.  With t == false it returns as soon
  // as the fifo is full; with t == true it spins until everything fits.
  // Returns the number of elements actually stored.
  int put(const T* p, int n, bool t = false)
  {
    int c = n;
    while (c)
    {
      int f;
      while ((f = free()) == 0) // wait for space
      {
        if (!t) return n - c; // no more space and not blocking
        /* nothing / just wait */;
      }
      // check free space
      if (c < f) f = c;
      int w = _w;
      int m = N - w;
      // check wrap
      if (f > m) f = m;
      memcpy(&_b[w], p, f);
      _w = _inc(w, f);
      c -= f;
      p += f;
    }
    return n - c;
  }
  // reading thread/context API
  // --------------------------------------------------------
  // True when at least one element is buffered.
  bool readable(void)
  {
    return (_r != _w);
  }
  // Number of buffered elements.
  size_t size(void)
  {
    int s = _w - _r;
    if (s < 0)
      s += N;
    return s;
  }
  // Remove one element into *p; returns false when empty.
  bool get(T* p)
  {
    int r = _r;
    if (r == _w) // !readable()
      return false;
    *p = _b[r];
    _r = _inc(r);
    return true;
  }
  // Copy the oldest element into *p without consuming it.
  bool peek(T* p)
  {
    int r = _r;
    if (r == _w) // !readable()
      return false;
    *p = _b[r];
    return true;
  }
  // Bulk read of up to n elements into p; the t flag mirrors put():
  // false = return early when empty, true = spin until n were read.
  int get(T* p, int n, bool t = false)
  {
    int c = n;
    while (c)
    {
      int f;
      for (;;) // wait for data
      {
        f = size();
        if (f) break; // free space
        if (!t) return n - c; // no space and not blocking
        /* nothing / just wait */;
      }
      // check available data
      if (c < f) f = c;
      int r = _r;
      int m = N - r;
      // check wrap
      if (f > m) f = m;
      memcpy(p, &_b[r], f);
      _r = _inc(r, f);
      c -= f;
      p += f;
    }
    return n - c;
  }
private:
  // Advance index i by n slots, wrapping at N.
  int _inc(int i, int n = 1)
  {
    return (i + n) % N;
  }
  T _b[N]; // element storage
  int _w;  // write index (producer-owned)
  int _r;  // read index (consumer-owned)
};
/* Cooperative yield used while busy-waiting on the modem. */
#ifndef YIELD
#define YIELD() { delay(2); }
#endif
typedef const char* ConstStr;
/* Flash-string helper macros kept as no-ops on this platform. */
#define GFP(x) x
#define GF(x) x
#ifdef LORA_DEBUG
namespace {
  /* Variadic debug print: arguments separated by spaces, newline after
   * the last one.  Compiled out entirely when LORA_DEBUG is undefined. */
  template<typename T>
  static void DBG(T last) {
    LORA_DEBUG.println(last);
  }
  template<typename T, typename... Args>
  static void DBG(T head, Args... tail) {
    LORA_DEBUG.print(head);
    LORA_DEBUG.print(' ');
    DBG(tail...);
  }
}
#else
#define DBG(...)
#endif
/// Return the smaller of two values; on a tie the first argument wins,
/// matching std::min semantics.
template<class T>
const T& Min(const T& a, const T& b)
{
  if (b < a) {
    return b;
  }
  return a;
}
/// Return the larger of two values; on a tie the first argument wins,
/// matching std::max semantics.
template<class T>
const T& Max(const T& a, const T& b)
{
  if (b < a) {
    return a;
  }
  return b;
}
/* Default size of the receive ring buffer, in bytes. */
#if !defined(LORA_RX_BUFFER)
#define LORA_RX_BUFFER 256
#endif
/* AT Command strings. Commands start with AT */
#define AT_RESET "+REBOOT"
#define AT_BAND "+BAND"
#define AT_DEUI "+DEVEUI"
#define AT_DADDR "+DEVADDR"
#define AT_APPKEY "+APPKEY"
#define AT_NWKSKEY "+NWKSKEY"
#define AT_APPSKEY "+APPSKEY"
#define AT_APPEUI "+APPEUI"
#define AT_ADR "+ADR"
#define AT_TXP "+RFPOWER"
#define AT_FORMAT "+DFORMAT"
#define AT_DR "+DR"
#define AT_DCS "+DUTYCYCLE"
#define AT_PNM "+NWK"
#define AT_RX2FQ "+RX2FQ"
#define AT_RX2DR "+RX2DR"
#define AT_RX1DL "+RX1DL"
#define AT_RX2DL "+RX2DL"
#define AT_JN1DL "+JN1DL"
#define AT_JN2DL "+JN2DL"
#define AT_NJM "+MODE"
#define AT_NWKID "+IDNWK"
#define AT_FCU "+FCU"
#define AT_FCD "+FCD"
#define AT_CLASS "+CLASS"
#define AT_JOIN "+JOIN"
#define AT_NJS "+NJS"
#define AT_SENDB "+SENDB"
#define AT_SEND "+SEND"
#define AT_RECVB "+RECVB"
#define AT_RECV "+RECV"
#define AT_UTX "+UTX"
#define AT_CTX "+CTX"
#define AT_PORT "+PORT"
#define AT_VER "+VER"
#define AT_DEV "+DEV"
#define AT_CFM "+CFM"
#define AT_CFS "+CFS"
#define AT_SNR "+SNR"
#define AT_RSSI "+RSSI"
#define AT_BAT "+BAT"
#define AT_TRSSI "+TRSSI"
#define AT_TTONE "+TTONE"
#define AT_TTLRA "+TTLRA"
#define AT_TRLRA "+TRLRA"
#define AT_TCONF "+TCONF"
#define AT_TOFF "+TOFF"
#define AT_CERTIF "+CERTIF"
#define AT_CHANMASK "+CHANMASK"
#define AT_CHANDEFMASK "+CHANDEFMASK"
#define AT_EVENT "+EVENT"
#define AT_UART "+UART"
#define AT_FACNEW "+FACNEW"
#define AT_SLEEP "+SLEEP"
#define AT_MSIZE "+MSIZE"
/* Separators used when composing AT commands: AT<cmd>=<value> / AT<cmd>? */
#define AT_EQ "="
#define AT_QM "?"
#define ARDUINO_LORA_MAXBUFF 64 // hard coded limit in middleware and driver
/* Payload encoding selector for AT+DFORMAT. */
#define FMT_HEX 0
#define FMT_BIN 1
/* Line terminator expected by the modem firmware. */
#define LORA_NL "\r"
/* Response strings returned by the modem. */
static const char LORA_OK[] = "+OK";
static const char LORA_ERROR[] = "+ERR";
static const char LORA_ERROR_PARAM[] = "+ERR_PARAM";
static const char LORA_ERROR_BUSY[] = "+ERR_BUSY";
static const char LORA_ERROR_OVERFLOW[] = "+ERR_PARAM_OVERFLOW";
static const char LORA_ERROR_NO_NETWORK[] = "+ERR_NO_NETWORK";
static const char LORA_ERROR_RX[] = "+ERR_RX";
static const char LORA_ERROR_UNKNOWN[] = "+ERR_UNKNOWN";
/* Expected module firmware identity (used for the update reminder). */
static const char ARDUINO_FW_VERSION[] = "ARD-078 1.2.4";
static const char ARDUINO_FW_VERSION_AT[] = "ARD-078 1.2.4";
static const char ARDUINO_FW_IDENTIFIER[] = "ARD-078";
/* Regional frequency plans, in the modem's numeric encoding. */
typedef enum {
    AS923 = 0,
    AU915,
    CN470,
    CN779,
    EU433,
    EU868 = 5,
    KR920,
    IN865,
    US915,
    US915_HYBRID,
} _lora_band;
/* RF output path selection. */
typedef enum {
    RFO = 0,
    PABOOST,
} _rf_mode;
/* Activation method: ABP (keys preloaded) vs. OTAA (over-the-air join). */
typedef enum {
    ABP = 0,
    OTAA,
} _lora_mode;
/* Identifiers/keys that can be written to or read from the modem. */
typedef enum {
    APP_EUI = 0,
    APP_KEY,
    DEV_EUI,
    DEV_ADDR,
    NWKS_KEY,
    APPS_KEY,
    NWK_ID,
} _lora_property;
/* LoRaWAN device class; values are the ASCII letters 'A'..'C'
 * (CLASS_B/CLASS_C auto-increment from 'A'). */
typedef enum {
    CLASS_A = 'A',
    CLASS_B,
    CLASS_C,
} _lora_class;
class LoRaModem : public Stream // @suppress("Class has a virtual method and non-virtual destructor")
{
public:
LoRaModem(__attribute__((unused)) Stream& stream = (Stream&)Serial)
#ifdef SerialLoRa
: stream(SerialLoRa), lastPollTime(millis()), pollInterval(300000)
#else
: stream(stream), lastPollTime(millis()), pollInterval(300000)
#endif
{
network_joined = false;
mask_size = 1;
region = EU868;
compat_mode = false;
formatBin = false;
adr = true;
msize = ARDUINO_LORA_MAXBUFF;
}
public:
typedef SerialFifo<uint8_t, LORA_RX_BUFFER> RxFifo;
private:
Stream& stream;
bool network_joined;
RxFifo rx;
RxFifo tx;
String fw_version;
unsigned long lastPollTime;
unsigned long pollInterval;
uint8_t downlinkPort; // Valid values are between 1 and 223
int mask_size;
uint16_t channelsMask[6];
_lora_band region;
bool compat_mode;
bool formatBin;
bool adr;
size_t msize;
public:
virtual int joinOTAA(const char *appEui, const char *appKey, const char *devEui, uint32_t timeout) {
YIELD();
rx.clear();
changeMode(OTAA);
set(APP_EUI, appEui);
set(APP_KEY, appKey);
if (devEui != NULL) {
set(DEV_EUI, devEui);
}
network_joined = join(timeout);
delay(1000);
return network_joined;
}
virtual int joinOTAA(String appEui, String appKey, uint32_t timeout = DEFAULT_JOIN_TIMEOUT) {
return joinOTAA(appEui.c_str(), appKey.c_str(), NULL, timeout);
}
virtual int joinOTAA(String appEui, String appKey, String devEui, uint32_t timeout = DEFAULT_JOIN_TIMEOUT) {
return joinOTAA(appEui.c_str(), appKey.c_str(), devEui.c_str(), timeout);
}
virtual int joinABP(/*const char* nwkId, */const char * devAddr, const char * nwkSKey, const char * appSKey, uint32_t timeout = DEFAULT_JOIN_TIMEOUT) {
YIELD();
rx.clear();
changeMode(ABP);
//set(NWK_ID, nwkId);
set(DEV_ADDR, devAddr);
set(NWKS_KEY, nwkSKey);
set(APPS_KEY, appSKey);
network_joined = join(timeout);
delay(1000);
return (getJoinStatus() == 1);
}
virtual int joinABP(/*String nwkId, */String devAddr, String nwkSKey, String appSKey) {
return joinABP(/*nwkId.c_str(), */devAddr.c_str(), nwkSKey.c_str(), appSKey.c_str());
}
// Stream compatibility (like UDP)
void beginPacket() {
tx.clear();
}
int endPacket(bool confirmed = false) {
uint8_t buffer[LORA_RX_BUFFER];
memset(buffer, 0, LORA_RX_BUFFER);
int size = tx.get(buffer, tx.size());
return modemSend(buffer, size, confirmed);
}
size_t write(uint8_t c) {
return tx.put(c);
};
size_t write(const uint8_t *buffer, size_t size) {
return tx.put(buffer, size);
};
template <typename T> inline size_t write(T val) {return write((uint8_t*)&val, sizeof(T));};
using Print::write;
int parsePacket() {
return available();
}
virtual int available() {
YIELD();
if (!rx.size()) {
maintain();
}
return rx.size(); // + buf_available;
}
// Copy up to 'size' downlink bytes into 'buf'; returns the number copied.
// NOTE(review): the loop only exits once 'cnt == size'; if fewer bytes than
// requested ever arrive this busy-loops on maintain() — callers should
// request at most available() bytes. TODO confirm intended contract.
virtual int read(uint8_t *buf, size_t size) {
YIELD();
maintain();
size_t cnt = 0;
while (cnt < size) {
size_t chunk = Min(size-cnt, rx.size());
if (chunk > 0) {
rx.get(buf, chunk);
buf += chunk;
cnt += chunk;
continue;
}
// TODO: Read directly into user buffer?
maintain();
/*
if (buf_available > 0) {
modemRead(rx.free());
} else {
break;
}
*/
}
return cnt;
}
// Read a single byte; -1 when none is available.
virtual int read() {
uint8_t c;
if (read(&c, 1) == 1) {
return c;
}
return -1;
}
// Look at the next byte without consuming it; -1 when the rx ring is empty.
virtual int peek() {
uint8_t c;
if (rx.peek(&c) == true) {
return c;
}
return -1;
}
// Drain the underlying serial stream.
virtual void flush() { stream.flush(); }
// "Connected" means either data is pending or the last join succeeded.
virtual uint8_t connected() {
if (available()) {
return true;
}
return network_joined;
}
virtual operator bool() { return connected(); }
public:
/*
* Basic functions
*/
// Bring up the modem serial link, hard-reset the module and configure the band.
// Tries 8N2 first (module default) and falls back to 8N1 once.
// Returns true when the modem answered and the band was configured.
bool begin(_lora_band band, uint32_t baud = 19200, uint16_t config = SERIAL_8N2) {
#ifdef SerialLoRa
    SerialLoRa.begin(baud, config);
    // BOOT0 low = boot from flash; pulse RESET to restart the module.
    pinMode(LORA_BOOT0, OUTPUT);
    digitalWrite(LORA_BOOT0, LOW);
    pinMode(LORA_RESET, OUTPUT);
    digitalWrite(LORA_RESET, HIGH);
    delay(200);
    digitalWrite(LORA_RESET, LOW);
    delay(200);
    digitalWrite(LORA_RESET, HIGH);
    delay(200);
#endif
    region = band;
    if (init()) {
        return configureBand(band);
    }
    // Some firmwares speak 8N1 instead of 8N2; retry once with 8N1.
    // Guard the retry: the original unconditional recursion never terminated
    // when init() kept failing at SERIAL_8N1.
    if (config != SERIAL_8N1) {
        return begin(band, baud, SERIAL_8N1);
    }
    return false;
}
// Probe the modem and cache its firmware version string.
// Returns false when the modem never answers "AT".
bool init() {
if (!autoBaud()) {
return false;
}
// populate version field on startup
version();
if (isArduinoFW() && !isLatestFW()) {
DBG("### Please update fw using MKRWANFWUpdate_standalone.ino sketch");
}
return true;
}
// Select LoRaWAN device class (A/B/C).
bool configureClass(_lora_class _class) {
return setValue(GF(AT_CLASS), (char)_class);
}
// Select the regional band plan; EU868 on Arduino firmware also enforces
// the mandatory duty-cycle limit.
bool configureBand(_lora_band band) {
if (!setValue(GF(AT_BAND), band)) {
return false;
}
if (band == EU868 && isArduinoFW()) {
return dutyCycle(true);
}
return true;
}
// Number of 16-bit words needed for the region's channel mask:
// single-block regions use one word, AU915/CN470/US915* use six.
// NOTE(review): an unrecognised band leaves the cached mask_size untouched.
int getChannelMaskSize(_lora_band band) {
    const bool single_block =
        (band == AS923) || (band == CN779) || (band == EU433) ||
        (band == EU868) || (band == KR920) || (band == IN865);
    const bool multi_block =
        (band == AU915) || (band == CN470) ||
        (band == US915) || (band == US915_HYBRID);
    if (single_block) {
        mask_size = 1;
    } else if (multi_block) {
        mask_size = 6;
    }
    return mask_size;
}
// Query the modem's channel mask; also refreshes the cached channelsMask[]
// words as a side effect. Returns "0" when the query failed.
String getChannelMask() {
// 4 hex digits per 16-bit mask word.
int size = 4*getChannelMaskSize(region);
String channel_mask_str = "0";
sendAT(GF(AT_CHANMASK), GF(AT_QM));
if ((!compat_mode && waitResponse(GF(AT_CHANMASK)) == 1)
|| (compat_mode && waitResponse() == 1)) {
channel_mask_str = stream.readStringUntil('\r');
DBG("### Full channel mask string: ", channel_mask_str);
// Always parses six words; short replies leave trailing words unchanged.
sscanf(channel_mask_str.c_str(), "%04hx%04hx%04hx%04hx%04hx%04hx", &channelsMask[0], &channelsMask[1], &channelsMask[2],
&channelsMask[3], &channelsMask[4], &channelsMask[5]);
}
return channel_mask_str.substring(0, size);
}
// Test bit 'pos' (0-based) of the cached channel mask; refreshes it first.
int isChannelEnabled(int pos) {
populateChannelsMask();
int row = pos / 16;
int col = pos % 16;
uint16_t channel = (uint16_t)(1 << col);
channel = ((channelsMask[row] & channel) >> col);
return channel;
}
// Clear bit 'pos' of the channel mask and push the new mask to the modem.
bool disableChannel(int pos) {
    populateChannelsMask();
    const int word = pos / 16;
    const int bit  = pos % 16;
    channelsMask[word] &= ~(uint16_t)(1 << bit);
    return sendMask();
}
// Set bit 'pos' of the channel mask and push the new mask to the modem.
bool enableChannel(int pos) {
    populateChannelsMask();
    const int word = pos / 16;
    const int bit  = pos % 16;
    channelsMask[word] |= (uint16_t)(1 << bit);
    return sendMask();
}
// Serialize the cached channelsMask[] words as 24 hex characters and
// transmit them to the modem. Returns true when the modem acknowledged.
bool sendMask() {
    String newMask;
    newMask.reserve(24);
    /* Convert channel mask into string */
    for (int i = 0; i < 6; i++) {
        char hex[5];
        // snprintf: explicit bound instead of sprintf (a 16-bit word always
        // fits "%04x", but keep the write provably in-bounds).
        snprintf(hex, sizeof(hex), "%04x", channelsMask[i]);
        newMask.concat(hex);
    }
    DBG("### Newmask: ", newMask);
    return sendMask(newMask);
}
// Replace the cached mask words (does not talk to the modem).
void setMask(uint16_t newMask[6]){
for (int i = 0; i < 6; i++)
channelsMask[i] = newMask[i];
}
// Transmit an already-serialized hex mask string to the modem.
bool sendMask(String newMask) {
return setValue(GF(AT_CHANMASK), newMask);
}
// Ask the modem to switch its UART baud rate (no response expected here).
void setBaud(unsigned long baud) {
sendAT(GF(AT_UART), baud);
}
// Ping the modem with an empty "AT" until it answers OK or 'timeout' ms pass.
// Returns true as soon as an OK is seen.
bool autoBaud(unsigned long timeout = 10000L) {
    const unsigned long started = millis();
    while (millis() - started < timeout) {
        sendAT(GF(""));
        const bool got_ok = (waitResponse(200) == 1);
        // Brief pause between probes (and after success) to let the
        // modem settle.
        delay(100);
        if (got_ok) {
            return true;
        }
    }
    return false;
}
// Query device name and firmware version; caches and returns
// "<device> <version>" in fw_version.
String version() {
fw_version = "";
int ret = 0;
sendAT(GF(AT_DEV), GF(AT_QM));
if ((ret = waitResponse(GF(AT_DEV),GF(LORA_OK))) == 1 || ret == 2) {
fw_version = stream.readStringUntil('\r');
}
sendAT(GF(AT_VER), GF(AT_QM));
if ((ret = waitResponse(GF(AT_VER),GF(LORA_OK))) == 1 || ret == 2) {
fw_version += " " + stream.readStringUntil('\r');
}
return fw_version;
}
// Read the device EUI from the modem.
String deviceEUI() {
return getStringValue(GF(AT_DEUI));
}
// Drain and process any unsolicited modem output (e.g. downlinks).
void maintain() {
while (stream.available()) {
waitResponse(100);
}
}
// Set the minimum spacing between poll() uplinks, in seconds.
void minPollInterval(unsigned long secs) {
pollInterval = secs * 1000;
}
// Send an empty unconfirmed uplink to open receive windows for pending
// downlinks; rate-limited by pollInterval. Returns 0 when skipped.
int poll() {
if (millis() - lastPollTime < pollInterval) return 0;
lastPollTime = millis();
// simply trigger a send with no payload (no confirmation required)
uint8_t dummy = 0;
return modemSend(&dummy, 1, false);
}
// Restore the modem's factory configuration.
bool factoryDefault() {
sendAT(GF(AT_FACNEW)); // Factory
return waitResponse() == 1;
}
/*
 * Power functions
 */
// Soft-reset the modem and re-run init(); waits for the reset event URC.
bool restart() {
if (!autoBaud()) {
return false;
}
sendAT(GF(AT_RESET));
if (waitResponse(10000L, GF(AT_EVENT) GF(AT_EQ) "0,0") != 1) {
return false;
}
delay(1000);
return init();
}
bool power(_rf_mode mode, uint8_t transmitPower) { // transmitPower can be between 0 and 5
sendAT(GF(AT_TXP), GF(AT_EQ), mode, ",", transmitPower);
return (waitResponse() == 1);
}
// Read back the configured transmit power index.
int getPower() {
return (int)getIntValue(GF(AT_TXP));
}
#ifdef SerialLoRa
// Sends the modem into dumb mode, so the Semtech chip can be controlled directly
// The only way to exit this mode is through a begin()
void dumb() {
// Release the UART and drive the IRQ pin low to request pass-through mode.
SerialLoRa.end();
pinMode(LORA_IRQ_DUMB, OUTPUT);
digitalWrite(LORA_IRQ_DUMB, LOW);
// Hardware reset
pinMode(LORA_BOOT0, OUTPUT);
digitalWrite(LORA_BOOT0, LOW);
pinMode(LORA_RESET, OUTPUT);
digitalWrite(LORA_RESET, HIGH);
delay(200);
digitalWrite(LORA_RESET, LOW);
delay(200);
digitalWrite(LORA_RESET, HIGH);
delay(50);
// You can now use SPI1 and LORA_IRQ_DUMB as CS to interface with the chip
}
#endif
// Enable/disable regional duty-cycle enforcement.
bool dutyCycle(bool on) {
return setValue(GF(AT_DCS), on);
}
// Set the application port used for uplinks.
bool setPort(uint8_t port) {
return setValue(GF(AT_PORT), port);
}
// Port of the most recently received downlink (set by waitResponse).
uint8_t getDownlinkPort(){
return downlinkPort;
}
// Select public vs private LoRaWAN sync word.
bool publicNetwork(bool publicNetwork) {
return setValue(GF(AT_PNM), publicNetwork);
}
// Put the modem to sleep (or wake it with on=false).
bool sleep(bool on = true) {
return setValue(GF(AT_SLEEP), on);
}
// Select payload encoding: tracks the modem setting in formatBin
// (binary/hex vs plain), which modemSend uses to size the AT command.
bool format(bool mode) {
if (setValue(GF(AT_FORMAT), mode)){
formatBin = mode;
return true;
}
return false;
}
/*
DataRate Modulation SF BW bit/s
0 LoRa 12 125 250
1 LoRa 11 125 440
2 LoRa 10 125 980
3 LoRa 9 125 1'760
4 LoRa 8 125 3'125
5 LoRa 7 125 5'470
6 LoRa 7 250 11'000
*/
// Set the uplink data rate (see DR table above); refreshes the cached
// maximum payload size, which changes with the data rate.
bool dataRate(uint8_t dr) {
if (setValue(GF(AT_DR), dr)){
(void)modemGetMaxSize();
return true;
}
return false;
}
int getDataRate() {
return (int)getIntValue(GF(AT_DR));
}
// Enable/disable Adaptive Data Rate; tracked locally in 'adr' so
// modemSend knows to re-query the max payload size before each uplink.
bool setADR(bool nadr) {
if (setValue(GF(AT_ADR), nadr)){
adr = nadr;
return true;
}
return false;
}
int getADR() {
return (int)getIntValue(GF(AT_ADR));
}
/*
* setTxConfirmed: Set confirmation (ACK) request status for send/sendB
*
* Arguments: Boolean indicating request on/off
*
* Returns: true if successful
*/
bool setTxConfirmed(bool cfm) {
return setValue(GF(AT_CFM), cfm);
}
/*
* send(B): default built-in sends for HEX or Binary (B) TX up-to 120/59 Bytes
*
* Parameters: pointer to Buffer
* total length of buffer
*
* Returns : true if send was successful
*
* Notes: Uses selected confirmed/unconfirmed option of setTxConfirmed()
* the buffer must start with the application port followed by a colon,
* eg. "24:a2e3d..."
*/
// Raw "AT+SEND <port>:<payload>" uplink; 'buff' must already start with
// the application port and a colon (see comment block above).
// Returns true when the modem acknowledged the command.
bool send(const void* buff, size_t len){
    // The modem command buffer is 128 bytes; 'AT+SEND ' uses 8 of them.
    // Older compat-mode firmware only accepts 56 payload characters.
    if ((!compat_mode && len > 120) // 128 - 'AT+SEND '
        || (compat_mode && len > 56))
        return false;
    sendAT(GF(AT_SEND), " ");
    // const-correct cast: 'buff' is read-only and Print::write takes
    // a const uint8_t* (the original C-style cast silently dropped const).
    stream.write((const uint8_t*)buff, len);
    return (waitResponse() == 1);
}
// Binary variant ("AT+SENDB"); command prefix is one char longer, so the
// payload limits drop to 119/55.
bool sendB(const void* buff, size_t len){
    if ((!compat_mode && len > 119) // 128 - 'AT+SENDB '
        || (compat_mode && len > 55))
        return false;
    sendAT(GF(AT_SENDB), " ");
    stream.write((const uint8_t*)buff, len);
    return (waitResponse() == 1);
}
// ABP session parameters, read back from the modem.
String getDevAddr() {
return getStringValue(GF(AT_DADDR));
}
String getNwkSKey() {
return getStringValue(GF(AT_NWKSKEY));
}
String getAppSKey() {
return getStringValue(GF(AT_APPSKEY));
}
// RX2 window data rate and frequency (Hz).
int getRX2DR() {
return (int)getIntValue(GF(AT_RX2DR));
}
bool setRX2DR(uint8_t dr) {
return setValue(GF(AT_RX2DR),dr);
}
uint32_t getRX2Freq() {
return getUIntValue(GF(AT_RX2FQ));
}
bool setRX2Freq(uint32_t freq) {
return setValue(GF(AT_RX2FQ),freq);
}
// LoRaWAN frame counters: uplink (FCU) and downlink (FCD).
bool setFCU(uint32_t fcu) {
return setValue(GF(AT_FCU), fcu);
}
uint32_t getFCU() {
return getUIntValue(GF(AT_FCU));
}
bool setFCD(uint32_t fcd) {
return setValue(GF(AT_FCD), fcd);
}
uint32_t getFCD() {
return getUIntValue(GF(AT_FCD));
}
// Link quality of the last received frame.
int getRSSI() {
return (int)getIntValue(GF(AT_RSSI));
}
int getSNR() {
return (int)getIntValue(GF(AT_SNR));
}
// True when the last confirmed uplink was acknowledged.
bool getMsgConfirmed() {
return getIntValue(GF(AT_CFS)) == 1;
}
// True when the modem reports a joined network session.
bool getJoinStatus() {
return (getIntValue(GF(AT_NJS)));
}
private:
// True when the modem runs the Arduino MKRWAN firmware build.
bool isArduinoFW() {
return (fw_version.indexOf(ARDUINO_FW_IDENTIFIER) >= 0);
}
// Compares the cached version against the reference release; also sets
// compat_mode when the firmware predates the extended AT syntax.
bool isLatestFW() {
compat_mode = (fw_version.compareTo(ARDUINO_FW_VERSION_AT) < 0);
return (fw_version == ARDUINO_FW_VERSION);
}
// Switch activation mode (OTAA/ABP) on the modem.
bool changeMode(_lora_mode mode) {
return setValue(GF(AT_NJM), mode);
}
// Issue AT+JOIN and wait up to 'timeout' ms for the "joined" event URC.
bool join(uint32_t timeout) {
sendAT(GF(AT_JOIN));
sendAT();
if (waitResponse(timeout, GF(AT_EVENT) GF(AT_EQ) "1,1") != 1) {
return false;
}
// Consume the rest of the event line.
(void)streamSkipUntil('\r');
return true;
}
// Map a credential enum to its AT command and store 'value' in the modem.
bool set(_lora_property prop, const char* value) {
switch (prop) {
case APP_EUI:
return setValue(GF(AT_APPEUI), value);
case APP_KEY:
return setValue(GF(AT_APPKEY), value);
case DEV_EUI:
return setValue(GF(AT_DEUI), value);
case DEV_ADDR:
return setValue(GF(AT_DADDR), value);
case NWKS_KEY:
return setValue(GF(AT_NWKSKEY), value);
case NWK_ID:
return setValue(GF(AT_NWKID), value);
case APPS_KEY:
return setValue(GF(AT_APPSKEY), value);
default:
return false;
}
}
/**
* @brief transmit uplink
*
* @param buff data to transmit
* @param len length of the buffer
* @param confirmed true = transmit confirmed uplink
* @return int a positive number indicate success and is the number of bytes transmitted
* -1 indicates a timeout error
* -2 indicates LORA_ERROR
* -3 indicates LORA_ERROR_PARAM
* -4 indicates LORA_ERROR_BUSY
* -5 indicates LORA_ERROR_OVERFLOW
* -6 indicates LORA_ERROR_NO_NETWORK
* -7 indicates LORA_ERROR_RX
* -8 indicates LORA_ERROR_UNKNOWN
* -20 packet exceeds max length
*
*/
int modemSend(const void* buff, size_t len, bool confirmed) {
// With ADR the max payload tracks the (modem-chosen) data rate, so
// refresh it before the length check.
if (adr)
(void)modemGetMaxSize();
if (len > msize) {
return -20;
}
// Announce the payload length: in binary/hex mode each byte becomes
// two hex characters on the wire.
if (confirmed) {
sendAT(GF(AT_CTX), " ", formatBin ? len*2 : len);
} else {
sendAT(GF(AT_UTX), " ", formatBin ? len*2 : len);
}
if (formatBin){
// Stream the payload as uppercase hex, one nibble at a time.
unsigned char * pin = (uint8_t *)buff;
const char * hex = "0123456789ABCDEF";
for(; pin < (uint8_t *)buff+len; pin++){
stream.write(hex[(*pin>>4) & 0xF]);
stream.write(hex[ *pin & 0xF]);
}
}
else
stream.write((uint8_t*)buff, len);
// Map the modem reply onto the documented return codes (see above).
int8_t rc = waitResponse();
if (rc == 1) { ///< OK
return len;
} else if ( rc > 1 ) { ///< LORA ERROR
return -rc;
} else { ///< timeout
return -1;
}
}
// Query the maximum uplink payload size and cache it in msize.
// Compat-mode Arduino firmware lacks the query: use the fixed constant.
// Returns 0 (and leaves msize unchanged) when the query fails.
size_t modemGetMaxSize() {
if (compat_mode && isArduinoFW()) {
return ARDUINO_LORA_MAXBUFF;
}
int size = getIntValue(GF(AT_MSIZE));
if (size > 0){
msize = (size_t)size;
return msize;
}
return 0;
}
/* Utilities */
// Variadic print helpers: write each argument to the modem stream in order.
template<typename T>
void streamWrite(T last) {
stream.print(last);
}
template<typename T, typename... Args>
void streamWrite(T head, Args... tail) {
stream.print(head);
streamWrite(tail...);
}
// Discard incoming bytes until 'c' is seen; false on timeout.
bool streamSkipUntil(char c, unsigned long timeout = 1000L) {
unsigned long startMillis = millis();
do {
if (stream.available()) {
if (stream.read() == c)
return true;
}
} while (millis() - startMillis < timeout);
return false;
}
// Emit "AT<cmd...>" followed by the line terminator and flush.
template<typename... Args>
void sendAT(Args... cmd) {
streamWrite("AT", cmd..., LORA_NL);
stream.flush();
YIELD();
DBG("### AT:", cmd...);
}
// Decode a single ASCII hex digit (0-9, A-F, a-f) to its value.
// Any non-hex character maps to 0, matching the lenient original contract.
int char2int(char input)
{
    int value = -1;
    if (input >= '0' && input <= '9')
        value = input - '0';
    else if (input >= 'A' && input <= 'F')
        value = 10 + (input - 'A');
    else if (input >= 'a' && input <= 'f')
        value = 10 + (input - 'a');
    return (value < 0) ? 0 : value;
}
void populateChannelsMask(){
//Populate channelsMask array
int max_retry = 3;
for (int retry = 0; retry < max_retry; retry++) {
String mask = getChannelMask();
if (mask != "0") {
break;
}
}
}
/**
* @brief wait for a response from the modem.
*
* @param timeout the time in milliseconds to wait for a response
* @param data a string containing the response to wait for
* @param r1 response string defaults to LORA_OK
* @param r2 response string defaults to LORA_ERROR
* @param r3 response string defaults to LORA_ERROR_PARAM
* @param r4 response string defaults to LORA_ERROR_BUSY
* @param r5 response string defaults to LORA_ERROR_OVERFLOW
* @param r6 response string defaults to LORA_ERROR_NO_NETWORK
* @param r7 response string defaults to LORA_ERROR_RX
* @param r8 response string defaults to LORA_ERROR_UNKNOWN
* @return int8_t n if the response = r<n>
* -1 if timeout
*/
int8_t waitResponse(uint32_t timeout, String& data,
ConstStr r1=GFP(LORA_OK), ConstStr r2=GFP(LORA_ERROR),
ConstStr r3=GFP(LORA_ERROR_PARAM), ConstStr r4=GFP(LORA_ERROR_BUSY), ConstStr r5=GFP(LORA_ERROR_OVERFLOW),
ConstStr r6=GFP(LORA_ERROR_NO_NETWORK), ConstStr r7=GFP(LORA_ERROR_RX), ConstStr r8=GFP(LORA_ERROR_UNKNOWN))
{
data.reserve(msize);
int8_t index = -1;
int length = 0;
// 'a' holds the last peeked byte; stays -1 until something is read,
// which the finish path uses to detect a lockup/timeout.
int a = -1;
unsigned long startMillis = millis();
redo:
do {
YIELD();
while (stream.available() > 0) {
a = stream.peek();
if (a < 0) continue;
// Token boundaries: '=', '\r', or a '+' starting a new command
// after some accumulated text. Only then match against r1..r8.
if (a == '=' || a == '\r'
|| (a == '+' && data.length() > 0)) {
DBG("### Data string:", data);
if (r1 && data.endsWith(r1)) {
index = 1;
goto finish;
} else if (r2 && data.endsWith(r2)) {
index = 2;
goto finish;
} else if (r3 && data.endsWith(r3)) {
index = 3;
goto finish;
} else if (r4 && data.endsWith(r4)) {
index = 4;
goto finish;
} else if (r5 && data.endsWith(r5)) {
index = 5;
goto finish;
} else if (r6 && data.endsWith(r6)) {
index = 6;
goto finish;
} else if (r7 && data.endsWith(r7)) {
index = 7;
goto finish;
} else if (r8 && data.endsWith(r8)) {
index = 8;
goto finish;
} else if ((data.endsWith(GF(AT_RECV))
|| data.endsWith(GF(AT_RECVB))) && a == '=') {
// Unsolicited downlink: "+RECV(B)=<port>,<length>" followed
// by two blank lines and the payload.
downlinkPort = stream.readStringUntil(',').toInt();
length = stream.readStringUntil('\r').toInt();
(void)streamSkipUntil('\n');
(void)streamSkipUntil('\n');
if ((uint16_t)length >= msize){
DBG("### Data string too long:", data);
data = "";
length = 0;
continue;
}
if (data.endsWith(GF(AT_RECVB))){ // Binary receive
// Two hex characters per payload byte: high nibble first.
char Hi = 0;
for (int i = 0; i < length*2;) {
if (stream.available()) {
if (!(i%2))
Hi=char2int(stream.read()) * 0x10;
else
rx.put(char2int(stream.read()) + Hi);
i++;
}
}
}
else // String receive
for (int i = 0; i < length;) {
if (stream.available()) {
rx.put(stream.read());
i++;
}
}
data = "";
length = 0;
continue;
}
}
// Ordinary character: accumulate and bound the scratch string.
data += (char)stream.read();
length++;
if ((uint16_t)length >= msize){
DBG("### Data string too long:", data);
return index;
}
}
} while (millis() - startMillis < timeout);
finish:
if (a < 0){ // == Lockup Timeout
// Nothing was ever read: nudge the modem with an empty AT and,
// if data then appears, restart the scan exactly once (a == -2).
DBG("### Timeout..", data);
sendAT(GF(""));
YIELD();
if (a == -1 && stream.available()){
a--; // attempt 2
startMillis = millis();
goto redo;
}
}
else if (a != '+') // no follow-up command, get terminator from buffer
(void)stream.read();
if (index == -1) {
data.trim();
if (data.length()) {
DBG("### Unhandled:", data);
}
data = "";
}
return index;
}
// Convenience overload: discard the accumulated data string.
int8_t waitResponse(uint32_t timeout,
ConstStr r1=GFP(LORA_OK), ConstStr r2=GFP(LORA_ERROR),
ConstStr r3=GFP(LORA_ERROR_PARAM), ConstStr r4=GFP(LORA_ERROR_BUSY), ConstStr r5=GFP(LORA_ERROR_OVERFLOW),
ConstStr r6=GFP(LORA_ERROR_NO_NETWORK), ConstStr r7=GFP(LORA_ERROR_RX), ConstStr r8=GFP(LORA_ERROR_UNKNOWN))
{
String data;
return waitResponse(timeout, data, r1, r2, r3, r4, r5, r6, r7, r8);
}
// Convenience overload: default 1-second timeout.
int8_t waitResponse(ConstStr r1=GFP(LORA_OK), ConstStr r2=GFP(LORA_ERROR),
ConstStr r3=GFP(LORA_ERROR_PARAM), ConstStr r4=GFP(LORA_ERROR_BUSY), ConstStr r5=GFP(LORA_ERROR_OVERFLOW),
ConstStr r6=GFP(LORA_ERROR_NO_NETWORK), ConstStr r7=GFP(LORA_ERROR_RX), ConstStr r8=GFP(LORA_ERROR_UNKNOWN))
{
return waitResponse(1000, r1, r2, r3, r4, r5, r6, r7, r8);
}
// Query "<cmd>?" and return the raw reply text ("" on failure).
// Newer firmware echoes the command before the value; compat-mode
// firmware answers bare, hence the two waitResponse forms.
String getStringValue(ConstStr cmd){
    sendAT(cmd, GF(AT_QM));
    String value = "";
    const bool ok = compat_mode ? (waitResponse() == 1)
                                : (waitResponse(cmd) == 1);
    if (ok) {
        value = stream.readStringUntil('\r');
    }
    return value;
}
// Query "<cmd>?" and parse the reply as a signed integer (-1 on failure).
int32_t getIntValue(ConstStr cmd){
    sendAT(cmd, GF(AT_QM));
    int32_t value = -1;
    const bool ok = compat_mode ? (waitResponse() == 1)
                                : (waitResponse(cmd) == 1);
    if (ok) {
        value = stream.readStringUntil('\r').toInt();
    }
    return value;
}
// Query "<cmd>?" and parse the reply as an unsigned integer (0 on failure).
uint32_t getUIntValue(ConstStr cmd){
    sendAT(cmd, GF(AT_QM));
    uint32_t value = 0;
    const bool ok = compat_mode ? (waitResponse() == 1)
                                : (waitResponse(cmd) == 1);
    if (ok) {
        value = strtoul(stream.readStringUntil('\r').c_str(), NULL, 10);
    }
    return value;
}
// Issue "<cmd>=<value>" and report whether the modem answered OK.
template<typename T, typename U>
bool setValue(T cmd, U value) {
    sendAT(cmd, GF(AT_EQ), value);
    return (waitResponse() == 1);
}
};
|
flhofer/I4S_LoRaWanTest
|
main.h
|
<reponame>flhofer/I4S_LoRaWanTest
/*
* main.h
*
* Created on: May 25, 2020
* Author: <NAME>
*/
#ifndef _MAIN_H_
#define _MAIN_H_
#include "Arduino.h"
// Serial connection definition
#define debugSerial SerialUSB // USB Serial
#define loraSerial SerialLoRa // Hardware serial
#define LORA_DEBUG debugSerial
#define MICROVER "MKRWAN_1.0V"
//global variable declarations
extern int debug; // Global flag Debug console attached (PC)
//Do not add code below this line
#endif /* _MAIN_H_ */
|
flhofer/I4S_LoRaWanTest
|
LoRaMgmt.h
|
/*
* LoRaMgmt.h
*
* Created on: Nov 3, 2020
* Author: <NAME>
*/
#ifndef LORAMGMT_H_
#define LORAMGMT_H_
#include <stdint.h>
#include "main.h"
// Select frequency plan between TTN_FP_EU868 or TTN_FP_US915
#define CM_OTAA 1 // LORAWAN use OTAA join instead of ABP
#define CM_DTYCL 2 // LORAWAN enable duty cycle
#define CM_RJN 4 // LORAWAN rejoin if failed
#define CM_UCNF 8 // LORAWAN use unconfirmed messages
#define CM_IQINV 1 // LORA use inverted q signal
#define CM_CRC 2 // LORA use CRC
#define CM_EXHDR 4 // LORA use explicit header
#define CM_NPBLK 16 // LORAWAN & LORA use Private network
#define CM_RSTMDM 32 // LORAWAN $ LORA reset modem after each test
/**
* LoRa(Wan) Configuration
*/
// Test configuration. The unions below overlay LoRaWAN (OTAA/ABP) and raw
// LoRa/FSK parameters in the same storage; which member is valid depends
// on 'mode' and 'confMsk' — assumed from field comments, confirm with callers.
typedef struct
{
uint8_t mode = 0; // test mode = 0 off, 1 LoRa, 2 LoRaWan, 3 LoRaWan + Remote, 4 LoRaWan Force Join
uint8_t confMsk; // Configuration mask bits
// Common all Modes
uint8_t txPowerTst = 0; // txPower setting for the low power test
uint8_t dataLen = 1; // data length to send over LoRa for a test
uint8_t repeatSend = 5; // number of send repeats
uint16_t rxWindow1 = 1000; // pause duration in ms between tx and rx, default 1 sec
uint16_t rxWindow2 = 2000; // pause duration in ms between tx and rx2 default 2 sec
union { // 16Bit
uint16_t frequency; // LoRa / FSK frequency in 100KHz steps
uint16_t chnMsk; // ChannelMask for LoRaWan EU868 (1-16)
};
union { // 8Bit
uint8_t dataRate = 255; // data rate starting value for LoRaWan, 255 = leave unchanged
uint8_t spreadFactor; // spread factor for Chirp signals FSK/LORA
};
// LoRaWan (OTAA vs ABP) or LoRa/FSK settings
union { // 32Bit
uint8_t bandWidth; // Bandwidth setting for LoRa & FSK TODO: create steps
char * devEui = NULL; // Device EUI OTAA
char * devAddr; // Device Address ABP
};
union { // 32Bit
uint8_t codeRate; // Code rate for LoRa & FSK, in 4/
char * appEui = NULL; // App EUI OTAA
char * nwkSKey; // Nw Session key ABP
};
union { // 32Bit
int32_t preamble; // preamble length
char * appKey = NULL; // App KEY OTAA
char * appSKey; // App Session key ABP
};
// uint32_t NetworkID; /*< Network ID */
// function pointers / callback for runtime. < 0 = error, 0 = busy, 1 = done, 2 = stop
int (*prep)() = NULL;
int (*start)() = NULL;
int (*run)() = NULL;
} sLoRaConfiguration_t;
//add your function definitions for the project LoRaWanTest here
// global types
// Per-run measurement record filled in by the LoRa management layer.
// NOTE(review): the type name keeps the historical "Resutls" spelling;
// renaming would break every declared caller below.
typedef struct {
uint32_t txCount; // transmission counter
uint32_t testTime; // total test time for this run
uint32_t timeTx; // time for TX
uint32_t timeRx; // time for RX
uint32_t timeToRx; // total time until response
uint32_t txFrq; // current used frequency
uint16_t chnMsk; // Concluding channel mask
uint8_t lastCR; // Coding rate 4/x
uint8_t txDR; // Tx data rate
int8_t txPwr; // Tx power index used
int8_t rxRssi; // last rx RSSI, default -128
int8_t rxSnr; // last rx SNR, default -128
} sLoRaResutls_t;
void LoRaMgmtMain();
int LoRaMgmtSetup(const sLoRaConfiguration_t * conf, sLoRaResutls_t * const result);
int LoRaMgmtJoin();
int LoRaMgmtSend();
int LoRaMgmtSendDumb();
int LoRaMgmtPoll();
int LoRaMgmtRemote();
int LoRaMgmtGetResults(sLoRaResutls_t ** const res);
const char* LoRaMgmtGetEUI();
int LoRaMgmtUpdt();
int LoRaMgmtRcnf();
#endif /* LORAMGMT_H_ */
|
INetCC/ccbasic
|
src/ccbasic.c
|
/*
* ccbasic - Classic Computer BASIC
* Copyright (C) 2020 <NAME>
*
* Redistributions, modified or unmodified, in whole or in part, must retain
* applicable copyright or other legal privilege notices, these conditions, and
* the following license terms and disclaimer. Subject to these conditions,
* the holder(s) of copyright or other legal privileges, author(s) or
* assembler(s), and contributors of this work hereby grant to any person who
* obtains a copy of this work in any form:
*
* 1. Permission to reproduce, modify, distribute, publish, sell, sublicense,
* use, and/or otherwise deal in the licensed material without restriction.
*
* 2. A perpetual, worldwide, non-exclusive, royalty-free, irrevocable patent
* license to reproduce, modify, distribute, publish, sell, use, and/or
* otherwise deal in the licensed material without restriction, for any and all
* patents:
*
* a. Held by each such holder of copyright or other legal privilege,
* author or assembler, or contributor, necessarily infringed by the
* contributions alone or by combination with the work, of that privilege
* holder, author or assembler, or contributor.
*
* b. Necessarily infringed by the work at the time that holder of
* copyright or other privilege, author or assembler, or contributor made
* any contribution to the work.
*
* NO WARRANTY OF ANY KIND IS IMPLIED BY, OR SHOULD BE INFERRED FROM, THIS
* LICENSE OR THE ACT OF DISTRIBUTION UNDER THE TERMS OF THIS LICENSE,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
* A PARTICULAR PURPOSE, AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS,
* ASSEMBLERS, OR HOLDERS OF COPYRIGHT OR OTHER LEGAL PRIVILEGE BE LIABLE FOR
* ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN ACTION OF CONTRACT, TORT,
* OR OTHERWISE ARISING FROM, OUT OF, OR IN CONNECTION WITH THE WORK OR THE USE
* OF OR OTHER DEALINGS IN THE WORK.
*/
/* Entry point. The interpreter is not implemented in this file; this stub
   simply reports success so the build produces a runnable binary. */
int
main(void)
{
return 0;
}
|
DrItanium/durandal
|
lib/maya/init.c
|
/*
maya
Copyright (c) 2012-2013, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "clips.h"
#include "maya/libmaya.h"
/* Register every compiled-in maya extension's functions with the given
   CLIPS environment; each group is gated by its build-time flag. */
void MayaDefinitions(void* theEnv) {
/* Insert declarations here */
#if FILE_SYSTEM_ROOTING
DefineFSOverrideFunctions(theEnv);
#endif
#if ARCHITECTURE_IDENTIFICATION
ArchitectureDetectionFunctionDefinitions(theEnv);
#endif /* ARCHITECTURE_IDENTIFICATION */
#if BINARY_LOGICAL_OPERATIONS
BinaryOperationsFunctionDefinitions(theEnv);
#endif /* BINARY_LOGICAL_OPERATIONS */
}
/* Print the ON/OFF state of each maya extension to the display router,
   mirroring CLIPS's own options banner. No-op unless MAYA_EXTENSIONS. */
void MayaOptions(void* theEnv) {
#if MAYA_EXTENSIONS
EnvPrintRouter(theEnv,WDISPLAY,"Architecture identification is ");
#if ARCHITECTURE_IDENTIFICATION
EnvPrintRouter(theEnv,WDISPLAY,"ON\n");
#else
EnvPrintRouter(theEnv,WDISPLAY,"OFF\n");
#endif
EnvPrintRouter(theEnv,WDISPLAY, "Binary logical operators are ");
#if BINARY_LOGICAL_OPERATIONS
EnvPrintRouter(theEnv,WDISPLAY,"ON\n");
#else
EnvPrintRouter(theEnv,WDISPLAY,"OFF\n");
#endif
EnvPrintRouter(theEnv,WDISPLAY, "File system rooting is ");
#if FILE_SYSTEM_ROOTING
EnvPrintRouter(theEnv,WDISPLAY,"ON\n");
#else
EnvPrintRouter(theEnv,WDISPLAY,"OFF\n");
#endif
#endif /* MAYA_EXTENSIONS */
}
|
DrItanium/durandal
|
lib/electron/strngrtr.c
|
<reponame>DrItanium/durandal<filename>lib/electron/strngrtr.c
/*******************************************************/
/* "C" Language Integrated Production System */
/* */
/* CLIPS Version 6.30 08/16/14 */
/* */
/* STRING I/O ROUTER MODULE */
/*******************************************************/
/*************************************************************/
/* Purpose: I/O Router routines which allow strings to be */
/* used as input and output sources. */
/* */
/* Principal Programmer(s): */
/* <NAME> */
/* */
/* Contributing Programmer(s): */
/* <NAME> */
/* */
/* Revision History: */
/* */
/* 6.30: Used genstrcpy instead of strcpy. */
/* */
/* Removed conditional code for unsupported */
/* compilers/operating systems (IBM_MCW, */
/* MAC_MCW, and IBM_TBC). */
/* */
/* Changed integer type/precision. */
/* */
/* Added const qualifiers to remove C++ */
/* deprecation warnings. */
/* */
/*************************************************************/
#define _STRNGRTR_SOURCE_
#include <stdio.h>
#define _STDIO_INCLUDED_
#include <stdlib.h>
#include <string.h>
#include "setup.h"
#include "constant.h"
#include "envrnmnt.h"
#include "memalloc.h"
#include "router.h"
#include "sysdep.h"
#include "strngrtr.h"
#define READ_STRING 0
#define WRITE_STRING 1
/***************************************/
/* LOCAL INTERNAL FUNCTION DEFINITIONS */
/***************************************/
static int FindString(void *,const char *);
static int PrintString(void *,const char *,const char *);
static int GetcString(void *,const char *);
static int UngetcString(void *,int,const char *);
static struct stringRouter *FindStringRouter(void *,const char *);
static int CreateReadStringSource(void *,const char *,const char *,size_t,size_t);
static void DeallocateStringRouterData(void *);
/**********************************************************/
/* InitializeStringRouter: Initializes string I/O router. */
/**********************************************************/
/* Allocate the string-router environment data and register the "string"
   router with its find/print/getc/ungetc callbacks. */
globle void InitializeStringRouter(
void *theEnv)
{
AllocateEnvironmentData(theEnv,STRING_ROUTER_DATA,sizeof(struct stringRouterData),DeallocateStringRouterData);
EnvAddRouter(theEnv,"string",0,FindString,PrintString,GetcString,UngetcString,NULL);
}
/*******************************************/
/* DeallocateStringRouterData: Deallocates */
/* environment data for string routers. */
/*******************************************/
/* Environment teardown hook: free every string router still on the list,
   releasing the copied name before the node itself. */
static void DeallocateStringRouterData(
void *theEnv)
{
struct stringRouter *tmpPtr, *nextPtr;
tmpPtr = StringRouterData(theEnv)->ListOfStringRouters;
while (tmpPtr != NULL)
{
nextPtr = tmpPtr->next;
rm(theEnv,(void *) tmpPtr->name,strlen(tmpPtr->name) + 1);
rtn_struct(theEnv,stringRouter,tmpPtr);
tmpPtr = nextPtr;
}
}
/*************************************************************/
/* FindString: Find routine for string router logical names. */
/*************************************************************/
static int FindString(
  void *theEnv,
  const char *fileid)
  {
   struct stringRouter *currentPtr;

   /* Scan the router list for a matching logical name. */
   for (currentPtr = StringRouterData(theEnv)->ListOfStringRouters;
        currentPtr != NULL;
        currentPtr = currentPtr->next)
     {
      if (strcmp(currentPtr->name,fileid) == 0)
        { return(TRUE); }
     }

   return(FALSE);
  }
/**************************************************/
/* PrintString: Print routine for string routers. */
/**************************************************/
/* Append 'str' to the named write router's buffer, truncating at the
   buffer limit. Exits the environment if the router cannot be found.
   NOTE(review): on truncation genstrncpy may leave the buffer without a
   terminator and currentPosition still advances by the full strlen(str);
   the early-out guard prevents overflow, but confirm the accounting. */
static int PrintString(
void *theEnv,
const char *logicalName,
const char *str)
{
struct stringRouter *head;
head = FindStringRouter(theEnv,logicalName);
if (head == NULL)
{
SystemError(theEnv,"ROUTER",3);
EnvExitRouter(theEnv,EXIT_FAILURE);
}
if (head->readWriteType != WRITE_STRING) return(1);
if (head->maximumPosition == 0) return(1);
if ((head->currentPosition + 1) >= head->maximumPosition) return(1);
genstrncpy(&head->writeString[head->currentPosition],
str,(STD_SIZE) (head->maximumPosition - head->currentPosition) - 1);
head->currentPosition += strlen(str);
return(1);
}
/************************************************/
/* GetcString: Getc routine for string routers. */
/************************************************/
/* Return the next character from the named read router, or EOF when the
   source is exhausted (position still advances past the end so that a
   later ungetc can back up correctly). */
static int GetcString(
void *theEnv,
const char *logicalName)
{
struct stringRouter *head;
int rc;
head = FindStringRouter(theEnv,logicalName);
if (head == NULL)
{
SystemError(theEnv,"ROUTER",1);
EnvExitRouter(theEnv,EXIT_FAILURE);
}
if (head->readWriteType != READ_STRING) return(EOF);
if (head->currentPosition >= head->maximumPosition)
{
head->currentPosition++;
return(EOF);
}
/* Cast through unsigned char so bytes >= 0x80 are not returned negative. */
rc = (unsigned char) head->readString[head->currentPosition];
head->currentPosition++;
return(rc);
}
/****************************************************/
/* UngetcString: Ungetc routine for string routers. */
/****************************************************/
/* Push back one character by rewinding the read position (the character
   value 'ch' itself is ignored; the source string is immutable). */
static int UngetcString(
void *theEnv,
int ch,
const char *logicalName)
{
struct stringRouter *head;
head = FindStringRouter(theEnv,logicalName);
if (head == NULL)
{
SystemError(theEnv,"ROUTER",2);
EnvExitRouter(theEnv,EXIT_FAILURE);
}
if (head->readWriteType != READ_STRING) return(0);
if (head->currentPosition > 0)
{ head->currentPosition--; }
return(1);
}
/************************************************/
/* OpenStringSource: Opens a new string router. */
/************************************************/
/* Open a read router over a NUL-terminated string; 'currentPosition' is
   the starting offset. Returns 0 if the name is already in use. */
globle int OpenStringSource(
void *theEnv,
const char *name,
const char *str,
size_t currentPosition)
{
size_t maximumPosition;
if (str == NULL)
{
currentPosition = 0;
maximumPosition = 0;
}
else
{ maximumPosition = strlen(str); }
return(CreateReadStringSource(theEnv,name,str,currentPosition,maximumPosition));
}
/******************************************************/
/* OpenTextSource: Opens a new string router for text */
/* (which is not NULL terminated). */
/******************************************************/
/* Open a read router over a text buffer with an explicit length
   (the buffer need not be NUL terminated). */
globle int OpenTextSource(
void *theEnv,
const char *name,
const char *str,
size_t currentPosition,
size_t maximumPosition)
{
if (str == NULL)
{
currentPosition = 0;
maximumPosition = 0;
}
return(CreateReadStringSource(theEnv,name,str,currentPosition,maximumPosition));
}
/******************************************************************/
/* CreateReadStringSource: Creates a new string router for input. */
/******************************************************************/
/* Allocate and link a new read-mode router node. The logical name is
   copied; the source string is referenced, not copied, so it must outlive
   the router. Returns 0 when the name is already registered. */
static int CreateReadStringSource(
void *theEnv,
const char *name,
const char *str,
size_t currentPosition,
size_t maximumPosition)
{
struct stringRouter *newStringRouter;
char *theName;
if (FindStringRouter(theEnv,name) != NULL) return(0);
newStringRouter = get_struct(theEnv,stringRouter);
theName = (char *) gm1(theEnv,strlen(name) + 1);
genstrcpy(theName,name);
newStringRouter->name = theName;
newStringRouter->writeString = NULL;
newStringRouter->readString = str;
newStringRouter->currentPosition = currentPosition;
newStringRouter->readWriteType = READ_STRING;
newStringRouter->maximumPosition = maximumPosition;
/* Push onto the head of the router list. */
newStringRouter->next = StringRouterData(theEnv)->ListOfStringRouters;
StringRouterData(theEnv)->ListOfStringRouters = newStringRouter;
return(1);
}
/**********************************************/
/* CloseStringSource: Closes a string router. */
/**********************************************/
globle int CloseStringSource(
  void *theEnv,
  const char *name)
  {
   struct stringRouter *node, *prev;

   /* Walk the router list looking for the named entry. */
   prev = NULL;
   for (node = StringRouterData(theEnv)->ListOfStringRouters;
        node != NULL;
        prev = node, node = node->next)
     {
      if (strcmp(node->name,name) == 0)
        {
         /* Unlink from the list head or from the predecessor. */
         if (prev == NULL)
           { StringRouterData(theEnv)->ListOfStringRouters = node->next; }
         else
           { prev->next = node->next; }

         /* Release the copied name, then the router node itself. */
         rm(theEnv,(void *) node->name,strlen(node->name) + 1);
         rtn_struct(theEnv,stringRouter,node);
         return(1);
        }
     }

   return(0);
  }
/******************************************************************/
/* OpenStringDestination: Opens a new string router for printing. */
/******************************************************************/
/******************************************************************/
/* OpenStringDestination: Opens a new string router for printing. */
/*   Returns 0 if a router with this name already exists, else 1. */
/******************************************************************/
globle int OpenStringDestination(
  void *theEnv,
  const char *name,
  char *str,
  size_t maximumPosition)
  {
   struct stringRouter *newStringRouter;
   char *theName;

   /* Router names must be unique; refuse duplicates. */
   if (FindStringRouter(theEnv,name) != NULL) return(0);

   newStringRouter = get_struct(theEnv,stringRouter);

   /* Keep a private copy of the name. The (int) cast on strlen that
      was here narrowed size_t and was inconsistent with
      CreateReadStringSource; the allocator takes the size directly. */
   theName = (char *) gm1(theEnv,strlen(name) + 1);
   genstrcpy(theName,name);
   newStringRouter->name = theName;

   newStringRouter->readString = NULL;
   newStringRouter->writeString = str;
   newStringRouter->currentPosition = 0;
   newStringRouter->readWriteType = WRITE_STRING;
   newStringRouter->maximumPosition = maximumPosition;

   /* Push onto the front of the environment's router list. */
   newStringRouter->next = StringRouterData(theEnv)->ListOfStringRouters;
   StringRouterData(theEnv)->ListOfStringRouters = newStringRouter;
   return(1);
  }
/***************************************************/
/* CloseStringDestination: Closes a string router. */
/***************************************************/
globle int CloseStringDestination(
void *theEnv,
const char *name)
{
/* Read and write string routers live on the same list, so closing
an output router is identical to closing an input router. */
return(CloseStringSource(theEnv,name));
}
/*******************************************************************/
/* FindStringRouter: Returns a pointer to the named string router. */
/*******************************************************************/
static struct stringRouter *FindStringRouter(
  void *theEnv,
  const char *name)
  {
   struct stringRouter *node;

   /* Linear scan of the environment's router list by exact name. */
   for (node = StringRouterData(theEnv)->ListOfStringRouters ;
        node != NULL ;
        node = node->next)
     {
      if (strcmp(node->name,name) == 0)
        { return(node); }
     }
   return(NULL);
  }
|
DrItanium/durandal
|
include/pipeline/clips/CLIPSPassGenerator.h
|
#ifndef _clips_pass_generator_h
#define _clips_pass_generator_h
#include "indirect/IndirectPassGenerator.h"
#include "pipeline/clips/CLIPSPassTemplates.h"
using namespace indirect;
namespace pipeline {
namespace clips {
/*
* Use this class to generate indirect passes representing a clips pass
* or a set of clips passes.
*/
// Binds each CLIPS pass type to the corresponding slot of the generic
// indirect pass generator template (module, function, basic block, loop,
// region, machine function, call graph SCC). All behavior comes from the
// base template; this class adds no members of its own.
class CLIPSPassGenerator : public IndirectPassGeneratorTemplate<
CLIPSModulePass,
CLIPSFunctionPass,
CLIPSBasicBlockPass,
CLIPSLoopPass,
CLIPSRegionPass,
CLIPSMachineFunctionPass,
CLIPSCallGraphSCCPass> {
};
}
}
#endif
|
DrItanium/durandal
|
include/pipeline/clips/CLIPSPassHeader.h
|
<filename>include/pipeline/clips/CLIPSPassHeader.h
#ifndef _clips_pass_header_h
#define _clips_pass_header_h
#include "indirect/IndirectPassHeader.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/SmallVector.h"
using namespace indirect;
namespace pipeline {
namespace clips {
/* An extension of the IndirectPassHeader that adds some extra logic
* specific to dynamic CLIPS passes.
*/
class CLIPSPassHeader : public IndirectPassHeader {
private:
std::string passes; // raw pass list string (format not visible here; presumably set via setPasses — confirm in the implementation)
bool needRegions; // whether the described pass requires region analysis
bool needLoops; // whether the described pass requires loop analysis
public:
CLIPSPassHeader();
// Accessors for the region/loop requirement flags.
bool needsRegions();
void setNeedsRegions(bool v);
bool needsLoops();
void setNeedsLoops(bool v);
// Accessors for the pass list string.
const char* getPasses();
void setPasses(const char* passes);
};
}
}
#endif
|
DrItanium/durandal
|
lib/electron/classexm.c
|
/*******************************************************/
/* "C" Language Integrated Production System */
/* */
/* CLIPS Version 6.30 08/16/14 */
/* */
/* CLASS EXAMINATION MODULE */
/*******************************************************/
/**************************************************************/
/* Purpose: Class browsing and examination commands */
/* */
/* Principal Programmer(s): */
/* <NAME> */
/* */
/* Contributing Programmer(s): */
/* */
/* Revision History: */
/* */
/* 6.23: Correction for FalseSymbol/TrueSymbol. DR0859 */
/* */
/* Modified the slot-writablep function to return */
/* FALSE for slots having initialize-only access. */
/* DR0860 */
/* */
/* 6.24: Added allowed-classes slot facet. */
/* */
/* Converted INSTANCE_PATTERN_MATCHING to */
/* DEFRULE_CONSTRUCT. */
/* */
/* Renamed BOOLEAN macro type to intBool. */
/* */
/* The slot-default-value function crashes when no */
/* default exists for a slot (the ?NONE value was */
/* specified). DR0870 */
/* */
/* 6.30: Used %zd for printing size_t arguments. */
/* */
/* Added EnvSlotDefaultP function. */
/* */
/* Borland C (IBM_TBC) and Metrowerks CodeWarrior */
/* (MAC_MCW, IBM_MCW) are no longer supported. */
/* */
/* Used gensprintf and genstrcat instead of */
/* sprintf and strcat. */
/* */
/* Added const qualifiers to remove C++ */
/* deprecation warnings. */
/* */
/* Converted API macros to function calls. */
/* */
/**************************************************************/
/* =========================================
*****************************************
EXTERNAL DEFINITIONS
=========================================
***************************************** */
#include "setup.h"
#if OBJECT_SYSTEM
#include <string.h>
#include "argacces.h"
#include "classcom.h"
#include "classfun.h"
#include "classini.h"
#include "envrnmnt.h"
#include "insfun.h"
#include "memalloc.h"
#include "msgcom.h"
#include "msgfun.h"
#include "router.h"
#include "strngrtr.h"
#include "sysdep.h"
#define _CLASSEXM_SOURCE_
#include "classexm.h"
/* =========================================
*****************************************
INTERNALLY VISIBLE FUNCTION HEADERS
=========================================
***************************************** */
static int CheckTwoClasses(void *,const char *,DEFCLASS **,DEFCLASS **);
static SLOT_DESC *CheckSlotExists(void *,const char *,DEFCLASS **,intBool,intBool);
static SLOT_DESC *LookupSlot(void *,DEFCLASS *,const char *,intBool);
#if DEBUGGING_FUNCTIONS
static DEFCLASS *CheckClass(void *,const char *,const char *);
static const char *GetClassNameArgument(void *,const char *);
static void PrintClassBrowse(void *,const char *,DEFCLASS *,long);
static void DisplaySeparator(void *,const char *,char *,int,int);
static void DisplaySlotBasicInfo(void *,const char *,const char *,const char *,char *,DEFCLASS *);
static intBool PrintSlotSources(void *,const char *,SYMBOL_HN *,PACKED_CLASS_LINKS *,long,int);
static void DisplaySlotConstraintInfo(void *,const char *,const char *,char *,unsigned,DEFCLASS *);
static const char *ConstraintCode(CONSTRAINT_RECORD *,unsigned,unsigned);
#endif
/* =========================================
*****************************************
EXTERNALLY VISIBLE FUNCTIONS
=========================================
***************************************** */
#if DEBUGGING_FUNCTIONS
/****************************************************************
NAME : BrowseClassesCommand
DESCRIPTION : Displays a "graph" of the class hierarchy
INPUTS : None
RETURNS : Nothing useful
SIDE EFFECTS : None
NOTES : Syntax : (browse-classes [<class>])
****************************************************************/
globle void BrowseClassesCommand(
void *theEnv)
{
register DEFCLASS *cls;
/* With no argument, browse the entire hierarchy from the root class. */
if (EnvRtnArgCount(theEnv) == 0)
/* ================================================
Find the OBJECT root class (has no superclasses)
================================================ */
cls = LookupDefclassByMdlOrScope(theEnv,OBJECT_TYPE_NAME);
else
{
DATA_OBJECT tmp;
/* Otherwise the single symbol argument names the class to start from. */
if (EnvArgTypeCheck(theEnv,"browse-classes",1,SYMBOL,&tmp) == FALSE)
return;
cls = LookupDefclassByMdlOrScope(theEnv,DOToString(tmp));
if (cls == NULL)
{
ClassExistError(theEnv,"browse-classes",DOToString(tmp));
return;
}
}
EnvBrowseClasses(theEnv,WDISPLAY,(void *) cls);
}
/****************************************************************
NAME : EnvBrowseClasses
DESCRIPTION : Displays a "graph" of the class hierarchy
INPUTS : 1) The logical name of the output
2) Class pointer
RETURNS : Nothing useful
SIDE EFFECTS : None
NOTES : None
****************************************************************/
globle void EnvBrowseClasses(
void *theEnv,
const char *logicalName,
void *clsptr)
{
PrintClassBrowse(theEnv,logicalName,(DEFCLASS *) clsptr,0);
}
/****************************************************************
NAME : DescribeClassCommand
DESCRIPTION : Displays direct superclasses and
subclasses and the entire precedence
list for a class
INPUTS : None
RETURNS : Nothing useful
SIDE EFFECTS : None
NOTES : Syntax : (describe-class <class-name>)
****************************************************************/
globle void DescribeClassCommand(
  void *theEnv)
  {
   const char *className;
   DEFCLASS *theClass;

   /* Fetch and validate the single class-name argument;
      helper routines report errors on failure. */
   className = GetClassNameArgument(theEnv,"describe-class");
   if (className == NULL) return;

   theClass = CheckClass(theEnv,"describe-class",className);
   if (theClass == NULL) return;

   EnvDescribeClass(theEnv,WDISPLAY,(void *) theClass);
  }
/******************************************************
NAME : EnvDescribeClass
DESCRIPTION : Displays direct superclasses and
subclasses and the entire precedence
list for a class
INPUTS : 1) The logical name of the output
2) Class pointer
RETURNS : Nothing useful
SIDE EFFECTS : None
NOTES : None
******************************************************/
globle void EnvDescribeClass(
void *theEnv,
const char *logicalName,
void *clsptr)
{
DEFCLASS *cls;
char buf[83],
slotNamePrintFormat[12],
overrideMessagePrintFormat[12];
int messageBanner;
long i;
size_t slotNameLength, maxSlotNameLength;
size_t overrideMessageLength, maxOverrideMessageLength;
cls = (DEFCLASS *) clsptr;
/* Opening banner lines. */
DisplaySeparator(theEnv,logicalName,buf,82,'=');
DisplaySeparator(theEnv,logicalName,buf,82,'*');
/* Abstract/concrete summary (plus reactive/non-reactive when rules are built in). */
if (cls->abstract)
EnvPrintRouter(theEnv,logicalName,"Abstract: direct instances of this class cannot be created.\n\n");
else
{
EnvPrintRouter(theEnv,logicalName,"Concrete: direct instances of this class can be created.\n");
#if DEFRULE_CONSTRUCT
if (cls->reactive)
EnvPrintRouter(theEnv,logicalName,"Reactive: direct instances of this class can match defrule patterns.\n\n");
else
EnvPrintRouter(theEnv,logicalName,"Non-reactive: direct instances of this class cannot match defrule patterns.\n\n");
#else
EnvPrintRouter(theEnv,logicalName,"\n");
#endif
}
/* Superclass, precedence, and subclass listings. */
PrintPackedClassLinks(theEnv,logicalName,"Direct Superclasses:",&cls->directSuperclasses);
PrintPackedClassLinks(theEnv,logicalName,"Inheritance Precedence:",&cls->allSuperclasses);
PrintPackedClassLinks(theEnv,logicalName,"Direct Subclasses:",&cls->directSubclasses);
/* Slot table: first compute column widths, then print. */
if (cls->instanceTemplate != NULL)
{
DisplaySeparator(theEnv,logicalName,buf,82,'-');
maxSlotNameLength = 5;
maxOverrideMessageLength = 8;
for (i = 0 ; i < cls->instanceSlotCount ; i++)
{
slotNameLength = strlen(ValueToString(cls->instanceTemplate[i]->slotName->name));
if (slotNameLength > maxSlotNameLength)
maxSlotNameLength = slotNameLength;
if (cls->instanceTemplate[i]->noWrite == 0)
{
overrideMessageLength =
strlen(ValueToString(cls->instanceTemplate[i]->overrideMessage));
if (overrideMessageLength > maxOverrideMessageLength)
maxOverrideMessageLength = overrideMessageLength;
}
}
/* Cap column widths so rows fit the 82-column buffer. */
if (maxSlotNameLength > 16)
maxSlotNameLength = 16;
if (maxOverrideMessageLength > 12)
maxOverrideMessageLength = 12;
/* Build the printf formats; the size_t length modifier differs per
compiler (%Id for MSVC, cast to long for MinGW, %zd elsewhere). */
#if WIN_MVC
gensprintf(slotNamePrintFormat,"%%-%Id.%Ids : ",maxSlotNameLength,maxSlotNameLength);
gensprintf(overrideMessagePrintFormat,"%%-%Id.%Ids ",maxOverrideMessageLength,
maxOverrideMessageLength);
#elif WIN_GCC
gensprintf(slotNamePrintFormat,"%%-%ld.%lds : ",(long) maxSlotNameLength,(long) maxSlotNameLength);
gensprintf(overrideMessagePrintFormat,"%%-%ld.%lds ",(long) maxOverrideMessageLength,
(long) maxOverrideMessageLength);
#else
gensprintf(slotNamePrintFormat,"%%-%zd.%zds : ",maxSlotNameLength,maxSlotNameLength);
gensprintf(overrideMessagePrintFormat,"%%-%zd.%zds ",maxOverrideMessageLength,
maxOverrideMessageLength);
#endif
DisplaySlotBasicInfo(theEnv,logicalName,slotNamePrintFormat,overrideMessagePrintFormat,buf,cls);
EnvPrintRouter(theEnv,logicalName,"\nConstraint information for slots:\n\n");
DisplaySlotConstraintInfo(theEnv,logicalName,slotNamePrintFormat,buf,82,cls);
}
/* Show the handler banner if this class, or any later entry of its
precedence list (index 0 is the class itself), has handlers. */
if (cls->handlerCount > 0)
messageBanner = TRUE;
else
{
messageBanner = FALSE;
for (i = 1 ; i < cls->allSuperclasses.classCount ; i++)
if (cls->allSuperclasses.classArray[i]->handlerCount > 0)
{
messageBanner = TRUE;
break;
}
}
if (messageBanner)
{
DisplaySeparator(theEnv,logicalName,buf,82,'-');
EnvPrintRouter(theEnv,logicalName,"Recognized message-handlers:\n");
DisplayHandlersInLinks(theEnv,logicalName,&cls->allSuperclasses,0);
}
/* Closing banner lines. */
DisplaySeparator(theEnv,logicalName,buf,82,'*');
DisplaySeparator(theEnv,logicalName,buf,82,'=');
}
#endif /* DEBUGGING_FUNCTIONS */
/**********************************************************
NAME : GetCreateAccessorString
DESCRIPTION : Gets a string describing which
accessors are implicitly created
for a slot: R, W, RW or NIL
INPUTS : The slot descriptor
RETURNS : The string description
SIDE EFFECTS : None
NOTES : Used by (describe-class) and (slot-facets)
**********************************************************/
globle const char *GetCreateAccessorString(
  void *vsd)
  {
   SLOT_DESC *sd = (SLOT_DESC *) vsd;

   /* Report which implicit accessors this slot creates:
      both ("RW"), read only ("R"), write only ("W"), or none ("NIL"). */
   if (sd->createReadAccessor)
     { return(sd->createWriteAccessor ? "RW" : "R"); }
   if (sd->createWriteAccessor)
     { return("W"); }
   return("NIL");
  }
/************************************************************
NAME : GetDefclassModuleCommand
DESCRIPTION : Determines to which module a class belongs
INPUTS : None
RETURNS : The symbolic name of the module
SIDE EFFECTS : None
NOTES : H/L Syntax: (defclass-module <class-name>)
************************************************************/
globle void *GetDefclassModuleCommand(
void *theEnv)
{
/* Delegate to the generic construct-module lookup, parameterized
with the defclass construct descriptor. */
return(GetConstructModuleCommand(theEnv,"defclass-module",DefclassData(theEnv)->DefclassConstruct));
}
/*********************************************************************
NAME : SuperclassPCommand
DESCRIPTION : Determines if a class is a superclass of another
INPUTS : None
RETURNS : TRUE if class-1 is a superclass of class-2
SIDE EFFECTS : None
NOTES : H/L Syntax : (superclassp <class-1> <class-2>)
*********************************************************************/
globle intBool SuperclassPCommand(
  void *theEnv)
  {
   DEFCLASS *first, *second;

   /* Both arguments must name classes visible from the current module. */
   if (CheckTwoClasses(theEnv,"superclassp",&first,&second) == FALSE)
     { return(FALSE); }
   return(EnvSuperclassP(theEnv,(void *) first,(void *) second));
  }
/***************************************************
NAME : EnvSuperclassP
DESCRIPTION : Determines if the first class is
a superclass of the other
INPUTS : 1) First class
2) Second class
RETURNS : TRUE if first class is a
superclass of the second,
FALSE otherwise
SIDE EFFECTS : None
NOTES : None
***************************************************/
globle intBool EnvSuperclassP(
  void *theEnv,
  void *firstClass,
  void *secondClass)
  {
   DEFCLASS *candidateSuper = (DEFCLASS *) firstClass;
   DEFCLASS *candidateSub = (DEFCLASS *) secondClass;

   /* A is a superclass of B exactly when B has A among its superclasses. */
   return(HasSuperclass(candidateSub,candidateSuper));
  }
/*********************************************************************
NAME : SubclassPCommand
DESCRIPTION : Determines if a class is a subclass of another
INPUTS : None
RETURNS : TRUE if class-1 is a subclass of class-2
SIDE EFFECTS : None
NOTES : H/L Syntax : (subclassp <class-1> <class-2>)
*********************************************************************/
globle intBool SubclassPCommand(
  void *theEnv)
  {
   DEFCLASS *first, *second;

   /* Both arguments must name classes visible from the current module. */
   if (CheckTwoClasses(theEnv,"subclassp",&first,&second) == FALSE)
     { return(FALSE); }
   return(EnvSubclassP(theEnv,(void *) first,(void *) second));
  }
/***************************************************
NAME : EnvSubclassP
DESCRIPTION : Determines if the first class is
a subclass of the other
INPUTS : 1) First class
2) Second class
RETURNS : TRUE if first class is a
subclass of the second,
FALSE otherwise
SIDE EFFECTS : None
NOTES : None
***************************************************/
globle intBool EnvSubclassP(
  void *theEnv,
  void *firstClass,
  void *secondClass)
  {
   DEFCLASS *candidateSub = (DEFCLASS *) firstClass;
   DEFCLASS *candidateSuper = (DEFCLASS *) secondClass;

   /* A is a subclass of B exactly when A has B among its superclasses. */
   return(HasSuperclass(candidateSub,candidateSuper));
  }
/*********************************************************************
NAME : SlotExistPCommand
DESCRIPTION : Determines if a slot is present in a class
INPUTS : None
RETURNS : TRUE if the slot exists, FALSE otherwise
SIDE EFFECTS : None
NOTES : H/L Syntax : (slot-existp <class> <slot> [inherit])
*********************************************************************/
globle int SlotExistPCommand(
void *theEnv)
{
DEFCLASS *cls;
SLOT_DESC *sd;
int inheritFlag = FALSE;
DATA_OBJECT dobj;
/* Find the slot; its absence is not an error here (existsErrorFlag
FALSE), and inherited slots are accepted for lookup (TRUE). */
sd = CheckSlotExists(theEnv,"slot-existp",&cls,FALSE,TRUE);
if (sd == NULL)
return(FALSE);
/* Optional third argument: the keyword "inherit" makes inherited
slots count as existing. Any other symbol is an error. */
if (EnvRtnArgCount(theEnv) == 3)
{
if (EnvArgTypeCheck(theEnv,"slot-existp",3,SYMBOL,&dobj) == FALSE)
return(FALSE);
if (strcmp(DOToString(dobj),"inherit") != 0)
{
ExpectedTypeError1(theEnv,"slot-existp",3,"keyword \"inherit\"");
SetEvaluationError(theEnv,TRUE);
return(FALSE);
}
inheritFlag = TRUE;
}
/* A slot defined directly in the class always exists; an inherited
slot exists only when "inherit" was given. */
return((sd->cls == cls) ? TRUE : inheritFlag);
}
/***************************************************
NAME : EnvSlotExistP
DESCRIPTION : Determines if a slot exists
INPUTS : 1) The class
2) The slot name
3) A flag indicating if the slot
can be inherited or not
RETURNS : TRUE if slot exists,
FALSE otherwise
SIDE EFFECTS : None
NOTES : None
***************************************************/
globle intBool EnvSlotExistP(
  void *theEnv,
  void *theDefclass,
  const char *slotName,
  intBool inheritFlag)
  {
   /* The slot exists iff a lookup (optionally following
      inheritance) yields a slot descriptor. */
   if (LookupSlot(theEnv,(DEFCLASS *) theDefclass,slotName,inheritFlag) == NULL)
     { return(FALSE); }
   return(TRUE);
  }
/************************************************************************************
NAME : MessageHandlerExistPCommand
DESCRIPTION : Determines if a message-handler is present in a class
INPUTS : None
RETURNS : TRUE if the message header is present, FALSE otherwise
SIDE EFFECTS : None
NOTES : H/L Syntax : (message-handler-existp <class> <hnd> [<type>])
************************************************************************************/
globle int MessageHandlerExistPCommand(
void *theEnv)
{
DEFCLASS *cls;
SYMBOL_HN *mname;
DATA_OBJECT temp;
unsigned mtype = MPRIMARY;
/* Argument 1: the class name (must be visible from the current module). */
if (EnvArgTypeCheck(theEnv,"message-handler-existp",1,SYMBOL,&temp) == FALSE)
return(FALSE);
cls = LookupDefclassByMdlOrScope(theEnv,DOToString(temp));
if (cls == NULL)
{
ClassExistError(theEnv,"message-handler-existp",DOToString(temp));
return(FALSE);
}
/* Argument 2: the message-handler name. */
if (EnvArgTypeCheck(theEnv,"message-handler-existp",2,SYMBOL,&temp) == FALSE)
return(FALSE);
mname = (SYMBOL_HN *) GetValue(temp);
/* Optional argument 3: the handler type; defaults to primary. */
if (EnvRtnArgCount(theEnv) == 3)
{
if (EnvArgTypeCheck(theEnv,"message-handler-existp",3,SYMBOL,&temp) == FALSE)
return(FALSE);
mtype = HandlerType(theEnv,"message-handler-existp",DOToString(temp));
if (mtype == MERROR)
{
SetEvaluationError(theEnv,TRUE);
return(FALSE);
}
}
/* Report whether a handler with that name and type is found on the class. */
if (FindHandlerByAddress(cls,mname,mtype) != NULL)
return(TRUE);
return(FALSE);
}
/**********************************************************************
NAME : SlotWritablePCommand
DESCRIPTION : Determines if an existing slot can be written to
INPUTS : None
RETURNS : TRUE if the slot is writable, FALSE otherwise
SIDE EFFECTS : None
NOTES : H/L Syntax : (slot-writablep <class> <slot>)
**********************************************************************/
globle intBool SlotWritablePCommand(
  void *theEnv)
  {
   DEFCLASS *theDefclass;
   SLOT_DESC *sd;

   sd = CheckSlotExists(theEnv,"slot-writablep",&theDefclass,TRUE,TRUE);
   if (sd == NULL)
     { return(FALSE); }

   /* No-write and initialize-only slots are both considered unwritable. */
   if (sd->noWrite) return(FALSE);
   if (sd->initializeOnly) return(FALSE);
   return(TRUE);
  }
/***************************************************
NAME : EnvSlotWritableP
DESCRIPTION : Determines if a slot is writable
INPUTS : 1) The class
2) The slot name
RETURNS : TRUE if slot is writable,
FALSE otherwise
SIDE EFFECTS : None
NOTES : None
***************************************************/
globle intBool EnvSlotWritableP(
  void *theEnv,
  void *theDefclass,
  const char *slotName)
  {
   SLOT_DESC *sd;

   sd = LookupSlot(theEnv,(DEFCLASS *) theDefclass,slotName,TRUE);
   if (sd == NULL)
     { return(FALSE); }

   /* No-write and initialize-only slots are both considered unwritable. */
   if (sd->noWrite) return(FALSE);
   if (sd->initializeOnly) return(FALSE);
   return(TRUE);
  }
/**********************************************************************
NAME : SlotInitablePCommand
DESCRIPTION : Determines if an existing slot can be initialized
via an init message-handler or slot-override
INPUTS : None
RETURNS : TRUE if the slot is writable, FALSE otherwise
SIDE EFFECTS : None
NOTES : H/L Syntax : (slot-initablep <class> <slot>)
**********************************************************************/
globle intBool SlotInitablePCommand(
  void *theEnv)
  {
   DEFCLASS *theDefclass;
   SLOT_DESC *sd;

   sd = CheckSlotExists(theEnv,"slot-initablep",&theDefclass,TRUE,TRUE);
   if (sd == NULL)
     { return(FALSE); }

   /* A slot is initable unless it is no-write without also being
      initialize-only (De Morgan of the original condition). */
   if ((sd->noWrite == 0) || sd->initializeOnly)
     { return(TRUE); }
   return(FALSE);
  }
/***************************************************
NAME : EnvSlotInitableP
DESCRIPTION : Determines if a slot is initable
INPUTS : 1) The class
2) The slot name
RETURNS : TRUE if slot is initable,
FALSE otherwise
SIDE EFFECTS : None
NOTES : None
***************************************************/
globle intBool EnvSlotInitableP(
  void *theEnv,
  void *theDefclass,
  const char *slotName)
  {
   SLOT_DESC *sd;

   sd = LookupSlot(theEnv,(DEFCLASS *) theDefclass,slotName,TRUE);
   if (sd == NULL)
     { return(FALSE); }

   /* A slot is initable unless it is no-write without also being
      initialize-only (De Morgan of the original condition). */
   if ((sd->noWrite == 0) || sd->initializeOnly)
     { return(TRUE); }
   return(FALSE);
  }
/**********************************************************************
NAME : SlotPublicPCommand
DESCRIPTION : Determines if an existing slot is publicly visible
for direct reference by subclasses
INPUTS : None
RETURNS : TRUE if the slot is public, FALSE otherwise
SIDE EFFECTS : None
NOTES : H/L Syntax : (slot-publicp <class> <slot>)
**********************************************************************/
globle intBool SlotPublicPCommand(
  void *theEnv)
  {
   DEFCLASS *theDefclass;
   SLOT_DESC *sd;

   /* Inherited slots are not accepted here (inheritFlag FALSE). */
   sd = CheckSlotExists(theEnv,"slot-publicp",&theDefclass,TRUE,FALSE);
   if (sd == NULL)
     { return(FALSE); }

   if (sd->publicVisibility)
     { return(TRUE); }
   return(FALSE);
  }
/***************************************************
NAME : EnvSlotPublicP
DESCRIPTION : Determines if a slot is public
INPUTS : 1) The class
2) The slot name
RETURNS : TRUE if slot is public,
FALSE otherwise
SIDE EFFECTS : None
NOTES : None
***************************************************/
globle intBool EnvSlotPublicP(
  void *theEnv,
  void *theDefclass,
  const char *slotName)
  {
   SLOT_DESC *sd;

   /* Inherited slots are not accepted here (inheritFlag FALSE). */
   sd = LookupSlot(theEnv,(DEFCLASS *) theDefclass,slotName,FALSE);
   if (sd == NULL)
     { return(FALSE); }

   if (sd->publicVisibility)
     { return(TRUE); }
   return(FALSE);
  }
/***************************************************
NAME : EnvSlotDefaultP
DESCRIPTION : Determines if a slot has a default value
INPUTS : 1) The class
2) The slot name
RETURNS : NO_DEFAULT, STATIC_DEFAULT or
DYNAMIC_DEFAULT for the slot
SIDE EFFECTS : None
NOTES : None
***************************************************/
globle int EnvSlotDefaultP(
  void *theEnv,
  void *theDefclass,
  const char *slotName)
  {
   SLOT_DESC *sd;

   /* An unknown slot is reported as having no default. */
   sd = LookupSlot(theEnv,(DEFCLASS *) theDefclass,slotName,FALSE);
   if (sd == NULL) return(NO_DEFAULT);

   if (sd->noDefault) return(NO_DEFAULT);
   return(sd->dynamicDefault ? DYNAMIC_DEFAULT : STATIC_DEFAULT);
  }
/**********************************************************************
NAME : SlotDirectAccessPCommand
DESCRIPTION : Determines if an existing slot can be directly
referenced by the class - i.e., if the slot is
private, is the slot defined in the class
INPUTS : None
RETURNS : TRUE if the slot is private,
FALSE otherwise
SIDE EFFECTS : None
NOTES : H/L Syntax : (slot-direct-accessp <class> <slot>)
**********************************************************************/
globle intBool SlotDirectAccessPCommand(
  void *theEnv)
  {
   DEFCLASS *theDefclass;
   SLOT_DESC *sd;

   sd = CheckSlotExists(theEnv,"slot-direct-accessp",&theDefclass,TRUE,TRUE);
   if (sd == NULL)
     { return(FALSE); }

   /* Directly accessible when public or defined in the class itself. */
   if (sd->publicVisibility) return(TRUE);
   if (sd->cls == theDefclass) return(TRUE);
   return(FALSE);
  }
/***************************************************
NAME : EnvSlotDirectAccessP
DESCRIPTION : Determines if a slot is directly
accessible from message-handlers
on class
INPUTS : 1) The class
2) The slot name
RETURNS : TRUE if slot is directly
accessible, FALSE otherwise
SIDE EFFECTS : None
NOTES : None
***************************************************/
globle intBool EnvSlotDirectAccessP(
  void *theEnv,
  void *theDefclass,
  const char *slotName)
  {
   SLOT_DESC *sd;

   sd = LookupSlot(theEnv,(DEFCLASS *) theDefclass,slotName,TRUE);
   if (sd == NULL)
     { return(FALSE); }

   /* Directly accessible when public or defined in the class itself. */
   if (sd->publicVisibility) return(TRUE);
   if (sd->cls == (DEFCLASS *) theDefclass) return(TRUE);
   return(FALSE);
  }
/**********************************************************************
NAME : SlotDefaultValueCommand
DESCRIPTION : Determines the default value for the specified slot
of the specified class
INPUTS : None
RETURNS : Nothing useful
SIDE EFFECTS : None
NOTES : H/L Syntax : (slot-default-value <class> <slot>)
**********************************************************************/
globle void SlotDefaultValueCommand(
void *theEnv,
DATA_OBJECT_PTR theValue)
{
DEFCLASS *theDefclass;
SLOT_DESC *sd;
/* Preset the result to FALSE so error exits below return it. */
SetpType(theValue,SYMBOL);
SetpValue(theValue,EnvFalseSymbol(theEnv));
sd = CheckSlotExists(theEnv,"slot-default-value",&theDefclass,TRUE,TRUE);
if (sd == NULL)
return;
/* ?NONE was specified for the slot: report the symbol, not a value. */
if (sd->noDefault)
{
SetpType(theValue,SYMBOL);
SetpValue(theValue,EnvAddSymbol(theEnv,"?NONE"));
return;
}
/* Dynamic defaults are expressions evaluated on each request;
static defaults are copied from the stored DATA_OBJECT. */
if (sd->dynamicDefault)
EvaluateAndStoreInDataObject(theEnv,(int) sd->multiple,
(EXPRESSION *) sd->defaultValue,
theValue,TRUE);
else
GenCopyMemory(DATA_OBJECT,1,theValue,sd->defaultValue);
}
/*********************************************************
NAME : SlotDefaultValue
DESCRIPTION : Determines the default value for
the specified slot of the specified class
INPUTS : 1) The class
2) The slot name
RETURNS : TRUE if slot default value is set,
FALSE otherwise
SIDE EFFECTS : Slot default value evaluated - dynamic
defaults will cause any side effects
NOTES : None
*********************************************************/
globle intBool EnvSlotDefaultValue(
void *theEnv,
void *theDefclass,
const char *slotName,
DATA_OBJECT_PTR theValue)
{
SLOT_DESC *sd;
/* Preset the result to FALSE so the lookup-failure exit returns it. */
SetpType(theValue,SYMBOL);
SetpValue(theValue,EnvFalseSymbol(theEnv));
if ((sd = LookupSlot(theEnv,(DEFCLASS *) theDefclass,slotName,TRUE)) == NULL)
return(FALSE);
/* ?NONE was specified for the slot: report the symbol, not a value. */
if (sd->noDefault)
{
SetpType(theValue,SYMBOL);
SetpValue(theValue,EnvAddSymbol(theEnv,"?NONE"));
return(TRUE);
}
/* Dynamic defaults are expressions evaluated on each request (which may
have side effects); static defaults are copied from storage. */
if (sd->dynamicDefault)
return(EvaluateAndStoreInDataObject(theEnv,(int) sd->multiple,
(EXPRESSION *) sd->defaultValue,
theValue,TRUE));
GenCopyMemory(DATA_OBJECT,1,theValue,sd->defaultValue);
return(TRUE);
}
/********************************************************
NAME : ClassExistPCommand
DESCRIPTION : Determines if a class exists
INPUTS : None
RETURNS : TRUE if class exists, FALSE otherwise
SIDE EFFECTS : None
NOTES : H/L Syntax : (class-existp <arg>)
********************************************************/
globle intBool ClassExistPCommand(
  void *theEnv)
  {
   DATA_OBJECT temp;

   if (EnvArgTypeCheck(theEnv,"class-existp",1,SYMBOL,&temp) == FALSE)
     { return(FALSE); }

   /* TRUE only when the named class is visible from the current module. */
   if (LookupDefclassByMdlOrScope(theEnv,DOToString(temp)) == NULL)
     { return(FALSE); }
   return(TRUE);
  }
/* =========================================
*****************************************
INTERNALLY VISIBLE FUNCTIONS
=========================================
***************************************** */
/******************************************************
NAME : CheckTwoClasses
DESCRIPTION : Checks for exactly two class arguments
for a H/L function
INPUTS : 1) The function name
2) Caller's buffer for first class
3) Caller's buffer for second class
RETURNS : TRUE if both found, FALSE otherwise
SIDE EFFECTS : Caller's buffers set
NOTES : Assumes exactly 2 arguments
******************************************************/
static int CheckTwoClasses(
  void *theEnv,
  const char *func,
  DEFCLASS **c1,
  DEFCLASS **c2)
  {
   DATA_OBJECT temp;
   DEFCLASS **buffers[2];
   int argIndex;

   buffers[0] = c1;
   buffers[1] = c2;

   /* Validate each symbol argument in turn and resolve it to a
      class visible from the current module. */
   for (argIndex = 0 ; argIndex < 2 ; argIndex++)
     {
      if (EnvArgTypeCheck(theEnv,func,argIndex + 1,SYMBOL,&temp) == FALSE)
        { return(FALSE); }
      *(buffers[argIndex]) = LookupDefclassByMdlOrScope(theEnv,DOToString(temp));
      if (*(buffers[argIndex]) == NULL)
        {
         ClassExistError(theEnv,func,ValueToString(temp.value));
         return(FALSE);
        }
     }
   return(TRUE);
  }
/***************************************************
NAME : CheckSlotExists
DESCRIPTION : Checks first two arguments of
a function for a valid class
and (inherited) slot
INPUTS : 1) The name of the function
2) A buffer to hold the found class
3) A flag indicating whether the
non-existence of the slot should
be an error
4) A flag indicating if the slot
can be inherited or not
RETURNS : NULL if slot not found, slot
descriptor otherwise
SIDE EFFECTS : Class buffer set if no errors,
NULL on errors
NOTES : None
***************************************************/
static SLOT_DESC *CheckSlotExists(
void *theEnv,
const char *func,
DEFCLASS **classBuffer,
intBool existsErrorFlag,
intBool inheritFlag)
{
SYMBOL_HN *ssym;
int slotIndex;
SLOT_DESC *sd;
/* Parse and validate the <class> and <slot> arguments; errors are
reported by the helper. */
ssym = CheckClassAndSlot(theEnv,func,classBuffer);
if (ssym == NULL)
return(NULL);
slotIndex = FindInstanceTemplateSlot(theEnv,*classBuffer,ssym);
/* Slot not present in the class's instance template: optionally an error. */
if (slotIndex == -1)
{
if (existsErrorFlag)
{
SlotExistError(theEnv,ValueToString(ssym),func);
SetEvaluationError(theEnv,TRUE);
}
return(NULL);
}
sd = (*classBuffer)->instanceTemplate[slotIndex];
/* Accept the slot if it is defined in the class itself, or if the
caller allows inherited slots. */
if ((sd->cls == *classBuffer) || inheritFlag)
return(sd);
/* Inherited slot found, but the caller disallows inheritance. */
PrintErrorID(theEnv,"CLASSEXM",1,FALSE);
EnvPrintRouter(theEnv,WERROR,"Inherited slot ");
EnvPrintRouter(theEnv,WERROR,ValueToString(ssym));
EnvPrintRouter(theEnv,WERROR," from class ");
PrintClassName(theEnv,WERROR,sd->cls,FALSE);
EnvPrintRouter(theEnv,WERROR," is not valid for function ");
EnvPrintRouter(theEnv,WERROR,func);
EnvPrintRouter(theEnv,WERROR,"\n");
SetEvaluationError(theEnv,TRUE);
return(NULL);
}
/***************************************************
NAME : LookupSlot
DESCRIPTION : Finds a slot in a class
INPUTS : 1) The class
2) The slot name
3) A flag indicating if inherited
slots are OK or not
RETURNS : The slot descriptor address, or
NULL if not found
SIDE EFFECTS : None
NOTES : None
***************************************************/
static SLOT_DESC *LookupSlot(
  void *theEnv,
  DEFCLASS *theDefclass,
  const char *slotName,
  intBool inheritFlag)
  {
   SYMBOL_HN *slotSymbol;
   int slotIndex;
   SLOT_DESC *sd;

   /* If the name was never interned, no slot can bear it. */
   slotSymbol = FindSymbolHN(theEnv,slotName);
   if (slotSymbol == NULL) return(NULL);

   slotIndex = FindInstanceTemplateSlot(theEnv,theDefclass,slotSymbol);
   if (slotIndex == -1) return(NULL);

   sd = theDefclass->instanceTemplate[slotIndex];

   /* An inherited slot only counts when the caller allows inheritance. */
   if ((inheritFlag == FALSE) && (sd->cls != theDefclass))
     { return(NULL); }
   return(sd);
  }
#if DEBUGGING_FUNCTIONS
/*****************************************************
NAME : CheckClass
DESCRIPTION : Used to check the class name for
class accessor functions such
as ppdefclass and undefclass
INPUTS : 1) The name of the H/L function
2) Name of the class
RETURNS : The class address,
or NULL if there was an error
SIDE EFFECTS : None
NOTES : None
******************************************************/
static DEFCLASS *CheckClass(
  void *theEnv,
  const char *func,
  const char *cname)
  {
   DEFCLASS *cls = LookupDefclassByMdlOrScope(theEnv,cname);

   /* Report the error on behalf of the calling H/L function. */
   if (cls == NULL)
     { ClassExistError(theEnv,func,cname); }
   return(cls);
  }
/*********************************************************
NAME : GetClassNameArgument
DESCRIPTION : Gets a class name-string
INPUTS : Calling function name
RETURNS : Class name (NULL on errors)
SIDE EFFECTS : None
NOTES : Assumes only 1 argument
*********************************************************/
static const char *GetClassNameArgument(
  void *theEnv,
  const char *fname)
  {
   DATA_OBJECT temp;

   /* The single argument must be a symbol naming the class. */
   if (EnvArgTypeCheck(theEnv,fname,1,SYMBOL,&temp) == FALSE)
     { return(NULL); }
   return(DOToString(temp));
  }
/****************************************************************
  NAME         : PrintClassBrowse
  DESCRIPTION  : Displays a "graph" of a class and its
                 subclasses, one class per line, indented
                 by depth
  INPUTS       : 1) The logical name of the output
                 2) The class address
                 3) The depth of the graph
  RETURNS      : Nothing useful
  SIDE EFFECTS : Recursively prints the subclass tree
  NOTES        : A trailing " *" marks classes with more
                 than one direct superclass
 ****************************************************************/
static void PrintClassBrowse(
  void *theEnv,
  const char *logicalName,
  DEFCLASS *cls,
  long depth)
  {
   long level, childIndex;

   /* Indent proportionally to the depth in the tree. */
   for (level = 0 ; level < depth ; level++)
     EnvPrintRouter(theEnv,logicalName," ");

   EnvPrintRouter(theEnv,logicalName,EnvGetDefclassName(theEnv,(void *) cls));
   if (cls->directSuperclasses.classCount > 1)
     EnvPrintRouter(theEnv,logicalName," *");
   EnvPrintRouter(theEnv,logicalName,"\n");

   /* Recurse into every direct subclass, one level deeper. */
   for (childIndex = 0 ; childIndex < cls->directSubclasses.classCount ; childIndex++)
     PrintClassBrowse(theEnv,logicalName,cls->directSubclasses.classArray[childIndex],depth+1);
  }
/*********************************************************
  NAME         : DisplaySeparator
  DESCRIPTION  : Prints a separator line for DescribeClass
  INPUTS       : 1) The logical name of the output
                 2) The buffer to use for the line
                 3) The buffer size
                 4) The character to use
  RETURNS      : Nothing useful
  SIDE EFFECTS : Buffer overwritten and displayed
  NOTES        : Fills maxlen-2 separator characters,
                 then a newline and a terminator
 *********************************************************/
static void DisplaySeparator(
  void *theEnv,
  const char *logicalName,
  char *buf,
  int maxlen,
  int sepchar)
  {
   int pos = 0;

   while (pos < maxlen-2)
     buf[pos++] = (char) sepchar;
   buf[pos] = '\n';
   buf[pos+1] = '\0';
   EnvPrintRouter(theEnv,logicalName,buf);
  }
/*************************************************************
NAME : DisplaySlotBasicInfo
DESCRIPTION : Displays a table summary of basic
facets for the slots of a class
including:
single/multiple
default/no-default/default-dynamic
inherit/no-inherit
read-write/initialize-only/read-only
local/shared
composite/exclusive
reactive/non-reactive
public/private
create-accessor read/write
override-message
The function also displays the source
class(es) for the facets
INPUTS : 1) The logical name of the output
2) A format string for use in sprintf
(for printing slot names)
3) A format string for use in sprintf
(for printing slot override message names)
4) A buffer to store the display in
5) A pointer to the class
RETURNS : Nothing useful
SIDE EFFECTS : Buffer written to and displayed
NOTES : None
*************************************************************/
static void DisplaySlotBasicInfo(
void *theEnv,
const char *logicalName,
const char *slotNamePrintFormat,
const char *overrideMessagePrintFormat,
char *buf,
DEFCLASS *cls)
{
long i;
SLOT_DESC *sp;
const char *createString;
/* Emit the header row: slot-name column followed by the
   three-letter facet column headings. */
gensprintf(buf,slotNamePrintFormat,"SLOTS");
#if DEFRULE_CONSTRUCT
/* The MCH (reactive/non-reactive) column exists only when the
   rule engine is compiled in. */
genstrcat(buf,"FLD DEF PRP ACC STO MCH SRC VIS CRT ");
#else
genstrcat(buf,"FLD DEF PRP ACC STO SRC VIS CRT ");
#endif
EnvPrintRouter(theEnv,logicalName,buf);
gensprintf(buf,overrideMessagePrintFormat,"OVRD-MSG");
EnvPrintRouter(theEnv,logicalName,buf);
EnvPrintRouter(theEnv,logicalName,"SOURCE(S)\n");
/* One row per slot in the class's instance template. */
for (i = 0 ; i < cls->instanceSlotCount ; i++)
{
sp = cls->instanceTemplate[i];
gensprintf(buf,slotNamePrintFormat,ValueToString(sp->slotName->name));
/* FLD: single-field or multifield slot */
genstrcat(buf,sp->multiple ? "MLT " : "SGL ");
/* DEF: no default, dynamic default, or static default */
if (sp->noDefault)
genstrcat(buf,"NIL ");
else
genstrcat(buf,sp->dynamicDefault ? "DYN " : "STC ");
/* PRP: inherit / no-inherit facet */
genstrcat(buf,sp->noInherit ? "NIL " : "INH ");
/* ACC: initialize-only, read-only, or read-write */
if (sp->initializeOnly)
genstrcat(buf,"INT ");
else if (sp->noWrite)
genstrcat(buf," R ");
else
genstrcat(buf,"RW ");
/* STO: shared (class) or local (per-instance) storage */
genstrcat(buf,sp->shared ? "SHR " : "LCL ");
#if DEFRULE_CONSTRUCT
/* MCH: reactive / non-reactive facet */
genstrcat(buf,sp->reactive ? "RCT " : "NIL ");
#endif
/* SRC: composite or exclusive facet sourcing */
genstrcat(buf,sp->composite ? "CMP " : "EXC ");
/* VIS: public or private visibility */
genstrcat(buf,sp->publicVisibility ? "PUB " : "PRV ");
/* CRT: create-accessor code; pad so the column stays
   aligned whether the code is 1 or 2 characters wide */
createString = GetCreateAccessorString(sp);
if (createString[1] == '\0')
genstrcat(buf," ");
genstrcat(buf,createString);
if ((createString[1] == '\0') ? TRUE : (createString[2] == '\0'))
genstrcat(buf," ");
genstrcat(buf," ");
EnvPrintRouter(theEnv,logicalName,buf);
/* OVRD-MSG: override-message name, or NIL for unwritable slots */
gensprintf(buf,overrideMessagePrintFormat,
sp->noWrite ? "NIL" : ValueToString(sp->overrideMessage));
EnvPrintRouter(theEnv,logicalName,buf);
/* Finish the row with the class(es) the slot's facets come from. */
PrintSlotSources(theEnv,logicalName,sp->slotName->name,&sp->cls->allSuperclasses,0,TRUE);
EnvPrintRouter(theEnv,logicalName,"\n");
}
}
/***************************************************
  NAME         : PrintSlotSources
  DESCRIPTION  : Displays a list of source classes
                 for a composite class (in order
                 of most general to specific)
  INPUTS       : 1) The logical name of the output
                 2) The name of the slot
                 3) The precedence list of the class
                    of the slot (the source class
                    should be first in the list)
                 4) The index into the packed
                    links array
                 5) Flag indicating whether to
                    disregard the no-inherit facet
  RETURNS      : TRUE if a class is printed, FALSE
                 otherwise
  SIDE EFFECTS : Recursively prints out appropriate
                 members from list in reverse order
  NOTES        : None
 ***************************************************/
static intBool PrintSlotSources(
  void *theEnv,
  const char *logicalName,
  SYMBOL_HN *sname,
  PACKED_CLASS_LINKS *sprec,
  long theIndex,
  int inhp)
  {
   SLOT_DESC *slotDesc;

   /* Past the end of the precedence list: nothing printed. */
   if (theIndex == sprec->classCount)
     return(FALSE);

   slotDesc = FindClassSlot(sprec->classArray[theIndex],sname);

   /* Skip classes that do not contribute this slot: either the
      slot is absent, or it is no-inherit and inherited slots
      are being disregarded. */
   if ((slotDesc == NULL) || ((slotDesc->noInherit != 0) && ! inhp))
     return(PrintSlotSources(theEnv,logicalName,sname,sprec,theIndex+1,FALSE));

   /* For composite slots, print more general sources first
      (recursion reverses the precedence order). */
   if (slotDesc->composite)
     {
      if (PrintSlotSources(theEnv,logicalName,sname,sprec,theIndex+1,FALSE))
        EnvPrintRouter(theEnv,logicalName," ");
     }
   PrintClassName(theEnv,logicalName,sprec->classArray[theIndex],FALSE);
   return(TRUE);
  }
/*********************************************************
NAME : DisplaySlotConstraintInfo
DESCRIPTION : Displays a table summary of type-checking
facets for the slots of a class
including:
type
allowed-symbols
allowed-integers
allowed-floats
allowed-values
allowed-instance-names
range
min-number-of-elements
max-number-of-elements
The function also displays the source
class(es) for the facets
INPUTS : 1) The logical name of the output
2) A format string for use in sprintf
(for printing slot names)
3) A buffer to store the display in
4) Maximum buffer size
5) A pointer to the class
RETURNS : Nothing useful
SIDE EFFECTS : Buffer written to and displayed
NOTES : None
*********************************************************/
static void DisplaySlotConstraintInfo(
void *theEnv,
const char *logicalName,
const char *slotNamePrintFormat,
char *buf,
unsigned maxlen,
DEFCLASS *cls)
{
long i;
CONSTRAINT_RECORD *cr;
const char *strdest = "***describe-class***";
/* Header row: slot-name column plus one column per type. */
gensprintf(buf,slotNamePrintFormat,"SLOTS");
genstrcat(buf,"SYM STR INN INA EXA FTA INT FLT\n");
EnvPrintRouter(theEnv,logicalName,buf);
for (i = 0 ; i < cls->instanceSlotCount ; i++)
{
cr = cls->instanceTemplate[i]->constraint;
/* Each row begins with the slot name written into buf; the
   remainder of the row is appended via a string router. */
gensprintf(buf,slotNamePrintFormat,ValueToString(cls->instanceTemplate[i]->slotName->name));
if (cr != NULL)
{
/* One code per type column: blank = not allowed,
   '+' = any value, '#' = restricted values. */
genstrcat(buf,ConstraintCode(cr,(unsigned) cr->symbolsAllowed,
(unsigned) cr->symbolRestriction));
genstrcat(buf,ConstraintCode(cr,(unsigned) cr->stringsAllowed,
(unsigned) cr->stringRestriction));
genstrcat(buf,ConstraintCode(cr,(unsigned) cr->instanceNamesAllowed,
(unsigned) (cr->instanceNameRestriction || cr->classRestriction)));
genstrcat(buf,ConstraintCode(cr,(unsigned) cr->instanceAddressesAllowed,
(unsigned) cr->classRestriction));
genstrcat(buf,ConstraintCode(cr,(unsigned) cr->externalAddressesAllowed,0));
genstrcat(buf,ConstraintCode(cr,(unsigned) cr->factAddressesAllowed,0));
genstrcat(buf,ConstraintCode(cr,(unsigned) cr->integersAllowed,
(unsigned) cr->integerRestriction));
genstrcat(buf,ConstraintCode(cr,(unsigned) cr->floatsAllowed,
(unsigned) cr->floatRestriction));
/* Append numeric range / cardinality after the codes. */
OpenStringDestination(theEnv,strdest,buf + strlen(buf),(maxlen - strlen(buf) - 1));
if (cr->integersAllowed || cr->floatsAllowed || cr->anyAllowed)
{
EnvPrintRouter(theEnv,strdest,"RNG:[");
PrintExpression(theEnv,strdest,cr->minValue);
EnvPrintRouter(theEnv,strdest,"..");
PrintExpression(theEnv,strdest,cr->maxValue);
EnvPrintRouter(theEnv,strdest,"] ");
}
if (cls->instanceTemplate[i]->multiple)
{
EnvPrintRouter(theEnv,strdest,"CRD:[");
PrintExpression(theEnv,strdest,cr->minFields);
EnvPrintRouter(theEnv,strdest,"..");
PrintExpression(theEnv,strdest,cr->maxFields);
EnvPrintRouter(theEnv,strdest,"]");
}
}
else
{
/* No constraint record: everything is allowed.  Open the
   string destination PAST the slot name already written
   into buf — opening at the start of buf (as this branch
   previously did) overwrote the slot-name column. */
OpenStringDestination(theEnv,strdest,buf + strlen(buf),(maxlen - strlen(buf) - 1));
EnvPrintRouter(theEnv,strdest," + + + + + + + + RNG:[-oo..+oo]");
if (cls->instanceTemplate[i]->multiple)
EnvPrintRouter(theEnv,strdest," CRD:[0..+oo]");
}
EnvPrintRouter(theEnv,strdest,"\n");
CloseStringDestination(theEnv,strdest);
EnvPrintRouter(theEnv,logicalName,buf);
}
}
/******************************************************
  NAME         : ConstraintCode
  DESCRIPTION  : Gives a string code representing the
                 kind of constraint on a type
  INPUTS       : 1) The constraint record
                 2) Allowed flag
                 3) Restricted-values flag
  RETURNS      : " " for type not allowed
                 " + " for any value of type allowed
                 " # " for some values of type allowed
  SIDE EFFECTS : None
  NOTES        : Used by DisplaySlotConstraintInfo
 ******************************************************/
static const char *ConstraintCode(
  CONSTRAINT_RECORD *cr,
  unsigned allow,
  unsigned restrictValues)
  {
   /* Type not permitted at all: blank column entry. */
   if ((allow == 0) && (cr->anyAllowed == 0))
     return(" ");

   /* Permitted, but only specific values: '#'; otherwise '+'. */
   if (restrictValues || cr->anyRestriction)
     return " # ";
   return " + ";
  }
#endif
/*##################################*/
/* Additional Environment Functions */
/*##################################*/
#if ALLOW_ENVIRONMENT_GLOBALS
#if DEBUGGING_FUNCTIONS
/* Global-environment wrapper: forwards to EnvBrowseClasses
   using the current environment. */
globle void BrowseClasses(
const char *logicalName,
void *clsptr)
{
EnvBrowseClasses(GetCurrentEnvironment(),logicalName,clsptr);
}
/* Global-environment wrapper: forwards to EnvDescribeClass
   using the current environment. */
globle void DescribeClass(
const char *logicalName,
void *clsptr)
{
EnvDescribeClass(GetCurrentEnvironment(),logicalName,clsptr);
}
#endif
/* Global-environment wrapper: forwards to EnvSlotDirectAccessP
   using the current environment. */
globle intBool SlotDirectAccessP(
void *theDefclass,
const char *slotName)
{
return EnvSlotDirectAccessP(GetCurrentEnvironment(),theDefclass,slotName);
}
/* Global-environment wrapper: forwards to EnvSlotExistP
   using the current environment. */
globle intBool SlotExistP(
void *theDefclass,
const char *slotName,
intBool inheritFlag)
{
return EnvSlotExistP(GetCurrentEnvironment(),theDefclass,slotName,inheritFlag);
}
/* Global-environment wrapper: forwards to EnvSlotInitableP
   using the current environment. */
globle intBool SlotInitableP(
void *theDefclass,
const char *slotName)
{
return EnvSlotInitableP(GetCurrentEnvironment(),theDefclass,slotName);
}
/* Global-environment wrapper: forwards to EnvSlotPublicP
   using the current environment. */
globle intBool SlotPublicP(
void *theDefclass,
const char *slotName)
{
return EnvSlotPublicP(GetCurrentEnvironment(),theDefclass,slotName);
}
/* Global-environment wrapper: forwards to EnvSlotDefaultP
   using the current environment. */
globle int SlotDefaultP(
void *theDefclass,
const char *slotName)
{
return EnvSlotDefaultP(GetCurrentEnvironment(),theDefclass,slotName);
}
/* Global-environment wrapper: forwards to EnvSlotWritableP
   using the current environment. */
globle intBool SlotWritableP(
void *theDefclass,
const char *slotName)
{
return EnvSlotWritableP(GetCurrentEnvironment(),theDefclass,slotName);
}
/* Global-environment wrapper: forwards to EnvSubclassP
   using the current environment. */
globle intBool SubclassP(
void *firstClass,
void *secondClass)
{
return EnvSubclassP(GetCurrentEnvironment(),firstClass,secondClass);
}
/* Global-environment wrapper: forwards to EnvSuperclassP
   using the current environment. */
globle intBool SuperclassP(
void *firstClass,
void *secondClass)
{
return EnvSuperclassP(GetCurrentEnvironment(),firstClass,secondClass);
}
/* Global-environment wrapper: forwards to EnvSlotDefaultValue
   using the current environment. */
globle intBool SlotDefaultValue(
void *theDefclass,
const char *slotName,
DATA_OBJECT_PTR theValue)
{
return EnvSlotDefaultValue(GetCurrentEnvironment(),theDefclass,slotName,theValue);
}
#endif
#endif
|
DrItanium/durandal
|
include/indirect/IndirectPassHeader.h
|
<gh_stars>1-10
#ifndef _indirect_pass_header_h
#define _indirect_pass_header_h
#include "indirect/IndirectAnalysisUsageDeclaration.h"
#include "indirect/IndirectUniqueIdentifier.h"
/* An indirect pass header is a magical and wonderful class that contains a
* wide assortment of information ranging from the name of a pass and it's
* description to what it requires
*/
namespace indirect {
class IndirectPassHeader : public IndirectAnalysisUsageDeclaration,
public IndirectUniqueIdentifier {
public:
// The category of pass this header describes.
enum IndirectPassType {
Unknown,
Module,
Function,
BasicBlock,
Loop,
Region,
MachineFunction,
CallGraphSCC,
/* Adding this field gets rid of the warning that having a comma
 * attached to the last element of an enumerator list is a C++11
 * specific feature.
 */
PassTypeCount
};
private:
std::string* passDescription; //full name
std::string* passName; //registrationName
std::string* templateSet; // name of the template set this pass targets
bool isCFGOnlyPass;
bool isAnalysis;
bool isAnalysisGroup;
IndirectPassType passType;
public:
IndirectPassHeader();
~IndirectPassHeader();
// Accessors below store/return the raw C-string views of the
// std::string* members above; setters take ownership semantics
// defined in the implementation file — confirm there before use.
const char* getPassDescription();
void setPassDescription(const char* description);
const char* getPassName();
void setPassName(const char* name);
void setIsCFGOnlyPass(bool isCFGOnly);
bool getIsCFGOnlyPass();
void setIsAnalysis(bool isAnalysisPass);
bool getIsAnalysis();
void setIsAnalysisGroup(bool _isAnalysisGroup);
bool getIsAnalysisGroup();
void setTemplateSet(const char* templateSet);
/*
 * Get the template set that this pass is meant for. Failure to
 * provide this will cause an assertion to occur when attempting to
 * create a pass.
 */
const char* getTemplateSet();
void setPassType(IndirectPassHeader::IndirectPassType type);
IndirectPassHeader::IndirectPassType getPassType();
};
}
#endif
|
DrItanium/durandal
|
include/obsolete/ExpertSystem/CLIPSRegionBuilder.h
|
#ifndef _clips_region_builder_h
#define _clips_region_builder_h
#include "ExpertSystem/CLIPSObjectBuilder.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
using namespace llvm;
// CLIPSObjectBuilder specialization for llvm::Region.
// NOTE(review): presumably emits knowledge describing a region (and its
// parent link) via the KnowledgeConstructor — confirm against the
// implementation and CLIPSObjectBuilder's contract.
class CLIPSRegionBuilder : public CLIPSObjectBuilder {
public:
// nm: builder name; namer: supplies unique names for functions/values.
CLIPSRegionBuilder(std::string nm, FunctionNamer& namer);
// Adds this region's fields to the object under construction;
// 'parent' names the enclosing region.
void addFields(Region* region, KnowledgeConstructor* kc, char* parent);
// Builds the full representation for region r (fields + finalization).
void build(Region* r, KnowledgeConstructor* kc, char* parent);
};
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.