repo_name
stringlengths
5
122
path
stringlengths
3
232
text
stringlengths
6
1.05M
Bungarch/meta
include/meta/util/random.h
/** * @file random.h * @author <NAME> * * All files in META are dual-licensed under the MIT and NCSA licenses. For more * details, consult the file LICENSE.mit and LICENSE.ncsa in the root of the * project. */ #ifndef META_UTIL_RANDOM_H_ #define META_UTIL_RANDOM_H_ #include <cstdint> #include <functional> #include <limits> #include <random> #include "meta/config.h" namespace meta { /** * A collection of utility classes/functions for randomness. (e.g. random * number generation, shuffling, etc.). */ namespace random { /** * A class to type-erase any unsigned random number generator in a way that * makes STL algorithms happy. */ class any_rng { public: using result_type = std::uint64_t; /** * The minimum value generated by the RNG. */ static constexpr result_type min() { return 0; } /** * The maximum value generated by the RNG. */ static constexpr result_type max() { return std::numeric_limits<result_type>::max(); } template <class RandomEngine> using random_engine = std::independent_bits_engine<RandomEngine, 64, result_type>; /** * Constructor: takes any (unsigned) random number generator and wraps * it in a type-erased way. any_rng always produces 64-bit random * numbers. */ template <class RandomEngine, class = typename std::enable_if<!std::is_same< typename std::decay<RandomEngine>::type, any_rng>::value>::type> any_rng(RandomEngine&& rng) : wrapped_(random_engine<typename std::decay<RandomEngine>::type>( std::forward<RandomEngine>(rng))) { // nothing } /** * Call operator: generates one random number. */ result_type operator()() const { return wrapped_(); } private: /// the wrapped RNG std::function<result_type()> wrapped_; }; /** * A 64-bit pseudo-random number generator that uses only a single 64-bit * unsigned integer as its state. Passes BigCrush. Not recommended for * standard use, but it is useful for taking a single 64-bit seed and * seeding a generator with a larger state. 
* * The original was written in 2015 by <NAME> (<EMAIL>) and * released into the public domain. * * @see http://dl.acm.org/citation.cfm?doid=2714064.2660195 * @see http://xoroshiro.di.unimi.it/splitmix64.c */ class splitmix64 { public: using result_type = uint64_t; explicit splitmix64(uint64_t seed) : state_{seed} { // nothing } static constexpr uint64_t min() { return 0; } static constexpr uint64_t max() { return std::numeric_limits<uint64_t>::max(); } inline uint64_t operator()() { uint64_t z = (state_ += 0x9E3779B97F4A7C15ULL); z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL; z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL; return z ^ (z >> 31); } private: uint64_t state_; }; /** * A high quality 64-bit psuedo-random number generator that is the * successor to the xorshift128+ family. It passes BigCrush without * systematic failures, but has a relatively short period. * * The 128-bit state must be seeded to not be zero everywhere. * * THe original was written in 2016 by <NAME> and <NAME> * (<EMAIL>) and released into the public domain. * * @see http://xoroshiro.di.unimi.it/xoroshiro128plus.c */ class xoroshiro128 { public: using result_type = uint64_t; explicit xoroshiro128(uint64_t value) { seed(value); } explicit xoroshiro128(uint64_t s1, uint64_t s2) : s0_{s1}, s1_{s2} { // nothing } static constexpr uint64_t min() { return 0; } static constexpr uint64_t max() { return std::numeric_limits<uint64_t>::max(); } void seed(uint64_t value) { splitmix64 sm{value}; s0_ = sm(); s1_ = sm(); } inline uint64_t operator()() { const auto s0 = s0_; auto s1 = s1_; const auto result = s0 + s1; s1 ^= s0; s0_ = rotl(s0, 55) ^ s1 ^ (s1 << 14); s1_ = rotl(s1, 36); return result; } private: static inline uint64_t rotl(const uint64_t x, int k) { return (x << k) | (x >> (64 - k)); } uint64_t s0_; uint64_t s1_; }; /** * Generate a random number between 0 and an (exclusive) upper bound. 
This * uses the rejection sampling technique, and it assumes that the * RandomEngine has a strictly larger range than the desired one. * * @param rng The rng to generate numbers from * @param upper_bound The exclusive upper bound for the number * @return a random number in the range [0, upper_bound) */ template <class RandomEngine> typename RandomEngine::result_type bounded_rand(RandomEngine& rng, typename RandomEngine::result_type upper_bound) { auto random_max = RandomEngine::max() - RandomEngine::min(); auto threshold = random_max - (random_max + 1) % upper_bound; while (true) { // proposal is in the range [0, random_range] auto proposal = rng() - RandomEngine::min(); if (proposal <= threshold) return proposal % upper_bound; } } /** * Shuffles the given range using the provided rng. * * THERE IS A REASON we don't use std::shuffle here: we want * reproducibility between compilers, who don't seem to agree on the number * of times to call rng_ in the shuffle process. * * Furthermore, it seems that we can't rely on a canonical number of rng_ * calls in std::uniform_int_distribution, either, so that's out too. * * We instead use random::bounded_rand(), since we know that the range of * the RNG is definitely going to be larger than the upper bounds we * request here. 
* * @param first The iterator to the beginning of the range to be shuffled * @param last The iterator to the end of the range to be shuffled * @param rng The random number generator to use */ template <class RandomAccessIterator, class RandomEngine> void shuffle(RandomAccessIterator first, RandomAccessIterator last, RandomEngine&& rng) { using result_type = typename std::remove_reference<RandomEngine>::type::result_type; using difference_type = typename std::iterator_traits<RandomAccessIterator>::difference_type; auto dist = last - first; assert(dist > 0); for (difference_type i = 0; i < dist; ++i) { using std::swap; auto bound = static_cast<result_type>(dist - i); auto idx = static_cast<difference_type>(bounded_rand(rng, bound)); swap(first[dist - 1 - i], first[idx]); } } } } #endif
Bungarch/meta
src/utf/detail.h
/**
 * @file detail.h
 * @author <NAME>
 *
 * All files in META are released under the MIT license. For more details,
 * consult the file LICENSE in the root of the project.
 */

#ifndef META_UTF_DETAIL_H_
#define META_UTF_DETAIL_H_

#include <array>
#include <stdexcept>

// ICU generates conversion warnings from code we do not control
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <unicode/uclean.h>
#include <unicode/unistr.h>
#pragma GCC diagnostic pop

namespace meta
{
namespace utf
{

/**
 * Internal class that ensures that ICU cleans up all of its
 * "still-reachable" memory before program termination.
 *
 * Implemented as a Meyers singleton: the first call to get() runs u_init()
 * once, and the matching u_cleanup() runs automatically when the static
 * instance is destroyed at program exit.
 */
class icu_handle
{
  private:
    // private: instances are only created through get()
    icu_handle()
    {
        auto status = U_ZERO_ERROR;
        u_init(&status);
        if (!U_SUCCESS(status))
            throw std::runtime_error{"Failed to initialize icu"};
    }

  public:
    /**
     * All functions that use ICU should call this function first to
     * ensure that the handle is instantiated for later cleanup.
     */
    inline static icu_handle& get()
    {
        static icu_handle handle;
        return handle;
    }

    /**
     * Destructor. Invokes the ICU cleanup method.
     */
    ~icu_handle()
    {
        u_cleanup();
    }
};

/**
 * Helper method that converts an ICU string to a std::u16string.
 *
 * @param icu_str The ICU string to be converted
 * @return a std::u16string from the given ICU string
 */
inline std::u16string icu_to_u16str(const icu::UnicodeString& icu_str)
{
    std::u16string u16str;
    // UnicodeString::length() is in UTF-16 code units, so this resize is
    // exact (no truncation, no over-allocation)
    u16str.resize(static_cast<std::size_t>(icu_str.length()));
    auto status = U_ZERO_ERROR;
    // looks dangerous, actually isn't: UChar is guaranteed to be a 16-bit
    // integer type, so all we're doing here is going between signed vs.
    // unsigned
    // NOTE(review): status is not inspected after extract(); presumably a
    // full-length extract into an exactly-sized buffer cannot fail -- confirm
    // against the ICU extract() documentation.
    icu_str.extract(reinterpret_cast<UChar*>(&u16str[0]),
                    static_cast<int32_t>(u16str.length()), status);
    return u16str;
}

/**
 * Helper method that converts an ICU string to a std::string in utf8.
 *
 * @param icu_str The ICU string to be converted
 * @return a std::string in utf8 from the given ICU string
 */
inline std::string icu_to_u8str(const icu::UnicodeString& icu_str)
{
    std::string u8str;
    auto len = static_cast<std::size_t>(icu_str.length());
    // length() counts UTF-16 code units, not UTF-8 bytes; toUTF8String
    // appends and grows the buffer as needed, so this is only a hint
    u8str.reserve(len); // this is not right in general, but is a
                        // reasonable guess for ascii
    icu_str.toUTF8String(u8str);
    return u8str;
}
}
}
#endif
Bungarch/meta
include/meta/topics/topic_model.h
/** * @file topic_model.h * @author <NAME> * * All files in META are dual-licensed under the MIT and NCSA licenses. For more * details, consult the file LICENSE.mit and LICENSE.ncsa in the root of the * project. */ #ifndef META_TOPICS_TOPICS_H_ #define META_TOPICS_TOPICS_H_ #include <istream> #include <vector> #include "cpptoml.h" #include "meta/config.h" #include "meta/meta.h" #include "meta/stats/multinomial.h" #include "meta/util/fixed_heap.h" #include "meta/util/string_view.h" namespace meta { namespace topics { struct term_prob { term_id tid; double probability; }; struct topic_prob { topic_id tid; double probability; }; /** * A read-only model for accessing topic models. */ class topic_model { public: /** * Load topic models from files. * * @param theta The stream to read the vocabulary from * @param phi The stream to read the vectors from */ topic_model(std::istream& theta, std::istream& phi); /** * @param topic_id The topic to use * @param k The number of words to return * @return the top k most probable words in the topic */ std::vector<term_prob> top_k(topic_id tid, std::size_t k = 10) const; /** * @param topic_id The topic to use * @param k The number of words to return * @param score A scoring function to weight the raw probabilities * @return the top k most probable words in the topic */ template <typename T> std::vector<term_prob> top_k(topic_id tid, std::size_t k, T&& score) const; /** * @param doc_id The document we are concerned with * @return The probability of each of k topics for the * given document */ const stats::multinomial<topic_id>& topic_distribution(doc_id doc) const; /** * @param k The topic we are concerned with * @return The distribution over terms for the specified topic */ const stats::multinomial<term_id>& term_distribution(topic_id k) const; /** * @param topic_id The topic we are concerned with * @param term_id The term we are concerned with * @return The probability of the term for the given topic */ double 
term_probability(topic_id top_id, term_id tid) const; /** * @param doc The document we are concerned with * @param topic_id The topic we are concerned with * @return The probability for the given topic */ double topic_probability(doc_id doc, topic_id tid) const; /** * @return The number of topics */ std::size_t num_topics() const; /** * @return The number of unique words */ std::size_t num_words() const; /** * @return The number of documents */ std::size_t num_docs() const; private: /** * The number of topics. */ const std::size_t num_topics_; /** * The number of total unique words. */ const std::size_t num_words_; /** * The number of documents. */ const std::size_t num_docs_; /** * The term probabilities by topic */ std::vector<stats::multinomial<term_id>> topic_term_probabilities_; /** * The term probabilities by topic */ std::vector<stats::multinomial<topic_id>> doc_topic_probabilities_; }; template <typename T> std::vector<term_prob> topic_model::top_k(topic_id tid, std::size_t k, T&& score) const { auto pairs = util::make_fixed_heap<term_prob>( k, [](const term_prob& a, const term_prob& b) { return a.probability > b.probability; }); auto current_topic = topic_term_probabilities_[tid]; for (term_id i{0}; i < num_words_; ++i) { pairs.push(term_prob{i, score(tid, i)}); } return pairs.extract_top(); } class topic_model_exception : public std::runtime_error { public: using std::runtime_error::runtime_error; }; topic_model load_topic_model(const cpptoml::table& config); } } #endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/mpp/module_init/src/sdk_init.c
/* * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #include "asm/io.h" #include "hi_module_param.h" #include "stdlib.h" #include "fcntl.h" #include "string.h" #include "board.h" #ifdef __cplusplus #if __cplusplus extern "C" { #endif #endif /* End of #ifdef __cplusplus */ #define SENSOR_NAME_LEN 64 #define CHIP_NAME_LEN 64 #define BOARD_NAME_LEN 64 static unsigned long long g_mmz_start = 0x42000000; static unsigned int g_mmz_size = 32; /* M Byte */ static char g_chip_name[CHIP_NAME_LEN] = "hi3518ev300"; /* hi3518ev300 */ static char g_sensor_name[SENSOR_NAME_LEN] = "f23"; /* f23 */ static char g_board_name[BOARD_NAME_LEN] = "demo"; /* demo sck */ static void CHIP_init(void) { return; } static int SYSCONFIG_init(void) { extern int hi_sysconfig_init(void); extern int g_quick_start_flag; extern int g_online_flag; extern int g_cmos_yuv_flag; extern char g_sensor_list[SENSOR_NAME_LEN]; extern char g_chip_list[CHIP_NAME_LEN]; extern char g_board_list[BOARD_NAME_LEN]; g_quick_start_flag = 0; g_online_flag = 0; g_cmos_yuv_flag = 0; strncpy(g_chip_list, g_chip_name, CHIP_NAME_LEN); strncpy(g_sensor_list, g_sensor_name, SENSOR_NAME_LEN); strncpy(g_board_list, g_board_name, BOARD_NAME_LEN); return hi_sysconfig_init(); } static int MMZ_init(void) { extern int media_mem_init(void *pArgs); MMZ_SETUP_MODULE_PARAMS_S stMMZ_Param = {0}; snprintf(stMMZ_Param.mmz, MMZ_SETUP_CMDLINE_LENGTH, 
"anonymous,0,0x%llx,%dM", g_mmz_start, g_mmz_size); stMMZ_Param.anony = 1; dprintf("g_mmz_start=0x%llx, g_mmz_size=0x%x\n", g_mmz_start, g_mmz_size); dprintf("mmz param= %s\n", stMMZ_Param.mmz); return media_mem_init(&stMMZ_Param); } static int BASE_init(void) { extern void base_get_module_param(void *pArgs); extern int base_mod_init(void *pArgs); BASE_MODULE_PARAMS_S stBaseModuleParam; base_get_module_param(&stBaseModuleParam); return base_mod_init(&stBaseModuleParam); } static int SYS_init(void) { extern int sys_mod_init(void); return sys_mod_init(); } static int RGN_init(void) { extern int rgn_mod_init(void); return rgn_mod_init(); } static int ISP_init(void) { extern int isp_mod_init(void); return isp_mod_init(); } static int VI_init(void) { extern int vi_mod_init(void); return vi_mod_init(); } static int VGS_init(void) { extern void vgs_get_module_param(void *pArgs); extern int vgs_mod_init(void *pArgs); VGS_MODULE_PARAMS_S stVgsModuleParam; vgs_get_module_param((void*)&stVgsModuleParam); stVgsModuleParam.u32MaxVgsJob = 64; /* 64 -- max job num */ stVgsModuleParam.u32MaxVgsTask = 100; /* 100 -- max task num */ stVgsModuleParam.u32MaxVgsNode = 100; /* 100 -- max node num */ return vgs_mod_init(&stVgsModuleParam); } static int IVE_init(void) { extern int ive_mod_init(void *pArgs); extern void ive_get_module_param(void *pArgs); IVE_MODULE_PARAMS_S stIveModuleParam; ive_get_module_param((void*)&stIveModuleParam); stIveModuleParam.bSavePowerEn = HI_TRUE; stIveModuleParam.u16IveNodeNum = 512; /* 512 -- node num */ return ive_mod_init(&stIveModuleParam); } static int VPSS_init(void) { extern void vpss_get_module_param(void *pArgs); extern int vpss_mod_init(void *pArgs); VPSS_MODULE_PARAMS_S stVpssModuleParam; vpss_get_module_param((void*)&stVpssModuleParam); return vpss_mod_init(&stVpssModuleParam); } static int VO_init(void) { extern int vou_module_init(void); return vou_module_init(); } static int RC_init(void) { extern int rc_mod_init(void); return rc_mod_init(); 
} static int VENC_init(void) { extern void venc_get_module_param(void *pArgs); extern int venc_mod_init(void *pArgs); VENC_MODULE_PARAMS_S stVencModuleParam; venc_get_module_param((void*)&stVencModuleParam); return venc_mod_init(&stVencModuleParam); } static int CHNL_init(void) { extern int chnl_mod_init(void); return chnl_mod_init(); } static int VEDU_init(void) { extern int vedu_mod_init(void); return vedu_mod_init(); } static int H264e_init(void) { extern int h264e_mod_init(void); return h264e_mod_init(); } static int H265e_init(void) { extern int h265e_mod_init(void *pArgs); return h265e_mod_init(NULL); } static int JPEGE_init(void) { extern int jpege_mod_init(void); return jpege_mod_init(); } static int hi_sensor_spi_init(void) { extern int sensor_spi_dev_init(void *pArgs); return sensor_spi_dev_init(NULL); } static int hi_sensor_i2c_init(void) { extern int hi_dev_init(void); return hi_dev_init(); } static int PWM_init(void) { return 0; } static int MIPI_RX_init(void) { extern int mipi_rx_mod_init(void); return mipi_rx_mod_init(); } static int AcodecMod_init(void) { extern int acodec_mod_init(void *pArgs); return acodec_mod_init(NULL); } static int AiaoMod_init(void) { extern int aiao_mod_init(void); return aiao_mod_init(); } static int AiMod_init(void) { extern int ai_mod_init(void); return ai_mod_init(); } static int AoMod_init(void) { extern int ao_mod_init(void *pArgs); return ao_mod_init(NULL); } static int AencMod_init(void) { extern int aenc_mod_init(void *pArgs); return aenc_mod_init(NULL); } static int AdecMod_init(void) { extern int adec_mod_init(void *pArgs); return adec_mod_init(NULL); } static int Cipher_init(void) { extern int cipher_drv_mod_init(void); return cipher_drv_mod_init(); } static int TDE_init(void) { extern int tde_mod_init(void); return tde_mod_init(); } static void insert_audio(void) { int ret; ret = AiaoMod_init(); if (ret != 0) { printf("aiao init error.\n"); } ret = AiMod_init(); if (ret != 0) { printf("ai init error.\n"); } ret 
= AoMod_init(); if (ret != 0) { printf("ao init error.\n"); } ret = AencMod_init(); if (ret != 0) { printf("aenc init error.\n"); } ret = AdecMod_init(); if (ret != 0) { printf("adec init error.\n"); } ret = AcodecMod_init(); if (ret != 0) { printf("acodec init error.\n"); } printf("insert audio\n"); } extern void osal_proc_init(void); extern int hifb_init(void* pArgs); static int HIFB_init(void) { HIFB_MODULE_PARAMS_S stHIFB_Param; snprintf(stHIFB_Param.video, 64, "hifb:vram0_size:1620"); /* 64 -- str length */ stHIFB_Param.bUpdateRotateRect = HI_FALSE; return hifb_init(&stHIFB_Param); } static int PM_init(void) { return 0; } void SDK_init(void) { int ret; CHIP_init(); SYSCONFIG_init(); osal_proc_init(); ret = MMZ_init(); if (ret != 0) { printf("MMZ init error.\n"); } ret = BASE_init(); if (ret != 0) { printf("base init error.\n"); } ret = SYS_init(); if (ret != 0) { printf("sys init error.\n"); } ret = RGN_init(); if (ret != 0) { printf("rgn init error.\n"); } ret = VGS_init(); if (ret != 0) { printf("vgs init error.\n"); } ret = ISP_init(); if (ret != 0) { printf("isp init error.\n"); } ret = VI_init(); if (ret != 0) { printf("vi init error.\n"); } ret = VPSS_init(); if (ret != 0) { printf("vpss init error.\n"); } ret = VO_init(); if (ret != 0) { printf("vo init error.\n"); } ret = CHNL_init(); if (ret != 0) { printf("chnl init error.\n"); } ret = VEDU_init(); if (ret != 0) { printf("vedu init error.\n"); } ret = RC_init(); if (ret != 0) { printf("rc init error.\n"); } ret = VENC_init(); if (ret != 0) { printf("venc init error.\n"); } ret = H264e_init(); if (ret != 0) { printf("H264e init error.\n"); } ret = H265e_init(); if (ret != 0) { printf("H265e init error.\n"); } ret = JPEGE_init(); if (ret != 0) { printf("jpege init error.\n"); } ret = IVE_init(); if (ret != 0) { printf("ive init error.\n"); } insert_audio(); ret = PWM_init(); if (ret != 0) { printf("pwm init error.\n"); } #ifndef LOSCFG_DRIVERS_HDF_PLATFORM_SPI #ifdef LOSCFG_DRIVERS_SPI dprintf("spi bus 
init ...\n"); extern int spi_dev_init(void); spi_dev_init(); #endif #endif #ifndef LOSCFG_DRIVERS_HDF_PLATFORM_I2C #ifdef LOSCFG_DRIVERS_I2C dprintf("i2c bus init ...\n"); extern int i2c_dev_init(void); i2c_dev_init(); #endif #endif ret = hi_sensor_spi_init(); if (ret != 0) { printf("sensor spi init error.\n"); } ret = Cipher_init(); if (ret != 0) { printf("cipher init error.\n"); } ret = MIPI_RX_init(); if (ret != 0) { printf("mipi_rx init error.\n"); } ret = hi_sensor_i2c_init(); if (ret != 0) { printf("sensor i2c init error.\n"); } else { printf("sensor i2c init OK.\n"); } ret = TDE_init(); if (ret != 0) { printf("TDE init error.\n"); } ret = HIFB_init(); if (ret != 0) { printf("HIFB_init error.\n"); } ret = PM_init(); if (ret != 0) { printf("PM_init error.\n"); } printf("SDK init ok...\n"); } #ifdef __cplusplus #if __cplusplus } #endif #endif /* End of #ifdef __cplusplus */
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/ddr_init/include/types.h
<reponame>openharmony-gitee-mirror/device_hisilicon_hispark_aries #ifndef __TYPES_H__ #define __TYPES_H__ typedef signed char s8; typedef unsigned char u8; typedef signed short s16; typedef unsigned short u16; typedef signed int s32; typedef unsigned int u32; typedef signed long long s64; typedef unsigned long long u64; typedef unsigned int size_t; typedef int ptrdiff_t; #define BITS_PER_LONG 32 /* Dma addresses are 32-bits wide. */ typedef u32 dma_addr_t; #undef NULL #if defined(__cplusplus) #define NULL 0 #else #define NULL ((void *)0) #endif typedef s8 INT8; typedef s16 INT16; typedef s32 INT32; typedef u8 UINT8; typedef u16 UINT16; typedef u32 UINT32; typedef int BOOL; typedef int STATUS; #define TRUE 1 #define FALSE 0 #define ERROR -1 #define OK 0 #define FOREVER while(1) #define IN #define OUT #define PRIVATE static typedef signed char int8_t; typedef unsigned char uint8_t; typedef signed short int16_t; typedef unsigned short uint16_t; typedef signed int int32_t; typedef unsigned int uint32_t; typedef long int64_t; typedef unsigned long uint64_t; typedef unsigned char u_char; typedef unsigned long u_long; typedef unsigned int u_int; typedef unsigned char u_int8_t; typedef unsigned short u_int16_t; typedef unsigned int u_int32_t; #endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/board/include/hisoc/timer.h
<reponame>openharmony-gitee-mirror/device_hisilicon_hispark_aries /* * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __HISOC_TIMER_H__ #define __HISOC_TIMER_H__ #include "asm/platform.h" #include "los_bitmap.h" #ifdef __cplusplus #if __cplusplus extern "C" { #endif /* __cplusplus */ #endif /* __cplusplus */ #define TIMER_LOAD 0x0 #define TIMER_VALUE 0x4 #define TIMER_CONTROL 0x8 #define TIMER_INT_CLR 0xc #define TIMER_RIS 0x10 #define TIMER_MIS 0x14 #define TIMER_BGLOAD 0x18 #define TIMER0_ENABLE BIT(16) #define TIMER1_ENABLE BIT(17) #define TIMER2_ENABLE BIT(18) #define TIMER3_ENABLE BIT(19) #define TIMER4_ENABLE BIT(20) #define TIMER5_ENABLE BIT(21) #define TIMER6_ENABLE BIT(22) #define TIMER7_ENABLE BIT(23) #define TIMER8_ENABLE BIT(24) #define TIMER9_ENABLE BIT(25) #define TIMER10_ENABLE BIT(26) #define TIMER11_ENABLE BIT(27) unsigned int arch_timer_rollback(void); VOID LOS_Udelay(UINT32 usecs); VOID LOS_Mdelay(UINT32 msecs); VOID reset_timer_masked(VOID); #ifdef __cplusplus #if __cplusplus } #endif /* __cplusplus */ #endif /* __cplusplus */ #endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/ddr_init/include/common.h
#ifndef __COMMON_H_ #define __COMMON_H_ #include "types.h" /*----------------------------------------------------------------- * set cpu mode interface ------------------------------------------------------------------*/ void set_mod_normal(); void set_mod_slow(); /*----------------------------------------------------------------- * serial interface ------------------------------------------------------------------*/ int serial_init (); int serial_deinit(); void serial_putc (const char c); void serial_putchar (const char c); void serial_puts (const char *s); void serial_flush(); int serial_getc (void); int serial_tstc (void); /*----------------------------------------------------------------- * mmc interface ------------------------------------------------------------------*/ int mmc_init(); void mmc_deinit(); size_t mmc_read(void *ptr, size_t size); /*----------------------------------------------------------------- * timer interface ------------------------------------------------------------------*/ int timer_init(); void timer_deinit(); void timer_start(); unsigned long timer_get_val(); unsigned long timer_get_divider(); /*----------------------------------------------------------------- * I cache interface ------------------------------------------------------------------*/ void open_I_cache(); void close_I_cache(); /*----------------------------------------------------------------- * mem operation interface ------------------------------------------------------------------*/ void* memset(void * s,int c,size_t count); void* memcpy(void * dest,const void *src,size_t count); #endif /*__COMMON_H_*/
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/board/include/hisoc/spinor.h
/*
 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __HISOC_SPINOR_H__
#define __HISOC_SPINOR_H__

#include "asm/platform.h"
#include "asm/io.h"

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

/* SFC (SPI flash controller) clock/reset control register and its fields. */
#define PERI_CRG48 (CRG_REG_BASE + 0x00C0)
#define PERI_CRG48_RST (1 << 0)     /* soft reset */
#define PERI_CRG48_CLKEN (1 << 1)   /* clock gate enable */
#define PERI_CRG48_CLK_24M (0 << 2) /* clock select: 24 MHz (field value 0) */
#define PERI_CRG48_CLK_75M ((0 << 3) | (1 << 2))
#define PERI_CRG48_CLK_125M ((1 << 2) | (1 << 3))

#define SFC_ADDR_MODE_REG (0x8C)    /* sysctrl offset holding boot addr mode */
#define SFC_ADDR_MODE_MASK (0x80)
#define SFC_CLSEL_MASK (0xC)        /* clock-select field of PERI_CRG48 */

#define SFC_PERI_CLKDIV1_SHIFT (28)
#define SFC_PERI_CLKDIV1_MASK (0xF)

/*****************************************************************************/
/* Statement-expression macro: reads the SFC 3/4-byte address boot mode from
 * the system controller and yields the masked mode bit. */
#undef GET_SFC_ADDR_MODE
#define GET_SFC_ADDR_MODE ({ \
    int start_up_mode = 0; \
    start_up_mode = readl(IO_ADDRESS(SYS_CTRL_REG_BASE + SFC_ADDR_MODE_REG)); \
    start_up_mode &= SFC_ADDR_MODE_MASK; \
    start_up_mode; })

/*****************************************************************************/
/*
 * Programs the SFC clock select (and optionally the clock gate) in
 * PERI_CRG48. `clock` is a pre-encoded PERI_CRG48_CLK_* field value; 0
 * falls back to the 24 MHz default. The register is only written when the
 * value actually changes.
 *
 * NOTE(review): the `regval &= ~SFC_CLSEL_MASK;` in both branches is
 * redundant -- the field is already cleared before the if. Kept verbatim.
 */
static inline void hisfc350_set_system_clock(unsigned clock, int clk_en)
{
    unsigned int regval = readl(PERI_CRG48);

    regval = regval & (~SFC_CLSEL_MASK);
    if (clock) {
        regval &= ~SFC_CLSEL_MASK;
        regval |= clock & SFC_CLSEL_MASK;
    } else {
        regval &= ~SFC_CLSEL_MASK;
        regval |= PERI_CRG48_CLK_24M; /* Default Clock */
    }

    if (clk_en)
        regval |= PERI_CRG48_CLKEN;

    if (regval != readl(PERI_CRG48))
        writel(regval, (PERI_CRG48));
}

/*****************************************************************************/
/*
 * Maps a requested frequency (*clock, in MHz) to the fastest supported
 * PERI_CRG48_CLK_* register encoding not exceeding it, writing the encoding
 * back through *clock. The table stores {threshold, encoding} pairs, where
 * each threshold is CLK_2X(f) = ceil(f/2); the scan stops at the first
 * threshold above the request (terminated by a 0 threshold entry).
 */
static inline void hisfc350_get_best_clock(unsigned int *clock)
{
    int ix;
    int clk_reg;

#define CLK_2X(_clk) (((_clk) + 1) >> 1)
    unsigned int sysclk[] = {
        CLK_2X(24), PERI_CRG48_CLK_24M,
        CLK_2X(75), PERI_CRG48_CLK_75M,
        CLK_2X(125), PERI_CRG48_CLK_125M,
        0, 0,
    };
#undef CLK_2X

    clk_reg = PERI_CRG48_CLK_24M; /* slowest clock is the safe fallback */
    for (ix = 0; sysclk[ix]; ix += 2) {
        if (*clock < sysclk[ix])
            break;
        clk_reg = sysclk[ix + 1];
    }
    *clock = clk_reg;
}

/*****************************************************************************/
#ifdef CONFIG_HISFC350_SHOW_CYCLE_TIMING
/* Debug helper: renders the effective SFC clock (derived from
 * PERI_CLKDIV1[31:28]) as a "<n>M" string. Returns a static buffer, so it
 * is not reentrant. */
static inline char * hisfc350_get_clock_str(unsigned int clk_reg)
{
    static char buffer[40];

    /* calculate reference PERI_CLKDIV1[31:28] */
    SFC_PR(BT_DBG, "clk_reg=0x%0x.\n", clk_reg);
    clk_reg = 216 / ((clk_reg >> SFC_PERI_CLKDIV1_SHIFT) & SFC_PERI_CLKDIV1_MASK);
    (VOID)sprintf_s(buffer, sizeof(buffer), "%dM", clk_reg);

    return buffer;
}
#endif /* CONFIG_HISFC350_SHOW_CYCLE_TIMING */

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/board/include/hisoc/mmc.h
/*
 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __HISOC_MMC_H_
#define __HISOC_MMC_H_
/************************************************************************/
#include "asm/platform.h"
#include "asm/dma.h"
#include "asm/io.h"
#include "los_bitmap.h"
#include "mmc/mmc_caps.h"
#include "sdhci/sdhci_reg.h"

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/************************************************************************/
#define MAX_MMC_NUM 2

#define USE_MMC0 1
#define USE_MMC1 1

#define MMC0 0
#define MMC1 1

// sdio0 & emmc controller clock limits
#define CONFIG_MMC0_CCLK_MIN 100000 // 100KHz
#ifdef LOSCFG_DRIVERS_EMMC_HS400
#define CONFIG_MMC0_CCLK_MAX 90000000 // 90MHz
#else
#define CONFIG_MMC0_CCLK_MAX 150000000 // 150MHz
#endif

// sdio1 controller clock limits
#define CONFIG_MMC1_CCLK_MIN 100000 // 100KHz
#define CONFIG_MMC1_CCLK_MAX 50000000 // 50MHz

// NOTE(review): the CRG registers below were marked "not exist" upstream --
// verify against the chip's CRG register map before use.
#define PERI_CRG86 (CRG_REG_BASE + 0x158)
#define PERI_CRG87 (CRG_REG_BASE + 0x15C)
#define PERI_CRG94 (CRG_REG_BASE + 0x178)
#define PERI_CRG95 (CRG_REG_BASE + 0x17C)
#define PERI_CRG96 (CRG_REG_BASE + 0x180)
#define PERI_CRG106 (CRG_REG_BASE + 0x1A8)
#define PERI_CRG117 (CRG_REG_BASE + 0x01D4)
#define PERI_CRG123 (CRG_REG_BASE + 0x1EC)
#define PERI_CRG133 (CRG_REG_BASE + 0x214)
#define PERI_CRG143 (CRG_REG_BASE + 0x23C)

#define PERI_CRG125 (CRG_REG_BASE + 0x01F4)

// eMMC DRV DLL
#define PERI_CRG127 (CRG_REG_BASE + 0x01FC)
#define PERI_SAM_DRV_SHFT (24)
#define PERI_SAM_DRV_MASK (0x1f << 24)

// SDIO1 DRV DLL
#define PERI_CRG136 (CRG_REG_BASE + 0x0220)
#define PERI_CRG139 (CRG_REG_BASE + 0x022C)

// sample-B DLL phase control per controller
#define PERI_SDIO0_SAMPLB_DLL_CTRL (CRG_REG_BASE + 0x1f8)
#define PERI_SDIO1_SAMPLB_DLL_CTRL (CRG_REG_BASE + 0x21c)
#define SDIO_SAMPLB_DLL_CLK_MASK (0x1fU << 0)
#define SDIO_SAMPLB_SEL(phase) ((phase) << 0)

#define PERI_SDIO0_DRV_DLL_CTRL (CRG_REG_BASE + 0x210)
#define PERI_SDIO1_DRV_DLL_CTRL (CRG_REG_BASE + 0x228)
#define SDIO_DRV_DLL_LOCK (1U << 15)

#define PERI_SDIO0_SAMPL_DLL_STATUS (CRG_REG_BASE + 0x208)
#define PERI_SDIO1_SAMPL_DLL_STATUS (CRG_REG_BASE + 0x224)
#define SDIO_SAMPL_DLL_SLAVE_READY (1)
#define SDIO_SAMPL_DLL_SLAVE_EN (1U << 16)

// eMMC clock/reset bits in PERI_CRG106
#define PERI_CRG106_EMMC_CRG_REQ (1U << 27)
#define PERI_CRG106_EMMC_CKEN (1U << 28)
#define PERI_CRG106_EMMC_DLL_RST (1U << 29)
#define PERI_CRG106_EMMC_SAM_RST (1U << 30)

#define EMMC_PHY_BASE IO_DEVICE_ADDR(0x12160000)

// supported card clock frequencies (Hz)
#define MMC_FREQ_100K 100000
#define MMC_FREQ_400K 400000
#define MMC_FREQ_25M 25000000
#define MMC_FREQ_50M 50000000
// only support for EMMC chip
#define MMC_FREQ_90M 90000000
#define MMC_FREQ_112P5M 112500000
#define MMC_FREQ_150M 150000000

#define MMC_FREQ_MASK 0x7
#define MMC_FREQ_SHIFT 24

// SDIO0 clock gate and frequency-select encodings
#define SDIO0_CKEN (1U << 28)
#define SDIO0_CLK_SEL_100K (0)
#define SDIO0_CLK_SEL_400K (7)
#define SDIO0_CLK_SEL_25M (1)
#define SDIO0_CLK_SEL_50M (2)
#define SDIO0_CLK_SEL_100M (3)
#define SDIO0_CLK_SEL_125M (4)
#define SDIO0_CLK_SEL_150M (5)
#define SDIO0_CLK_SEL_200M (6)

// pad (io-mux) control registers for the SDIO0 pins
#define REG_CTRL_BASE IO_DEVICE_ADDR(0x100C0040)
#define REG_CTRL_SDIO0_CLK IO_DEVICE_ADDR(0x100C0040)
#define REG_CTRL_SDIO0_CMD IO_DEVICE_ADDR(0x100C0044)
#define REG_CTRL_SDIO0_DATA0 IO_DEVICE_ADDR(0x100C0048)
#define REG_CTRL_SDIO0_DATA1 IO_DEVICE_ADDR(0x100C004C)
#define REG_CTRL_SDIO0_DATA2 IO_DEVICE_ADDR(0x100C0050)
#define REG_CTRL_SDIO0_DATA3 IO_DEVICE_ADDR(0x100C0054)
#define REG_CTRL_SDIO0_CD_DET IO_DEVICE_ADDR(0x100C005C)
#define REG_CTRL_SDIO0_CD_POW IO_DEVICE_ADDR(0x120C0020)

// SDIO1 pad registers differ per chip variant
#ifdef LOSCFG_PLATFORM_HI3516EV200
#define REG_CTRL_SDIO1_CLK IO_DEVICE_ADDR(0x112c0048)
#define REG_CTRL_SDIO1_CMD IO_DEVICE_ADDR(0x112c004c)
#define REG_CTRL_SDIO1_DATA0 IO_DEVICE_ADDR(0x112c0064)
#define REG_CTRL_SDIO1_DATA1 IO_DEVICE_ADDR(0x112c0060)
#define REG_CTRL_SDIO1_DATA2 IO_DEVICE_ADDR(0x112c005c)
#define REG_CTRL_SDIO1_DATA3 IO_DEVICE_ADDR(0x112c0058)
#elif defined(LOSCFG_PLATFORM_HI3516EV300)
#define REG_CTRL_SDIO1_CLK IO_DEVICE_ADDR(0x100C0060)
#define REG_CTRL_SDIO1_CMD IO_DEVICE_ADDR(0x100C0064)
#define REG_CTRL_SDIO1_DATA0 IO_DEVICE_ADDR(0x100C0068)
#define REG_CTRL_SDIO1_DATA1 IO_DEVICE_ADDR(0x100C006C)
#define REG_CTRL_SDIO1_DATA2 IO_DEVICE_ADDR(0x100C0070)
#define REG_CTRL_SDIO1_DATA3 IO_DEVICE_ADDR(0x100C0074)
#elif defined(LOSCFG_PLATFORM_HI3518EV300)
#define REG_CTRL_SDIO1_CLK IO_DEVICE_ADDR(0x112C0048)
#define REG_CTRL_SDIO1_CMD IO_DEVICE_ADDR(0x112C004C)
#define REG_CTRL_SDIO1_DATA0 IO_DEVICE_ADDR(0x112C0064)
#define REG_CTRL_SDIO1_DATA1 IO_DEVICE_ADDR(0x112C0060)
#define REG_CTRL_SDIO1_DATA2 IO_DEVICE_ADDR(0x112C005C)
#define REG_CTRL_SDIO1_DATA3 IO_DEVICE_ADDR(0x112C0058)
#endif

// pad (io-mux) control registers for the eMMC pins
#define REG_CTRL_EMMC_CLK IO_DEVICE_ADDR(0x100C0014)
#define REG_CTRL_EMMC_CMD IO_DEVICE_ADDR(0x100C0018)
#define REG_CTRL_EMMC_DATA0 IO_DEVICE_ADDR(0x100C0020)
#define REG_CTRL_EMMC_DATA1 IO_DEVICE_ADDR(0x100C001c)
#define REG_CTRL_EMMC_DATA2 IO_DEVICE_ADDR(0x100C0028)
#define REG_CTRL_EMMC_DATA3 IO_DEVICE_ADDR(0x100C0024)
#define REG_CTRL_EMMC_DATA4 IO_DEVICE_ADDR(0x100C0030)
#define REG_CTRL_EMMC_DATA5 IO_DEVICE_ADDR(0x100C0034)
#define REG_CTRL_EMMC_DATA6 IO_DEVICE_ADDR(0x100C0038)
#define REG_CTRL_EMMC_DATA7 IO_DEVICE_ADDR(0x100C003c)
#define REG_CTRL_EMMC_DS IO_DEVICE_ADDR(0x100C0058)
#define REG_CTRL_EMMC_RST IO_DEVICE_ADDR(0x100C005C)

// macro for io_mux
#define IO_CFG_SR BIT(10)           /* slew rate */
#define IO_CFG_PULL_DOWN BIT(9)
#define IO_CFG_PULL_UP BIT(8)
#define IO_CFG_DRV_STR_MASK (0xfU << 4)  /* drive strength field */
#define IO_DRV_MASK (0x7f0)
#define IO_DRV_STR_SEL(str) ((str) << 4)
#define IO_MUX_CLK_TYPE_EMMC 0x0
#define IO_MUX_CLK_TYPE_SD 0x1
#define IO_MUX_SHIFT(type) ((type) << 0)
#define IO_MUX_MASK (0xfU << 0)

enum mmc_width_seletion_mode {
    /* The bus width is auto determined by capabilities of a card and host */
    MMC_WIDTH_SELCTION_AUTO = 0,
    /*
     * The bus width of host and card are both manually forced into 1 bit mode
     * whether the card and host supports 4-bit or not.
     */
    MMC_WIDTH_FORCED_1_BIT,
};

/*
 * Description:
 *     This API is used to set the bus width selection mode of sd and sdio host devices.
 * Param: host_id [IN] ranging from 0 to (MAX_MMC_NUM - 1). The types of hosts, either sd or sdio,
 *     can be inquired from user's guide according to the param host_id.
 * Param: type [IN] the width selection mode, either MMC_WIDTH_SELCTION_AUTO, or MMC_WIDTH_FORCED_1_BIT.
 *     please refer to enum mmc_width_seletion_mode for more details.
 * Attention: The width selection mode defaults to auto mode if this API is not called.
 *     This API should be called before initial and registration of mmc driver.
 */
extern void mmc_width_seletion_mode_set(unsigned int host_id, enum mmc_width_seletion_mode type);

/* Enables the sample clock / gates the card clock on an SDHCI controller. */
void SDHCI_EnableSample(void *base);
void SDHCI_CardClk(void *base, int action);

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/board/include/hisoc/spinand.h
<reponame>openharmony-gitee-mirror/device_hisilicon_hispark_aries<gh_stars>1-10
/*
 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __HISOC_SPINAND_H__
#define __HISOC_SPINAND_H__

#include "asm/platform.h"

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

/*****************************************************************************/
/* CRG48: SPI NAND controller clock/reset control register (offset in CRG block). */
#define CRG48 0xc0
#define CRG48_SPI_NAND_CLK_SEL(_clk) (((_clk) & 0x3) << 6) /* bits[7:6]: clock select */
#define CRG48_SPI_NAND_CLK_EN (1 << 5)                     /* bit[5]: clock gate enable */
#define CRG48_SPI_NAND_SOFT_RST_REQ (1 << 4)               /* bit[4]: soft reset request */
#define SPI_NAND_CLK_SEL_MASK (0x3 << 6)

#define DEVICE_TYPE_SHIFT 1
#define DEVICE_TYPE_MASK (0x1 << 1)

/* raw encodings fed to CRG48_SPI_NAND_CLK_SEL() */
#define CLK_24M 0
#define CLK_75M 1
#define CLK_125M 2

/* pre-shifted clock-select field values */
#define SPI_NAND_CLK_SEL_24M CRG48_SPI_NAND_CLK_SEL(CLK_24M)
#define SPI_NAND_CLK_SEL_75M CRG48_SPI_NAND_CLK_SEL(CLK_75M)
#define SPI_NAND_CLK_SEL_125M CRG48_SPI_NAND_CLK_SEL(CLK_125M)

#define GET_CLK_TYPE(_reg) (((_reg) >> 2) & 0x3)
/*****************************************************************************/
/*
 * Program the SPI NAND controller clock via CRG48.
 * clock:  one of SPI_NAND_CLK_SEL_xxx (already shifted); 0 selects the 75M default.
 * clk_en: non-zero gates the clock on, zero gates it off.
 *
 * NOTE(review): "regval & SPI_NAND_CLK_SEL_MASK" preserves ONLY the clock-select
 * bits of the value read back and discards every other CRG48 bit (e.g.
 * CRG48_SPI_NAND_SOFT_RST_REQ). If the intent was to keep the other bits, this
 * should be "regval & ~SPI_NAND_CLK_SEL_MASK" -- confirm against the CRG48
 * register description before changing.
 */
static void hisnfc100_set_system_clock(int clock, int clk_en)
{
    unsigned base = CRG_REG_BASE;
    unsigned regval = readl(base + CRG48);

    if (!clock)
        clock = SPI_NAND_CLK_SEL_75M; /* default clock select */

    regval = (regval & SPI_NAND_CLK_SEL_MASK) | clock;

    if (clk_en)
        regval |= CRG48_SPI_NAND_CLK_EN;
    else
        regval &= ~CRG48_SPI_NAND_CLK_EN;

    /* skip the write when the register already holds the target value */
    if (readl(base + CRG48) != regval)
        writel(regval, (base + CRG48));
}
/*****************************************************************************/ static void hisnfc100_get_best_clock(unsigned int *clock) { int ix; int clk_reg; #define CLK_2X(_clk) (((_clk) + 1) >> 1) unsigned int sysclk[] = { CLK_2X(24), SPI_NAND_CLK_SEL_24M, CLK_2X(75), SPI_NAND_CLK_SEL_75M, CLK_2X(125), SPI_NAND_CLK_SEL_125M, 0, 0, }; #undef CLK_2X clk_reg = SPI_NAND_CLK_SEL_24M; for (ix = 0; sysclk[ix]; ix += 2) { if (*clock < sysclk[ix]) break; clk_reg = sysclk[ix + 1]; } *clock = clk_reg; } #ifdef __cplusplus #if __cplusplus } #endif /* __cplusplus */ #endif /* __cplusplus */ #endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/board/include/asm/hal_platform_ints.h
/*
 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PLATFORM_HAL_PLATFORM_INTS_H
#define PLATFORM_HAL_PLATFORM_INTS_H

#include"los_typedef.h"

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

/**
 * Maximum number of supported hardware devices that generate hardware interrupts.
 * This platform supports up to 96 hardware interrupt sources.
 */
#define OS_HWI_MAX_NUM 96

/**
 * Maximum interrupt number.
 */
#define OS_HWI_MAX ((OS_HWI_MAX_NUM) - 1)

/**
 * Minimum interrupt number.
 */
#define OS_HWI_MIN 0

/**
 * Maximum usable interrupt number.
 */
#define OS_USER_HWI_MAX OS_HWI_MAX

/**
 * Minimum usable interrupt number.
 */
#define OS_USER_HWI_MIN OS_HWI_MIN

/* ARM generic-timer PPIs: secure (29) and non-secure (30) physical timer. */
#define NUM_HAL_INTERRUPT_CNTPSIRQ 29
#define NUM_HAL_INTERRUPT_CNTPNSIRQ 30
#define OS_TICK_INT_NUM NUM_HAL_INTERRUPT_CNTPSIRQ // use secure physical timer for now

/* SoC peripheral interrupt numbers. Note TIMER0/TIMER1 share IRQ 37 and
 * TIMER2/TIMER3 share IRQ 38; the XHCI host and USB device share IRQ 71. */
#define NUM_HAL_INTERRUPT_TIMER0 37
#define NUM_HAL_INTERRUPT_TIMER1 37
#define NUM_HAL_INTERRUPT_TIMER2 38
#define NUM_HAL_INTERRUPT_TIMER3 38
#define NUM_HAL_INTERRUPT_UART0 39
#define NUM_HAL_INTERRUPT_UART1 40
#define NUM_HAL_INTERRUPT_UART2 41
#define NUM_HAL_INTERRUPT_GPIO0 48
#define NUM_HAL_INTERRUPT_GPIO1 49
#define NUM_HAL_INTERRUPT_GPIO2 50
#define NUM_HAL_INTERRUPT_GPIO3 51
#define NUM_HAL_INTERRUPT_GPIO4 52
#define NUM_HAL_INTERRUPT_GPIO5 53
#define NUM_HAL_INTERRUPT_GPIO6 54
#define NUM_HAL_INTERRUPT_GPIO7 55
#define NUM_HAL_INTERRUPT_GPIO8 56
#define NUM_HAL_INTERRUPT_GPIO9 57
#define NUM_HAL_INTERRUPT_SDIO 62
#define NUM_HAL_INTERRUPT_SDIO1 63
#define NUM_HAL_INTERRUPT_FMC 64
#define NUM_HAL_INTERRUPT_ETH 65
#define NUM_HAL_INTERRUPT_DMAC 70
#define NUM_HAL_INTERRUPT_USB_XHCI 71
#define NUM_HAL_INTERRUPT_USB_DEV 71

/* Aliases used by generic timer/hrtimer driver code. */
#define NUM_HAL_INTERRUPT_TIMER NUM_HAL_INTERRUPT_TIMER0
#define NUM_HAL_INTERRUPT_HRTIMER NUM_HAL_INTERRUPT_TIMER3

#define NUM_HAL_INTERRUPT_NONE -1

/* ISR table bounds; 1020 matches the ARM GIC maximum shared interrupt ID
 * (presumably sized for the GIC -- confirm against the interrupt controller). */
#define NUM_HAL_ISR_MIN OS_HWI_MIN
#define NUM_HAL_ISR_MAX 1020

#define NUM_HAL_ISR_COUNT (NUM_HAL_ISR_MAX - NUM_HAL_ISR_MIN + 1)

/* Identity mapping: device registers are accessed at their physical address. */
#define IO_ADDRESS(x) (x)

/* Compatibility wrappers: note the (addr, data) argument order is swapped
 * relative to the underlying READ_/WRITE_ macros. */
#define HAL_READ_UINT8(addr, data) READ_UINT8(data, addr)
#define HAL_WRITE_UINT8(addr, data) WRITE_UINT8(data, addr)
#define HAL_READ_UINT32(addr, data) READ_UINT32(data, addr)
#define HAL_WRITE_UINT32(addr, data) WRITE_UINT32(data, addr)

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif // PLATFORM_HAL_PLATFORM_INTS_H
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/ddr_init/include/platform.h
<reponame>openharmony-gitee-mirror/device_hisilicon_hispark_aries
#ifndef __HI_CHIP_REGS_H__
#define __HI_CHIP_REGS_H__

//#define CONFIG_DDR_TRAINING_V2

#define BIT(nr) (1 << (nr))

/* On-chip RAM addresses used by the DDR-init/training code
 * (named so -- confirm against the secure-boot linker layout). */
#define RAM_START_ADRS 0x04010500
#define STACK_TRAINING 0x04018000

/* Peripheral register base addresses. */
#define DDR_DDRT_REG_BASE 0x11330000

#define TIMER0_REG_BASE 0x12000000
#define TIMER1_REG_BASE 0x12000020
#define TIMER2_REG_BASE 0x12001000
#define TIMER3_REG_BASE 0x12001020

/* Timer register offsets (relative to a TIMERx base). */
#define REG_TIMER_RELOAD 0x0
#define REG_TIMER_VALUE 0x4
#define REG_TIMER_CONTROL 0x8

#define CRG_REG_BASE 0x12010000

#define SYS_CTRL_REG_BASE 0x12020000
#define REG_BASE_SCTL SYS_CTRL_REG_BASE

/* SC_CTRL (offset 0) bit fields. */
#define REG_SC_CTRL 0
#define REMAPCLEAR BIT(8)
#define REMAPCLEAR_SHIFT 8
#define TIME0_CLK_SEL BIT(16)
#define TIME0_CLK_SEL_SHIFT 16
#define TIME0_CLK_SEL_3M 0x0
#define TIME0_CLK_SEL_APB 0x1

#define REG_SC_SYSRES 0x4

/* System-controller status and general-purpose registers. */
#define REG_SYSSTAT 0x008C
#define REG_SC_GEN0 0x0138
#define REG_SC_GEN1 0x013c
#define REG_SC_GEN2 0x0140
#define REG_SC_GEN3 0x0144
#define REG_SC_GEN4 0x0148
#define REG_SC_GEN5 0x014c
#define REG_SC_GEN7 0x0154

#define MISC_REG_BASE 0x12028000
#define DDRC0_REG_BASE 0x11330000
#define UART0_REG_BASE 0x12040000
#define FMC_MEM_BASE 0x14000000
#define DDR_MEM_BASE 0x40000000

/* Chip-ID constant and mask for Hi3516EV200 detection. */
#define _HI3516EV200 (0x003516e200LL)
#define _HI3516EV200_MASK (0xFFFFFFFFFFLL)

#endif /* End of __HI_CHIP_REGS_H__ */
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/board/include/hisoc/nand.h
/* * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __HISOC_NAND_H__ #define __HISOC_NAND_H__ #include "asm/platform.h" #ifdef __cplusplus #if __cplusplus extern "C" { #endif /* __cplusplus */ #endif /* __cplusplus */ #define PERI_CRG52 (CRG_REG_BASE + 0x00D0) #define PERI_CRG52_CLK_EN (1U << 1) #define PERI_CRG52_CLK_SEL_198M (1U << 2) #define REG_SYSSTAT 0x008C #define BOOT_FROM_NAND 2 static void hinfc620_clk_enable(int enable) { unsigned int reg_val = readl(PERI_CRG52); if (enable) reg_val |= (PERI_CRG52_CLK_EN | PERI_CRG52_CLK_SEL_198M); else reg_val &= ~PERI_CRG52_CLK_EN; writel(reg_val, (PERI_CRG52)); } #define check_boot_type() ((readl(SYS_CTRL_REG_BASE + REG_SYSSTAT) >> 4) & 0x3); #ifdef __cplusplus #if __cplusplus } #endif /* __cplusplus */ #endif /* __cplusplus */ #endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/board/include/board.h
<gh_stars>1-10
/*
 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __BOARD_CONFIG_H__
#define __BOARD_CONFIG_H__

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */

/* physical memory base and size */
#define DDR_MEM_ADDR 0x40000000
#define DDR_MEM_SIZE 0x04000000 /* 64 MiB */

/* Peripheral register address base and size */
#define PERIPH_PMM_BASE 0x10000000
#define PERIPH_PMM_SIZE 0x10000000

/* Kernel virtual address window: same base/size as physical DDR (1:1). */
#define KERNEL_VADDR_BASE 0x40000000
#define KERNEL_VADDR_SIZE DDR_MEM_SIZE

/* System memory region handed to the OS allocator. */
#define SYS_MEM_BASE DDR_MEM_ADDR
#define SYS_MEM_SIZE_DEFAULT 0x2000000 /* 32 MiB by default */
#define SYS_MEM_END (SYS_MEM_BASE + SYS_MEM_SIZE_DEFAULT)

/* Memory reserved for exception interaction (1 MiB, per the name). */
#define EXC_INTERACT_MEM_SIZE 0x100000

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

#endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/ddr_init/drv/ddr_training_boot.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
 *
 * Description:Special function for ddr training when power up.
 */

#include "ddr_training_impl.h"
#include "ddr_interface.h"

#ifdef DDR_TRAINING_UART_CONFIG
/* Minimal early-UART output helpers supplied by the boot UART driver. */
extern void uart_early_put_hex(int hex);
extern void uart_early_putc(int chr);
#endif

/* Save DDR training result: nothing to persist in the power-up flow. */
void ddr_result_data_save(struct ddr_cfg_st *cfg, struct training_data *training)
{
	/* nothing to do when ddr training on power up */
}

/* Save low-power CA training data: also a no-op at power up. */
void ddr_lpca_data_save(struct ca_data_st *data)
{
	/* nothing to do when ddr training on power up */
}

/* Get DDRT test address used by the DDR test module during boot training. */
unsigned int ddr_ddrt_get_test_addr(void)
{
	return DDRT_CFG_TEST_ADDR_BOOT;
}

#ifdef DDR_TRAINING_UART_CONFIG
#ifdef DDR_TRAINING_MINI_LOG_CONFIG
/*
 * Display DDR training error when boot (mini-log build): emits
 * single-letter tags followed by hex values -- E<mask> P<phy> B<byte> D<dq>.
 */
void ddr_training_error(unsigned int mask, unsigned int phy, int byte, int dq)
{
	uart_early_putc('E');
	uart_early_put_hex(mask);
	uart_early_putc('P');
	uart_early_put_hex(phy);
	uart_early_putc('B');
	uart_early_put_hex(byte);
	uart_early_putc('D');
	uart_early_put_hex(dq);
}

/* Print the "DDR" banner at training start (mini-log build). */
void ddr_training_start(void)
{
	uart_early_putc('D');
	uart_early_putc('D');
	uart_early_putc('R');
}

/* Print 'S' on training success (mini-log build). */
void ddr_training_suc(void)
{
	uart_early_putc('S');
}
#else
/*
 * Define string to print. The literals are emitted as assembler labels
 * (str_wl, str_hwg, ...) so the asm blocks below can load their addresses
 * with "adr r0, <label>" instead of going through the C string pool.
 * NOTE(review): this relies on the labels staying within ADR range of the
 * referencing code -- holds only for the current code layout.
 */
void ddr_training_local_str(void)
{
	asm volatile(
		"str_wl:\n\t"
		".asciz \"WL\"\n\t"
		".align 2\n\t"
		"str_hwg:\n\t"
		".asciz \"HWG\"\n\t"
		".align 2\n\t"
		"str_gate:\n\t"
		".asciz \"Gate\"\n\t"
		".align 2\n\t"
		"str_ddrt:\n\t"
		".asciz \"DDRT\"\n\t"
		".align 2\n\t"
		"str_hwrd:\n\t"
		".asciz \"HWRD\"\n\t"
		".align 2\n\t"
		"str_mpr:\n\t"
		".asciz \"MPR\"\n\t"
		".align 2\n\t"
		"str_dataeye:\n\t"
		".asciz \"Dataeye\"\n\t"
		".align 2\n\t"
		"str_lpca:\n\t"
		".asciz \"LPCA\"\n\t"
		".align 2\n\t"
		"str_err:\n\t"
		".asciz \" Err:\"\n\t"
		".align 2\n\t"
		"str_phy:\n\t"
		".asciz \"Phy\"\n\t"
		".align 2\n\t"
		"str_byte:\n\t"
		".asciz \"Byte\"\n\t"
		".align 2\n\t"
		"str_dq:\n\t"
		".asciz \"DQ\"\n\t"
		".align 2\n\t"
		"str_ddrtr_start:\n\t"
		".asciz \"\r\\nDDRTR \"\n\t"
		".align 2\n\t"
		"str_ddrtr_suc:\n\t"
		".asciz \"Suc\"\n\t"
		".align 2\n\t"
	);
}

/*
 * Display DDR training error when boot: "<type> Err: Phy<p> Byte<b> DQ<d>",
 * where phy/byte/dq parts are printed only when meaningful (phy != 0,
 * byte/dq != -1).
 * NOTE(review): the "bl uart_early_puts" inside the asm blocks clobbers
 * r0-r3/lr without declaring clobbers to the compiler; lr survives only
 * because this is a non-leaf C function whose prologue saves it -- verify
 * against the generated code if the function is changed.
 */
void ddr_training_error(unsigned int mask, unsigned int phy, int byte, int dq)
{
	uart_early_putc('\r');
	uart_early_putc('\n');
	/* error type */
	switch (mask) {
	case DDR_ERR_WL:
		asm volatile(
			"adr r0, str_wl\n\t"
			"bl uart_early_puts"
		);
		break;
	case DDR_ERR_HW_GATING:
		asm volatile(
			"adr r0, str_hwg\n\t"
			"bl uart_early_puts"
		);
		break;
	case DDR_ERR_GATING:
		asm volatile(
			"adr r0, str_gate\n\t"
			"bl uart_early_puts"
		);
		break;
	case DDR_ERR_DDRT_TIME_OUT:
		asm volatile(
			"adr r0, str_ddrt\n\t"
			"bl uart_early_puts"
		);
		break;
	case DDR_ERR_HW_RD_DATAEYE:
		asm volatile(
			"adr r0, str_hwrd\n\t"
			"bl uart_early_puts"
		);
		break;
	case DDR_ERR_MPR:
		asm volatile(
			"adr r0, str_mpr\n\t"
			"bl uart_early_puts"
		);
		break;
	case DDR_ERR_DATAEYE:
		asm volatile(
			"adr r0, str_dataeye\n\t"
			"bl uart_early_puts"
		);
		break;
	case DDR_ERR_LPCA:
		asm volatile(
			"adr r0, str_lpca\n\t"
			"bl uart_early_puts"
		);
		break;
	default:
		break;
	}

	/* error string */
	asm volatile(
		"adr r0, str_err\n\t"
		"bl uart_early_puts"
	);

	/* error phy */
	if (0 != phy) {
		asm volatile(
			"adr r0, str_phy\n\t"
			"bl uart_early_puts"
		);
		uart_early_put_hex(phy);
	}

	/* error byte */
	if (-1 != byte) {
		asm volatile(
			"adr r0, str_byte\n\t"
			"bl uart_early_puts"
		);
		uart_early_put_hex(byte);
	}

	/* error dq */
	if (-1 != dq) {
		asm volatile(
			"adr r0, str_dq\n\t"
			"bl uart_early_puts"
		);
		uart_early_put_hex(dq);
	}
}

/* Display DDR training start when boot; asm-only body saves/restores lr
 * around the call explicitly. */
void ddr_training_start(void)
{
	asm volatile(
		"push {lr}\n\t"
		"adr r0, str_ddrtr_start\n\t"
		"bl uart_early_puts\n\t"
		"pop {lr}"
	);
}

/* Display DDR training result when boot. */
void ddr_training_suc(void)
{
	asm volatile(
		"push {lr}\n\t"
		"adr r0, str_ddrtr_suc\n\t"
		"bl uart_early_puts\n\t"
		"pop {lr}"
	);
}
#endif /* DDR_TRAINING_CUT_CODE_CONFIG */
#else
/* No-UART build: logging entry points compile to no-ops. */
void ddr_training_error(unsigned int mask, unsigned int phy, int byte, int dq)
{
	return;
}
void ddr_training_suc(void) { return; } void ddr_training_start(void) { return; } #endif /* DDR_TRAINING_UART_CONFIG */
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/mpp/module_init/src/hi_module_param.h
/*
 * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __HI_MOD_PARAM__
#define __HI_MOD_PARAM__

/* Boolean values used by the module-parameter structures below. */
typedef enum {
    HI_FALSE = 0,
    HI_TRUE = 1,
} HI_BOOL;

/* Number of hardware instances of each engine (sizes the enable arrays). */
#define VGS_IP_NUM 1
#define VPSS_IP_NUM 1
#define GDC_IP_NUM 1
#define VEDU_IP_NUM 1

/* Parameters for the BASE module. */
typedef struct hiBASE_MODULE_PARAMS_S {
    HI_BOOL bVbForceExit;
} BASE_MODULE_PARAMS_S;

/* Parameters for the HIFB (framebuffer) module. */
typedef struct hiHIFB_MODULE_PARAMS_S {
    char video[64]; /* video configuration string */
    HI_BOOL bUpdateRotateRect;
} HIFB_MODULE_PARAMS_S;

/* Parameters for the VGS module: job/task/node pool sizes and per-IP enables. */
typedef struct hiVGS_MODULE_PARAMS_S {
    unsigned int u32MaxVgsJob;
    unsigned int u32MaxVgsTask;
    unsigned int u32MaxVgsNode;
    unsigned int au32VgsEn[VGS_IP_NUM];
    HI_BOOL bVgsHdrSupport;
    HI_BOOL bVgsExitInSys;
} VGS_MODULE_PARAMS_S;

/* Parameters for the VPSS module. */
typedef struct hiVPSS_MODULE_PARAMS_S {
    unsigned int u32VpssEn[VPSS_IP_NUM];
} VPSS_MODULE_PARAMS_S;

/* Parameters for the GDC module. */
typedef struct hiGDC_MODULE_PARAMS_S {
    unsigned int u32MaxGdcJob;
    unsigned int u32MaxGdcTask;
    unsigned int u32MaxGdcNode;
    unsigned int au32GdcEn[GDC_IP_NUM];
} GDC_MODULE_PARAMS_S;

/* Parameters for the VDEC module. */
typedef struct hiVDEC_MODULE_PARAMS_S {
    unsigned int u32VdecMaxChnNum;
    HI_BOOL bVdecHfr;
} VDEC_MODULE_PARAMS_S;

/* Parameters for the IVE module. */
typedef struct hiIVE_MODULE_PARAMS_S {
    HI_BOOL bSavePowerEn;
    unsigned short u16IveNodeNum;
    unsigned short u16Rsv; /* reserved/padding */
} IVE_MODULE_PARAMS_S;

/* Parameters for the SVP NNIE module. */
typedef struct hiSVP_NNIE_MODULE_PARAMS_S {
    HI_BOOL bSavePowerEn;
    unsigned short u16NnieTskBufNum;
} SVP_NNIE_MODULE_PARAMS_S;

/* Parameters for the SVP DSP module. */
typedef struct hiSVP_DSP_MODULE_PARAMS_S {
    unsigned short u16NodeNum;
    unsigned short u16DspInitMode;
} SVP_DSP_MODULE_PARAMS_S;

/* Parameters for the ACODEC module. */
typedef struct hiACODEC_MODULE_PARAMS_S {
    unsigned int u32InitDelayTimeMs;
} ACODEC_MODULE_PARAMS_S;

/* Parameters for the ISP module. */
typedef struct hiISP_MODULE_PARAMS_S {
    unsigned int u32PwmNum;
    unsigned int u32ProcParam;
    unsigned int u32UpdatePos;
    unsigned int u32IntTimeOut;
    unsigned int bIntBottomHalf;
    unsigned int u32StatIntvl;
} ISP_MODULE_PARAMS_S;

/* Parameters for the H265E module. */
typedef struct hiH265E_MODULE_PARAMS_S {
    unsigned int u32FeatureEnable;
} H265E_MODULE_PARAMS_S;

/* Parameters for the VENC module. */
typedef struct hiVENC_MODULE_PARAMS_S {
    unsigned int u32VencMaxChnNum;
} VENC_MODULE_PARAMS_S;

/* Parameters for the VEDU module. */
typedef struct hiVEDU_MODULE_PARAMS_S {
    unsigned int vedu_en[VEDU_IP_NUM];
} VEDU_MODULE_PARAMS_S;

/* Parameters for the VFMW module. */
typedef struct hiVFMW_MODULE_PARAMS_S {
    int s32VfmwMaxChnNum;
} VFMW_MODULE_PARAMS_S;

/* Parameters for the SIL9024 HDMI transmitter driver. */
typedef struct hiSIL9024_MODULE_PARAMS_S {
    int norm;
    int i2c_num;
} SIL9024_MODULE_PARAMS_S;

/* Parameters for the ADV7179 video encoder driver. */
typedef struct hiADV7179_MODULE_PARAMS_S {
    int Norm_mode;
    int i2c_num;
} ADV7179_MODULE_PARAMS_S;

/* Parameters for the PM (power management) module. */
typedef struct hiPM_MODULE_PARAMS_S {
    HI_BOOL bAvspOn;            /* HI_TRUE: on,HI_FALSE: off */
    HI_BOOL bSvpAcceleratorOn;  /* HI_TRUE: on,HI_FALSE: off */
    // HI_U32 u32RegulatorType; /* 0:DC-DC, 1:PMU */
} PM_MODULE_PARAMS_S;

#define MMZ_SETUP_CMDLINE_LENGTH 256

/* Parameters for MMZ (media memory zone) setup. */
typedef struct hiMMZ_SETUP_MODULE_PARAMS_S {
    char mmz[MMZ_SETUP_CMDLINE_LENGTH];     /* mmz region command line */
    char map_mmz[MMZ_SETUP_CMDLINE_LENGTH]; /* mapped mmz region command line */
    int anony;
} MMZ_SETUP_MODULE_PARAMS_S;

#endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/ddr_init/boot/hi3518ev300/lowlevel_init_v300.c
<reponame>openharmony-gitee-mirror/device_hisilicon_hispark_aries
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED.

#include <ddr_interface.h>
#include <platform.h>
#include <ddr_training_impl.h>

/* Busy-wait for roughly 100*num NOPs (no timer available this early). */
static inline void delay(unsigned int num)
{
	volatile unsigned int i;
	for (i = 0; i < (100 * num); i++) {
		__asm__ __volatile__("nop");
	}
}

extern void reset_cpu(unsigned long addr);

static inline void DWB(void) /* drain write buffer */
{
}

/* Raw 32-bit MMIO read (no barrier beyond the volatile access). */
static inline unsigned int readl(unsigned addr)
{
	unsigned int val;
	val = (*(volatile unsigned int *)(addr));
	return val;
}

/* Raw 32-bit MMIO write, bracketed by (currently empty) DWB() calls. */
static inline void writel(unsigned val, unsigned addr)
{
	DWB();
	(*(volatile unsigned *) (addr)) = (val);
	DWB();
}

/* True random number generator (TRNG) block registers. */
#define REG_BASE_RNG_GEN 0x10090000
#define TRNG_DSTA_FIFO_DATA_OFST 0x204
#define TRNG_DATA_ST_OFST 0x208
#define BIT_TRNG_FIFO_DATA_CNT 0x8
#define TRNG_FIFO_DATA_CNT_MASK 0xff
#define REG_PERI_CRG104 0x1a0
#define TRNG_CLK_ENABLE (0x1<<3)
#define TRNG_CLK_DISABLE ~(0x1<<3)
#define TRNG_CTRL_DEF_VAL 0xa
#define HISEC_COM_TRNG_CTRL_OFST 0x200
#define REG_BASE_MISC 0x12030000
#define DDR_CA0_OFST 0x28
#define DDR_CA1_OFST 0x2C
#define DDR_CA2_OFST 0x30
#define REG_BASE_DDRC 0x120d0000
#define DDRC_CTRL_SREF_OFST (0x8000 + 0x0)
#define DDRC_CFG_DDRMODE_OFST (0x8000 + 0x50)
#define DDRC_CURR_FUNC_OFST (0x8000 + 0x294)
#define DDRC_CHANNEL_VALID_MASK (0xf)
#define DDRC_SELF_REFURBISH_MASK (0x1)
#define DDRC_SELF_REFURBISH_EN 0x1
#define DDRC_SELF_REFURBISH_EXIT (0x1 << 1)

#undef reg_get
#undef reg_set
#define reg_get(addr) readl(addr)
#define reg_set(addr, val) writel(val, (unsigned int)addr)

/* Enable the TRNG clock and program its control register with defaults. */
void trng_init(void)
{
	unsigned int reg_val = 0;
	/* open rsa and trng clock */
	reg_val = reg_get(CRG_REG_BASE + REG_PERI_CRG104);
	reg_val |= TRNG_CLK_ENABLE;
	reg_set(CRG_REG_BASE + REG_PERI_CRG104, reg_val);
	/* set trng ctrl register */
	reg_set(REG_BASE_RNG_GEN + HISEC_COM_TRNG_CTRL_OFST, TRNG_CTRL_DEF_VAL);
}

/* Gate the TRNG clock back off. */
void trng_deinit(void)
{
	unsigned int reg_val = 0;
	/* close rsa and trng clock */
	reg_val = reg_get(CRG_REG_BASE + REG_PERI_CRG104);
	reg_val &= TRNG_CLK_DISABLE;
	reg_set(CRG_REG_BASE + REG_PERI_CRG104, reg_val);
}

//svb  (smart voltage backoff: core voltage scaling from HPM readings)
#define SVB_VER_18EV300 0x10
#define CYCLE_NUM 4
#define HPM_CORE_REG0 0x120280d8
#define HPM_CORE_REG1 0x120280dc
#define PWM0_REG 0X12080000
#define PWM_REG_OFFSET 0x20
#define PWM_CFG1 0X04
#define PWM_CTRL 0X0C
#define SVB_VER_REG 0x12020168
#define HPM_CHECK_REG 0x1202015c
#define SYS_CTRL_VOLT_REG 0x12020158
#define SVB_PWM_SEL 0x1202009c
#define TSENSOR_STATUS0 0x120280bc
#define OTP_HPM_CORE_REG 0x100a002c

/* Average four accumulated HPM monitor sums: each entry holds CYCLE_NUM
 * samples, so (val[i] >> 2) / 4 yields the overall mean. */
static unsigned hpm_value_avg(unsigned int* val)
{
	unsigned int i = 0;
	unsigned tmp = 0;
	for (i = 0; i < 4; i++) {
		tmp += val[i] >> 2;
	}
	return tmp >> 2;
}

/* Sample the four 10-bit HPM core monitors CYCLE_NUM times each and
 * return their average via *hpm_core. */
static void get_hpm_value(unsigned int* hpm_core)
{
	int i = 0;
	unsigned int temp = 0;
	unsigned int core_value[4];
	core_value[0] = 0;
	core_value[1] = 0;
	core_value[2] = 0;
	core_value[3] = 0;

	for (i = 0; i < CYCLE_NUM; i++) {
		//delay(10);
		temp = readl(HPM_CORE_REG0);
		core_value[1] += (temp >> 16) & 0x3ff;
		core_value[0] += temp & 0x3ff;
		temp = readl(HPM_CORE_REG1);
		core_value[3] += (temp >> 16) & 0x3ff;
		core_value[2] += temp & 0x3ff;
	}

	*hpm_core = hpm_value_avg(core_value);
}

static void start_hpm(unsigned int* hpm_core)
{
	get_hpm_value(hpm_core);
}

/* Clamp *hpm_core into [150, 350], and record the (possibly clamped) value
 * plus an error flag in the HPM_CHECK_REG bit-fields. */
static void hpm_check(unsigned int* hpm_core)
{
	union {
		struct {
			unsigned int reserved_0 : 16; /* [15..0]*/
			unsigned int sys_hpm_core : 9; /* [24..16]*/
			unsigned int reserved_1 : 1; /* [25]*/
			unsigned int hpm_core_err : 1; /* [26]*/
			unsigned int reserved_2 : 5; /* [27..31]*/
		} bits;
		unsigned int u32;
	} sysboot10;

	sysboot10.u32 = readl(HPM_CHECK_REG);
	sysboot10.bits.sys_hpm_core = 0;
	sysboot10.bits.hpm_core_err = 0;
	if(*hpm_core < 150) {
		*hpm_core = 150;
		sysboot10.bits.hpm_core_err = 1;
	}
	if(*hpm_core > 350) {
		*hpm_core = 350;
		sysboot10.bits.hpm_core_err = 1;
	}
	sysboot10.bits.sys_hpm_core = *hpm_core;
	writel(sysboot10.u32, HPM_CHECK_REG);
}

/*
 * Convert the 10-bit T-sensor code to degrees Celsius (clamped to [-40, 110]).
 * NOTE(review): *temperature is unsigned int, yet -40 (and any negative
 * result of the linear formula) is stored into it, wrapping to a huge value;
 * adjust_hpm() then treats sub-zero temperatures as ">= 70". The parameter
 * should very likely be int* -- confirm with the SVB owners before changing.
 */
static void get_temperature(unsigned int *temperature)
{
	unsigned int value = 0;
	value = readl(TSENSOR_STATUS0);
	value = value & 0x3ff;
	if (value <= 117) {
		*temperature = -40;
	} else if (value >= 841) {
		*temperature = 110;
	} else {
		*temperature = (((value - 117) * 212) >> 10) - 40;
	}
}

/* Temperature compensation of the HPM reading (only applied at >= 70 C;
 * see NOTE on get_temperature about negative temperatures). */
static void adjust_hpm(unsigned int *hpm_core, unsigned int temperature)
{
	if ((*hpm_core >= 283) && (temperature >= 70)) {
		*hpm_core = *hpm_core + 4 + (((temperature - 70) * 205) >> 10);
	} else if ((*hpm_core <= 222) && (temperature >= 70)) {
		*hpm_core = *hpm_core - 4;
	} else {
	}
}

//max: 1099 min:654
//y = 965 , x <= 190
//y = -1.399x + 1231, 190<x<310
//y = 797, x >= 310
/*
 * Map the HPM reading to a core voltage (mV), apply the signed OTP trim from
 * the upper half of OTP_HPM_CORE_REG, then program the PWM duty that realizes
 * that voltage.
 * NOTE(review): the implemented line (1234 - 1445x/1024 ~= -1.411x + 1234)
 * does not exactly match the "-1.399x + 1231" comment above; presumably the
 * code is authoritative -- confirm against the SVB calibration data.
 */
static void set_hpm_core_volt(unsigned int hpm_core_value, unsigned int pwm_id)
{
	unsigned int volt;
	unsigned int duty;
	unsigned int otp_vmin_core = readl(OTP_HPM_CORE_REG);

	if(hpm_core_value <= 190) {
		volt = 966;
	} else if(hpm_core_value >= 310) {
		volt = 796;
	} else {
		volt = 1234 - ((1445 * hpm_core_value) >> 10);
	}
	/* sign-extended 16-bit OTP trim */
	volt = volt + (int)((short int)(otp_vmin_core >> 16));

	writel(volt, SYS_CTRL_VOLT_REG);

	duty = ((unsigned int)((1099 - volt) * 460) >> 10);

	writel(duty, PWM0_REG + pwm_id * PWM_REG_OFFSET + PWM_CFG1);
	writel(0x5, PWM0_REG + pwm_id * PWM_REG_OFFSET + PWM_CTRL);
}

/* Run the SVB flow: stamp the SVB version, measure temperature and HPM,
 * compensate/clamp, pick the PWM channel, set the core voltage, and wait
 * for the rail to settle. */
void start_svb(void)
{
	unsigned int hpm_core = 0;
	unsigned int pwm_id = 0;
	unsigned int temperature = 0;
	unsigned int tmp_reg = readl(SVB_VER_REG);
	tmp_reg = (tmp_reg & 0xff00ffff) | (SVB_VER_18EV300 << 16);
	writel(tmp_reg, SVB_VER_REG);

	get_temperature(&temperature);
	start_hpm(&hpm_core);
	adjust_hpm(&hpm_core, temperature);
	hpm_check(&hpm_core);
	pwm_id = readl(SVB_PWM_SEL) & 0xf;
	set_hpm_core_volt(hpm_core, pwm_id);
	delay(160);
}

/* [CUSTOM] DDR PHY0-PHY1 base register */
#define DDR_REG_BASE_PHY0 0x120dc000
/* [CUSTOM] DDR DMC0-DMC3 base register */
#define DDR_REG_BASE_DMC0 0x120d8000
#define DDR_REG_BASE_DMC1 0x120d8000
#ifdef DDR_REG_BASE_PHY1
#define DDR_REG_BASE_DMC2 0x120d9000
#define DDR_REG_BASE_DMC3 0x120d9000
#endif
/* NOTE(review): CRG_REG_BASE is also defined (same value, 0x12010000) in
 * platform.h included above; this redefinition is benign but redundant. */
#define CRG_REG_BASE 0x12010000
#define PERI_CRG_DDRT 0x198
#define DDR_REG_BASE_SYSCTRL 0x12020000
/* [SYSCTRL]RAM Retention control register 0 */
#define SYSCTRL_MISC_CTRL4 0x8010
#define DDR_PHY_DRAMCFG 0x2c /* DRAM config register */
#define PHY_DRAMCFG_TYPE_MASK 0xf /* [3:0] */
#define PHY_DRAMCFG_TYPE_LPDDR4 0x6 /* [2:0] 110 LPDDR4 */
#define BYTE_NUM 2

/**
 * ddr_boot_prepare
 * @reg: scratch area where the pre-training register values are saved so
 *       ddr_boot_restore() can put them back.
 *
 * Do some prepare before ddr training: route the DDRT bus, enable the DDRT
 * clock and release its reset, and disable RDQS anti-aging on the PHY(s).
 */
static void ddr_boot_prepare(struct tr_relate_reg *reg)
{
	/* select ddrt bus path */
	reg->custom.ive_ddrt_mst_sel = readl(DDR_REG_BASE_SYSCTRL + SYSCTRL_MISC_CTRL4);
	writel(reg->custom.ive_ddrt_mst_sel & 0xffffffdf, DDR_REG_BASE_SYSCTRL + SYSCTRL_MISC_CTRL4);

	/* turn on ddrt clock */
	reg->custom.ddrt_clk_reg = readl(CRG_REG_BASE + PERI_CRG_DDRT);
	/* enable ddrt0 clock */
	writel(reg->custom.ddrt_clk_reg | (0x1 << 1), CRG_REG_BASE + PERI_CRG_DDRT);
	__asm__ __volatile__("nop");
	/* disable ddrt0 soft reset */
	writel(readl(CRG_REG_BASE + PERI_CRG_DDRT) & (~(0x1 << 0)), CRG_REG_BASE + PERI_CRG_DDRT);

	/* disable rdqs anti-aging */
	reg->custom.phy0_age_compst_en = readl(DDR_REG_BASE_PHY0 + DDR_PHY_PHYRSCTRL);
	writel((reg->custom.phy0_age_compst_en & 0x7fffffff), DDR_REG_BASE_PHY0 + DDR_PHY_PHYRSCTRL);
#ifdef DDR_REG_BASE_PHY1
	reg->custom.phy1_age_compst_en = readl(DDR_REG_BASE_PHY1 + DDR_PHY_PHYRSCTRL);
	writel((reg->custom.phy1_age_compst_en & 0x7fffffff), DDR_REG_BASE_PHY1 + DDR_PHY_PHYRSCTRL);
#endif
}

/**
 * ddr_boot_restore
 * @reg: register values saved by ddr_boot_prepare().
 *
 * Restore register config after ddr training.
 */
static void ddr_boot_restore(struct tr_relate_reg *reg)
{
	/* restore ddrt bus path */
	writel(reg->custom.ive_ddrt_mst_sel, DDR_REG_BASE_SYSCTRL + SYSCTRL_MISC_CTRL4);

	/* restore ddrt clock */
	writel(reg->custom.ddrt_clk_reg, CRG_REG_BASE + PERI_CRG_DDRT);

	/* restore rdqs anti-aging */
	writel(reg->custom.phy0_age_compst_en, DDR_REG_BASE_PHY0 + DDR_PHY_PHYRSCTRL);
#ifdef DDR_REG_BASE_PHY1
	writel(reg->custom.phy1_age_compst_en, DDR_REG_BASE_PHY1 + DDR_PHY_PHYRSCTRL);
#endif
}

/**
 * ddr_rdqs_bdl_adj
 *
 * Adjust rdqs/rdq/rdm bdl to avoid problem cause by ddr anti-aging:
 * per byte lane, bump the BDLs together until the two low bits of the RDQS
 * BDL read 0x3, then pulse the PHY update and counter-reset bits so the new
 * delays take effect.
 */
static void ddr_rdqs_bdl_adj(void)
{
	int i;
	unsigned int rdqs;
	unsigned int rdq03, rdq47;
	unsigned int rdm;
	unsigned int tmp;

	for (i = 0; i < BYTE_NUM; i++) {
		rdqs = readl(DDR_REG_BASE_PHY0 + 0x22c + i * 0x80);
		rdq03 = readl(DDR_REG_BASE_PHY0 + 0x21c + i * 0x80);
		rdq47 = readl(DDR_REG_BASE_PHY0 + 0x220 + i * 0x80);
		rdm = readl(DDR_REG_BASE_PHY0 + 0x224 + i * 0x80);

		/* rdqs bdl lower two bit shoud be 0x11 */
		while ((rdqs & 0x3) < 0x3) {
			/* rdqs/rdq/rdm bdl + 1 */
			rdqs = rdqs + 0x1;
			rdq03 = rdq03 + 0x01010101;
			rdq47 = rdq47 + 0x01010101;
			rdm = rdm + 0x1;

			writel(rdqs, DDR_REG_BASE_PHY0 + 0x22c + i * 0x80);
			writel(rdq03, DDR_REG_BASE_PHY0 + 0x21c + i * 0x80);
			writel(rdq47, DDR_REG_BASE_PHY0 + 0x220 + i * 0x80);
			writel(rdm, DDR_REG_BASE_PHY0 + 0x224 + i * 0x80);
		}
	}

	tmp = readl(DDR_REG_BASE_PHY0 + DDR_PHY_MISC);
	tmp |= (1 << PHY_MISC_UPDATE_BIT);
	/* update new config to PHY */
	writel(tmp, DDR_REG_BASE_PHY0 + DDR_PHY_MISC);
	tmp &= ~(1 << PHY_MISC_UPDATE_BIT);
	writel(tmp, DDR_REG_BASE_PHY0 + DDR_PHY_MISC);
	tmp = readl(DDR_REG_BASE_PHY0 + DDR_PHY_PHYINITCTRL);
	/* set 1 to issue PHY counter reset signal */
	tmp |= (1 << PHY_PHYCONN_RST_BIT);
	writel(tmp, DDR_REG_BASE_PHY0 + DDR_PHY_PHYINITCTRL);
	/* set 0 to end the reset signal */
	tmp &= ~(1 << PHY_PHYCONN_RST_BIT);
	writel(tmp, DDR_REG_BASE_PHY0 + DDR_PHY_PHYINITCTRL);
}

/* Entry point called from the boot path: run SVB, then pcode/hw/sw DDR
 * training with prepare/restore around them, then post-training fixups.
 * The "base" parameter is currently unused. */
void start_ddr_training(unsigned int base)
{
	struct tr_relate_reg relate_reg;
	struct tr_relate_reg *reg = &relate_reg;

	start_svb();

	ddr_boot_prepare(reg);

	/* ddr pcode training */
	ddr_pcode_training_if(0);
	/* ddr hw training */
	ddr_hw_training_if(0);
	/* ddr sw training */
	ddr_sw_training_if(0);

	ddr_rdqs_bdl_adj();

	ddr_boot_restore(reg);

	/*the value should config after trainning, or it will cause chip compatibility problems*/
	if ((readl(DDR_REG_BASE_PHY0 + DDR_PHY_DRAMCFG) & PHY_DRAMCFG_TYPE_MASK) == PHY_DRAMCFG_TYPE_LPDDR4) {
		writel(0x401, DDR_REG_BASE_DMC0 + 0x28);
		writel(0x401, DDR_REG_BASE_DMC1 + 0x28);
	} else {
		writel(0x401, DDR_REG_BASE_DMC0 + 0x28);
	}
#ifdef DDR_REG_BASE_PHY1
	if ((readl(DDR_REG_BASE_PHY1 + DDR_PHY_DRAMCFG) & PHY_DRAMCFG_TYPE_MASK) == PHY_DRAMCFG_TYPE_LPDDR4) {
		writel(0x401, DDR_REG_BASE_DMC2 + 0x28);
		writel(0x401, DDR_REG_BASE_DMC3 + 0x28);
	} else {
		/* NOTE(review): this PHY1 branch writes DMC1 (a PHY0 controller);
		 * by symmetry with the block above it likely should be DMC2 --
		 * confirm before changing (code path is dead unless
		 * DDR_REG_BASE_PHY1 is defined). */
		writel(0x401, DDR_REG_BASE_DMC1 + 0x28);
	}
#endif
	/* enable ddr scramb */
}
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/ddr_init/include/io.h
<reponame>openharmony-gitee-mirror/device_hisilicon_hispark_aries<gh_stars>1-10
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H

#include <types.h>
#include <barriers.h>

/* Raw volatile 32-bit accessors (no ordering guarantees by themselves). */
#define __arch_getl(a) (*(volatile unsigned int *)(a))
#define __arch_putl(v,a) (*(volatile unsigned int *)(a) = (v))

/* Memory barriers: full DSB for mb(), DMB for the I/O read/write barriers. */
#define mb() dsb()
#define __iormb() dmb()
#define __iowmb() dmb()

/* writel: write barrier BEFORE the store (orders prior memory accesses
 * against the device write); evaluates v once and yields it as the
 * expression's value. readl: load first, then a read barrier AFTER it
 * (orders the device read against subsequent accesses). */
#define writel(v,c) ({ u32 __v = v; __iowmb(); __arch_putl(__v,c); __v; })
#define readl(c) ({ u32 __v = __arch_getl(c); __iormb(); __v; })

#endif /* __ASM_ARM_IO_H */
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/ddr_init/drv/ddr_training_ctl.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. * * Description:DDR training control */ #include "ddr_interface.h" #include "ddr_training_impl.h" #ifdef DDR_SW_TRAINING_FUNC_PUBLIC #ifdef DDR_TRAINING_CUT_CODE_CONFIG /** * Cut ddr training control code for less SRAM. * Support DDRC500. * Support DDRC510 with one PHY. */ int ddr_sw_training_func(void *ddrtr_result) { struct ddr_cfg_st ddr_cfg; struct ddr_cfg_st *cfg = &ddr_cfg; unsigned int base_dmc = DDR_REG_BASE_DMC0; unsigned int base_phy = DDR_REG_BASE_PHY0; int result = 0; unsigned int auto_ref_timing = ddr_read(base_dmc + DDR_DMC_TIMING2); unsigned int misc_scramb = ddr_read(base_phy + DDR_PHY_MISC); unsigned int dramcfg_ma2t = ddr_read(base_phy + DDR_PHY_DRAMCFG) & PHY_DRAMCFG_MA2T; unsigned int acphyctl; /* Static register have to read two times to get the right value. */ acphyctl = ddr_read(base_phy + DDR_PHY_ACPHYCTL4); acphyctl = ddr_read(base_phy + DDR_PHY_ACPHYCTL4); DDR_VARIABLE_DECLARE(swapdfibyte_en); /* check sw ddr training enable */ if (DDR_BYPASS_ALL_MASK == ddr_read(DDR_REG_BASE_SYSCTRL + SYSCTRL_DDR_TRAINING_CFG)) return 0; ddr_training_start(); ddr_training_cfg_init(cfg); #ifdef DDR_TRAINING_STAT_CONFIG /* clear stat register */ ddr_write(0x0, DDR_REG_BASE_SYSCTRL + SYSCTRL_DDR_TRAINING_STAT); #endif /* disable scramb */ ddr_write(misc_scramb & PHY_MISC_SCRAMB_DIS, base_phy + DDR_PHY_MISC); /* disable rdqs swap */ DDR_DQSSWAP_SAVE_FUNC(swapdfibyte_en, base_phy); /* check hardware gating */ if (ddr_read(base_phy + DDR_PHY_PHYINITSTATUS) & PHY_INITSTATUS_GT_MASK) { DDR_FATAL("PHY[%x] hw gating fail.", base_phy); ddr_training_stat(DDR_ERR_HW_GATING, base_phy, -1, -1); } #ifdef DDR_LPCA_TRAINING_CONFIG /* lpca */ if (!ddr_training_check_bypass(cfg, DDR_BYPASS_LPCA_MASK) && (PHY_DRAMCFG_TYPE_LPDDR3 == (ddr_read(base_phy + DDR_PHY_DRAMCFG) & PHY_DRAMCFG_TYPE_LPDDR3))) { /* disable auto refresh */ ddr_training_set_timing(base_dmc, 
auto_ref_timing & DMC_AUTO_TIMING_DIS); result += ddr_lpca_training(cfg); /* enable auto refresh */ ddr_training_set_timing(base_dmc, auto_ref_timing); } #endif #ifdef DDR_WL_TRAINING_CONFIG /* write leveling */ if (!ddr_training_check_bypass(cfg, DDR_BYPASS_WL_MASK)) { /* disable auto refresh */ ddr_training_set_timing(base_dmc, auto_ref_timing & DMC_AUTO_TIMING_DIS); result += ddr_write_leveling(cfg); /* enable auto refresh */ ddr_training_set_timing(base_dmc, auto_ref_timing); } #endif #ifdef DDR_DATAEYE_TRAINING_CONFIG /* dataeye */ if (!ddr_training_check_bypass(cfg, DDR_BYPASS_DATAEYE_MASK)) { ddr_training_switch_axi(cfg); ddr_ddrt_init(cfg, DDR_DDRT_MODE_DATAEYE); result += ddr_dataeye_training(cfg); } #endif #ifdef DDR_HW_TRAINING_CONFIG /* hardware read */ if (result && !ddr_training_check_bypass(cfg, DDR_BYPASS_HW_MASK)) { if (!dramcfg_ma2t) /* set 1T */ ddr_write(0x0, base_phy + DDR_PHY_ACPHYCTL4); result = ddr_hw_dataeye_read(cfg); if (!dramcfg_ma2t) /* restore */ ddr_write(acphyctl, base_phy + DDR_PHY_ACPHYCTL4); result += ddr_dataeye_training(cfg); } #endif #ifdef DDR_MPR_TRAINING_CONFIG /* mpr */ if (result && !ddr_training_check_bypass(cfg, DDR_BYPASS_MPR_MASK)) { result = ddr_mpr_training(cfg); result += ddr_dataeye_training(cfg); } #endif #ifdef DDR_GATE_TRAINING_CONFIG /* gate */ if (!ddr_training_check_bypass(cfg, DDR_BYPASS_GATE_MASK)) { ddr_training_switch_axi(cfg); ddr_ddrt_init(cfg, DDR_DDRT_MODE_GATE); /* disable auto refresh */ ddr_training_set_timing(base_dmc, auto_ref_timing & DMC_AUTO_TIMING_DIS); if (!dramcfg_ma2t) /* set 1T */ ddr_write(0x0, base_phy + DDR_PHY_ACPHYCTL4); result += ddr_gate_training(cfg); /* enable auto refresh */ ddr_training_set_timing(base_dmc, auto_ref_timing); if (!dramcfg_ma2t) /* restore */ ddr_write(acphyctl, base_phy + DDR_PHY_ACPHYCTL4); } #endif #ifdef DDR_VREF_TRAINING_CONFIG if (!ddr_training_check_bypass(cfg, DDR_BYPASS_VREF_MASK)) { ddr_training_switch_axi(cfg); ddr_ddrt_init(cfg, 
DDR_DDRT_MODE_DATAEYE); result += ddr_vref_training(cfg); } #endif /* restore scramb */ ddr_write(misc_scramb, base_phy + DDR_PHY_MISC); /* restore rdqs swap */ DDR_DQSSWAP_RESTORE_FUNC(swapdfibyte_en, base_phy); if (!result) ddr_training_suc(); return result; } #else int ddr_training_boot_func(struct ddr_cfg_st *cfg) { int result = 0; /* check hardware gating */ if (ddr_read(cfg->cur_phy + DDR_PHY_PHYINITSTATUS) & PHY_INITSTATUS_GT_MASK) { DDR_FATAL("PHY[%x] hw gating fail.", cfg->cur_phy); ddr_training_stat(DDR_ERR_HW_GATING, cfg->cur_phy, -1, -1); } /* lpca */ result = ddr_lpca_training_func(cfg); /* write leveling */ result += ddr_wl_func(cfg); /* dataeye/gate/vref need switch axi */ /* dataeye */ result += ddr_dataeye_training_func(cfg); #ifdef DDR_HW_TRAINING_CONFIG /* hardware read */ if (result && !ddr_training_check_bypass(cfg, DDR_BYPASS_HW_MASK)) { struct tr_relate_reg relate_reg_ac; ddr_training_save_reg(cfg, &relate_reg_ac, DDR_BYPASS_HW_MASK); result = ddr_hw_dataeye_read(cfg); ddr_training_restore_reg(cfg, &relate_reg_ac); cfg->adjust = DDR_DATAEYE_ABNORMAL_ADJUST; result += ddr_dataeye_training(cfg); } #endif /* mpr */ result += ddr_mpr_training_func(cfg); /* gate */ result += ddr_gating_func(cfg); /* vref */ result += ddr_vref_training_func(cfg); return result; } /* Support DDRC510 with two PHY */ int ddr_sw_training_func(void *ddrtr_result) { struct ddr_cfg_st ddr_cfg; struct ddr_cfg_st *cfg = &ddr_cfg; int result = 0; #ifdef SYSCTRL_DDR_TRAINING_VERSION_FLAG /* DDR training version flag */ unsigned int tmp_reg = ddr_read(DDR_REG_BASE_SYSCTRL + SYSCTRL_DDR_TRAINING_VERSION_FLAG); tmp_reg = (tmp_reg & 0xffff0000) | DDR_VERSION; ddr_write(tmp_reg, DDR_REG_BASE_SYSCTRL + SYSCTRL_DDR_TRAINING_VERSION_FLAG); #endif /* check sw ddr training enable */ if (DDR_BYPASS_ALL_MASK == ddr_read(DDR_REG_BASE_SYSCTRL + SYSCTRL_DDR_TRAINING_CFG) #ifdef SYSCTRL_DDR_TRAINING_CFG_SEC && DDR_BYPASS_ALL_MASK == ddr_read(DDR_REG_BASE_SYSCTRL + 
SYSCTRL_DDR_TRAINING_CFG_SEC) #endif ) return 0; ddr_training_start(); #ifdef DDR_TRAINING_STAT_CONFIG /* clear stat register */ ddr_write(0x0, DDR_REG_BASE_SYSCTRL + SYSCTRL_DDR_TRAINING_STAT); #endif ddr_training_cfg_init(cfg); cfg->cmd_st = 0; result = ddr_training_all(cfg); result += ddr_dcc_training_func(cfg); if (!result) ddr_training_suc(); else ddr_training_console_if(0); return result; } #endif /* DDR_TRAINING_CUT_CODE_CONFIG */ #endif /* DDR_SW_TRAINING_FUNC_PUBLIC */ #ifdef DDR_PCODE_TRAINING_CONFIG int ddr_pcode_training_func(void *ddrtr_result) { struct ddr_cfg_st ddr_cfg; struct ddr_cfg_st *cfg = &ddr_cfg; ddr_training_cfg_init(cfg); return ddr_pcode_training(cfg); } #else int ddr_pcode_training_func(void *ddrtr_result) { DDR_WARNING("Not support DDR pcode training."); return 0; } #endif #ifdef DDR_HW_TRAINING_CONFIG int ddr_hw_training_func(void *ddr_hw) { struct ddr_cfg_st ddr_cfg; struct ddr_cfg_st *cfg = &ddr_cfg; ddr_training_cfg_init(cfg); return ddr_hw_training(cfg); } #else int ddr_hw_training_func(void *ddr_hw) { DDR_WARNING("Not support DDR HW training."); return 0; } #endif /* DDR_HW_TRAINING_CONFIG */ int ddr_sw_training_if(void *ddrtr_result) { return DDR_SW_TRAINING_FUNC(ddrtr_result); } int ddr_hw_training_if(void *ddr_hw) { return DDR_HW_TRAINING_FUNC(ddr_hw); } int ddr_pcode_training_if(void *ddrtr_result) { return DDR_PCODE_TRAINING_FUNC(ddrtr_result); }
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/board/include/hisoc/dmac.h
/* * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __HISOC_DMAC_H__ #define __HISOC_DMAC_H__ #include "asm/io.h" #include "asm/platform.h" #ifdef __cplusplus #if __cplusplus extern "C"{ #endif #endif /* __cplusplus */ #define DDRAM_ADRS DDR_MEM_BASE /* fixed */ #define DDRAM_SIZE 0x3FFFFFFF /* 1GB DDR. */ #define DMAC_INTSTATUS (DMAC_REG_BASE + 0X00) #define DMAC_INTTCSTATUS (DMAC_REG_BASE + 0X04) #define DMAC_INTTCCLEAR (DMAC_REG_BASE + 0X08) #define DMAC_INTERRORSTATUS (DMAC_REG_BASE + 0X0C) #define DMAC_INTERRCLR (DMAC_REG_BASE + 0X10) #define DMAC_RAWINTTCSTATUS (DMAC_REG_BASE + 0X14) #define DMAC_RAWINTERRORSTATUS (DMAC_REG_BASE + 0X18) #define DMAC_ENBLDCHNS (DMAC_REG_BASE + 0X1C) #define DMAC_SOFT_BREQ (DMAC_REG_BASE + 0X20) #define DMAC_SOFT_SREQ (DMAC_REG_BASE + 0X24) #define DMAC_SOFT_LBREQ (DMAC_REG_BASE + 0X28) #define DMAC_SOFT_LSREQ (DMAC_REG_BASE + 0X2C) #define DMAC_CONFIG (DMAC_REG_BASE + 0X30) #define DMAC_SYNC (DMAC_REG_BASE + 0X34) /* the definition for DMAC channel register */ #define DMAC_CxBASE(i) (DMAC_REG_BASE + 0x100+i*0x20) #define DMAC_CxSRCADDR(i) (DMAC_CxBASE(i) + 0x00) #define DMAC_CxDESTADDR(i) (DMAC_CxBASE(i) + 0x04) #define DMAC_CxLLI(i) (DMAC_CxBASE(i) + 0x08) #define DMAC_CxCONTROL(i) (DMAC_CxBASE(i) + 0x0C) #define DMAC_CxCONFIG(i) (DMAC_CxBASE(i) + 0x10) #define DMAC_MAXTRANSFERSIZE 0x0fff /* the max length is denoted by 0-11bit */ #define DMAC_CxDISABLE 0x00 #define 
DMAC_CxENABLE 0x01 /* the means the bit in the channel control register */ #define DMAC_CxCONTROL_M2M 0x8d489000 /* Dwidth=32,burst size=4 */ #define DMAC_CxCONTROL_LLIM2M 0x0d489000 /* Dwidth=32,burst size=4 */ #define DMAC_CxCONTROL_LLIP2M 0x0a000000 // 0x09409000 #define DMAC_CxCONTROL_LLIM2P 0x86089000 #define DMAC_CxCONTROL_INT_EN (0x01 << 31) /* bit:31,enable interrupt */ #define DMAC_CxLLI_LM 0x01 #define DMAC_TRANS_SIZE 0xff0 #define DMAC_CHANNEL_ENABLE 1 #define DMAC_CHANNEL_DISABLE 0xfffffffe #define DMAC_CxCONFIG_M2M 0xc000 #define DMAC_CxCONFIG_LLIM2M 0xc000 #define DMAC_CxCONFIG_P2M 0xd000 #define DMAC_CxCONFIG_M2P 0xc800 #define DMAC_CxCONFIG_SIO_P2M 0x0000d000 #define DMAC_CxCONFIG_SIO_M2P 0x0000c800 /* default the config and sync regsiter for DMAC controller */ /* M1,M2 little endian, enable DMAC */ #define DMAC_CONFIG_VAL 0x01 /* enable the sync logic for the 16 peripheral */ #define DMAC_SYNC_VAL 0x0 #define DMAC_MAX_PERIPHERALS 16 // 12 #define MEM_MAX_NUM 1 #define CHANNEL_NUM 4 #define DMAC_MAX_CHANNELS CHANNEL_NUM #define PERI_CRG91 (CRG_REG_BASE + 0x16c) #define DMAC_CLK_EN (1 << 5) #define DMAC_SRST_REQ (1 << 4) static void hidmac_clk_en(void) { unsigned int tmp; tmp = readl(PERI_CRG91); tmp |= DMAC_CLK_EN; writel(tmp, PERI_CRG91); } static void hidmac_unreset(void) { unsigned int tmp; tmp = readl(PERI_CRG91); tmp &= ~DMAC_SRST_REQ; writel(tmp, PERI_CRG91); } #define PERI_8BIT_MODE 0 #define PERI_16BIT_MODE 1 #define PERI_32BIT_MODE 2 // hidmac data structure /* DMAC peripheral structure */ typedef struct dmac_peripheral { /* peripherial ID */ unsigned int peri_id; /* peripheral data register address */ unsigned int peri_addr; /* default channel control word */ unsigned int transfer_ctrl; /* default channel configuration word */ unsigned int transfer_cfg; /* default channel configuration word */ unsigned int transfer_width; } dmac_peripheral; /* * DMA config array! 
* DREQ, FIFO, CONTROL, CONFIG, BITWIDTH */ static dmac_peripheral g_peripheral[DMAC_MAX_PERIPHERALS] = { /* DREQ, FIFO, CONTROL, CONFIG, WIDTH */ /* periphal 0: I2C0/I2C1 RX */ { 0, I2C1_REG_BASE + 0x10, 0, DMAC_CxCONTROL_LLIP2M | (0 << 1), PERI_8BIT_MODE}, /* periphal 1: I2C0/I2C1 TX */ { 1, I2C1_REG_BASE + 0x10, 0, DMAC_CxCONTROL_LLIP2M | (1 << 1), PERI_8BIT_MODE}, /* periphal 2: I2C1/I2C2 RX */ { 2, I2C1_REG_BASE + 0x10, 0x99000000, DMAC_CxCONTROL_LLIP2M | (2 << 1), PERI_8BIT_MODE}, /* 8bit width */ /* periphal 3: I2C1/I2C2 TX */ { 3, I2C1_REG_BASE + 0x10, 0x96000000, DMAC_CxCONTROL_LLIP2M | (3 << 1), PERI_8BIT_MODE}, /* 8bit width */ /* periphal 4: UART0 RX */ { 4, UART0_REG_BASE + 0x00, DMAC_CxCONTROL_LLIP2M, DMAC_CxCONFIG_P2M | (4 << 1), PERI_8BIT_MODE}, /* periphal 5: UART0 TX */ { 5, UART0_REG_BASE + 0x00, DMAC_CxCONTROL_LLIP2M, DMAC_CxCONFIG_P2M | (5 << 1), PERI_8BIT_MODE}, /* periphal 6: UART1 RX */ { 6, UART1_REG_BASE + 0x00, DMAC_CxCONTROL_LLIP2M, DMAC_CxCONFIG_P2M | (6 << 1), PERI_8BIT_MODE}, /* periphal 7: UART1 TX */ { 7, UART1_REG_BASE + 0x00, DMAC_CxCONTROL_LLIP2M, DMAC_CxCONFIG_P2M | (7 << 1), PERI_8BIT_MODE}, /* periphal 8: UART2 RX */ { 8, UART2_REG_BASE + 0x00, DMAC_CxCONTROL_LLIP2M, DMAC_CxCONFIG_P2M | (8 << 1), PERI_8BIT_MODE}, /* periphal 9: UART2 TX */ { 9, UART2_REG_BASE + 0x00, DMAC_CxCONTROL_LLIP2M, DMAC_CxCONFIG_P2M | (9 << 1), PERI_8BIT_MODE}, /* periphal 10: UART3 RX */ { 10, UART3_REG_BASE + 0x00, DMAC_CxCONTROL_LLIP2M, DMAC_CxCONFIG_P2M | (10 << 1), PERI_8BIT_MODE}, /* periphal 11: UART0 TX */ { 11, UART3_REG_BASE + 0x00, DMAC_CxCONTROL_LLIP2M, DMAC_CxCONFIG_P2M | (11 << 1), PERI_8BIT_MODE}, /* periphal 12: SSP1 RX */ { 12, 0, 0, 0, 0}, /* periphal 13: SSP1 TX */ { 13, 0, 0, 0, 0}, /* periphal 14: SSP0 RX */ { 14, 0, 0, 0, 0}, /* periphal 15: SSP0 TX */ { 15, 0, 0, 0, 0}, }; #ifdef __cplusplus #if __cplusplus } #endif #endif /* __cplusplus */ #endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/board/include/reset_shell.h
/* * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _RESET_SHELL_H #define _RESET_SHELL_H #include "los_task.h" #ifdef __cplusplus #if __cplusplus extern "C" { #endif /* __cplusplus */ #endif /* __cplusplus */ typedef VOID* (*STORAGE_HOOK_FUNC)(VOID*); typedef struct tagHookFuncNode { STORAGE_HOOK_FUNC pHandler; VOID *pParam; struct tagHookFuncNode *pNext; }Hook_Func_Node; extern Hook_Func_Node *g_hook_func_node; UINT32 osReHookFuncAdd(STORAGE_HOOK_FUNC handler, VOID *param); UINT32 osReHookFuncDel(STORAGE_HOOK_FUNC handler); VOID osReHookFuncHandle(VOID); extern void cmd_reset(void); #ifdef __cplusplus #if __cplusplus } #endif /* __cplusplus */ #endif /* __cplusplus */ #endif
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/ddr_init/drv/ddr_ddrc_v500.h
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. * * ddr_ddrc_v500.h * * DDRC V500 register define. */ /******** DMC **************************/ /* base address: DDR_REG_BASE_DMC0 DDR_REG_BASE_DMC1 */ /* register offset address */ #define DDR_DMC_CTRL_SREF 0X0 /* DDRC self-refresh control. */ #define DDR_DMC_CFG_PD 0x28 /* PowerDown */ #define DDR_DMC_CFG_DDRMODE 0x50 #define DDR_DMC_CFG_RNKVOL(n) (0x60 + ((n) << 2)) #define DDR_DMC_CFG_EMRS01 0x70 #define DDR_DMC_TIMING2 0x88 #define DDR_DMC_SFCREQ 0xc #define DDR_DMC_SFCCMD 0x210 #define DDR_DMC_SFCADDR 0x214 /* read col and row */ #define DDR_DMC_SFCBANK 0x218 #define DDR_DMC_CURR_FUNC 0x294 #ifndef DDR_DMC_SFC_RDATA0 #define DDR_DMC_SFC_RDATA0 0x4A8 /* SFC read data[127:96] */ #endif #ifndef DDR_DMC_SFC_RDATA1 #define DDR_DMC_SFC_RDATA1 0x4AC /* SFC read data[95:64] */ #endif #ifndef DDR_DMC_SFC_RDATA2 #define DDR_DMC_SFC_RDATA2 0x4B0 /* SFC read data[63:32] */ #endif #ifndef DDR_DMC_SFC_RDATA3 #define DDR_DMC_SFC_RDATA3 0x4B4 /* SFC read data[31:0] */ #endif /* register mask */ #define DMC_CMD_MRS_MASK 0xffff /* storing data bus width. 
[00]8bit, [01]16bit, [10]32bit, [11]64bit */ #define DMC_MEM_WIDTH_MASK 0x3 #define DMC_MRS_MASK 0xffff /* [15:0] Mode Register mask */ #define DMC_MR0_BL_MASK 0x3 #define DMC_CFG_DRAM_TYPE_MASK 0x7 /* [2:0]101:DDR2, 110:DDR3, 111:DDR4 */ #define DMC_CFG_MEM_BG_MASK 0x3 /* [11:10]0:1, 1:2, 2:4 Bank Group */ #define DMC_CURR_FUNC_IN_SREF_MASK 0x1 #define DMC_RNKVOL_MEM_BANK_MASK 0x3 /* [9:8] */ #define DMC_RNKVOL_MEM_ROW_MASK 0x7 /* [6:4] */ #define DMC_RNKVOL_MEM_COL_MASK 0x7 /* [2:0] */ /* register bit */ #define DMC_MEM_WIDTH_BIT 4 /* storing data bus width */ #define DMC_SFC_PRE_DIS_BIT 0 /* ddrcv500 not use */ /* [CUSTOM] [31:16]config MR when LMR command */ #define DMC_SFC_CMD_MRS_BIT 16 #define DMC_SFC_RANK_BIT 4 /* [CUSTOM] [7:4]cmd_rank */ #define DMC_CFG_MEM_BG_BIT 10 /* [11:10] mem_bankgroup */ #define DMC_RNKVOL_MEM_BANK_BIT 8 /* [9:8] */ #define DMC_RNKVOL_MEM_ROW_BIT 4 /* [6:4] */ /* register value */ #define DMC_BANK_MR1 1 #define DMC_BANK_MR3 0x3 #define DMC_CMD_TYPE_LMR 0x2 #define DMC_CMD_TYPE_READ 0x5 /* read */ #define DMC_CMD_TYPE_PRECHARGE_ALL 0x6 /* precharge all */ #define DMC_CMD_MRS_MR3 0x4 /* MR3 = 0x4 */ #define DMC_CMD_MRS_A7 0x80 /* value 1 means exexute command. 
cmd_rank[0] control DDR RANK0 */ #define DMC_CMD_RANK0 0x1 #define DMC_MR0_BL_BUST8 0x0 /* BC8 (fixed) */ #define DMC_MR0_BL_BUST4 0x2 /* BC4 (fixed) */ #define DMC_AUTO_TIMING_DIS 0xfffff000 /* auto refresh disable */ #define DMC_POWER_DOWN_DIS 0xfffffffe /* powerDown disable */ #define DMC_SCRAMB_DIS 0xffffffff /* v500 no scramb */ #define DMC_CFG_DRAM_TYPE_DDR4 0x7 /* DDR4 */ #define DMC_CTRL_SREF_ENTER 0x1 /* 1 Enter Auto-self refresh */ #define DMC_CTRL_SREF_EXIT 0x2 /* 2 Exit Auto-self refresh */ #define DMC_CFG_MEM_2BG 0x1 /* 2 Bank Group */ #ifndef DDR_PHY_NUM #define DDR_PHY_NUM 1 /* phy number */ #endif #ifndef DDR_DMC_PER_PHY_MAX #define DDR_DMC_PER_PHY_MAX 1 #endif #ifndef DDR_RANK_NUM #define DDR_RANK_NUM 1 /* rank number */ #endif #define DMC_SFC_CMD_WRITE(sfc_cmd, addr) \ ddr_write(sfc_cmd | (DMC_CMD_RANK0 << DMC_SFC_RANK_BIT), addr) #define DMC_SFC_BANK_WRITE(sfc_bank, addr) ddr_write(sfc_bank, addr) #define DMC_MPR_CHECK_BIT_0_127(cfg) \ ddr_mpr_extract(cfg, \ DDR_DMC_SFC_RDATA0, DDR_DMC_SFC_RDATA1, \ DDR_DMC_SFC_RDATA2, DDR_DMC_SFC_RDATA3) /* ddrcv500 not have [128, 255] */ #define DMC_MPR_CHECK_BIT_128_255(base_dmc, byte_index, dq_index) 0 /* ddrcv500 0x50 not support scramb */ #define DMC_SAVE_SCRAMB(relate_reg, i, base_dmc) #define DMC_DISABLE_SCRAMB(relate_reg, i, base_dmc) #define DMC_RESTORE_SCRAMB(relate_reg, i, base_dmc) /******** AXI **************************/ /** * DMC -- PHY * / * DDRT -- AXI * \ * DMC -- PHY */ /* base address: DDR_REG_BASE_AXI */ /* register offset address */ #define DDR_AXI_REGION_ATTRIB0 0x104 /* region 0 */ #define DDR_AXI_REGION_ATTRIB1 0x114 /* region 1 */ /* register mask */ #define AXI_REGION_ATTRIB_CH_MASK 0xfffffff0 /* channel mask */ /* register value */ /* Map to the single channel, independent address */ #define AXI_RNG_ATTR_CH_MODE 0x4 #define AXI_RNG_ATTR_CH_START_0 0x0 #define AXI_RNG_ATTR_CH_START_1 0x1 /********data define************************************/ struct ddr_ddrc_data { }; #define 
DDR_AXI_SAVE_FUNC(relate_reg) #define DDR_AXI_RESTORE_FUNC(relate_reg) #define DDR_AXI_SWITCH_FUNC(cfg) #define DDR_RNKVOL_SET_FUNC(cfg) /* ddrc v500 not support two rank */ #define DDR_RNKVOL_SAVE_FUNC(relate_reg, base_dmc) #define DDR_RNKVOL_RESTORE_FUNC(relate_reg, base_dmc)
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/aeskey2reg.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> #include <string.h> #include <stdlib.h> #define MaxCols 80 int getlinetxt(FILE *fp,int line,char *stri){ int i; fseek(fp,0,0); for(i=0;i<line;i++) if(fgets(stri,MaxCols,fp)==NULL) return -2; return strlen(stri); } int main(int argc, char **argv) { int i,j; char buffer[0x100]; int buf[32]; int *tmp = buf; FILE *fp1 = NULL; char *input_file = argv[1]; printf("==================================================================================\n"); if (argc != 2) { printf("input err!!!!!!!!!!!!!!!! \n"); printf("usage: %s filename\n",argv[0]); exit(-1); } printf("input_file:%s\n",input_file); fp1 = fopen(input_file, "r"); if (fp1 == NULL) { printf("open file failed!\n"); return -1; } #if 0 sprintf(buffer,"cat %s",input_file); printf("%s\n",buffer); printf("==================================================================================\n"); system(buffer); printf("==================================================================================\n"); #endif int ii; char tmp_buf[80]; #define MAX_ROW 100 for(ii=0;ii<MAX_ROW;ii++) { getlinetxt(fp1,ii,tmp_buf); if(memcmp(tmp_buf,"KEY=",4)) continue; else break; } if(memcmp(tmp_buf,"KEY=",4)) { printf("ERROR: not find KEY=XXXX string!!!\n"); fclose(fp1); } tmp_buf[79]='\0'; sscanf(tmp_buf,"KEY=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", tmp + 0, tmp + 1, tmp + 2, tmp + 3, tmp + 4, tmp + 5, tmp + 6, tmp + 7, tmp + 8, tmp + 9, tmp + 10, tmp + 11, tmp + 12, tmp + 13, tmp + 14, tmp + 15, tmp + 16 + 0, tmp + 16 + 1, tmp + 16 + 2, tmp + 16 + 3, tmp + 16 + 4, tmp + 16 + 5, tmp + 16 + 6, tmp + 16 + 7, tmp + 16 + 8, tmp + 16 + 9, tmp + 16 + 10, tmp + 16 + 11, tmp + 16 + 12, tmp + 16 + 13, tmp + 16 + 14, tmp + 16 + 15 ); 
#if 1 printf("==================================================================================\n"); printf("AES KEY:\n"); for (i = 0; i < 32; i++) printf("%02x", buf[i]); printf("\n"); printf("==================================================================================\n"); #endif printf("==================================================================================\n"); printf("AES KEY REG VALUE:\n"); for (i = 0; i < 8; i++) { for (j = 3; j >= 0; j--) { if (j == 3) printf("mw 0x100900%02x 0x", (i * 4) + 0xc); printf("%02x", buf[i * 4 + j]); if (j == 0) printf("\n"); } } printf("==================================================================================\n"); fclose(fp1); return 0; }
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/hash_modify.c
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> #include <fcntl.h> #include <string.h> #include <stdlib.h> int main(int argc, char **argv) { int i,j; char buffer[0x100]; unsigned int buf[32]; int *tmp = buf; FILE *fp1 = NULL; char *input_file = argv[1]; printf("==================================================================================\n"); if (argc != 2) { printf("input err!!!!!!!!!!!!!!!! \n"); printf("usage: %s filename\n",argv[0]); exit(-1); } printf("input_file:%s\n",input_file); fp1 = fopen(input_file, "r"); if (fp1 == NULL) { printf("open file failed!\n"); return -1; } sprintf(buffer,"cat %s",input_file); printf("%s\n",buffer); printf("==================================================================================\n"); system(buffer); printf("==================================================================================\n"); fscanf(fp1,"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", tmp + 0, tmp + 1, tmp + 2, tmp + 3, tmp + 4, tmp + 5, tmp + 6, tmp + 7, tmp + 8, tmp + 9, tmp + 10, tmp + 11, tmp + 12, tmp + 13, tmp + 14, tmp + 15); tmp += 16; fscanf(fp1,"%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", tmp + 0, tmp + 1, tmp + 2, tmp + 3, tmp + 4, tmp + 5, tmp + 6, tmp + 7, tmp + 8, tmp + 9, tmp + 10, tmp + 11, tmp + 12, tmp + 13, tmp + 14, tmp + 15); printf("==================================================================================\n"); printf("SHA256:\n"); for (i = 0; i < 32; i++) printf("%02x", buf[i]); printf("\n"); printf("==================================================================================\n"); printf("==================================================================================\n"); printf("REG VALUE:\n"); for (i = 0; i < 8; i++) { for (j = 3; j >= 0; j--) { if (j == 3) printf("rootkey_hash[%0d]=mw 0x100900%02x 0x", 
i, (i * 4) + 0xc); printf("%02x", buf[i * 4 + j]); if (j == 0) printf("\n"); } } printf("==================================================================================\n"); fclose(fp1); return 0; }
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/uboot/secureboot_release/ddr_init/drv/ddr_ddrt_v2_0_shf2.h
<reponame>openharmony-gitee-mirror/device_hisilicon_hispark_aries<filename>sdk_liteos/uboot/secureboot_release/ddr_init/drv/ddr_ddrt_v2_0_shf2.h<gh_stars>1-10 /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. * * ddr_ddrt_v2_0_shf2.h * * DDRT register offset address, mask, bit. */ /* register offset address */ /* base address: DDR_REG_BASE_DDRT */ #define DDRT_OP 0x0 /* DDRT operation config */ #define DDRT_STATUS 0x4 /* DDRT status indicating */ #define DDRT_BURST_CONFIG 0x8 /* DDRT burst transfer config */ #define DDRT_MEM_CONFIG 0xc /* DDRT SDRAM config */ #define DDRT_BURST_NUM 0x10 /* DDRT burst number config */ /* DDRT burst number config register while testing address */ #define DDRT_ADDR_NUM 0x14 #define DDRT_LOOP_NUM 0x18 /* DDRT loop number config */ /* This register specified the system DDR starting address */ #define DDRT_DDR_BASE_ADDR 0x1c #define DDRT_ADDR 0x20 /* DDRT test start address config */ #define DDRT_REVERSED_DQ 0x30 /* DDRT reversed DQ indicating */ #define DDRT_SEED 0x38 /* DDRT starting random seed */ #define DDRT_KDATA 0x3c /* DDRT kdata config */ #define DDRT_DATA0 0x40 /* DDRT PRBS7 data config register0 */ #define DDRT_DATA1 0x44 /* DDRT PRBS7 data config register1 */ #define DDRT_DATA2 0x48 /* DDRT PRBS7 data config register2 */ #define DDRT_DATA3 0x4c /* DDRT PRBS7 data config register3 */ /* DQ3~DQ0 error number indicator, every 8bit for each DQ */ #define DDRT_DQ_ERR_CNT(n) (0x60 + ((n) << 2)) /* DQ31~DQ0 error number overflow indicator, every bit for each DQ. */ #define DDRT_DQ_ERR_OVFL 0x80 /* register mask */ #define DDRT_TEST_MODE_MASK 0x300 /* DDRT Test Mode */ #define DDRT_TEST_DONE_MASK 0x1 /* [0] DDRT operation finish signal.*/ /* [1] DDRT Test result indicator. No error occurred, test pass. 
*/ #define DDRT_TEST_PASS_MASK 0x2 /* register bit */ #define DDRT_DDR_MEM_WIDTH 12 /* SDRAM total width */ /* register value */ #define DDRT_CFG_START 0x1 #define DDRT_CFG_BURST_CFG_DATAEYE 0x4f #define DDRT_CFG_BURST_CFG_GATE 0x43 #ifdef CFG_EDA_VERIFY #define DDRT_CFG_BURST_NUM 0x5 /* ddrt test number */ #else #define DDRT_CFG_BURST_NUM 0x7f /* ddrt test number */ #endif #define DDRT_CFG_SEED 0x6d6d6d6d #define DDRT_CFG_REVERSED 0x55aa55aa #ifndef DDRT_CFG_BASE_ADDR /* [CUSTOM] DDR training start address. MEM_BASE_DDR */ #define DDRT_CFG_BASE_ADDR 0x0 #endif /* [CUSTOM] DDRT test address. 0x800000 = 8M */ #define DDRT_CFG_TEST_ADDR_CMD (DDRT_CFG_BASE_ADDR + 0x800000) /* [CUSTOM] DDRT test start address. */ #define DDRT_CFG_TEST_ADDR_BOOT DDRT_CFG_BASE_ADDR #define DDRT_CFG_ADDR_NUM 0xffffffff #define DDRT_CFG_LOOP_NUM 0x0 /* [2:0]000:8 bit; 001:9 bit; 010:10 bit; 011:11 bit; 100:12 bit. single SDRAM column number.*/ #define DDRT_DDR_COL_WIDTH 0x2 /* [6:4]000:11 bit; 001:12 bit; 010:13 bit; 011:14 bit; 100:15 bit; 101:16 bit. single SDRAM row number */ #define DDRT_DDR_ROW_WIDTH 0x50 /* [8]0:4 Bank; 1:8 Bank. single SDRAM bank number */ #define DDRT_DDR_BANK_WIDTH 0x100 #define DDRT_WR_COMPRARE_MODE (0<<8) /* Write read & compare mode */ #define DDRT_WRITE_ONLY_MODE (1<<8) /* Write only mode */ #define DDRT_READ_ONLY_MODE (2<<8) /* Read only mode */ #define DDRT_RANDOM_WR_MODE (3<<8) /* Random write & read mode */ #define DDRT_PATTERM_PRBS9 (0<<12) #define DDRT_PATTERM_PRBS7 (1<<12) #define DDRT_PATTERM_PRBS11 (2<<12) #define DDRT_PATTERM_K28_5 (3<<12) /* other */ #define DDRT_WAIT_TIMEOUT (1000000) #define DDRT_READ_TIMEOUT (20) #define DDRT_PCODE_WAIT_TIMEOUT (100000) /* DDRT test DDR using space */ #define DDRT_GET_TEST_ADDR(addr) ((addr)>>2)
openharmony-gitee-mirror/device_hisilicon_hispark_aries
sdk_liteos/mpp/module_init/src/sdk_exit.c
<reponame>openharmony-gitee-mirror/device_hisilicon_hispark_aries /* * Copyright (c) 2020 HiSilicon (Shanghai) Technologies CO., LIMITED. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <stdio.h> #ifdef __cplusplus #if __cplusplus extern "C" { #endif #endif /* End of #ifdef __cplusplus */ static void BASE_exit(void) { extern void base_mod_exit(void); base_mod_exit(); } static void MMZ_exit(void) { extern void media_mem_exit(void); media_mem_exit(); } static void SYS_exit(void) { extern void sys_mod_exit(void); sys_mod_exit(); } static void ISP_exit(void) { extern void isp_mod_exit(void); isp_mod_exit(); } static void VI_exit(void) { extern void vi_mod_exit(void); vi_mod_exit(); } static void RGN_exit(void) { extern void rgn_mod_exit(void); rgn_mod_exit(); } static void GDC_exit(void) { extern void gdc_mod_exit(void); gdc_mod_exit(); } static void DIS_exit(void) { extern void dis_mod_exit(void); dis_mod_exit(); } static void VGS_exit(void) { extern void vgs_mod_exit(void); vgs_mod_exit(); } static void VPSS_exit(void) { extern void vpss_mod_exit(void); vpss_mod_exit(); } static void VO_exit(void) { extern void vou_module_exit(void); vou_module_exit(); } static void TDE_exit(void) { extern void tde_mod_exit(void); tde_mod_exit(); } static void HIFB_exit(void) { extern void hifb_cleanup(void); hifb_cleanup(); } static void HDMI_exit(void) { extern hi_void HDMI_DRV_ModExit(void); HDMI_DRV_ModExit(); } static void MIPIRX_exit(void) { extern void mipi_rx_mod_exit(void); 
mipi_rx_mod_exit(); } static void MIPITX_exit(void) { extern void mipi_tx_module_exit(void); mipi_tx_module_exit(); } static void RC_exit(void) { extern void rc_mod_exit(void); rc_mod_exit(); } static void VENC_exit(void) { extern void venc_mod_exit(void); venc_mod_exit(); } static void CHNL_exit(void) { extern void chnl_mod_exit(void); chnl_mod_exit(); } static void VEDU_exit(void) { extern void vedu_mod_exit(void); vedu_mod_exit(); } static void H264e_exit(void) { extern void h264e_mod_exit(void); h264e_mod_exit(); } static void H265e_exit(void) { extern void h265e_mod_exit(void); h265e_mod_exit(); } static void JPEGE_exit(void) { extern void jpege_mod_exit(void); jpege_mod_exit(); } static void PWM_exit(void) { extern void pwm_exit(void); pwm_exit(); } static void PIRIS_exit(void) { extern void piris_exit(void); piris_exit(); } static void hi_sensor_spi_exit(void) { extern void sensor_spi_dev_exit(void); sensor_spi_dev_exit(); } static void hi_sensor_i2c_exit(void) { extern void hi_dev_exit(void); hi_dev_exit(); } static void JPEGD_exit(void) { extern void jpegd_mod_exit(void); jpegd_mod_exit(); } static void VFMW_exit(void) { extern void vfmw_mod_exit(void); vfmw_mod_exit(); } static void VDEC_exit(void) { extern void vdec_mod_exit(void); vdec_mod_exit(); } static void IVE_exit(void) { extern void ive_mod_exit(void); ive_mod_exit(); } static void NNIE_exit(void) { extern void nnie_mod_exit(void); nnie_mod_exit(); } static void Cipher_exit(void) { extern void cipher_drv_mod_exit(void); cipher_drv_mod_exit(); } static void HI_USER_exit(void) { extern void hi_user_exit(void); hi_user_exit(); } static void AiaoMod_exit(void) { extern void aiao_mod_exit(void); aiao_mod_exit(); } static void AiMod_exit(void) { extern void ai_mod_exit(void); ai_mod_exit(); } static void AoMod_exit(void) { extern void ao_mod_exit(void); ao_mod_exit(); } static void AencMod_exit(void) { extern void aenc_mod_exit(void); aenc_mod_exit(); } static void AdecMod_exit(void) { extern void 
adec_mod_exit(void); adec_mod_exit(); } static void AcodecMod_exit(void) { extern void acodec_mod_exit(void); acodec_mod_exit(); } static void remove_audio(void) { AcodecMod_exit(); AdecMod_exit(); AencMod_exit(); AoMod_exit(); AiMod_exit(); AiaoMod_exit(); } extern void osal_proc_exit(void); void SDK_exit(void) { HI_USER_exit(); Cipher_exit(); MIPITX_exit(); MIPIRX_exit(); HDMI_exit(); hi_sensor_spi_exit(); hi_sensor_i2c_exit(); PIRIS_exit(); PWM_exit(); remove_audio(); HIFB_exit(); TDE_exit(); NNIE_exit(); IVE_exit(); VDEC_exit(); VFMW_exit(); JPEGD_exit(); JPEGE_exit(); H265e_exit(); H264e_exit(); VENC_exit(); RC_exit(); VEDU_exit(); CHNL_exit(); VO_exit(); VPSS_exit(); ISP_exit(); VI_exit(); DIS_exit(); VGS_exit(); GDC_exit(); RGN_exit(); SYS_exit(); BASE_exit(); MMZ_exit(); osal_proc_exit(); printf("SDK exit ok...\n"); } #ifdef __cplusplus #if __cplusplus } #endif #endif /* End of #ifdef __cplusplus */
hrnr/qsurf
config.h
// user process that is spawned to handle actions specified in spawnshortcuts auto userprocess = "/home/henry/.config/qsurf/usr.sh"; // this script is injected to all pages auto scriptfile = "/home/henry/.config/qsurf/script.js"; // this action will be executed on startup, when no url is provided. // Action is identified by it's shortcut. Assign nullptr to disable this // functionality auto startupaction = "Ctrl+g"; // maximum http cache size to use in bytes const int maximum_cache_size = 1073741824 /* 1 GB */; /* shortcuts */ std::initializer_list<std::tuple<QWebEnginePage::WebAction, const char *>> webshortcuts = { {QWebEnginePage::Reload, "Ctrl+r"}, {QWebEnginePage::ReloadAndBypassCache, "Ctrl+Shift+r"}, {QWebEnginePage::Forward, "Forward; Ctrl+l"}, {QWebEnginePage::Back, "Back; Ctrl+h"}, {QWebEnginePage::Stop, "Ctrl+Esc"}, }; std::initializer_list<std::tuple<const char *, const char *, std::function<void(WebView*, const std::string &)>>> spawnshortcuts = { {"navigate", "Ctrl+g", [](auto view, auto output) { if (output.empty()) return; view->load(QUrl::fromUserInput(output.c_str())); }}, {"find", "Ctrl+f", [](auto view, auto output) { view->findText(output.c_str()); }}, {"bookmark", "Ctrl+b", [](auto, auto) {}}, }; std::initializer_list<std::tuple<const char *, std::function<void(WebView*)>>> generalshortcuts = { {"Ctrl+n", [](auto view) { view->findNext(); }}, {"Ctrl+Shift+n", [](auto view) { view->findNext(QWebEnginePage::FindBackward); }}, {"Ctrl+y", [](auto view) { clipboard->setText(view->url().toString()); }}, {"Ctrl+p", [](auto view) { view->load(clipboard->text()); }}, {"Ctrl++", [](auto view) { view->setZoomFactor(view->zoomFactor() + 0.25); }}, {"Ctrl+-", [](auto view) { view->setZoomFactor(view->zoomFactor() - 0.25); }}, {"Ctrl+0", [](auto view) { view->setZoomFactor(1.0); }}, {"Esc", [](auto view) { if (view->isFullScreen()) { view->triggerPageAction(QWebEnginePage::ExitFullScreen); } else { // clear find highlights view->findText(""); } }}, };
zhouxj6112/ARKit
ARHome/ARHome/Utilities/ReplayKitUtil.h
//
//  ReplayKitUtil.h
//  ARHome
//
//  Created by MrZhou on 2017/11/19.
//  Copyright © 2017年 vipme. All rights reserved.
//

#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>

/// Class-method utility for screen recording and related upload/command
/// helpers (presumably backed by ReplayKit -- confirm in the .m file).
@interface ReplayKitUtil : NSObject

/// Start recording; the parent view controller hosts any system UI.
/// NOTE(review): "Recoder" is a long-standing typo for "Recorder"; renaming
/// would break existing callers.
+ (void)startRecoder:(UIViewController *)parentViewController;

/// Stop the current recording.
+ (void)stopRecoder;

/// Whether a recording is currently in progress.
+ (BOOL)isRecording;

/// Start the background upload task.
+ (void)startUploadTask;

/// Execute a command for the file at filePath -- semantics live in the
/// implementation; confirm before use. ("excute" is an existing typo.)
+ (void)excuteCmd:(NSString *)filePath;

@end
zhouxj6112/ARKit
ARKit/ARKit/ViewController.h
//
//  ViewController.h
//  ARKit
//
//  Created by MrZhou on 2017/9/24.
//  Copyright © 2017年 MrZhou. All rights reserved.
//

#import <UIKit/UIKit.h>
#import <SceneKit/SceneKit.h>
#import <ARKit/ARKit.h>

// Main view controller of the ARKit sample. No public API beyond
// UIViewController; all behavior lives in the implementation file.
@interface ViewController : UIViewController

@end
zhouxj6112/ARKit
ARHome/ARHome/Resources/PrefixHeader.h
//
//  PrefixHeader.pch
//  ARHome
//
//  Created by MrZhou on 2017/11/17.
//  Copyright © 2017年 vipme. All rights reserved.
//

#ifndef PrefixHeader_pch
#define PrefixHeader_pch

// Include any system framework and library headers here that should be included in all compilation units.
// You will also need to set the Prefix Header build setting of one or more of your targets to reference this file.

#import <CommonCrypto/CommonCrypto.h>

// Project headers made available to every compilation unit.
#import "ReplayKitUtil.h"
#import "SettingViewController.h"
//#import "BatchDownloadViewController.h"
#import "ChooseHistoryViewController.h"

#endif /* PrefixHeader_pch */
zhouxj6112/ARKit
ARHome/ARHome/objc/StaticNavigationViewController.h
//
//  StaticNavigationViewController.h
//  ARHome
//
//  Created by MrZhou on 2018/3/2.
//  Copyright © 2018年 vipme. All rights reserved.
//

#import <UIKit/UIKit.h>

// UINavigationController subclass; no public API declared here --
// customizations (if any) live in the implementation file.
@interface StaticNavigationViewController : UINavigationController

@end
zhouxj6112/ARKit
ARHome/ARHome/objc/BatchDownloadViewController.h
//
//  BatchDownloadViewController.h
//  ARHome
//
//  Created by MrZhou on 2018/2/25.
//  Copyright © 2018年 vipme. All rights reserved.
//

#import <UIKit/UIKit.h>

// View controller for the batch-download screen; no public API declared
// here -- see the implementation file. (Currently commented out of
// PrefixHeader.pch.)
@interface BatchDownloadViewController : UIViewController

@end
zhouxj6112/ARKit
ARHome/ARHome/objc/SettingViewController.h
//
//  SettingViewController.h
//  ARHome
//
//  Created by MrZhou on 2017/12/22.
//  Copyright © 2017年 vipme. All rights reserved.
//

#import <UIKit/UIKit.h>

// View controller for the settings screen; no public API declared here.
@interface SettingViewController : UIViewController

@end
zhouxj6112/ARKit
ARHome/ARHome/VR/VREditViewController.h
//
//  VREditViewController.h
//  ARHome
//
//  Created by MrZhou on 2018/5/6.
//  Copyright © 2018年 vipme. All rights reserved.
//

#import <UIKit/UIKit.h>

// View controller for the VR editing screen; no public API declared here.
@interface VREditViewController : UIViewController

@end
zhouxj6112/ARKit
ARHome/ARHome/objc/ChooseHistoryViewController.h
//
//  ChooseHistoryViewController.h
//  ARHome
//
//  Created by MrZhou on 2018/5/27.
//  Copyright © 2018年 vipme. All rights reserved.
//

#import <UIKit/UIKit.h>

// View controller for choosing an entry from history; no public API
// declared here -- see the implementation file.
@interface ChooseHistoryViewController : UIViewController

@end
duxingren14/IslandPerimeter
main.c
/*
 * Compute the perimeter of the island in a binary grid.
 *
 * grid[r][c] is 1 for land and 0 for water. Each land cell contributes
 * one unit of perimeter for every side that faces either the grid
 * boundary or a water cell.
 */
int islandPerimeter(int** grid, int gridRowSize, int gridColSize) {
    int perimeter = 0;
    for (int r = 0; r < gridRowSize; r++) {
        for (int c = 0; c < gridColSize; c++) {
            if (!grid[r][c])
                continue;
            // Count exposed sides: up, down, left, right.
            if (r == 0 || !grid[r - 1][c]) perimeter++;
            if (r == gridRowSize - 1 || !grid[r + 1][c]) perimeter++;
            if (c == 0 || !grid[r][c - 1]) perimeter++;
            if (c == gridColSize - 1 || !grid[r][c + 1]) perimeter++;
        }
    }
    return perimeter;
}
vyatu/AmbientColorScheme
src/lmu.c
<gh_stars>1-10 #include <stdio.h> #include <string.h> #import <IOKit/IOKitLib.h> #define kGetSensorReadingID 0 #define calibrationConstant 25064 static uint64_t max (uint64_t x, uint64_t y) { return x ^ ((x ^ y) & -(x < y)); }; uint64_t readSensor () { io_connect_t dataPort; kern_return_t kr = KERN_FAILURE; io_service_t serviceObject; uint32_t outputCount = 2; uint64_t values[outputCount]; serviceObject = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("AppleLMUController")); if (serviceObject) { kr = IOServiceOpen(serviceObject, mach_task_self(), 0, &dataPort); } IOObjectRelease(serviceObject); if (kr == KERN_SUCCESS) { kr = IOConnectCallMethod(dataPort, kGetSensorReadingID, nil, 0, nil, 0, values, &outputCount, nil, 0); IOServiceClose(dataPort); return max(values[0], values[1]) / calibrationConstant; } else { IOServiceClose(dataPort); return -1; } }
sandeepdas05/lsm-crack-width
lsml/util/_cutil/masked_gradient.c
/*
 * Masked gradient
 * ---------------
 * Routines for computing gradients over masked regions.
 *
 * All arrays are flat row-major buffers indexed via the mi2d/mi3d helpers.
 * Every routine skips entries whose `mask` value is false, leaving the
 * corresponding output entries untouched.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>

#include "helpers.c"

/* Central-difference gradient of the (m,n,p) array A: centered differences
 * in the interior, one-sided differences on boundary faces. Outputs the
 * components di/dj/dk (divided by the spacings deli/delj/delk) and the
 * magnitude gmag. If normalize == 1, (di,dj,dk) is rescaled to a unit
 * vector wherever gmag > 0. */
void gradient_centered3d(int m, int n, int p, double * A, bool * mask,
                         double * di, double * dj, double * dk,
                         double * gmag,
                         double deli, double delj, double delk,
                         int normalize)
{
    int l;

    for(int i=0; i < m; i++) {
        for(int j=0; j < n; j++) {
            for(int k=0; k < p; k++) {
                l = mi3d(i,j,k,m,n,p);
                if (!mask[l]) continue;

                // Gradient along i axes.
                if (i == 0) {
                    di[l] = A[mi3d(i+1,j,k,m,n,p)] - A[l];
                } else if (i == m-1) {
                    di[l] = A[l] - A[mi3d(i-1,j,k,m,n,p)];
                } else {
                    di[l] = 0.5*(A[mi3d(i+1,j,k,m,n,p)] -
                                 A[mi3d(i-1,j,k,m,n,p)]);
                }

                // Gradient along j axes.
                if (j == 0) {
                    dj[l] = A[mi3d(i,j+1,k,m,n,p)] - A[l];
                } else if (j == n-1) {
                    dj[l] = A[l] - A[mi3d(i,j-1,k,m,n,p)];
                } else {
                    dj[l] = 0.5*(A[mi3d(i,j+1,k,m,n,p)] -
                                 A[mi3d(i,j-1,k,m,n,p)]);
                }

                // Gradient along k axes.
                if (k == 0) {
                    dk[l] = A[mi3d(i,j,k+1,m,n,p)] - A[l];
                } else if (k == p-1) {
                    dk[l] = A[l] - A[mi3d(i,j,k-1,m,n,p)];
                } else {
                    dk[l] = 0.5*(A[mi3d(i,j,k+1,m,n,p)] -
                                 A[mi3d(i,j,k-1,m,n,p)]);
                }

                // Scale by the physical grid spacing along each axis.
                di[l] = di[l] / deli;
                dj[l] = dj[l] / delj;
                dk[l] = dk[l] / delk;

                gmag[l] = sqrt(sqr(di[l]) + sqr(dj[l]) + sqr(dk[l]));

                if (normalize == 1 && gmag[l] > 0) {
                    di[l] /= gmag[l];
                    dj[l] /= gmag[l];
                    dk[l] /= gmag[l];
                }
            } // End k loop.
        } // End j loop.
    } // End i loop.
}

/* 2d analogue of gradient_centered3d. */
void gradient_centered2d(int m, int n, double * A, bool * mask,
                         double * di, double * dj, double * gmag,
                         double deli, double delj,
                         int normalize)
{
    int l;

    for(int i=0; i < m; i++) {
        for(int j=0; j < n; j++) {
            l = mi2d(i,j,m,n);
            if (!mask[l]) continue;

            // Gradient along i axes.
            if (i == 0) {
                di[l] = A[mi2d(i+1,j,m,n)] - A[l];
            } else if (i == m-1) {
                di[l] = A[l] - A[mi2d(i-1,j,m,n)];
            } else {
                di[l] = 0.5*(A[mi2d(i+1,j,m,n)] -
                             A[mi2d(i-1,j,m,n)]);
            }

            // Gradient along j axes.
            if (j == 0) {
                dj[l] = A[mi2d(i,j+1,m,n)] - A[l];
            } else if (j == n-1) {
                dj[l] = A[l] - A[mi2d(i,j-1,m,n)];
            } else {
                dj[l] = 0.5*(A[mi2d(i,j+1,m,n)] -
                             A[mi2d(i,j-1,m,n)]);
            }

            di[l] = di[l] / deli;
            dj[l] = dj[l] / delj;

            gmag[l] = sqrt(sqr(di[l]) + sqr(dj[l]));

            if (normalize == 1 && gmag[l] > 0) {
                di[l] /= gmag[l];
                dj[l] /= gmag[l];
            }
        } // End j loop.
    } // End i loop.
}

/* 1d analogue of gradient_centered3d; gmag is simply |di|, and
 * normalization reduces di to its sign. */
void gradient_centered1d(int m, double * A, bool * mask,
                         double * di, double * gmag, double deli,
                         int normalize)
{
    for(int i=0; i < m; i++) {
        if (!mask[i]) continue;

        if (i == 0) {
            di[i] = A[i+1] - A[i];
        } else if (i == m-1) {
            di[i] = A[i] - A[i-1];
        } else {
            di[i] = 0.5*(A[i+1] - A[i-1]);
        }

        di[i] = di[i] / deli;
        gmag[i] = (di[i] > 0) ? di[i] : -di[i];

        if (normalize == 1 && gmag[i] > 0) {
            di[i] /= gmag[i];
        }
    } // End i loop.
}

/* One-sided ("upwind"-style) gradient magnitude in 3d: per axis both the
 * forward (f*) and backward (b*) differences are formed, and the sign of
 * the speed field nu selects which combination of them enters the
 * magnitude (max/min clamping against 0 per the visible scheme below).
 * On boundary faces the available one-sided difference is used for both. */
void gmag_os3d(int m, int n, int p, double * A, bool * mask,
               double * nu, double * gmag,
               double deli, double delj, double delk)
{
    int l;
    double fi,fj,fk,bi,bj,bk;

    for(int i=0; i < m; i++) {
        for(int j=0; j < n; j++) {
            for(int k=0; k < p; k++) {
                l = mi3d(i,j,k,m,n,p);
                if (!mask[l]) continue;

                // Forward/backward differences along i.
                if (i == 0) {
                    fi = A[mi3d(i+1,j,k,m,n,p)] - A[l];
                    bi = fi;
                } else if (i == m-1) {
                    bi = A[l] - A[mi3d(i-1,j,k,m,n,p)];
                    fi = bi;
                } else {
                    fi = A[mi3d(i+1,j,k,m,n,p)] - A[l];
                    bi = A[l] - A[mi3d(i-1,j,k,m,n,p)];
                }

                // Gradient along j axes.
                if (j == 0) {
                    fj = A[mi3d(i,j+1,k,m,n,p)] - A[l];
                    bj = fj;
                } else if (j == n-1) {
                    bj = A[l] - A[mi3d(i,j-1,k,m,n,p)];
                    fj = bj;
                } else {
                    fj = A[mi3d(i,j+1,k,m,n,p)] - A[l];
                    bj = A[l] - A[mi3d(i,j-1,k,m,n,p)];
                }

                // Gradient along k axes.
                if (k == 0) {
                    fk = A[mi3d(i,j,k+1,m,n,p)] - A[l];
                    bk = fk;
                } else if (k == p-1) {
                    bk = A[l] - A[mi3d(i,j,k-1,m,n,p)];
                    fk = bk;
                } else {
                    fk = A[mi3d(i,j,k+1,m,n,p)] - A[l];
                    bk = A[l] - A[mi3d(i,j,k-1,m,n,p)];
                }

                fi = fi/deli; bi = bi/deli;
                fj = fj/delj; bj = bj/delj;
                fk = fk/delk; bk = bk/delk;

                // The sign of nu selects the upwind combination.
                if (nu[l] < 0) {
                    gmag[l] = sqrt(sqr(max(bi,0)) + sqr(min(fi,0)) +
                                   sqr(max(bj,0)) + sqr(min(fj,0)) +
                                   sqr(max(bk,0)) + sqr(min(fk,0)));
                } else {
                    gmag[l] = sqrt(sqr(min(bi,0)) + sqr(max(fi,0)) +
                                   sqr(min(bj,0)) + sqr(max(fj,0)) +
                                   sqr(min(bk,0)) + sqr(max(fk,0)));
                } // End if speed.
            } // End k loop.
        } // End j loop.
    } // End i loop.
}

/* 2d analogue of gmag_os3d. */
void gmag_os2d(int m, int n, double * A, bool * mask,
               double * nu, double * gmag,
               double deli, double delj)
{
    int l;
    double fi,fj,bi,bj;

    for(int i=0; i < m; i++) {
        for(int j=0; j < n; j++) {
            l = mi2d(i,j,m,n);
            if (!mask[l]) continue;

            // Forward/backward differences along i.
            if (i == 0) {
                fi = A[mi2d(i+1,j,m,n)] - A[l];
                bi = fi;
            } else if (i == m-1) {
                bi = A[l] - A[mi2d(i-1,j,m,n)];
                fi = bi;
            } else {
                fi = A[mi2d(i+1,j,m,n)] - A[l];
                bi = A[l] - A[mi2d(i-1,j,m,n)];
            }

            // Gradient along j axes.
            if (j == 0) {
                fj = A[mi2d(i,j+1,m,n)] - A[l];
                bj = fj;
            } else if (j == n-1) {
                bj = A[l] - A[mi2d(i,j-1,m,n)];
                fj = bj;
            } else {
                fj = A[mi2d(i,j+1,m,n)] - A[l];
                bj = A[l] - A[mi2d(i,j-1,m,n)];
            }

            fi = fi/deli; bi = bi/deli;
            fj = fj/delj; bj = bj/delj;

            if (nu[l] < 0) {
                gmag[l] = sqrt(sqr(max(bi,0)) + sqr(min(fi,0)) +
                               sqr(max(bj,0)) + sqr(min(fj,0)));
            } else {
                gmag[l] = sqrt(sqr(min(bi,0)) + sqr(max(fi,0)) +
                               sqr(min(bj,0)) + sqr(max(fj,0)));
            } // End if speed.
        } // End j loop.
    } // End i loop.
}

/* 1d analogue of gmag_os3d. */
void gmag_os1d(int m, double * A, bool * mask,
               double * nu, double * gmag, double deli)
{
    double fi,bi;

    for(int i=0; i < m; i++) {
        if (!mask[i]) continue;

        if (i == 0) {
            fi = A[i+1] - A[i];
            bi = fi;
        } else if (i == m-1) {
            bi = A[i] - A[i-1];
            fi = bi;
        } else {
            fi = A[i+1] - A[i];
            bi = A[i] - A[i-1];
        }

        fi = fi/deli;
        bi = bi/deli;

        if (nu[i] < 0) {
            gmag[i] = sqrt(sqr(max(bi,0)) + sqr(min(fi,0)));
        } else {
            gmag[i] = sqrt(sqr(min(bi,0)) + sqr(max(fi,0)));
        } // End if speed.
    } // End i loop.
}
sandeepdas05/lsm-crack-width
lsml/initializer/provided/util/_radii_from_mask.c
#include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include "../../util/helpers.h" #include "./trilinear.c" /* Compute the radial distance from provided 3d `seed` * in direction given by theta and phi angles until a * value of `false` is encountered in `mask`. */ void radii_from_mask(int ntpr, double * thetas, double * phis, double * radii, double * seed, int m, int n, int p, double di, double dj, double dk, bool * mask) { double ii,jj,kk; double a,b,c; double mval; double dt = 0.1; // Convert boolean mask to float to be used in interpolation. double * dmask = malloc(m*n*p * sizeof(double)); for (int i=0; i < m*n*p; i++) { dmask[i] = mask[i] ? 1.0 : 0.0; } for (int i=0; i < ntpr; i++) { radii[i] = 0.0; ii = seed[0]; jj = seed[1]; kk = seed[2]; a = dt*cos(thetas[i]) * sin(phis[i]); b = dt*sin(thetas[i]) * sin(phis[i]); c = dt*cos(phis[i]); while (true) { // Advance one step along ray. ii += a; jj += b; kk += c; radii[i] += dt; mval = interpolate_point(ii, jj, kk, dmask, di, dj, dk, m, n, p); if (mval < 0.5) break; } } // Free the memory used by the float mask. free(dmask); }
sandeepdas05/lsm-crack-width
lsml/initializer/provided/util/trilinear.c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>

#include "../../util/helpers.h"

/* A big ugly function for performing trilinear interpolation on
 * a 3d scalar field.
 *
 * (i,j,k) are physical coordinates; dividing by the spacings (di,dj,dk)
 * converts them to index space. Out-of-bounds points evaluate to 0.
 * Points on a max-index face fall back to bilinear / linear interpolation
 * or a direct lookup so that ihigh/jhigh/khigh never index past the end
 * of the volume. */
double interpolate_point(double i, double j, double k,
                         double * img,
                         double di, double dj, double dk, // delta terms
                         int m, int n, int p) {
    int ilow, ihigh, jlow, jhigh, klow, khigh;

    // Return zero for out-of-bounds values.
    if (i < 0 || i > (m-1)*di ||
        j < 0 || j > (n-1)*dj ||
        k < 0 || k > (p-1)*dk) {
        return 0.0;
    }

    // Convert from physical to index coordinates.
    i /= di;
    j /= dj;
    k /= dk;

    // Enclosing cell corners; the weights below are the fractional
    // distances to the opposite corner along each axis.
    ilow = (int) i;
    jlow = (int) j;
    klow = (int) k;
    ihigh = ilow + 1;
    jhigh = jlow + 1;
    khigh = klow + 1;

    if (i < m-1 && j < n-1 && k < p-1) {
        // Full 3D interpolation.
        return (ihigh-i)*(jhigh-j)*(khigh-k)*img[mi3d(ilow , jlow , klow , m, n, p)] +
               (i-ilow )*(jhigh-j)*(khigh-k)*img[mi3d(ihigh, jlow , klow , m, n, p)] +
               (ihigh-i)*(j-jlow )*(khigh-k)*img[mi3d(ilow , jhigh, klow , m, n, p)] +
               (ihigh-i)*(jhigh-j)*(k-klow )*img[mi3d(ilow , jlow , khigh, m, n, p)] +
               (i-ilow )*(j-jlow )*(khigh-k)*img[mi3d(ihigh, jhigh, klow , m, n, p)] +
               (i-ilow )*(jhigh-j)*(k-klow )*img[mi3d(ihigh, jlow , khigh, m, n, p)] +
               (ihigh-i)*(j-jlow )*(k-klow )*img[mi3d(ilow , jhigh, khigh, m, n, p)] +
               (i-ilow )*(j-jlow )*(k-klow )*img[mi3d(ihigh, jhigh, khigh, m, n, p)];
    }
    else if (i == m-1 && j < n-1 && k < p-1) {
        // 2D interpolation on bottom face.
        return (jhigh-j)*(khigh-k)*img[mi3d(m-1, jlow , klow , m, n, p)] +
               (j-jlow )*(khigh-k)*img[mi3d(m-1, jhigh, klow , m, n, p)] +
               (jhigh-j)*(k-klow )*img[mi3d(m-1, jlow , khigh, m, n, p)] +
               (j-jlow )*(k-klow )*img[mi3d(m-1, jhigh, khigh, m, n, p)];
    }
    else if (i < m-1 && j == n-1 && k < p-1) {
        // 2D interpolation on right face.
        return (ihigh-i)*(khigh-k)*img[mi3d(ilow , n-1, klow , m, n, p)] +
               (i-ilow )*(khigh-k)*img[mi3d(ihigh, n-1, klow , m, n, p)] +
               (ihigh-i)*(k-klow )*img[mi3d(ilow , n-1, khigh, m, n, p)] +
               (i-ilow )*(k-klow )*img[mi3d(ihigh, n-1, khigh, m, n, p)];
    }
    else if (i < m-1 && j < n-1 && k == p-1) {
        // 2D interpolation on back face.
        return (ihigh-i)*(jhigh-j)*img[mi3d(ilow , jlow , p-1, m, n, p)] +
               (i-ilow )*(jhigh-j)*img[mi3d(ihigh, jlow , p-1, m, n, p)] +
               (ihigh-i)*(j-jlow )*img[mi3d(ilow , jhigh, p-1, m, n, p)] +
               (i-ilow )*(j-jlow )*img[mi3d(ihigh, jhigh, p-1, m, n, p)];
    }
    else if (i == m-1 && j == n-1 && k < p-1) {
        // 1D interpolation along bottom right edge.
        return (khigh-k)*img[mi3d(m-1, n-1, klow , m, n, p)] +
               (k-klow )*img[mi3d(m-1, n-1, khigh, m, n, p)];
    }
    else if (i == m-1 && j < n-1 && k == p-1) {
        // 1D interpolation along back bottom edge.
        return (jhigh-j)*img[mi3d(m-1, jlow , p-1, m, n, p)] +
               (j-jlow )*img[mi3d(m-1, jhigh, p-1, m, n, p)];
    }
    else if (i < m-1 && j == n-1 && k == p-1) {
        // 1D interpolation along back, right edge.
        //     _
        //    /_/|<-
        //    |_|/
        return (ihigh-i)*img[mi3d(ilow , n-1, p-1, m, n, p)] +
               (i-ilow )*img[mi3d(ihigh, n-1, p-1, m, n, p)];
    }
    else {
        // Exactly the far corner voxel.
        return img[mi3d(m-1, n-1, p-1, m, n, p)];
    }
}

//void interpolate(double * igrid, double * jgrid, double * kgrid,
//                 int q, int r, int s, double * img, int m, int n, int p,
//                 double * irp) {
//    int l;
//
//    for (int i=0; i < q; i++) {
//    for (int j=0; j < r; j++) {
//    for (int k=0; k < s; k++) {
//        l = mi3d(i,j,k,q,r,s);
//        irp[l] = interpolate_point(igrid[l], jgrid[l], kgrid[l],
//                                   img, m, n, p);
//    }
//    }
//    }
//}
sandeepdas05/lsm-crack-width
lsml/util/_cutil/helpers.c
/* * Helper utilities for C libraries * -------------------------------- * Mostly, index mappers into flattened multi-dimensional arrays */ #ifndef C_HELPERS #define C_HELPERS #include <stdlib.h> #define PI 3.14159265358979311599796346854419 #define TWOPI 6.28318530717958623199592693708837 // Map the index (i,j,k) to an index l, the "flat" index // into the row-major 3D array of dimensions, (m,n,p). int inline mi3d(int i, int j, int k, int m, int n, int p) { #if MI_CHECK_INDEX int do_abort = 0; if (i < 0 || i > m-1) { printf("Bad index: i = %d (valid = 0-%d).\n", i, m-1); do_abort = 1; } if (j < 0 || j > n-1) { printf("Bad index: j = %d (valid = 0-%d).\n", j, n-1); do_abort = 1; } if (k < 0 || k > p-1) { printf("Bad index: k = %d (valid = 0-%d).\n", k, p-1); do_abort = 1; } if (do_abort == 1) abort(); #endif return n*p*i + p*j + k; } // Map the index (i,j) to an index l, the "flat" index // into the row-major 2D array of dimensions, (m,n). int inline mi2d(int i, int j, int m, int n) { #if MI_CHECK_INDEX int do_abort = 0; if (i < 0 || i > m-1) { printf("Bad index: i = %d (valid = 0-%d).\n", i, m-1); do_abort = 1; } if (j < 0 || j > n-1) { printf("Bad index: j = %d (valid = 0-%d).\n", j, n-1); do_abort = 1; } if (do_abort == 1) abort(); #endif return n*i + j; } // Simple math functions. double inline max(double a, double b) { return a < b ? b : a; } double inline min(double a, double b) { return a > b ? b : a; } double inline sqr(double a) { return a*a; } bool inline check_bounds(int i, int j, int k, int m, int n, int p) { if (i < 0) return false; if (j < 0) return false; if (k < 0) return false; if (i > m-1) return false; if (j > n-1) return false; if (k > p-1) return false; return true; } #endif
sandeepdas05/lsm-crack-width
lsml/util/_cutil/com_ray_sample.c
<filename>lsml/util/_cutil/com_ray_sample.c /* * COM (center of mass) ray samples * -------------------------------- * This function samples image values along the "COM ray" from each point. * I.e., connecting any given location to the center of mass provided * yields a ray, along which we can sample the underlying image data in * both the inward and outward directions from the ray starting point. */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include "helpers.c" void get_samples( int m, int n, int p, double * img, // dims and image vol. double di, double dj, double dk, // delta terms bool * mask, // boolean mask volume double * com, // center of mass (in index space) int nsamples, // desired # of samples double * samples // output volume, shape = (m, n, p, nsamples, 2) ) { int l,ll; double a, b, c; double ii_i, jj_i, kk_i; double ii_o, jj_o, kk_o; bool in_bounds_i, in_bounds_o, is_zero; double dt, cdist; for (int i=0; i < m; i++) { for (int j=0; j < n; j++) { for (int k=0; k < p; k++) { l = mi3d(i,j,k,m,n,p); if (!mask[l]) continue; // Distance to center of mass. cdist = sqrt(sqr(di*(i-com[0])) + sqr(dj*(j-com[1])) + sqr(dk*(k-com[2]))); printf("%.2f", cdist); // dt is the step length along in the normal directions. dt = cdist / (nsamples+1.0); a = dt * (com[0]-i) / cdist; b = dt * (com[1]-j) / cdist; c = dt * (com[2]-k) / cdist; // The gradient vector is zero, so we can't compute // the feature for this coordinate, (i,j,k). 
is_zero = (a == 0 && b == 0 && c == 0); // _i = inward ray // _o = outward ray ii_i = ii_o = i*di; jj_i = jj_o = j*dj; kk_i = kk_o = k*dk; for (int q=1; q <= nsamples; q++) { printf("%d, %d, %d\n", (int) round(ii_i/di), (int) round(jj_i/dj), (int) round(kk_i/dk) ); in_bounds_i = check_bounds((int) round(ii_i/di), (int) round(jj_i/dj), (int) round(kk_i/dk), m, n, p); in_bounds_o = check_bounds((int) round(ii_o/di), (int) round(jj_o/dj), (int) round(kk_o/dk), m, n, p); // `samples` is 4D, so we use the map index function `mi3d` // to map the 3D, row-major coordinate to 4D row-major. // `ll` is part of the 4D index computation used below. ll = nsamples*l + q-1; // Add the inward normal sample to `samples`. if (is_zero || !in_bounds_i) { samples[2*ll+0] = 0.0; } else { l = mi3d( (int) round(ii_i/di), (int) round(jj_i/dj), (int) round(kk_i/dk), m, n, p ); samples[2*ll+0] = img[l]; } // Add the outward normal sample to `samples`. if (is_zero || !in_bounds_o) { samples[2*ll+1] = 0.0; } else { l = mi3d( (int) round(ii_o/di), (int) round(jj_o/dj), (int) round(kk_o/dk), m, n, p ); samples[2*ll+1] = img[l]; } // Advance one step along ray. ii_i += a; jj_i += b; kk_i += c; ii_o -= a; jj_o -= b; kk_o -= c; } } // End loop k. } // End loop j. } // End loop k. }
sazus/QtTranslatorSample
02_DynamicTranslationNG/src/uicontrol.h
#ifndef UICONTROL_H
#define UICONTROL_H

#include <QObject>

class UiControlPrivate;

// Controller for the sample's UI, using the pimpl pattern via
// UiControlPrivate.
class UiControl : public QObject
{
    Q_OBJECT
public:
    explicit UiControl(QObject *parent = 0);
    ~UiControl();

    // Show the UI.
    void show();

private:
    Q_DECLARE_PRIVATE(UiControl)
    UiControlPrivate * const d_ptr;

public slots:
    // Handles a button click identified by the string argument.
    // NOTE(review): "Botton" is an existing typo for "Button"; renaming the
    // slot would break name-based signal/slot connections.
    void BottonClick(QString);
};

#endif // UICONTROL_H
sazus/QtTranslatorSample
03_DynamicTranslation/src/uicontrol.h
#ifndef UICONTROL_H
#define UICONTROL_H

#include <QObject>

class UiControlPrivate;

// Controller for the sample's UI (pimpl via UiControlPrivate).
class UiControl : public QObject
{
    Q_OBJECT
    // Always-empty property whose NOTIFY signal is emitted on language
    // change; presumably bindings reference `qtr` so they re-evaluate and
    // pick up new translations (the common dynamic-retranslation idiom --
    // confirm in the QML that uses it).
    Q_PROPERTY(QString qtr READ qtr NOTIFY languageChanged)
public:
    explicit UiControl(QObject *parent = 0);
    ~UiControl();

    // Show the UI.
    void show();

    // Read accessor for the `qtr` property; intentionally returns an
    // empty string.
    QString qtr(){return QString();}

private:
    Q_DECLARE_PRIVATE(UiControl)
    UiControlPrivate * const d_ptr;

signals:
    // Emitted when the application language changes.
    void languageChanged();

public slots:
    // Handles a button click identified by the string argument.
    // NOTE(review): "Botton" is an existing typo for "Button"; renaming the
    // slot would break name-based signal/slot connections.
    void BottonClick(QString);
};

#endif // UICONTROL_H
matutem/opentitan
sw/device/lib/dif/dif_base.c
// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0

#include "sw/device/lib/dif/dif_base.h"

#include <stdbool.h>

#include "sw/device/lib/base/multibits.h"

// `extern` declarations to give the inline functions in the corresponding
// header a link location (one out-of-line definition per inline function,
// as required when a caller's inline expansion is suppressed).
extern bool dif_is_valid_toggle(dif_toggle_t val);
extern bool dif_toggle_to_bool(dif_toggle_t val);
extern dif_toggle_t dif_bool_to_toggle(bool val);
extern dif_toggle_t dif_multi_bit_bool_to_toggle(multi_bit_bool_t val);
matutem/opentitan
sw/device/silicon_creator/lib/sigverify_mod_exp_otbn_functest.c
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 #include "sw/device/lib/base/memory.h" #include "sw/device/silicon_creator/lib/base/sec_mmio.h" #include "sw/device/silicon_creator/lib/sigverify_mod_exp.h" #include "sw/device/silicon_creator/lib/sigverify_tests/sigverify_testvectors.h" #include "sw/device/silicon_creator/lib/test_main.h" // Index of the test vector currently under test static uint32_t test_index; rom_error_t sigverify_mod_exp_otbn_test(void) { sigverify_test_vector_t testvec = sigverify_tests[test_index]; sigverify_rsa_buffer_t recovered_message; RETURN_IF_ERROR( sigverify_mod_exp_otbn(&testvec.key, &testvec.sig, &recovered_message)); bool passed = memcmp(testvec.encoded_msg, recovered_message.data, sizeof(testvec.encoded_msg)) == 0; if (testvec.valid && !passed) { // Signature verification failed when it was expected to pass. LOG_ERROR("Failed to verify a valid signature."); LOG_INFO("Test notes: %s", testvec.comment); return kErrorUnknown; } else if (!testvec.valid && passed) { // Signature verification passed when it was expected to fail. LOG_ERROR("Invalid signature passed verification."); LOG_INFO("Test notes: %s", testvec.comment); return kErrorUnknown; } return kErrorOk; } const test_config_t kTestConfig; bool test_main(void) { rom_error_t result = kErrorOk; for (uint32_t i = 0; i < SIGVERIFY_NUM_TESTS; i++) { LOG_INFO("Starting test vector %d of %d...", i + 1, SIGVERIFY_NUM_TESTS); test_index = i; EXECUTE_TEST(result, sigverify_mod_exp_otbn_test); } return result == kErrorOk; }
thierryseegers/expression_tree
include/expression_tree.h
/* (C) Copyright <NAME> 2010-2014. Distributed under the following license: Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #if !defined(EXPRESSION_TREE_H) #define EXPRESSION_TREE_H #include <functional> #include <future> #include <memory> namespace expression_tree { template<typename T, template<typename, typename> class CachingPolicy, class ThreadingPolicy> class node; namespace detail { //!\brief Operations are what branches perform on their children. //! //! Operations must take two Ts as arguments and return a T. template<typename T> using operation = std::function<T (const T&, const T&)>; } //!\brief Performs parallel evaluation of a branch's children before applying its operation. 
struct parallel
{
    //!\brief Spawns a parallel evaluation task for the left child and evaluates the right child on the current thread.
    template<typename T, template<typename, typename> class C, class E>
    static T evaluate(const detail::operation<T>& o, const node<T, C, E>& l, const node<T, C, E>& r)
    {
        // The left child may run on another thread, at std::async's discretion.
        std::future<T> f = std::async(&node<T, C, E>::evaluate, &l);

        // Let's not rely on any assumption of parameter evaluation order...
        T t = r.evaluate();

        return o(f.get(), t);
    }
};

//!\brief Performs sequential evaluation of a branch's children before applying its operation.
struct sequential
{
    //!\brief Evaluates both children on the current thread and applies the operation.
    //!
    //! Note: the two children are evaluated as function arguments, so the order
    //! in which they are evaluated is unspecified.
    template<typename T, template<typename, typename> class C, class E>
    static T evaluate(const detail::operation<T>& o, const node<T, C, E>& l, const node<T, C, E>& r)
    {
        return o(l.evaluate(), r.evaluate());
    }
};

namespace detail {

//!\brief Base class for the node class internal implementation.
template<typename T>
class node_impl
{
public:
    virtual ~node_impl() {}

    //!\brief Clones this object.
    //!
    //! Necessary for assignment operator of classes that will own concrete instances of this base class.
    virtual std::unique_ptr<node_impl<T>> clone() const = 0;

    //!\brief Constness of the node.
    //!
    //! A leaf node is constant if its data is constant.
    //! A branch node is constant if all its leaf nodes are constant.
    virtual bool constant() const = 0;

    //!\brief All nodes must evaluate.
    //!
    //! A leaf will evaluate to itself.
    //! A branch will evaluate to its operation applied to its left and right children.
    virtual T evaluate() const = 0;
};

//!\brief Leaf class.
//!
//! This class stores a copy of its data.
template<typename T>
class leaf : public node_impl<T>
{
    const T value; //!< This node's value.

public:
    //!\brief Constructor.
    //!
    //!\param value The value of this node.
    leaf(const T& value) : value(value) {}

    //!\brief Copy constructor.
    leaf(const leaf<T>& other) : value(other.value) {}

    virtual ~leaf() {}

    //!\brief Clones this object.
    virtual std::unique_ptr<node_impl<T>> clone() const override { return std::make_unique<leaf<T>>(*this); }

    //! Because this class stores a copy of its data, it is constant.
    virtual bool constant() const override { return true; }

    //! Plainly return our value.
    virtual T evaluate() const override { return value; }
};

//!\brief Leaf class specialized to T*.
//!
//! This class stores a pointer to data.
template<typename T>
class leaf<T*> : public node_impl<T>
{
    const T *p; //!< This node's pointer to data.

public:
    //!\brief Constructor
    //!
    //!\param p Pointer to this node's value.
    leaf(const T* p) : p(p) {}

    //!\brief Copy constructor.
    leaf(const leaf<T*>& other) : p(other.p) {}

    virtual ~leaf() {}

    //!\brief Clones this object.
    virtual std::unique_ptr<node_impl<T>> clone() const override { return std::make_unique<leaf<T*>>(*this); }

    //! Because this class stores a pointer to its data, it is not constant.
    virtual bool constant() const override { return false; }

    //! Dereference our pointer.
    virtual T evaluate() const override { return *p; }
};

//!\brief Leaf class specialized to a callable.
//!
//! This class stores a callable returning a T.
template<typename T>
class leaf<T (*)()> : public node_impl<T>
{
    std::function<T ()> f; //!< The callable.

public:
    //!\brief Constructor
    //!
    //!\param f The callable of this leaf.
    leaf(std::function<T ()> f) : f(f) {}

    //!\brief Copy constructor.
    leaf(const leaf<T (*)()>& other) : f(other.f) {}

    virtual ~leaf() {}

    //!\brief Clones this object.
    virtual std::unique_ptr<node_impl<T>> clone() const override { return std::make_unique<leaf<T (*)()>>(*this); }

    //! Because the callable's result may change from call to call, it is not constant.
    virtual bool constant() const override { return false; }

    //! Invoke the callable.
    virtual T evaluate() const override { return f(); }
};

//!\brief Branch class.
//!
//! This class stores an operation and two children nodes.
//! This default implementation does \a no caching optimization.
template<typename T, template<typename, typename> class CachingPolicy, class ThreadingPolicy>
class default_branch : public node_impl<T>
{
public:
    using node_t = node<T, CachingPolicy, ThreadingPolicy>; //!< Convenience alias.
    using default_branch_t = default_branch<T, CachingPolicy, ThreadingPolicy>; //!< Convenience alias.

    node_t l; //!< This branch's left child.
    node_t r; //!< This branch's right child.

    operation<T> f; //!< Operation to be applied to this node's children.

    //!\brief A version of the poor man's tri-state bool.
    mutable enum constness_e : char { true_, false_, indeterminate } constant_; //!< Caches whether this branch is constant.

    //!\brief Constructor.
    //!
    //!\param f The operation to apply to this branch's children,
    //!\param l This branch's left child.
    //!\param r This branch's right child.
    default_branch(const operation<T>& f, const node_t& l, const node_t& r) : l(l), r(r), f(f), constant_(indeterminate) {}

    //!\brief Copy constructor.
    default_branch(const default_branch_t& other) : l(other.l), r(other.r) , f(other.f), constant_(other.constant_) {}

    virtual ~default_branch() {}

    //! The constness of a branch is determined by the constness of its children.
    //! The result is computed lazily and memoized in constant_.
    virtual bool constant() const override
    {
        if(constant_ == indeterminate)
        {
            constant_ = (l.constant() && r.constant()) ? true_ : false_;
        }

        return constant_ == true_;
    }

    //! Evaluating a branch applies its operation on its children.
    virtual T evaluate() const override { return ThreadingPolicy::evaluate(f, l, r); }

    //!\brief This branch's left child.
    virtual node_t& left() { return l; }

    //!\brief This branch's left child.
    virtual node_t* operator-() { return &l; }

    //!\brief This branch's right child.
    virtual node_t& right() { return r; }

    //!\brief This branch's right child.
    virtual node_t* operator+() { return &r; }

    //! This function is called when any one of this branch's children is modified.
    //! This default implementation invalidates the memoized constness so it is
    //! recomputed on the next constant() call.
    virtual void modified() { constant_ = indeterminate; return; }
};

}

template<typename T, class ThreadingPolicy>
struct no_caching;

//!\brief The tree's node class.
//!
//! This class stores a pointer to its implementation.
//! The implementation node's type is derived at runtime when it is assigned to.
template<typename T, template<typename, typename> class CachingPolicy = no_caching, class ThreadingPolicy = sequential>
class node
{
    using node_t = node<T, CachingPolicy, ThreadingPolicy>; //!< Convenience alias.

    std::unique_ptr<detail::node_impl<T>> impl; //!< Follows the pimpl idiom.

    node_t *parent; //!< This node's parent. Ends up unused when no caching occurs.

public:
    //!\brief Default constructor.
    //!
    //!\param parent Pointer to this node's parent.
    node(node_t *parent = nullptr) : impl(nullptr), parent(parent) {}

    //!\brief Copy constructor.
    node(const node_t& other) : impl(other.impl ? other.impl->clone() : nullptr), parent(other.parent) {}

    //!\brief Assignment operator.
    //!
    //! NOTE(review): unlike the copy constructor, this dereferences other.impl
    //! without a null check — assigning from a default-constructed (empty) node
    //! looks like undefined behavior; confirm callers never do this.
    node_t& operator=(const node_t& other)
    {
        if(this != &other)
        {
            impl = other.impl->clone();

            // If the cloned implementation is a branch, re-parent its children to this node.
            if(auto p = dynamic_cast<typename CachingPolicy<T, ThreadingPolicy>::branch*>(impl.get()))
            {
                p->left().parent = p->right().parent = this;
            }

            if(parent)
            {
                parent->modified();
            }
        }

        return *this;
    }

    virtual ~node() {}

    //!\brief Assign a value to this node.
    //!
    //! The assignment of a \c T designates this node as a leaf node.
    //! A leaf can still be changed to a branch by assigning an operation to it.
    node_t& operator=(const T& t)
    {
        impl.reset(new detail::leaf<T>(t));

        if(parent)
        {
            parent->modified();
        }

        return *this;
    }

    //!\brief Assign a pointer to this node.
    //!
    //! The assignment of a \c T* designates this node as a leaf node.
    //! A leaf can still be changed to a branch by assigning an operation to it.
    node_t& operator=(const T* t)
    {
        impl.reset(new detail::leaf<T*>(t));

        if(parent)
        {
            parent->modified();
        }

        return *this;
    }

    //!\brief Assign a callable to this node.
    //!
    //! The assignment of a callable designates this node as a leaf node.
    //! A leaf can still be changed to a branch by assigning an operation to it.
    node_t& operator=(const std::function<T ()>& f)
    {
        impl.reset(new detail::leaf<T (*)()>(f));

        if(parent)
        {
            parent->modified();
        }

        return *this;
    }

    //!\brief Assign an operation to this node.
    //!
    //! The assignment of an operation designates this node as a branch.
    //! A branch can still be changed to a leaf by assigning data to it.
    node_t& operator=(const detail::operation<T>& f)
    {
        // Create a new branch with the passed operation and two nodes with this node as their parent.
        impl.reset(new typename CachingPolicy<T, ThreadingPolicy>::branch(f, node<T, CachingPolicy, ThreadingPolicy>(this), node<T, CachingPolicy, ThreadingPolicy>(this)));

        if(parent)
        {
            parent->modified();
        }

        return *this;
    }

    //!\brief This node's left child.
    //!
    //! Note that if this node is a leaf node, behavior is undefined.
    node_t& left() { return dynamic_cast<typename CachingPolicy<T, ThreadingPolicy>::branch*>(impl.get())->left(); }

    //!\brief This node's left child.
    //!
    //! Note that if this node is a leaf node, behavior is undefined.
    node_t* operator-() { return -*(dynamic_cast<typename CachingPolicy<T, ThreadingPolicy>::branch*>(impl.get())); }

    //!\brief This node's right child.
    //!
    //! Note that if this node is a leaf node, behavior is undefined.
    node_t& right() { return dynamic_cast<typename CachingPolicy<T, ThreadingPolicy>::branch*>(impl.get())->right(); }

    //!\brief This node's right child.
    //!
    //! Note that if this node is a leaf node, behavior is undefined.
    node_t* operator+() { return +*(dynamic_cast<typename CachingPolicy<T, ThreadingPolicy>::branch*>(impl.get())); }

    //!\brief Constness of this node.
    //!
    //! An empty (unassigned) node reports itself as non-constant.
    bool constant() const { return impl ? impl->constant() : false; }

    //!\brief Evaluates the value of this node.
    T evaluate() const { return impl->evaluate(); }

    //!\brief Called when this node is assigned to.
    //!
    //! Recursively notifies parent nodes of the growth that happened.
    //! NOTE(review): the dynamic_cast result is dereferenced without a null
    //! check, so this assumes impl is a branch whenever a child is modified.
    void modified()
    {
        dynamic_cast<typename CachingPolicy<T, ThreadingPolicy>::branch*>(impl.get())->modified();

        if(parent)
        {
            parent->modified();
        }
    }
};

//!\brief Implementation of the CachingPolicy used by tree.
template<typename T, class ThreadingPolicy>
struct no_caching
{
    using node_t = node<T, expression_tree::no_caching, ThreadingPolicy>; //!< Convenience alias.
    using default_branch_t = detail::default_branch<T, expression_tree::no_caching, ThreadingPolicy>; //!< Convenience alias.

    //!\brief Implementation of a branch class that performs no caching.
    //!
    //! This class performs no optimization.
    //! A non-caching branch will apply its operation on its children whenever it is evaluated.
    class branch : public default_branch_t
    {
    public:
        //!\brief Default constructor.
        branch(const detail::operation<T>& f, const node_t& l, const node_t& r) : default_branch_t(f, l, r) {}

        //!\brief Copy constructor.
        branch(const branch& o) : default_branch_t(o) {}

        virtual ~branch() {}

        //!\brief Clones this object.
        virtual std::unique_ptr<detail::node_impl<T>> clone() const override { return std::make_unique<branch>(*this); }
    };
};

//!\brief Implementation of the CachingPolicy used by tree.
template<typename T, class ThreadingPolicy>
struct cache_on_evaluation
{
    using node_t = node<T, expression_tree::cache_on_evaluation, ThreadingPolicy>; //!< Convenience alias.
    using default_branch_t = detail::default_branch<T, expression_tree::cache_on_evaluation, ThreadingPolicy>; //!< Convenience alias.

    //!\brief Implementation of a branch class that performs caching on evaluation.
    //!
    //! A caching-on-evaluation branch will apply its operation on its children when it is evaluated
    //! and cache that value if it is constant (i.e. if its children are of constant value).
    class branch : public default_branch_t
    {
        mutable bool cached; //!< Whether the value of this node can be considered as cached.
        mutable T value;     //!< This node's value, if \c cached is \c true.

        using default_branch_t::f;
        using default_branch_t::l;
        using default_branch_t::r;
        using default_branch_t::constant;

    public:
        //!\brief Default constructor.
        branch(const detail::operation<T>& f, const node_t& l, const node_t& r) : default_branch_t(f, l, r), cached(false) {}

        //!\brief Copy constructor.
        branch(const branch& o) : default_branch_t(o), cached(o.cached), value(o.value) {}

        virtual ~branch() {}

        //!\brief Clones this object.
        virtual std::unique_ptr<detail::node_impl<T>> clone() const override { return std::make_unique<branch>(*this); }

        //! If the value of this branch has been cached already, return it.
        //! Otherwise, evaluate it and determine if this branch is constant.
        //! If it is, consider the value as cached to re-use later.
        virtual T evaluate() const override
        {
            if(cached) return value;

            value = ThreadingPolicy::evaluate(f, l, r);

            if(constant())
            {
                cached = true;
            }

            return value;
        }

        //! When this branch grows (e.g. has its children modified), forget that the value was cached.
        virtual void modified() override
        {
            default_branch_t::modified();

            cached = false;
        }
    };
};

//!\brief Implementation of the CachingPolicy used by tree.
template<typename T, class ThreadingPolicy>
struct cache_on_assignment
{
    using node_t = node<T, expression_tree::cache_on_assignment, ThreadingPolicy>; //!< Convenience alias.
    using default_branch_t = detail::default_branch<T, expression_tree::cache_on_assignment, ThreadingPolicy>; //!< Convenience alias.

    //!\brief Implementation of a branch class that performs caching on assignment of its children.
    //!
    //! When a caching-on-assignment branch's children are assigned to, the branch checks whether its children
    //! are constant. If they are, it applies its operation on them and caches that value.
    class branch : public default_branch_t
    {
        mutable bool cached; //!< Whether the value of this node can be considered as cached.
        mutable T value;     //!< This node's value, if \c cached is \c true.

        using default_branch_t::f;
        using default_branch_t::l;
        using default_branch_t::r;
        using default_branch_t::constant;

    public:
        //!\brief Default constructor.
        branch(const detail::operation<T>& f, const node_t& l, const node_t& r) : default_branch_t(f, l, r), cached(false) {}

        //!\brief Copy constructor.
        branch(const branch& o) : default_branch_t(o), cached(o.cached), value(o.value) {}

        virtual ~branch() {}

        //!\brief Clones this object.
        virtual std::unique_ptr<detail::node_impl<T>> clone() const override { return std::make_unique<branch>(*this); }

        //! If the value of this branch has been cached already, return it.
        virtual T evaluate() const override
        {
            if(cached) return value;

            return value = ThreadingPolicy::evaluate(f, l, r);
        }

        //! When this branch has its children modified, check if they are constant.
        //! If they are, perform the operation and cache the value.
        virtual void modified() override
        {
            default_branch_t::modified();

            if(constant())
            {
                // If this node is constant, cache its value now.
                cached = true;
                value = ThreadingPolicy::evaluate(f, l, r);
            }
            else
            {
                // One or both children are not constant. This node is then not constant.
                cached = false;
            }
        }
    };
};

//!\brief Implements an expression tree.
//!
//!\param T The data type.
//!\param CachingPolicy Caching optimization policy to use. Choices are:
//! - no_caching: no caching optimization is performed.
//! - cache_on_evaluation: caching of branches' values is performed when they are evaluated.
//! - cache_on_assignment: caching of branches' values is performed when they are modified.
//!\param ThreadingPolicy Threading policy to use when evaluating a branch's children. Choices are:
//! - \link expression_tree::sequential sequential\endlink: evaluate children one after the other on a single thread.
//! - \link expression_tree::parallel parallel\endlink: evaluate children in parallel as hardware permits using \c std::async.
template<typename T, template<typename, typename> class CachingPolicy = no_caching, class ThreadingPolicy = sequential>
class tree : public node<T, CachingPolicy, ThreadingPolicy>
{
public:
    virtual ~tree() {}

    //!\brief This tree's root node.
    node<T, CachingPolicy, ThreadingPolicy>& root() { return *this; }
};

}

#endif

/*!
\file expression_tree.h
\brief The only file you need.
\author <NAME>
\version 3.0

\mainpage expression_tree

\tableofcontents

\section introduction Introduction

An expression tree is a tree that stores data in its leaf nodes and operations in its branch nodes.
The tree's value can then be obtained by performing a postorder traversal, applying each branch's operation to its children.

For example, this tree evaluates to (2 * 1 + (2 - \a x)):

\code
(2 * l + r)
   /   \
  1   (l - r)
       /  \
      2    x
\endcode

To build an expression tree with expression_tree::tree, you assign <a href="http://www.google.com/search?q=c%2B%2B+function+object">function objects</a> to branch nodes and either constant values or pointers to variables to leaf nodes.
When your tree is built, call its \link expression_tree::tree::evaluate evaluate \endlink member function to get its value.

\section considerations Technical considerations

This implementation:
- requires a C++14 compliant compiler and standard library.
- is contained in a single header file.
- uses templates heavily.
- specializes the branches and the leaves to reduce space overhead.
- requires RTTI.
- has little in the way of safety checks.
- has been tested with Clang 3.4.

In order to be evaluated, the tree must be correctly formed.
That is, all its leaves must have been given a value.

\section optimizations Optimizations

Two policies are available as template parameters to expression_tree::tree.
The first enables caching the value of certain branches to avoid unnecessary evaluation.
The second enables multi-threaded evaluation. \subsection caching Branch caching Caching optimizations rely on the constness of branches. Once a constant branch has been evaluated, it is not required to evaluate it again. A branch is considered constant when all its children are constant, be they branches or leaves. A leaf is considered constant when its value is assigned a constant value rather than a variable value (i.e. a pointer). The first optimization caches a branch's value when a it is first evaluated. The second, more aggressive optimziation, caches when a branch's children are assigned to. The default policy is \link expression_tree::no_caching no_caching \endlink which performs no caching. \subsubsection evaluation Caching-on-evaluation optimization By instantiating a \link expression_tree::tree tree \endlink with its second template parameter set to \link expression_tree::cache_on_evaluation cache_on_evaluation \endlink, a tree's evaluation will be optimzed by caching-on-evaluation. Caching on evaluation simply consists on remembering a branch's value at the time it is evaluated, if that branch is considered constant. Consider the following tree, where B<SUB>n</SUB> is a branch, C<SUB>n</SUB> is a constant value and x<SUB>n</SUB> is a variable value: \code B1 / \ x1 B2 / \ C2 C3 \endcode When that tree is evaluated, B<SUB>2</SUB>'s value will be cached because its children are constant. If C<SUB>2</SUB> and C<SUB>3</SUB> don't change (i.e. if they are not assigned a different constant value), the second time the tree is evaluated, the evaluation of B<SUB>2</SUB> will return its cached value. It will not perform its operation on its children again. Because one of B<SUB>1</SUB> children is not constant, evaluating B<SUB>1</SUB> will always perform its operation on its two children. 
\subsubsection assignment Caching-on-assignment optimization By instantiating a \link expression_tree::tree tree \endlink with its second template parameter set to \link expression_tree::cache_on_assignment cache_on_assignment \endlink, a tree's evaluation will be optimzed by caching-on-assignment. Caching-on-assignment means that when a branch's children nodes are assigned to, and if those children are constant, the branch's value is evaluated and cached. Consider the following tree, where B<SUB>n</SUB> is a branch and C<SUB>n</SUB> is a constant value: \code B1 / \ C1 B2 / \ C2 C3 \endcode Let's assume that that tree is constructed in the following order: B<SUB>1</SUB>, C<SUB>1</SUB>, B<SUB>2</SUB>, C<SUB>2</SUB>, C<SUB>3</SUB>. Upon assignment of C<SUB>3</SUB>, B<SUB>2</SUB> will be found to be of constant value and be pre-evaluated. This pre-evaluation will continue recursively up the tree for as long as a branch's both children are constant. In this case, B<SUB>1</SUB> will also be pre-evaluated. If C<SUB>1</SUB> had instead been a variable (e.g. \a x), only B<SUB>2</SUB> will have been pre-evaluated. Evaluating B1 would then always perform its operation on its two children. \subsubsection degenerate Degenerate case of caching-on-assignment optimization Given the following tree: \code B1 / \ C1 B2 / \ C2 B3 / \ C3 B4 / . C4 . . Bn / \ Cn Cn+1 \endcode Let's assume that that tree is constructed in the following order: B<SUB>1</SUB>, C<SUB>1</SUB>, B<SUB>2</SUB>, C<SUB>2</SUB>, ..., B<SUB>n</SUB>, C<SUB>n</SUB>, C<SUB>n+1</SUB>. Before C<SUB>n+1</SUB> is assigned to, none of the tree has been pre-evaluated, given that none of its nodes' constness can be confirmed. When C<SUB>n+1</SUB> is assigned to, B<SUB>n</SUB> will be found constant and be evaluated. B<SUB>n-1</SUB> will also be found constant and be evaluated. This will continue until entire tree is evaluated. 
Thus, a single assignment can trigger the equivalent of \link expression_tree::tree::evaluate() evaluate() \endlink. \subsection multithreaded Parallel evaluation This optimization depends on the availability of C++11's \c \<future\> header. It is enabled automatically if your toolchain supports it. To benefit from parallel evaluation, a tree must be instantiated a \link expression_tree::tree tree \endlink with the \link expression_tree::parallel parallel \endlink policy class. When enabled, a branch will evaluate one of its children on the current thread and its other child on a separate, hardware permitting. The decision to actually spawn a seperate thread is left to \c std::async's implementation. For large enough tree's the hardware limit will be reached and branches will start evaluating their children sequentially regardless of their threading policy. The default policy is \link expression_tree::sequential sequential \endlink which evalutes the tree sequentially. \section improvements Future improvements - I'll think of something. I can't help myself. 
\section sample Sample code \include examples/main.cpp \section license License \verbatim Boost Software License - Version 1.0 - August 17th, 2003 Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following: The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \endverbatim */
NEUbugmakers/HeadFile
stack/head.h
// Linked-queue and linked-stack helpers (queue with head sentinel; stack with
// a one-past-top sentinel node that the next push writes into).
//
// Fixes over the previous revision:
//  - include guard added (the header had none);
//  - functions are marked `inline`: they are *defined* in this header, so
//    including it from more than one translation unit violated the ODR;
//  - DeQuene takes `rear` by reference: the old pass-by-value version reset
//    only a local copy when the last element was removed, leaving the
//    caller's rear pointer dangling on the freed node;
//  - allocation failures are checked consistently (pushStack already did).
#ifndef HEADFILE_STACK_HEAD_H
#define HEADFILE_STACK_HEAD_H

#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <string>

// NOTE(review): `using namespace std;` in a header leaks into every includer;
// kept only for source compatibility with existing users of this header.
using namespace std;

// Node of the singly linked queue.
struct LNode{
    int data;
    struct LNode * next;
};

// Node of the linked stack.
struct SNode{
    int data;
    struct SNode *next;
};

// Linked stack handle.
struct LStack{
    struct SNode * base; // pointer to the bottom of the stack
    struct SNode * top;  // pointer to the (sentinel) top of the stack
};

inline struct LNode * InitQuene();                              // initialize a linked queue, returns the head sentinel
inline struct LNode* putQuene(struct LNode* rear, int data);    // enqueue, returns the new rear
inline void DeQuene(struct LNode* top, struct LNode*& rear);    // dequeue (prints the removed value)
inline struct LStack* Stack_create();                           // create an empty stack handle
inline struct LStack* InitStack(struct LStack * stack);         // initialize the linked stack structure
inline struct LStack* pushStack(struct LStack * stack,int e);   // push
inline struct LStack* popStack(struct LStack *stack,int &e);    // pop into e
inline bool judge0Stack(struct LStack * stack );                // true if the stack is empty

// Allocates and returns the queue's head sentinel (data unused, next == NULL).
inline struct LNode * InitQuene(){
    struct LNode * quene= (struct LNode*)malloc(sizeof(struct LNode));
    if(quene == NULL){
        exit(-1);
    }
    quene->next = NULL;
    return quene;
}

// Appends `data` after `rear` and returns the new rear node.
inline struct LNode* putQuene(struct LNode* rear, int data){
    struct LNode* putElem=(struct LNode*)malloc(sizeof(struct LNode));
    if(putElem == NULL){
        exit(-1);
    }
    putElem->data=data;
    putElem->next = NULL;
    rear->next = putElem;
    rear=putElem;
    return rear;
}

// Removes and prints the front element. `rear` is taken by reference so it
// can be reset to the head sentinel when the last element is removed.
inline void DeQuene(struct LNode* top,struct LNode*& rear){
    if(top->next == NULL){
        printf("队列为空\n");
        return ;
    }
    struct LNode* p = top->next;
    printf("%d\n",p->data);
    top->next = p->next;
    if(rear == p){
        rear = top; // removed the last element: rear falls back to the head sentinel
    }
    free(p);
}

// Creates an uninitialized stack handle (base == top == NULL).
inline struct LStack* Stack_create(){
    struct LStack * stack = (struct LStack * )malloc(sizeof(struct LStack));
    if(stack == NULL){
        exit(-1);
    }
    stack->base = stack->top = NULL;
    return stack;
}

// Installs the bottom sentinel node; after this, top == base means "empty".
inline struct LStack* InitStack(struct LStack * stack){
    struct SNode * snode = (struct SNode *)malloc(sizeof(struct SNode));
    if(snode == NULL){
        exit(-1);
    }
    snode->data = -100000000; // sentinel value, never read back
    stack->base = snode;
    snode->next = NULL;
    stack->top = stack->base;
    return stack;
}

// Pushes e: stores it into the current top sentinel, then appends a fresh
// sentinel that becomes the new top.
inline struct LStack* pushStack(struct LStack * stack,int e){
    if(stack->base == NULL){ // stack structure was never initialized
        exit(-1);
    }
    stack->top->data = e;
    struct SNode * snode = (struct SNode *)malloc(sizeof(struct SNode));
    if(snode == NULL){
        exit(-1);
    }
    snode->next = NULL;
    stack->top->next = snode;
    stack->top = snode;
    return stack;
}

// Pops the top value into e. Walks from base to find the node before the top
// sentinel (O(n) — the list is singly linked and only forward-chained).
inline struct LStack* popStack(struct LStack *stack,int &e){
    if (stack->top == stack->base){ // empty stack: e is left untouched
        return stack;
    }
    struct SNode * p = stack->base;
    while ( p->next != stack->top ){
        p = p->next;
    }
    e = p->data;
    struct SNode * q = p->next;
    p->next = NULL;
    stack->top = p;
    free(q);
    q = NULL;
    return stack;
}

// True when the stack holds no elements (top has fallen back to base).
inline bool judge0Stack(struct LStack * stack ){
    if ((stack->top == stack->base) ){
        return true;
    }else{
        return false;
    }
}

#endif // HEADFILE_STACK_HEAD_H
NEUbugmakers/HeadFile
stack/stack_line.h
//
// Created by 46172 on 2020/12/18.
//

#ifndef HEADFILE_STACK_LINE_H
#define HEADFILE_STACK_LINE_H

#include <stdlib.h>
#include <string.h>

// Initial capacity used when a stack is first created.
#define STACK_DEFAULT_CAPACITY 4

// Dynamic-array stack of ints.
typedef struct {
    int *num;      // heap-allocated element buffer
    int size;      // number of elements currently stored
    int capacity;  // allocated slots in `num`
} Stack;

Stack *creatStack();                    // create an empty stack
void expand(Stack *s);                  // grow the backing buffer
void shrink(Stack *s);                  // shrink the backing buffer
int stackIsEmpty(Stack *s);             // whether the stack is empty — presumably nonzero when empty; TODO confirm against the implementation
void stackPushBack(Stack *s, int num);  // push a value
int stackPop(Stack *s);                 // pop and return the top value
int stackTop(Stack *s);                 // peek at the top value
void stackFree(Stack *s);               // destroy the stack

#endif //HEADFILE_STACK_LINE_H
hbenl/protocol
inspector_protocol/crdtp/find_by_first.h
<reponame>hbenl/protocol // Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CRDTP_FIND_BY_FIRST_H_ #define CRDTP_FIND_BY_FIRST_H_ #include <algorithm> #include <cstdint> #include <memory> #include <vector> #include "export.h" #include "span.h" namespace crdtp { // ============================================================================= // FindByFirst - Retrieval from a sorted vector that's keyed by span<uint8_t>. // ============================================================================= // Given a vector of pairs sorted by the first element of each pair, find // the corresponding value given a key to be compared to the first element. // Together with std::inplace_merge and pre-sorting or std::sort, this can // be used to implement a minimalistic equivalent of Chromium's flat_map. // In this variant, the template parameter |T| is a value type and a // |default_value| is provided. template <typename T> T FindByFirst(const std::vector<std::pair<span<uint8_t>, T>>& sorted_by_first, span<uint8_t> key, T default_value) { auto it = std::lower_bound( sorted_by_first.begin(), sorted_by_first.end(), key, [](const std::pair<span<uint8_t>, T>& left, span<uint8_t> right) { return SpanLessThan(left.first, right); }); return (it != sorted_by_first.end() && SpanEquals(it->first, key)) ? it->second : default_value; } // In this variant, the template parameter |T| is a class or struct that's // instantiated in std::unique_ptr, and we return either a T* or a nullptr. 
template <typename T> T* FindByFirst(const std::vector<std::pair<span<uint8_t>, std::unique_ptr<T>>>& sorted_by_first, span<uint8_t> key) { auto it = std::lower_bound( sorted_by_first.begin(), sorted_by_first.end(), key, [](const std::pair<span<uint8_t>, std::unique_ptr<T>>& left, span<uint8_t> right) { return SpanLessThan(left.first, right); }); return (it != sorted_by_first.end() && SpanEquals(it->first, key)) ? it->second.get() : nullptr; } } // namespace crdtp #endif // CRDTP_FIND_BY_FIRST_H_
hbenl/protocol
inspector_protocol/crdtp/span.h
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CRDTP_SPAN_H_
#define CRDTP_SPAN_H_

#include <cstdint>
#include <cstring>
#include <string>

#include "export.h"

namespace crdtp {
// =============================================================================
// span - sequence of bytes
// =============================================================================

// This template is similar to std::span, which will be included in C++20.
// It is a non-owning view: the caller must keep the underlying data alive
// for as long as the span is used.
template <typename T>
class span {
 public:
  using index_type = size_t;

  constexpr span() : data_(nullptr), size_(0) {}
  constexpr span(const T* data, index_type size) : data_(data), size_(size) {}

  constexpr const T* data() const { return data_; }

  constexpr const T* begin() const { return data_; }
  constexpr const T* end() const { return data_ + size_; }

  constexpr const T& operator[](index_type idx) const { return data_[idx]; }

  // No bounds checking is performed on |offset| / |count|.
  constexpr span<T> subspan(index_type offset, index_type count) const {
    return span(data_ + offset, count);
  }

  constexpr span<T> subspan(index_type offset) const {
    return span(data_ + offset, size_ - offset);
  }

  constexpr bool empty() const { return size_ == 0; }

  constexpr index_type size() const { return size_; }
  constexpr index_type size_bytes() const { return size_ * sizeof(T); }

 private:
  const T* data_;
  index_type size_;
};

// Array (string literal) overload: N includes the terminating NUL, which is
// excluded from the span — hence N - 1.
template <size_t N>
constexpr span<uint8_t> SpanFrom(const char (&str)[N]) {
  return span<uint8_t>(reinterpret_cast<const uint8_t*>(str), N - 1);
}

// Pointer overload: a null |str| yields an empty span.
constexpr inline span<uint8_t> SpanFrom(const char* str) {
  return str ? span<uint8_t>(reinterpret_cast<const uint8_t*>(str), strlen(str))
             : span<uint8_t>();
}

inline span<uint8_t> SpanFrom(const std::string& v) {
  return span<uint8_t>(reinterpret_cast<const uint8_t*>(v.data()), v.size());
}

// This SpanFrom routine works for std::vector<uint8_t> and
// std::vector<uint16_t>, but also for base::span<const uint8_t> in Chromium.
// NOTE(review): this file uses std::enable_if_t / std::is_unsigned without
// including <type_traits> directly; it presumably arrives transitively via
// <string> — confirm, or add the include.
template <typename C,
          typename = std::enable_if_t<
              std::is_unsigned<typename C::value_type>{} &&
              std::is_member_function_pointer<decltype(&C::size)>{}>>
inline span<typename C::value_type> SpanFrom(const C& v) {
  return span<typename C::value_type>(v.data(), v.size());
}

// Less than / equality comparison functions for sorting / searching for byte
// spans. These are similar to absl::string_view's < and == operators.
CRDTP_EXPORT bool SpanLessThan(span<uint8_t> x, span<uint8_t> y) noexcept;
CRDTP_EXPORT bool SpanEquals(span<uint8_t> x, span<uint8_t> y) noexcept;

// Comparator functor wrapping SpanLessThan, for use with ordered containers
// and algorithms keyed by span<uint8_t>.
struct SpanLt {
  bool operator()(span<uint8_t> l, span<uint8_t> r) const {
    return SpanLessThan(l, r);
  }
};
}  // namespace crdtp

#endif  // CRDTP_SPAN_H_
hbenl/protocol
inspector_protocol/crdtp/frontend_channel.h
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CRDTP_FRONTEND_CHANNEL_H_
#define CRDTP_FRONTEND_CHANNEL_H_

#include <cstdint>
#include <memory>
#include "export.h"
#include "serializable.h"
#include "span.h"

namespace crdtp {
// =============================================================================
// FrontendChannel - For sending notifications and responses to protocol clients
// =============================================================================
class CRDTP_EXPORT FrontendChannel {
 public:
  virtual ~FrontendChannel() = default;

  // Sends protocol responses and notifications. The |call_id| parameter is
  // seemingly redundant because it's also included in the message, but
  // responses may be sent from an untrusted source to a trusted process (e.g.
  // from Chromium's renderer (blink) to the browser process), which needs
  // to be able to match the response to an earlier request without parsing the
  // message.
  virtual void SendProtocolResponse(int call_id,
                                    std::unique_ptr<Serializable> message) = 0;

  virtual void SendProtocolNotification(
      std::unique_ptr<Serializable> message) = 0;

  // FallThrough indicates that |message| should be handled in another layer.
  // Usually this means the layer responding to the message didn't handle it,
  // but in some cases messages are handled by multiple layers (e.g. both
  // the embedder and the content layer in Chromium).
  virtual void FallThrough(int call_id,
                           span<uint8_t> method,
                           span<uint8_t> message) = 0;

  // Session implementations may queue notifications for performance or
  // other considerations; this is a hook for domain handlers to manually flush.
  virtual void FlushProtocolNotifications() = 0;
};
}  // namespace crdtp

#endif  // CRDTP_FRONTEND_CHANNEL_H_
Giperion/The-Forge
Common_3/Renderer/IRenderer.h
/* * Copyright (c) 2018-2020 The Forge Interactive Inc. * * This file is part of The-Forge * (see https://github.com/ConfettiFX/The-Forge). * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #pragma once // // default capability levels of the renderer // #if !defined(RENDERER_CUSTOM_MAX) enum { MAX_INSTANCE_EXTENSIONS = 64, MAX_DEVICE_EXTENSIONS = 64, MAX_GPUS = 10, MAX_RENDER_TARGET_ATTACHMENTS = 8, MAX_SUBMIT_CMDS = 20, // max number of command lists / command buffers MAX_SUBMIT_WAIT_SEMAPHORES = 8, MAX_SUBMIT_SIGNAL_SEMAPHORES = 8, MAX_PRESENT_WAIT_SEMAPHORES = 8, MAX_VERTEX_BINDINGS = 15, MAX_VERTEX_ATTRIBS = 15, MAX_SEMANTIC_NAME_LENGTH = 128, MAX_MIP_LEVELS = 0xFFFFFFFF, MAX_GPU_VENDOR_STRING_LENGTH = 64 //max size for GPUVendorPreset strings }; #endif #if defined(DIRECT3D11) #include <d3d11_1.h> #include <dxgi1_2.h> #endif #if defined(_DURANGO) #ifndef DIRECT3D12 #define DIRECT3D12 #endif #include "../../Xbox/Common_3/Renderer/XBOXPrivateHeaders.h" #elif defined(DIRECT3D12) #include <d3d12.h> #include <dxgi1_5.h> #include <dxgidebug.h> #endif #if defined(VULKAN) #if defined(_WIN32) #define VK_USE_PLATFORM_WIN32_KHR #elif defined(__ANDROID__) #ifndef VK_USE_PLATFORM_ANDROID_KHR #define VK_USE_PLATFORM_ANDROID_KHR #endif #elif 
defined(__linux__) && !defined(VK_USE_PLATFORM_GGP) #define VK_USE_PLATFORM_XLIB_KHR //Use Xlib or Xcb as display server, defaults to Xlib #endif #if defined(NX64) #define VK_USE_PLATFORM_VI_NN #include <vulkan/vulkan.h> #include "../../Switch/Common_3/Renderer/Vulkan/NX/NXVulkanExt.h" #else #include "../ThirdParty/OpenSource/volk/volk.h" #endif #endif #if defined(METAL) #import <MetalKit/MetalKit.h> #endif #if defined(ORBIS) #include "../../PS4/Common_3/Renderer/Orbis/OrbisStructs.h" #endif #if defined(VULKAN) // Set this define to enable renderdoc layer // NOTE: Setting this define will disable use of the khr dedicated allocation extension since it conflicts with the renderdoc capture layer //#define USE_RENDER_DOC // Raytracing #ifdef VK_NV_RAY_TRACING_SPEC_VERSION #define ENABLE_RAYTRACING #endif #elif defined(DIRECT3D12) //#define USE_PIX // Raytracing #ifdef D3D12_RAYTRACING_AABB_BYTE_ALIGNMENT #define ENABLE_RAYTRACING #endif #elif defined(METAL) #define ENABLE_RAYTRACING #endif #include "../OS/Interfaces/IOperatingSystem.h" #include "../OS/Interfaces/IThread.h" #include "../ThirdParty/OpenSource/tinyimageformat/tinyimageformat_base.h" #ifdef __cplusplus #ifndef MAKE_ENUM_FLAG #define MAKE_ENUM_FLAG(TYPE, ENUM_TYPE) \ static inline ENUM_TYPE operator|(ENUM_TYPE a, ENUM_TYPE b) { return (ENUM_TYPE)((TYPE)(a) | (TYPE)(b)); } \ static inline ENUM_TYPE operator&(ENUM_TYPE a, ENUM_TYPE b) { return (ENUM_TYPE)((TYPE)(a) & (TYPE)(b)); } \ static inline ENUM_TYPE operator|=(ENUM_TYPE& a, ENUM_TYPE b) { return a = (a | b); } \ static inline ENUM_TYPE operator&=(ENUM_TYPE& a, ENUM_TYPE b) { return a = (a & b); } #endif #else #define MAKE_ENUM_FLAG(TYPE, ENUM_TYPE) #endif typedef enum RendererApi { RENDERER_API_D3D12 = 0, RENDERER_API_VULKAN, RENDERER_API_METAL, RENDERER_API_XBOX_D3D12, RENDERER_API_D3D11, RENDERER_API_ORBIS } RendererApi; typedef enum LogType { LOG_TYPE_INFO = 0, LOG_TYPE_WARN, LOG_TYPE_DEBUG, LOG_TYPE_ERROR } LogType; typedef enum QueueType { 
QUEUE_TYPE_GRAPHICS = 0, QUEUE_TYPE_TRANSFER, QUEUE_TYPE_COMPUTE, MAX_QUEUE_TYPE } QueueType; typedef enum QueueFlag { QUEUE_FLAG_NONE = 0x0, QUEUE_FLAG_DISABLE_GPU_TIMEOUT = 0x1, QUEUE_FLAG_INIT_MICROPROFILE = 0x2, MAX_QUEUE_FLAG = 0xFFFFFFFF } QueueFlag; MAKE_ENUM_FLAG(uint32_t, QueueFlag) typedef enum QueuePriority { QUEUE_PRIORITY_NORMAL, QUEUE_PRIORITY_HIGH, QUEUE_PRIORITY_GLOBAL_REALTIME, MAX_QUEUE_PRIORITY } QueuePriority; typedef enum LoadActionType { LOAD_ACTION_DONTCARE, LOAD_ACTION_LOAD, LOAD_ACTION_CLEAR, MAX_LOAD_ACTION } LoadActionType; typedef void(*LogFn)(LogType, const char*, const char*); typedef enum ResourceState { RESOURCE_STATE_UNDEFINED = 0, RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER = 0x1, RESOURCE_STATE_INDEX_BUFFER = 0x2, RESOURCE_STATE_RENDER_TARGET = 0x4, RESOURCE_STATE_UNORDERED_ACCESS = 0x8, RESOURCE_STATE_DEPTH_WRITE = 0x10, RESOURCE_STATE_DEPTH_READ = 0x20, RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE = 0x40, RESOURCE_STATE_SHADER_RESOURCE = 0x40 | 0x80, RESOURCE_STATE_STREAM_OUT = 0x100, RESOURCE_STATE_INDIRECT_ARGUMENT = 0x200, RESOURCE_STATE_COPY_DEST = 0x400, RESOURCE_STATE_COPY_SOURCE = 0x800, RESOURCE_STATE_GENERIC_READ = (((((0x1 | 0x2) | 0x40) | 0x80) | 0x200) | 0x800), RESOURCE_STATE_PRESENT = 0x4000, RESOURCE_STATE_COMMON = 0x8000, } ResourceState; MAKE_ENUM_FLAG(uint32_t, ResourceState) /// Choosing Memory Type typedef enum ResourceMemoryUsage { /// No intended memory usage specified. RESOURCE_MEMORY_USAGE_UNKNOWN = 0, /// Memory will be used on device only, no need to be mapped on host. RESOURCE_MEMORY_USAGE_GPU_ONLY = 1, /// Memory will be mapped on host. Could be used for transfer to device. RESOURCE_MEMORY_USAGE_CPU_ONLY = 2, /// Memory will be used for frequent (dynamic) updates from host and reads on device. RESOURCE_MEMORY_USAGE_CPU_TO_GPU = 3, /// Memory will be used for writing on device and readback on host. 
RESOURCE_MEMORY_USAGE_GPU_TO_CPU = 4, RESOURCE_MEMORY_USAGE_COUNT, RESOURCE_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF } ResourceMemoryUsage; // Forward declarations typedef struct Renderer Renderer; typedef struct Queue Queue; typedef struct Pipeline Pipeline; typedef struct BlendState BlendState; typedef struct Buffer Buffer; typedef struct Texture Texture; typedef struct RenderTarget RenderTarget; typedef struct ShaderReflectionInfo ShaderReflectionInfo; typedef struct Shader Shader; typedef struct DescriptorSet DescriptorSet; typedef struct DescriptorIndexMap DescriptorIndexMap; typedef struct ShaderDescriptors ShaderDescriptors; // Raytracing typedef struct Raytracing Raytracing; typedef struct RaytracingHitGroup RaytracingHitGroup; typedef struct AccelerationStructure AccelerationStructure; typedef struct EsramManager EsramManager; typedef struct IndirectDrawArguments { uint32_t mVertexCount; uint32_t mInstanceCount; uint32_t mStartVertex; uint32_t mStartInstance; } IndirectDrawArguments; typedef struct IndirectDrawIndexArguments { uint32_t mIndexCount; uint32_t mInstanceCount; uint32_t mStartIndex; uint32_t mVertexOffset; uint32_t mStartInstance; } IndirectDrawIndexArguments; typedef struct IndirectDispatchArguments { uint32_t mGroupCountX; uint32_t mGroupCountY; uint32_t mGroupCountZ; } IndirectDispatchArguments; typedef enum IndirectArgumentType { INDIRECT_DRAW, INDIRECT_DRAW_INDEX, INDIRECT_DISPATCH, INDIRECT_VERTEX_BUFFER, INDIRECT_INDEX_BUFFER, INDIRECT_CONSTANT, INDIRECT_DESCRIPTOR_TABLE, // only for vulkan INDIRECT_PIPELINE, // only for vulkan now, probally will add to dx when it comes to xbox INDIRECT_CONSTANT_BUFFER_VIEW, // only for dx INDIRECT_SHADER_RESOURCE_VIEW, // only for dx INDIRECT_UNORDERED_ACCESS_VIEW, // only for dx INDIRECT_COMMAND_BUFFER, // metal ICB INDIRECT_COMMAND_BUFFER_OPTIMIZE // metal indirect buffer optimization } IndirectArgumentType; /************************************************/ typedef enum DescriptorType { 
DESCRIPTOR_TYPE_UNDEFINED = 0, DESCRIPTOR_TYPE_SAMPLER = 0x01, // SRV Read only texture DESCRIPTOR_TYPE_TEXTURE = (DESCRIPTOR_TYPE_SAMPLER << 1), /// UAV Texture DESCRIPTOR_TYPE_RW_TEXTURE = (DESCRIPTOR_TYPE_TEXTURE << 1), // SRV Read only buffer DESCRIPTOR_TYPE_BUFFER = (DESCRIPTOR_TYPE_RW_TEXTURE << 1), DESCRIPTOR_TYPE_BUFFER_RAW = (DESCRIPTOR_TYPE_BUFFER | (DESCRIPTOR_TYPE_BUFFER << 1)), /// UAV Buffer DESCRIPTOR_TYPE_RW_BUFFER = (DESCRIPTOR_TYPE_BUFFER << 2), DESCRIPTOR_TYPE_RW_BUFFER_RAW = (DESCRIPTOR_TYPE_RW_BUFFER | (DESCRIPTOR_TYPE_RW_BUFFER << 1)), /// Uniform buffer DESCRIPTOR_TYPE_UNIFORM_BUFFER = (DESCRIPTOR_TYPE_RW_BUFFER << 2), /// Push constant / Root constant DESCRIPTOR_TYPE_ROOT_CONSTANT = (DESCRIPTOR_TYPE_UNIFORM_BUFFER << 1), /// IA DESCRIPTOR_TYPE_VERTEX_BUFFER = (DESCRIPTOR_TYPE_ROOT_CONSTANT << 1), DESCRIPTOR_TYPE_INDEX_BUFFER = (DESCRIPTOR_TYPE_VERTEX_BUFFER << 1), DESCRIPTOR_TYPE_INDIRECT_BUFFER = (DESCRIPTOR_TYPE_INDEX_BUFFER << 1), /// Cubemap SRV DESCRIPTOR_TYPE_TEXTURE_CUBE = (DESCRIPTOR_TYPE_TEXTURE | (DESCRIPTOR_TYPE_INDIRECT_BUFFER << 1)), /// RTV / DSV per mip slice DESCRIPTOR_TYPE_RENDER_TARGET_MIP_SLICES = (DESCRIPTOR_TYPE_INDIRECT_BUFFER << 2), /// RTV / DSV per array slice DESCRIPTOR_TYPE_RENDER_TARGET_ARRAY_SLICES = (DESCRIPTOR_TYPE_RENDER_TARGET_MIP_SLICES << 1), /// RTV / DSV per depth slice DESCRIPTOR_TYPE_RENDER_TARGET_DEPTH_SLICES = (DESCRIPTOR_TYPE_RENDER_TARGET_ARRAY_SLICES << 1), DESCRIPTOR_TYPE_RAY_TRACING = (DESCRIPTOR_TYPE_RENDER_TARGET_DEPTH_SLICES << 1), #if defined(VULKAN) /// Subpass input (descriptor type only available in Vulkan) DESCRIPTOR_TYPE_INPUT_ATTACHMENT = (DESCRIPTOR_TYPE_RAY_TRACING << 1), DESCRIPTOR_TYPE_TEXEL_BUFFER = (DESCRIPTOR_TYPE_INPUT_ATTACHMENT << 1), DESCRIPTOR_TYPE_RW_TEXEL_BUFFER = (DESCRIPTOR_TYPE_TEXEL_BUFFER << 1), #endif #if defined(METAL) DESCRIPTOR_TYPE_ARGUMENT_BUFFER = (DESCRIPTOR_TYPE_RAY_TRACING << 1), DESCRIPTOR_TYPE_INDIRECT_COMMAND_BUFFER = (DESCRIPTOR_TYPE_ARGUMENT_BUFFER << 
1), DESCRIPTOR_TYPE_RENDER_PIPELINE_STATE = (DESCRIPTOR_TYPE_INDIRECT_COMMAND_BUFFER << 1), #endif } DescriptorType; MAKE_ENUM_FLAG(uint32_t, DescriptorType) typedef enum SampleCount { SAMPLE_COUNT_1 = 1, SAMPLE_COUNT_2 = 2, SAMPLE_COUNT_4 = 4, SAMPLE_COUNT_8 = 8, SAMPLE_COUNT_16 = 16, } SampleCount; #ifdef METAL typedef enum ShaderStage { SHADER_STAGE_NONE = 0, SHADER_STAGE_VERT = 0X00000001, SHADER_STAGE_FRAG = 0X00000002, SHADER_STAGE_COMP = 0X00000004, SHADER_STAGE_ALL_GRAPHICS = ((uint32_t)SHADER_STAGE_VERT | (uint32_t)SHADER_STAGE_FRAG), SHADER_STAGE_COUNT = 3, } ShaderStage; #else typedef enum ShaderStage { SHADER_STAGE_NONE = 0, SHADER_STAGE_VERT = 0X00000001, SHADER_STAGE_TESC = 0X00000002, SHADER_STAGE_TESE = 0X00000004, SHADER_STAGE_GEOM = 0X00000008, SHADER_STAGE_FRAG = 0X00000010, SHADER_STAGE_COMP = 0X00000020, SHADER_STAGE_RAYTRACING = 0X00000040, SHADER_STAGE_ALL_GRAPHICS = ((uint32_t)SHADER_STAGE_VERT | (uint32_t)SHADER_STAGE_TESC | (uint32_t)SHADER_STAGE_TESE | (uint32_t)SHADER_STAGE_GEOM | (uint32_t)SHADER_STAGE_FRAG), SHADER_STAGE_HULL = SHADER_STAGE_TESC, SHADER_STAGE_DOMN = SHADER_STAGE_TESE, SHADER_STAGE_COUNT = 7, } ShaderStage; #endif MAKE_ENUM_FLAG(uint32_t, ShaderStage) // This include is placed here because it uses data types defined previously in this file // and forward enums are not allowed for some compilers (Xcode). 
#include "IShaderReflection.h"

/// Primitive assembly mode used when drawing with a graphics pipeline.
typedef enum PrimitiveTopology
{
	PRIMITIVE_TOPO_POINT_LIST = 0,
	PRIMITIVE_TOPO_LINE_LIST,
	PRIMITIVE_TOPO_LINE_STRIP,
	PRIMITIVE_TOPO_TRI_LIST,
	PRIMITIVE_TOPO_TRI_STRIP,
	PRIMITIVE_TOPO_PATCH_LIST,
	PRIMITIVE_TOPO_COUNT,
} PrimitiveTopology;

/// Width of the indices stored in an index buffer.
typedef enum IndexType
{
	INDEX_TYPE_UINT32 = 0,
	INDEX_TYPE_UINT16,
} IndexType;

/// Vertex attribute semantics used to match vertex data to shader inputs.
typedef enum ShaderSemantic
{
	SEMANTIC_UNDEFINED = 0,
	SEMANTIC_POSITION,
	SEMANTIC_NORMAL,
	SEMANTIC_COLOR,
	SEMANTIC_TANGENT,
	SEMANTIC_BITANGENT,
	SEMANTIC_JOINTS,
	SEMANTIC_WEIGHTS,
	SEMANTIC_TEXCOORD0,
	SEMANTIC_TEXCOORD1,
	SEMANTIC_TEXCOORD2,
	SEMANTIC_TEXCOORD3,
	SEMANTIC_TEXCOORD4,
	SEMANTIC_TEXCOORD5,
	SEMANTIC_TEXCOORD6,
	SEMANTIC_TEXCOORD7,
	SEMANTIC_TEXCOORD8,
	SEMANTIC_TEXCOORD9,
} ShaderSemantic;

/// Blend factor constants (source / destination multipliers).
typedef enum BlendConstant
{
	BC_ZERO = 0,
	BC_ONE,
	BC_SRC_COLOR,
	BC_ONE_MINUS_SRC_COLOR,
	BC_DST_COLOR,
	BC_ONE_MINUS_DST_COLOR,
	BC_SRC_ALPHA,
	BC_ONE_MINUS_SRC_ALPHA,
	BC_DST_ALPHA,
	BC_ONE_MINUS_DST_ALPHA,
	BC_SRC_ALPHA_SATURATE,
	BC_BLEND_FACTOR,
	BC_ONE_MINUS_BLEND_FACTOR,
	MAX_BLEND_CONSTANTS
} BlendConstant;

/// Operator combining the factor-scaled source and destination values.
typedef enum BlendMode
{
	BM_ADD,
	BM_SUBTRACT,
	BM_REVERSE_SUBTRACT,
	BM_MIN,
	BM_MAX,
	MAX_BLEND_MODES,
} BlendMode;

/// Comparison function (used e.g. by SamplerDesc::mCompareFunc).
typedef enum CompareMode
{
	CMP_NEVER,
	CMP_LESS,
	CMP_EQUAL,
	CMP_LEQUAL,
	CMP_GREATER,
	CMP_NOTEQUAL,
	CMP_GEQUAL,
	CMP_ALWAYS,
	MAX_COMPARE_MODES,
} CompareMode;

/// Operation applied to the stencil buffer contents.
typedef enum StencilOp
{
	STENCIL_OP_KEEP,
	STENCIL_OP_SET_ZERO,
	STENCIL_OP_REPLACE,
	STENCIL_OP_INVERT,
	STENCIL_OP_INCR,
	STENCIL_OP_DECR,
	STENCIL_OP_INCR_SAT,
	STENCIL_OP_DECR_SAT,
	MAX_STENCIL_OPS,
} StencilOp;

// Color channel write-mask bits (combine with |).
static const int RED = 0x1;
static const int GREEN = 0x2;
static const int BLUE = 0x4;
static const int ALPHA = 0x8;
static const int ALL = (RED | GREEN | BLUE | ALPHA);
static const int NONE = 0;

// Sentinels meaning "no blend / depth-stencil / rasterizer state".
static const int BS_NONE = -1;
static const int DS_NONE = -1;
static const int RS_NONE = -1;

// Blend states are always attached to one of the eight or more render targets that
// are in a MRT
// Mask constants
typedef enum BlendStateTargets
{
	BLEND_STATE_TARGET_0 = 0x1,
	BLEND_STATE_TARGET_1 = 0x2,
	BLEND_STATE_TARGET_2 = 0x4,
	BLEND_STATE_TARGET_3 = 0x8,
	BLEND_STATE_TARGET_4 = 0x10,
	BLEND_STATE_TARGET_5 = 0x20,
	BLEND_STATE_TARGET_6 = 0x40,
	BLEND_STATE_TARGET_7 = 0x80,
	BLEND_STATE_TARGET_ALL = 0xFF,
} BlendStateTargets;
MAKE_ENUM_FLAG(uint32_t, BlendStateTargets)

/// Triangle face culling mode for the rasterizer.
typedef enum CullMode
{
	CULL_MODE_NONE = 0,
	CULL_MODE_BACK,
	CULL_MODE_FRONT,
	CULL_MODE_BOTH,
	MAX_CULL_MODES
} CullMode;

/// Winding order defining the front-facing side of a triangle.
typedef enum FrontFace
{
	FRONT_FACE_CCW = 0,
	FRONT_FACE_CW
} FrontFace;

/// Polygon rasterization mode.
typedef enum FillMode
{
	FILL_MODE_SOLID,
	FILL_MODE_WIREFRAME,
	MAX_FILL_MODES
} FillMode;

/// Kind of pipeline state object (see PipelineType usage in RootSignature).
typedef enum PipelineType
{
	PIPELINE_TYPE_UNDEFINED = 0,
	PIPELINE_TYPE_COMPUTE,
	PIPELINE_TYPE_GRAPHICS,
	PIPELINE_TYPE_RAYTRACING,
	PIPELINE_TYPE_COUNT,
} PipelineType;

/// Texture sampling filter (see SamplerDesc min/mag filters).
typedef enum FilterType
{
	FILTER_NEAREST = 0,
	FILTER_LINEAR,
} FilterType;

/// Behavior for texture coordinates outside the [0, 1] range.
typedef enum AddressMode
{
	ADDRESS_MODE_MIRROR,
	ADDRESS_MODE_REPEAT,
	ADDRESS_MODE_CLAMP_TO_EDGE,
	ADDRESS_MODE_CLAMP_TO_BORDER
} AddressMode;

/// Filtering applied between mip levels (see SamplerDesc::mMipMapMode).
typedef enum MipMapMode
{
	MIPMAP_MODE_NEAREST = 0,
	MIPMAP_MODE_LINEAR
} MipMapMode;

/// Which aspects of a depth/stencil attachment to clear (combinable flags).
// NOTE(review): "ClEAR_DEPTH" contains a lower-case 'l' typo; renaming the
// enumerator would break existing callers, so it is kept as-is.
typedef enum DepthStencilClearFlags
{
	ClEAR_DEPTH = 0x01,
	CLEAR_STENCIL = 0x02
} DepthStencilClearFlags;
MAKE_ENUM_FLAG(uint32_t, DepthStencilClearFlags)

/// Creation options controlling how a Buffer's memory is allocated and exposed.
typedef enum BufferCreationFlags
{
	/// Default flag (Buffer will use aliased memory, buffer will not be cpu accessible until mapBuffer is called)
	BUFFER_CREATION_FLAG_NONE = 0x01,
	/// Buffer will allocate its own memory (COMMITTED resource)
	BUFFER_CREATION_FLAG_OWN_MEMORY_BIT = 0x02,
	/// Buffer will be persistently mapped
	BUFFER_CREATION_FLAG_PERSISTENT_MAP_BIT = 0x04,
	/// Use ESRAM to store this buffer
	BUFFER_CREATION_FLAG_ESRAM = 0x08,
	/// Flag to specify not to allocate descriptors for the resource
	BUFFER_CREATION_FLAG_NO_DESCRIPTOR_VIEW_CREATION = 0x10,
#ifdef METAL
	/* ICB (indirect command buffer) flags */
	/// Inherit pipeline in ICB
	BUFFER_CREATION_FLAG_ICB_INHERIT_PIPELINE = 0x100,
	/// Inherit buffers in ICB
	BUFFER_CREATION_FLAG_ICB_INHERIT_BUFFERS = 0x200,
#endif
} BufferCreationFlags;
MAKE_ENUM_FLAG(uint32_t, BufferCreationFlags) typedef enum TextureCreationFlags { /// Default flag (Texture will use default allocation strategy decided by the api specific allocator) TEXTURE_CREATION_FLAG_NONE = 0, /// Texture will allocate its own memory (COMMITTED resource) TEXTURE_CREATION_FLAG_OWN_MEMORY_BIT = 0x01, /// Texture will be allocated in memory which can be shared among multiple processes TEXTURE_CREATION_FLAG_EXPORT_BIT = 0x02, /// Texture will be allocated in memory which can be shared among multiple gpus TEXTURE_CREATION_FLAG_EXPORT_ADAPTER_BIT = 0x04, /// Texture will be imported from a handle created in another process TEXTURE_CREATION_FLAG_IMPORT_BIT = 0x08, /// Use ESRAM to store this texture TEXTURE_CREATION_FLAG_ESRAM = 0x10, /// Use on-tile memory to store this texture TEXTURE_CREATION_FLAG_ON_TILE = 0x20, /// Prevent compression meta data from generating (XBox) TEXTURE_CREATION_FLAG_NO_COMPRESSION = 0x40, /// Force 2D instead of automatically determining dimension based on width, height, depth TEXTURE_CREATION_FLAG_FORCE_2D = 0x80, /// Force 3D instead of automatically determining dimension based on width, height, depth TEXTURE_CREATION_FLAG_FORCE_3D = 0x100, /// Display target TEXTURE_CREATION_FLAG_ALLOW_DISPLAY_TARGET = 0x200, /// Create an sRGB texture. 
TEXTURE_CREATION_FLAG_SRGB = 0x400, } TextureCreationFlags; MAKE_ENUM_FLAG(uint32_t, TextureCreationFlags) typedef enum GPUPresetLevel { GPU_PRESET_NONE = 0, GPU_PRESET_OFFICE, //This means unsupported GPU_PRESET_LOW, GPU_PRESET_MEDIUM, GPU_PRESET_HIGH, GPU_PRESET_ULTRA, GPU_PRESET_COUNT } GPUPresetLevel; typedef struct BufferBarrier { Buffer* pBuffer; ResourceState mNewState; bool mSplit; } BufferBarrier; typedef struct TextureBarrier { Texture* pTexture; ResourceState mNewState; bool mSplit; } TextureBarrier; typedef struct RenderTargetBarrier { RenderTarget* pRenderTarget; ResourceState mNewState; bool mSplit; } RenderTargetBarrier; typedef struct ReadRange { uint64_t mOffset; uint64_t mSize; } ReadRange; typedef struct Region3D { uint32_t mXOffset; uint32_t mYOffset; uint32_t mZOffset; uint32_t mWidth; uint32_t mHeight; uint32_t mDepth; } Region3D; typedef struct Extent3D { uint32_t mWidth; uint32_t mHeight; uint32_t mDepth; } Extent3D; typedef enum QueryType { QUERY_TYPE_TIMESTAMP = 0, QUERY_TYPE_PIPELINE_STATISTICS, QUERY_TYPE_OCCLUSION, QUERY_TYPE_COUNT, } QueryType; typedef struct QueryPoolDesc { QueryType mType; uint32_t mQueryCount; uint32_t mNodeIndex; } QueryPoolDesc; typedef struct QueryDesc { uint32_t mIndex; } QueryDesc; typedef struct QueryPool { QueryPoolDesc mDesc; #if defined(DIRECT3D12) ID3D12QueryHeap* pDxQueryHeap; #endif #if defined(VULKAN) VkQueryPool pVkQueryPool; #endif #if defined(DIRECT3D11) ID3D11Query** ppDxQueries; #endif #if defined(METAL) uint64_t mGpuTimestampStart; uint64_t mGpuTimestampEnd; #endif #if defined(ORBIS) OrbisQueryPool mStruct; #endif } QueryPool; /// Data structure holding necessary info to create a Buffer typedef struct BufferDesc { /// Size of the buffer (in bytes) uint64_t mSize; /// Alignment uint32_t mAlignment; /// Decides which memory heap buffer will use (default, upload, readback) ResourceMemoryUsage mMemoryUsage; /// Creation flags of the buffer BufferCreationFlags mFlags; /// What state will the buffer get 
created in ResourceState mStartState; /// Specifies whether the buffer will have 32 bit or 16 bit indices (applicable to BUFFER_USAGE_INDEX) IndexType mIndexType; /// Vertex stride of the buffer (applicable to BUFFER_USAGE_VERTEX) uint32_t mVertexStride; /// Index of the first element accessible by the SRV/UAV (applicable to BUFFER_USAGE_STORAGE_SRV, BUFFER_USAGE_STORAGE_UAV) uint64_t mFirstElement; /// Number of elements in the buffer (applicable to BUFFER_USAGE_STORAGE_SRV, BUFFER_USAGE_STORAGE_UAV) uint64_t mElementCount; /// Size of each element (in bytes) in the buffer (applicable to BUFFER_USAGE_STORAGE_SRV, BUFFER_USAGE_STORAGE_UAV) uint64_t mStructStride; /// ICB draw type IndirectArgumentType mICBDrawType; /// ICB max vertex buffers slots count uint32_t mICBMaxVertexBufferBind; /// ICB max vertex buffers slots count uint32_t mICBMaxFragmentBufferBind; /// Set this to specify a counter buffer for this buffer (applicable to BUFFER_USAGE_STORAGE_SRV, BUFFER_USAGE_STORAGE_UAV) struct Buffer* pCounterBuffer; /// Format of the buffer (applicable to typed storage buffers (Buffer<T>) TinyImageFormat mFormat; /// Flags specifying the suitable usage of this buffer (Uniform buffer, Vertex Buffer, Index Buffer,...) 
DescriptorType mDescriptors; /// Debug name used in gpu profile const wchar_t* pDebugName; uint32_t* pSharedNodeIndices; uint32_t mNodeIndex; uint32_t mSharedNodeIndexCount; } BufferDesc; typedef struct Buffer { #if !defined(ORBIS) /// Position of dynamic buffer memory in the mapped resource uint64_t mPositionInHeap; #endif /// CPU address of the mapped buffer (appliacable to buffers created in CPU accessible heaps (CPU, CPU_TO_GPU, GPU_TO_CPU) void* pCpuMappedAddress; #if defined(DIRECT3D12) /// GPU Address D3D12_GPU_VIRTUAL_ADDRESS mDxGpuAddress; /// Descriptor handle of the CBV in a CPU visible descriptor heap (applicable to BUFFER_USAGE_UNIFORM) D3D12_CPU_DESCRIPTOR_HANDLE mDxCbvHandle; /// Descriptor handle of the SRV in a CPU visible descriptor heap (applicable to BUFFER_USAGE_STORAGE_SRV) D3D12_CPU_DESCRIPTOR_HANDLE mDxSrvHandle; /// Descriptor handle of the UAV in a CPU visible descriptor heap (applicable to BUFFER_USAGE_STORAGE_UAV) D3D12_CPU_DESCRIPTOR_HANDLE mDxUavHandle; /// Native handle of the underlying resource ID3D12Resource* pDxResource; /// Contains resource allocation info such as parent heap, offset in heap struct ResourceAllocation* pDxAllocation; #endif #if defined(DIRECT3D11) ID3D11Buffer* pDxResource; ID3D11ShaderResourceView* pDxSrvHandle; ID3D11UnorderedAccessView* pDxUavHandle; #endif #if defined(VULKAN) /// Native handle of the underlying resource VkBuffer pVkBuffer; /// Buffer view VkBufferView pVkStorageTexelView; VkBufferView pVkUniformTexelView; /// Contains resource allocation info such as parent heap, offset in heap struct VmaAllocation_T* pVkAllocation; /// Description for creating the descriptor for this buffer (applicable to BUFFER_USAGE_UNIFORM, BUFFER_USAGE_STORAGE_SRV, BUFFER_USAGE_STORAGE_UAV) VkDescriptorBufferInfo mVkBufferInfo; #endif #if defined(METAL) /// Contains resource allocation info such as parent heap, offset in heap struct ResourceAllocation* pMtlAllocation; /// Native handle of the underlying resource union { 
id<MTLBuffer> mtlBuffer; id<MTLIndirectCommandBuffer> mtlIndirectCommandBuffer; }; #endif #if defined(ORBIS) OrbisBuffer mStruct; #endif /// Buffer creation info BufferDesc mDesc; /// Current state of the buffer ResourceState mCurrentState; /// State of the buffer before mCurrentState (used for state tracking during a split barrier) ResourceState mPreviousState; #if defined(DIRECT3D12) DXGI_FORMAT mDxIndexFormat; #endif #if defined(DIRECT3D11) DXGI_FORMAT mDxIndexFormat; #endif } Buffer; typedef struct ClearValue { // Anonymous structures generates warnings in C++11. // See discussion here for more info: https://stackoverflow.com/questions/2253878/why-does-c-disallow-anonymous-structs #if defined(_MSC_VER) #pragma warning(push) #pragma warning(disable : 4201) // warning C4201: nonstandard extension used: nameless struct/union #endif union { struct { float r; float g; float b; float a; }; struct { float depth; uint32 stencil; }; }; #if defined(_MSC_VER) #pragma warning(pop) #endif } ClearValue; /// Data structure holding necessary info to create a Texture typedef struct TextureDesc { /// Texture creation flags (decides memory allocation strategy, sharing access,...) TextureCreationFlags mFlags; /// Width uint32_t mWidth; /// Height uint32_t mHeight; /// Depth (Should be 1 if not a mType is not TEXTURE_TYPE_3D) uint32_t mDepth; /// Texture array size (Should be 1 if texture is not a texture array or cubemap) uint32_t mArraySize; /// Number of mip levels uint32_t mMipLevels; /// Number of multisamples per pixel (currently Textures created with mUsage TEXTURE_USAGE_SAMPLED_IMAGE only support SAMPLE_COUNT_1) SampleCount mSampleCount; /// The image quality level. The higher the quality, the lower the performance. 
The valid range is between zero and the value appropriate for mSampleCount uint32_t mSampleQuality; /// image format TinyImageFormat mFormat; /// Optimized clear value (recommended to use this same value when clearing the rendertarget) ClearValue mClearValue; /// What state will the texture get created in ResourceState mStartState; /// Descriptor creation DescriptorType mDescriptors; /// Pointer to native texture handle if the texture does not own underlying resource const void* pNativeHandle; /// Debug name used in gpu profile const wchar_t* pDebugName; /// GPU indices to share this texture uint32_t* pSharedNodeIndices; /// Number of GPUs to share this texture uint32_t mSharedNodeIndexCount; /// GPU which will own this texture uint32_t mNodeIndex; /// Is the texture CPU accessible (applicable on hardware supporting CPU mapped textures (UMA)) bool mHostVisible; } TextureDesc; // Virtual texture page as a part of the partially resident texture // Contains memory bindings, offsets and status information struct VirtualTexturePage { /// Buffer which contains the image data and be used for copying it to Virtual texture Buffer* pIntermediateBuffer; /// Miplevel for this page uint32_t mipLevel; /// Array layer for this page uint32_t layer; /// Index for this page uint32_t index; #if defined(DIRECT3D12) /// Offset for this page D3D12_TILED_RESOURCE_COORDINATE offset; /// Size for this page D3D12_TILED_RESOURCE_COORDINATE extent; /// Byte size for this page uint32_t size; #endif #if defined(VULKAN) /// Offset for this page VkOffset3D offset; /// Size for this page VkExtent3D extent; /// Sparse image memory bind for this page VkSparseImageMemoryBind imageMemoryBind; /// Byte size for this page VkDeviceSize size; #endif VirtualTexturePage() { pIntermediateBuffer = NULL; #if defined(VULKAN) imageMemoryBind.memory = VK_NULL_HANDLE; #endif } }; typedef struct Texture { #if defined(DIRECT3D12) /// Descriptor handle of the SRV in a CPU visible descriptor heap (applicable to 
TEXTURE_USAGE_SAMPLED_IMAGE) D3D12_CPU_DESCRIPTOR_HANDLE mDxSRVDescriptor; D3D12_CPU_DESCRIPTOR_HANDLE* pDxUAVDescriptors; /// Native handle of the underlying resource ID3D12Resource* pDxResource; /// Contains resource allocation info such as parent heap, offset in heap struct ResourceAllocation* pDxAllocation; /// Memory for Sparse texture ID3D12Heap* pSparseImageMemory; /// Array for Sparse texture's pages void* pSparseCoordinates; /// Array for heap memory offsets void* pHeapRangeStartOffsets; #endif #if defined(VULKAN) /// Opaque handle used by shaders for doing read/write operations on the texture VkImageView pVkSRVDescriptor; /// Opaque handle used by shaders for doing read/write operations on the texture VkImageView* pVkUAVDescriptors; /// Opaque handle used by shaders for doing read/write operations on the texture VkImageView* pVkSRVStencilDescriptor; /// Native handle of the underlying resource VkImage pVkImage; /// Contains resource allocation info such as parent heap, offset in heap struct VmaAllocation_T* pVkAllocation; /// Flags specifying which aspects (COLOR,DEPTH,STENCIL) are included in the pVkImageView VkImageAspectFlags mVkAspectMask; /// Sparse queue binding information VkBindSparseInfo mBindSparseInfo; /// Sparse image memory bindings of all memory-backed virtual tables void* pSparseImageMemoryBinds; /// Sparse ?aque memory bindings for the mip tail (if present) void* pOpaqueMemoryBinds; /// First mip level in mip tail uint32_t mMipTailStart; /// Lstly filled mip level in mip tail uint32_t mLastFilledMip; /// Memory type for Sparse texture's memory uint32_t mSparseMemoryTypeIndex; /// Sparse image memory bind info VkSparseImageMemoryBindInfo mImageMemoryBindInfo; /// Sparse image opaque memory bind info (mip tail) VkSparseImageOpaqueMemoryBindInfo mOpaqueMemoryBindInfo; /// First mip level in mip tail uint32_t mipTailStart; #endif #if defined(METAL) /// Contains resource allocation info such as parent heap, offset in heap struct 
ResourceAllocation* pMtlAllocation; /// Native handle of the underlying resource id<MTLTexture> mtlTexture; id<MTLTexture> __strong* pMtlUAVDescriptors; id mpsTextureAllocator; MTLPixelFormat mtlPixelFormat; bool mIsCompressed; #endif #if defined(DIRECT3D11) ID3D11Resource* pDxResource; ID3D11ShaderResourceView* pDxSRVDescriptor; ID3D11UnorderedAccessView** pDxUAVDescriptors; #endif #if defined(ORBIS) OrbisTexture mStruct; /// Contains resource allocation info such as parent heap, offset in heap #endif /// Virtual Texture members /// Contains all virtual pages of the texture void* pPages; /// Visibility data Buffer* mVisibility; /// PrevVisibility data Buffer* mPrevVisibility; /// Alive Page's Index Buffer* mAlivePage; /// Page's Index which should be removed Buffer* mRemovePage; /// a { uint alive; uint remove; } count of pages which are alive or should be removed Buffer* mPageCounts; /// Original Pixel image data void* mVirtualImageData; /// Total pages count uint32_t mVirtualPageTotalCount; /// Sparse Virtual Texture Width uint64_t mSparseVirtualTexturePageWidth; /// Sparse Virtual Texture Height uint64_t mSparseVirtualTexturePageHeight; /// Texture creation info TextureDesc mDesc; /// Size of the texture (in bytes) uint64_t mTextureSize; /// Current state of the texture ResourceState mCurrentState; /// State of the texture before mCurrentState (used for state tracking during a split barrier) ResourceState mPreviousState; /// This value will be false if the underlying resource is not owned by the texture (swapchain textures,...) bool mOwnsImage; } Texture; typedef struct RenderTargetDesc { /// Texture creation flags (decides memory allocation strategy, sharing access,...) 
TextureCreationFlags mFlags; /// Width uint32_t mWidth; /// Height uint32_t mHeight; /// Depth (Should be 1 if not a mType is not TEXTURE_TYPE_3D) uint32_t mDepth; /// Texture array size (Should be 1 if texture is not a texture array or cubemap) uint32_t mArraySize; /// Number of mip levels uint32_t mMipLevels; /// MSAA SampleCount mSampleCount; /// Internal image format TinyImageFormat mFormat; /// Optimized clear value (recommended to use this same value when clearing the rendertarget) ClearValue mClearValue; /// The image quality level. The higher the quality, the lower the performance. The valid range is between zero and the value appropriate for mSampleCount uint32_t mSampleQuality; /// Descriptor creation DescriptorType mDescriptors; const void* pNativeHandle; /// Debug name used in gpu profile const wchar_t* pDebugName; /// GPU indices to share this texture uint32_t* pSharedNodeIndices; /// Number of GPUs to share this texture uint32_t mSharedNodeIndexCount; /// GPU which will own this texture uint32_t mNodeIndex; } RenderTargetDesc; typedef struct RenderTarget { RenderTargetDesc mDesc; Texture* pTexture; #if defined(DIRECT3D12) D3D12_CPU_DESCRIPTOR_HANDLE* pDxDescriptors; #endif #if defined(VULKAN) VkImageView* pVkDescriptors; uint64_t mId; #endif #if defined(DIRECT3D11) union { /// Resources ID3D11RenderTargetView** pDxRtvDescriptors; ID3D11DepthStencilView** pDxDsvDescriptors; }; #endif #if defined(ORBIS) OrbisRenderTarget mStruct; #endif } RenderTarget; typedef struct LoadActionsDesc { ClearValue mClearColorValues[MAX_RENDER_TARGET_ATTACHMENTS]; LoadActionType mLoadActionsColor[MAX_RENDER_TARGET_ATTACHMENTS]; ClearValue mClearDepth; LoadActionType mLoadActionDepth; LoadActionType mLoadActionStencil; } LoadActionsDesc; typedef struct SamplerDesc { FilterType mMinFilter; FilterType mMagFilter; MipMapMode mMipMapMode; AddressMode mAddressU; AddressMode mAddressV; AddressMode mAddressW; float mMipLodBias; float mMaxAnisotropy; CompareMode mCompareFunc; } 
SamplerDesc; typedef struct Sampler { #if defined(DIRECT3D12) /// Description for creating the Sampler descriptor for ths sampler D3D12_SAMPLER_DESC mDxDesc; /// Descriptor handle of the Sampler in a CPU visible descriptor heap D3D12_CPU_DESCRIPTOR_HANDLE mDxSamplerHandle; #endif #if defined(VULKAN) /// Native handle of the underlying resource VkSampler pVkSampler; #endif #if defined(METAL) /// Native handle of the underlying resource id<MTLSamplerState> mtlSamplerState; #endif #if defined(DIRECT3D11) /// Native handle of the underlying resource ID3D11SamplerState* pSamplerState; #endif #if defined(ORBIS) OrbisSampler mStruct; #endif } Sampler; typedef enum DescriptorUpdateFrequency { DESCRIPTOR_UPDATE_FREQ_NONE = 0, DESCRIPTOR_UPDATE_FREQ_PER_FRAME, DESCRIPTOR_UPDATE_FREQ_PER_BATCH, DESCRIPTOR_UPDATE_FREQ_PER_DRAW, DESCRIPTOR_UPDATE_FREQ_COUNT, } DescriptorUpdateFrequency; /// Data structure holding the layout for a descriptor typedef struct DescriptorInfo { #if defined(ORBIS) OrbisDescriptorInfo mStruct; #else /// Binding information generated from the shader reflection ShaderResource mDesc; /// Index in the descriptor set uint32_t mIndexInParent; /// Update frequency of this descriptor DescriptorUpdateFrequency mUpdateFrquency; uint32_t mHandleIndex; #if defined(METAL) Sampler* mStaticSampler; #endif #if defined(DIRECT3D12) D3D12_ROOT_PARAMETER_TYPE mDxType; #endif #if defined(VULKAN) VkDescriptorType mVkType; VkShaderStageFlags mVkStages; uint32_t mDynamicUniformIndex; #endif #endif } DescriptorInfo; typedef enum RootSignatureFlags { /// Default flag ROOT_SIGNATURE_FLAG_NONE = 0, /// Local root signature used mainly in raytracing shaders ROOT_SIGNATURE_FLAG_LOCAL_BIT = 0x1, } RootSignatureFlags; MAKE_ENUM_FLAG(uint32_t, RootSignatureFlags) typedef struct RootSignatureDesc { Shader** ppShaders; uint32_t mShaderCount; uint32_t mMaxBindlessTextures; const char** ppStaticSamplerNames; Sampler** ppStaticSamplers; uint32_t mStaticSamplerCount; RootSignatureFlags 
mFlags; } RootSignatureDesc; typedef struct RootSignature { /// Number of descriptors declared in the root signature layout uint32_t mDescriptorCount; /// Array of all descriptors declared in the root signature layout DescriptorInfo* pDescriptors; /// Translates hash of descriptor name to descriptor index in pDescriptors array DescriptorIndexMap* pDescriptorNameToIndexMap; PipelineType mPipelineType; #if defined(DIRECT3D12) uint32_t mDxViewDescriptorTableRootIndices[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mDxSamplerDescriptorTableRootIndices[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t* pDxViewDescriptorIndices[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mDxViewDescriptorCounts[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mDxCumulativeViewDescriptorCounts[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t* pDxSamplerDescriptorIndices[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mDxSamplerDescriptorCounts[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mDxCumulativeSamplerDescriptorCounts[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t* pDxRootDescriptorRootIndices[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mDxRootDescriptorCounts[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t* pDxRootConstantRootIndices; uint32_t mDxRootConstantCount; ID3D12RootSignature* pDxRootSignature; ID3DBlob* pDxSerializedRootSignatureString; #endif #if defined(VULKAN) VkDescriptorSetLayout mVkDescriptorSetLayouts[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mVkDescriptorCounts[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mVkDynamicDescriptorCounts[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mVkRaytracingDescriptorCounts[DESCRIPTOR_UPDATE_FREQ_COUNT]; uint32_t mVkCumulativeDescriptorCounts[DESCRIPTOR_UPDATE_FREQ_COUNT]; VkPushConstantRange* pVkPushConstantRanges; VkPipelineLayout pPipelineLayout; VkDescriptorUpdateTemplate mUpdateTemplates[DESCRIPTOR_UPDATE_FREQ_COUNT]; VkDescriptorSet mVkEmptyDescriptorSets[DESCRIPTOR_UPDATE_FREQ_COUNT]; void* pUpdateTemplateData[DESCRIPTOR_UPDATE_FREQ_COUNT][MAX_GPUS]; uint32_t mVkPushConstantCount; #endif #if defined(METAL) 
// paramIndex support typedef struct IndexedDescriptor { const DescriptorInfo** pDescriptors; uint32_t mDescriptorCount; } IndexedDescriptor; Sampler** ppStaticSamplers; uint32_t* pStaticSamplerSlots; ShaderStage* pStaticSamplerStages; uint32_t mStaticSamplerCount; ShaderDescriptors* pShaderDescriptors; uint32_t mShaderDescriptorsCount; IndexedDescriptor* mIndexedDescriptorInfo; uint32_t mIndexedDescriptorCount; #endif #if defined(DIRECT3D11) ID3D11SamplerState** ppStaticSamplers; uint32_t* pStaticSamplerSlots; ShaderStage* pStaticSamplerStages; uint32_t mStaticSamplerCount; #endif #if defined(ORBIS) OrbisRootSignature mStruct; #endif } RootSignature; typedef struct DescriptorData { /// User can either set name of descriptor or index (index in pRootSignature->pDescriptors array) /// Name of descriptor const char* pName; union { struct { /// Offset to bind the buffer descriptor uint64_t* pOffsets; uint64_t* pSizes; }; // Descriptor set buffer extraction options struct { uint32_t mDescriptorSetBufferIndex; Shader* mDescriptorSetShader; ShaderStage mDescriptorSetShaderStage; }; uint32_t mUAVMipSlice; bool mBindStencilResource; }; /// Array of resources containing descriptor handles or constant to be used in ring buffer memory - DescriptorRange can hold only one resource type array union { /// Array of texture descriptors (srv and uav textures) Texture** ppTextures; /// Array of sampler descriptors Sampler** ppSamplers; /// Array of buffer descriptors (srv, uav and cbv buffers) Buffer** ppBuffers; /// Array of pipline descriptors Pipeline** ppPipelines; /// DescriptorSet buffer extraction DescriptorSet** ppDescriptorSet; /// Custom binding (raytracing acceleration structure ...) AccelerationStructure** ppAccelerationStructures; }; /// Number of resources in the descriptor(applies to array of textures, buffers,...) 
    uint32_t mCount;
    // Index in pRootSignature->pDescriptors; used when pName is not set
    uint32_t mIndex = (uint32_t)-1;
    bool mExtractBuffer = false;
} DescriptorData;

/// Creation parameters for a command pool.
typedef struct CmdPoolDesc
{
    Queue* pQueue;
    // Hint that command buffers from this pool are short-lived
    bool mTransient;
} CmdPoolDesc;

typedef struct CmdPool
{
    Queue* pQueue;
#if defined(DIRECT3D12)
    // Temporarily move to Cmd struct until we get the command allocator pool logic working
    //ID3D12CommandAllocator* pDxCmdAlloc;
#endif
#if defined(VULKAN)
    VkCommandPool pVkCmdPool;
#endif
} CmdPool;

/// Creation parameters for a command buffer.
typedef struct CmdDesc
{
    CmdPool* pPool;
#if defined(ORBIS)
    uint32_t mMaxSize;
#endif
    bool mSecondary;
} CmdDesc;

/// Command buffer: records rendering/compute commands for submission on a queue.
typedef struct Cmd
{
    Renderer* pRenderer;
    const RootSignature* pBoundRootSignature;
    uint8_t mNodeIndex;
    uint8_t mQueueType;
#if defined(DIRECT3D12)
#if defined(_DURANGO)
    DmaCmd mDma;
#endif
    // For now each command list will have its own allocator until we get the command allocator pool logic working
    ID3D12CommandAllocator* pDxCmdAlloc;
    ID3D12GraphicsCommandList* pDxCmdList;
    DescriptorSet* pBoundDescriptorSets[DESCRIPTOR_UPDATE_FREQ_COUNT];
    uint16_t mBoundDescriptorSetIndices[DESCRIPTOR_UPDATE_FREQ_COUNT];
#endif
#if defined(VULKAN)
    VkCommandBuffer pVkCmdBuf;
    VkRenderPass pVkActiveRenderPass;
#endif
#if defined(METAL)
    id<MTLCommandBuffer> mtlCommandBuffer;
    // Only one encoder kind is active at a time while recording
    id<MTLRenderCommandEncoder> mtlRenderEncoder;
    id<MTLComputeCommandEncoder> mtlComputeEncoder;
    id<MTLBlitCommandEncoder> mtlBlitEncoder;
    MTLRenderPassDescriptor* pRenderPassDesc;
    Shader* pShader;
    // Metal has no bind-index-buffer command; remembered here for the draw call
    Buffer* selectedIndexBuffer;
    uint64_t mSelectedIndexBufferOffset;
    QueryPool* pLastFrameQuery;
    MTLPrimitiveType selectedPrimitiveType;
#endif
#if defined(DIRECT3D11)
    uint8_t* pDescriptorCache;
    Buffer* pRootConstantBuffer;
    Buffer* pTransientConstantBuffer;
    uint32_t mDescriptorCacheOffset;
#endif
#if defined(ORBIS)
    OrbisCmd mStruct;
#endif
    CmdDesc mDesc;
} Cmd;

/// Creation parameters for a queue.
typedef struct QueueDesc
{
    QueueType mType;
    QueueFlag mFlag;
    QueuePriority mPriority;
    // GPU node the queue belongs to (multi-GPU / linked mode)
    uint32_t mNodeIndex;
} QueueDesc;

/// Result of getFenceStatus.
typedef enum FenceStatus
{
    FENCE_STATUS_COMPLETE = 0,
    FENCE_STATUS_INCOMPLETE,
    FENCE_STATUS_NOTSUBMITTED,
}
FenceStatus;

/// CPU-GPU synchronization primitive (signaled by queueSubmit, waited on by the CPU).
typedef struct Fence
{
#if defined(DIRECT3D12)
    ID3D12Fence* pDxFence;
    HANDLE pDxWaitIdleFenceEvent;
    uint64_t mFenceValue;
#endif
#if defined(VULKAN)
    VkFence pVkFence;
    bool mSubmitted;
#endif
#if defined(METAL)
    dispatch_semaphore_t pMtlSemaphore;
    bool mSubmitted;
#endif
#if defined(ORBIS)
    OrbisFence mStruct;
#endif
} Fence;

/// GPU-GPU synchronization primitive used between queue submissions/presents.
typedef struct Semaphore
{
#if defined(DIRECT3D12)
    // DirectX12 does not have a concept of semaphores
    // All synchronization is done using fences
    // Simulate semaphore signal and wait using DirectX12 fences
    // Semaphores used in DirectX12 only in queueSubmit
    // queueSubmit -> How the semaphores work in DirectX12
    // pp_wait_semaphores -> queue->Wait is manually called on each fence in this
    // array before calling ExecuteCommandLists to make the fence work like a wait semaphore
    // pp_signal_semaphores -> Manually call queue->Signal on each fence in this array after
    // calling ExecuteCommandLists and increment the underlying fence value
    // queuePresent does not use the wait semaphore since the swapchain Present function
    // already does the synchronization in this case
    Fence* pFence;
#endif
#if defined(VULKAN)
    VkSemaphore pVkSemaphore;
    uint32_t mCurrentNodeIndex;
    bool mSignaled;
#endif
#if defined(METAL)
    dispatch_semaphore_t pMtlSemaphore;
#endif
#if defined(ORBIS)
    OrbisFence mStruct;
#endif
} Semaphore;

/// Command queue: target for queueSubmit / queuePresent.
typedef struct Queue
{
    Renderer* pRenderer;
#if defined(DIRECT3D12)
    ID3D12CommandQueue* pDxQueue;
    Fence* pFence;
#endif
#if defined(VULKAN)
    VkQueue pVkQueue;
    uint32_t mVkQueueFamilyIndex;
    uint32_t mVkQueueIndex;
#endif
#if defined(METAL)
    id<MTLCommandQueue> mtlCommandQueue;
    uint32_t mBarrierFlags;
    id<MTLFence> mtlQueueFence;
#endif
#if defined(ORBIS)
    OrbisQueue mStruct;
#endif
    QueueDesc mDesc;
    Extent3D mUploadGranularity;
} Queue;

/// A single preprocessor define passed to shader compilation (name/value pair).
typedef struct ShaderMacro
{
    const char* definition;
    const char* value;
} ShaderMacro;

#if defined(TARGET_IOS)
/// Source-level shader stage description (iOS compiles shaders from source at runtime).
typedef struct ShaderStageDesc
{
    const char* pName;
    const char* pCode;
    const char* pEntryPoint;
    ShaderMacro* pMacros;
    uint32_t mMacroCount;
} ShaderStageDesc;

/// Source-level shader program description: one stage desc per possible stage,
/// with mStages flagging which ones are actually used.
typedef struct ShaderDesc
{
    ShaderStage mStages;
    ShaderStageDesc mVert;
    ShaderStageDesc mFrag;
    ShaderStageDesc mGeom;
    ShaderStageDesc mHull;
    ShaderStageDesc mDomain;
    ShaderStageDesc mComp;
} ShaderDesc;
#endif

/// Pre-compiled shader stage byte code.
typedef struct BinaryShaderStageDesc
{
    /// Byte code array
    const char* pByteCode;
    uint32_t mByteCodeSize;
    const char* pEntryPoint;
#if defined(METAL)
    // Shader source is needed for reflection
    char* pSource;
    uint32_t mSourceSize;
#endif
} BinaryShaderStageDesc;

/// Pre-compiled shader program description (see ShaderDesc for the source-level variant).
typedef struct BinaryShaderDesc
{
    ShaderStage mStages;
    BinaryShaderStageDesc mVert;
    BinaryShaderStageDesc mFrag;
    BinaryShaderStageDesc mGeom;
    BinaryShaderStageDesc mHull;
    BinaryShaderStageDesc mDomain;
    BinaryShaderStageDesc mComp;
} BinaryShaderDesc;

/// Compiled shader program plus its reflection data.
typedef struct Shader
{
    ShaderStage mStages;
    PipelineReflection mReflection;
#if defined(DIRECT3D12)
    ID3DBlob** pShaderBlobs;
    LPCWSTR* pEntryNames;
#endif
#if defined(VULKAN)
    VkShaderModule* pShaderModules;
    char** pEntryNames;
#endif
#if defined(METAL)
    id<MTLFunction> mtlVertexShader;
    id<MTLFunction> mtlFragmentShader;
    id<MTLFunction> mtlComputeShader;
    id<MTLLibrary> mtlLibrary;
    char** pEntryNames;
    uint32_t mNumThreadsPerGroup[3];
#endif
#if defined(DIRECT3D11)
    ID3D11VertexShader* pDxVertexShader;
    ID3D11PixelShader* pDxPixelShader;
    ID3D11GeometryShader* pDxGeometryShader;
    ID3D11DomainShader* pDxDomainShader;
    ID3D11HullShader* pDxHullShader;
    ID3D11ComputeShader* pDxComputeShader;
    ID3DBlob* pDxInputSignature;
#endif
#if defined(ORBIS)
    OrbisShader mStruct;
#endif
} Shader;

/// Creation parameters for a blend state (per-render-target blend setup).
typedef struct BlendStateDesc
{
    /// Source blend factor per render target.
    BlendConstant mSrcFactors[MAX_RENDER_TARGET_ATTACHMENTS];
    /// Destination blend factor per render target.
    BlendConstant mDstFactors[MAX_RENDER_TARGET_ATTACHMENTS];
    /// Source alpha blend factor per render target.
    BlendConstant mSrcAlphaFactors[MAX_RENDER_TARGET_ATTACHMENTS];
    /// Destination alpha blend factor per render target.
    BlendConstant mDstAlphaFactors[MAX_RENDER_TARGET_ATTACHMENTS];
    /// Blend mode per render target.
    BlendMode mBlendModes[MAX_RENDER_TARGET_ATTACHMENTS];
    /// Alpha blend mode per render target.
    BlendMode mBlendAlphaModes[MAX_RENDER_TARGET_ATTACHMENTS];
    /// Write mask per render target.
    int32_t mMasks[MAX_RENDER_TARGET_ATTACHMENTS];
    /// Mask that identifies the render targets affected by the blend state.
    BlendStateTargets mRenderTargetMask;
    /// Set whether alpha to coverage should be enabled.
    bool mAlphaToCoverage;
    /// Set whether each render target has an unique blend function. When false the blend function in slot 0 will be used for all render targets.
    bool mIndependentBlend;
} BlendStateDesc;

/// API-specific blend state object created from a BlendStateDesc.
typedef struct BlendState
{
#if defined(DIRECT3D12)
    D3D12_BLEND_DESC mDxBlendDesc;
#endif
#if defined(VULKAN)
    VkPipelineColorBlendAttachmentState RTBlendStates[MAX_RENDER_TARGET_ATTACHMENTS];
    VkBool32 LogicOpEnable;
    VkLogicOp LogicOp;
#endif
#if defined(METAL)
    struct BlendStateData
    {
        MTLBlendFactor srcFactor;
        MTLBlendFactor destFactor;
        MTLBlendFactor srcAlphaFactor;
        MTLBlendFactor destAlphaFactor;
        MTLBlendOperation blendMode;
        MTLBlendOperation blendAlphaMode;
    };
    BlendStateData blendStatePerRenderTarget[MAX_RENDER_TARGET_ATTACHMENTS];
    bool alphaToCoverage;
#endif
#if defined(DIRECT3D11)
    ID3D11BlendState* pBlendState;
#endif
#if defined(ORBIS)
    OrbisBlendState mStruct;
#endif
} BlendState;

/// Creation parameters for a depth/stencil state.
typedef struct DepthStateDesc
{
    bool mDepthTest;
    bool mDepthWrite;
    CompareMode mDepthFunc = CompareMode::CMP_LEQUAL;
    bool mStencilTest;
    uint8_t mStencilReadMask;
    uint8_t mStencilWriteMask;
    // Front-face stencil ops
    CompareMode mStencilFrontFunc = CompareMode::CMP_ALWAYS;
    StencilOp mStencilFrontFail;
    StencilOp mDepthFrontFail;
    StencilOp mStencilFrontPass;
    // Back-face stencil ops
    CompareMode mStencilBackFunc = CompareMode::CMP_ALWAYS;
    StencilOp mStencilBackFail;
    StencilOp mDepthBackFail;
    StencilOp mStencilBackPass;
} DepthStateDesc;

/// API-specific depth/stencil state object created from a DepthStateDesc.
typedef struct DepthState
{
#if defined(DIRECT3D12)
    D3D12_DEPTH_STENCIL_DESC mDxDepthStencilDesc;
#endif
#if defined(VULKAN)
    VkBool32 DepthTestEnable;
    VkBool32 DepthWriteEnable;
    VkCompareOp DepthCompareOp;
    VkBool32 DepthBoundsTestEnable;
    VkBool32 StencilTestEnable;
    VkStencilOpState Front;
    VkStencilOpState Back;
    float MinDepthBounds;
    float MaxDepthBounds;
#endif
#if defined(METAL)
    id<MTLDepthStencilState> mtlDepthState;
#endif
#if defined(DIRECT3D11)
    ID3D11DepthStencilState* pDxDepthStencilState;
#endif
#if defined(ORBIS)
    OrbisDepthState mStruct;
#endif
} DepthState;

/// Creation parameters for a rasterizer state.
typedef struct RasterizerStateDesc
{
    CullMode mCullMode;
    int32_t mDepthBias;
    float mSlopeScaledDepthBias;
    FillMode mFillMode;
    bool mMultiSample;
    bool mScissor;
    FrontFace mFrontFace;
    bool mDepthClampEnable;
} RasterizerStateDesc;

/// API-specific rasterizer state object created from a RasterizerStateDesc.
typedef struct RasterizerState
{
#if defined(DIRECT3D12)
    D3D12_RASTERIZER_DESC mDxRasterizerDesc;
#endif
#if defined(VULKAN)
    VkBool32 DepthClampEnable;
    VkPolygonMode PolygonMode;
    VkCullModeFlags CullMode;
    VkFrontFace FrontFace;
    VkBool32 DepthBiasEnable;
    float DepthBiasConstantFactor;
    float DepthBiasClamp;
    float DepthBiasSlopeFactor;
    float LineWidth;
#endif
#if defined(METAL)
    MTLCullMode cullMode;
    MTLTriangleFillMode fillMode;
    float depthBiasSlopeFactor;
    float depthBias;
    bool scissorEnable;
    bool multisampleEnable;
    MTLWinding frontFace;
#endif
#if defined(DIRECT3D11)
    ID3D11RasterizerState* pDxRasterizerState;
#endif
#if defined(ORBIS)
    OrbisRasterizerState mStruct;
#endif
} RasterizerState;

/// Whether a vertex attribute advances per vertex or per instance.
typedef enum VertexAttribRate
{
    VERTEX_ATTRIB_RATE_VERTEX = 0,
    VERTEX_ATTRIB_RATE_INSTANCE = 1,
    VERTEX_ATTRIB_RATE_COUNT,
} VertexAttribRate;

/// Description of a single vertex attribute within a vertex layout.
typedef struct VertexAttrib
{
    ShaderSemantic mSemantic;
    uint32_t mSemanticNameLength;
    char mSemanticName[MAX_SEMANTIC_NAME_LENGTH];
    TinyImageFormat mFormat;
    uint32_t mBinding;
    uint32_t mLocation;
    uint32_t mOffset;
    VertexAttribRate mRate;
} VertexAttrib;

/// Complete vertex input layout for a graphics pipeline.
typedef struct VertexLayout
{
    uint32_t mAttribCount;
    VertexAttrib mAttribs[MAX_VERTEX_ATTRIBS];
} VertexLayout;

/************************************************************************/
// #pGlobalRootSignature - Root Signature used by all shaders in the ppShaders array
// #ppShaders - Array of all shaders which can be called during the raytracing operation
//	  This includes the ray generation shader, all miss, any hit, closest hit shaders
// #pHitGroups - Name of the hit groups which will tell the pipeline about which combination of hit shaders to use
// #mPayloadSize - Size of the payload struct for passing data to and from the shaders.
//	  Example - float4 payload sent by raygen shader which will be filled by miss shader as a skybox color
//	  or by hit shader as shaded color
// #mAttributeSize - Size of the intersection attribute. As long as user uses the default intersection shader
//	  this size is sizeof(float2) which represents the ZW of the barycentric co-ordinates of the intersection
/************************************************************************/
typedef struct RaytracingPipelineDesc
{
    Raytracing* pRaytracing;
    RootSignature* pGlobalRootSignature;
    Shader* pRayGenShader;
    RootSignature* pRayGenRootSignature;
    Shader** ppMissShaders;
    RootSignature** ppMissRootSignatures;
    RaytracingHitGroup* pHitGroups;
    RootSignature* pEmptyRootSignature;
    unsigned mMissShaderCount;
    unsigned mHitGroupCount;
    // #TODO : Remove this after adding shader reflection for raytracing shaders
    unsigned mPayloadSize;
    // #TODO : Remove this after adding shader reflection for raytracing shaders
    unsigned mAttributeSize;
    unsigned mMaxTraceRecursionDepth;
    unsigned mMaxRaysCount;
} RaytracingPipelineDesc;

/// Creation parameters for a graphics pipeline.
typedef struct GraphicsPipelineDesc
{
    Shader* pShaderProgram;
    RootSignature* pRootSignature;
    VertexLayout* pVertexLayout;
    BlendState* pBlendState;
    DepthState* pDepthState;
    RasterizerState* pRasterizerState;
    // One color format per render target (mRenderTargetCount entries)
    TinyImageFormat* pColorFormats;
    uint32_t mRenderTargetCount;
    SampleCount mSampleCount;
    uint32_t mSampleQuality;
    TinyImageFormat mDepthStencilFormat;
    PrimitiveTopology mPrimitiveTopo;
    bool mSupportIndirectCommandBuffer;
} GraphicsPipelineDesc;

/// Creation parameters for a compute pipeline.
typedef struct ComputePipelineDesc
{
    Shader*
    pShaderProgram;
    RootSignature* pRootSignature;
} ComputePipelineDesc;

/// Tagged union of the three pipeline description kinds; mType selects the member.
typedef struct PipelineDesc
{
    PipelineType mType;
    union
    {
        ComputePipelineDesc mComputeDesc;
        GraphicsPipelineDesc mGraphicsDesc;
        RaytracingPipelineDesc mRaytracingDesc;
    };
} PipelineDesc;

//this is needed because unit tests have different WindowsSDK versions.
//Minimum 10.0.17763.0 is required in every project to remove this typedef
typedef struct ID3D12StateObject ID3D12StateObject;
#ifdef METAL
typedef struct RaytracingPipeline RaytracingPipeline;
#endif

/// Compiled pipeline state object (graphics, compute, or raytracing).
typedef struct Pipeline
{
    PipelineType mType;
#if defined(DIRECT3D12)
    ID3D12PipelineState* pDxPipelineState;
    D3D_PRIMITIVE_TOPOLOGY mDxPrimitiveTopology;
    ID3D12StateObject* pDxrPipeline;
#endif
#if defined(VULKAN)
    VkPipeline pVkPipeline;
    //In DX12 this information is stored in ID3D12StateObject.
    //But for Vulkan we need to store it manually
    const char** ppShaderStageNames;
    uint32_t mShaderStageCount;
#endif
#if defined(METAL)
    // NOTE: member name contains a historical typo ("PasspRender") — kept, it is public API
    RenderTarget* pRenderPasspRenderTarget;
    Shader* pShader;
    id<MTLRenderPipelineState> mtlRenderPipelineState;
    id<MTLComputePipelineState> mtlComputePipelineState;
    RaytracingPipeline* pRaytracingPipeline;
    MTLCullMode mCullMode;
#endif
#if defined(DIRECT3D11)
    ID3D11VertexShader* pDxVertexShader;
    ID3D11PixelShader* pDxPixelShader;
    ID3D11GeometryShader* pDxGeometryShader;
    ID3D11DomainShader* pDxDomainShader;
    ID3D11HullShader* pDxHullShader;
    ID3D11ComputeShader* pDxComputeShader;
    ID3D11InputLayout* pDxInputLayout;
    D3D_PRIMITIVE_TOPOLOGY mDxPrimitiveTopology;
#endif
#if defined(ORBIS)
    OrbisPipeline mStruct;
#endif
    union
    {
        GraphicsPipelineDesc mGraphics;
        ComputePipelineDesc mCompute;
    };
} Pipeline;

/// Describes one subresource (mip/layer region) of a texture update.
typedef struct SubresourceDataDesc
{
    // Source description
    uint64_t mBufferOffset;
    uint32_t mRowPitch;
    uint32_t mSlicePitch;
    // Destination description
    uint32_t mArrayLayer;
    uint32_t mMipLevel;
    Region3D mRegion;
} SubresourceDataDesc;

/// Creation parameters for a swap chain.
typedef struct SwapChainDesc
{
    /// Window handle
    WindowHandle mWindowHandle;
    /// Queues which should be allowed to present
    Queue** ppPresentQueues;
    /// Number of present queues
    uint32_t mPresentQueueCount;
    /// Number of backbuffers in this swapchain
    uint32_t mImageCount;
    /// Width of the swapchain
    uint32_t mWidth;
    /// Height of the swapchain
    uint32_t mHeight;
    /// Sample count
    SampleCount mSampleCount;
    /// Sample quality (DirectX12 only)
    uint32_t mSampleQuality;
    /// Color format of the swapchain
    TinyImageFormat mColorFormat;
    /// Clear value
    ClearValue mColorClearValue;
    /// Set whether swap chain will be presented using vsync
    bool mEnableVsync;
} SwapChain Desc_placeholder_do_not_use;
    IDXGISwapChain* pDxSwapChain;
    /// Sync interval to specify the vsync interval
    UINT mDxSyncInterval;
    ID3D11Resource** ppDxSwapChainResources;
    uint32_t mFlags;
#endif
#if defined(VULKAN)
    /// Present queue if one exists (queuePresent will use this queue if the hardware has a dedicated present queue)
    VkQueue pPresentQueue;
    VkSwapchainKHR pSwapChain;
    VkSurfaceKHR pVkSurface;
    VkImage* ppVkSwapChainImages;
    uint32_t mPresentQueueFamilyIndex;
#endif
#if defined(METAL)
#if defined(TARGET_IOS)
    UIView* pForgeView;
#else
    NSView* pForgeView;
#endif
    id<CAMetalDrawable> mMTKDrawable;
    id<MTLCommandBuffer> presentCommandBuffer;
#endif
#if defined(ORBIS)
    OrbisSwapChain mStruct;
#endif
} SwapChain;

/// Shader model the renderer targets when compiling shaders.
typedef enum ShaderTarget
{
    // We only need SM 5.0 for supporting D3D11 fallback
#if defined(DIRECT3D11)
    shader_target_5_0,
#else
    // 5.1 is supported on all DX12 hardware
    shader_target_5_1,
    shader_target_6_0,
    shader_target_6_1,
    shader_target_6_2,
    shader_target_6_3, //required for Raytracing
#endif
} ShaderTarget;

/// Multi-GPU operating mode.
typedef enum GpuMode
{
    GPU_MODE_SINGLE = 0,
    GPU_MODE_LINKED,
    // #TODO GPU_MODE_UNLINKED,
} GpuMode;

/// Creation parameters for the renderer.
typedef struct RendererDesc
{
    LogFn pLogFn;
    RendererApi mApi;
    ShaderTarget mShaderTarget;
    GpuMode mGpuMode;
#if defined(VULKAN)
    const char** ppInstanceLayers;
    uint32_t mInstanceLayerCount;
    const char** ppInstanceExtensions;
    uint32_t mInstanceExtensionCount;
    const char** ppDeviceExtensions;
    uint32_t mDeviceExtensionCount;
#endif
#if defined(DIRECT3D12)
    D3D_FEATURE_LEVEL mDxFeatureLevel;
#endif
    /// This results in new validation not possible during API calls on the CPU, by creating patched shaders that have validation added directly to the shader.
    /// However, it can slow things down a lot, especially for applications with numerous PSOs. Time to see the first render frame may take several minutes
    bool mEnableGPUBasedValidation;
} RendererDesc;

/// Identification strings and preset level for a GPU.
typedef struct GPUVendorPreset
{
    char mVendorId[MAX_GPU_VENDOR_STRING_LENGTH];
    char mModelId[MAX_GPU_VENDOR_STRING_LENGTH];
    char mRevisionId[MAX_GPU_VENDOR_STRING_LENGTH]; // Optional as not all gpu's have that. Default is : 0x00
    GPUPresetLevel mPresetLevel;
    char mGpuName[MAX_GPU_VENDOR_STRING_LENGTH]; //If GPU Name is missing then value will be empty string
} GPUVendorPreset;

/// Per-format capability flags queried from the GPU.
typedef struct GPUCapBits
{
    bool canShaderReadFrom[TinyImageFormat_Count];
    bool canShaderWriteTo[TinyImageFormat_Count];
    bool canRenderTargetWriteTo[TinyImageFormat_Count];
} GPUCapBits;

typedef enum DefaultResourceAlignment
{
    RESOURCE_BUFFER_ALIGNMENT = 4U,
} DefaultResourceAlignment;

/// Limits and feature flags of the active GPU.
typedef struct GPUSettings
{
    uint32_t mUniformBufferAlignment;
    uint32_t mUploadBufferTextureAlignment;
    uint32_t mUploadBufferTextureRowAlignment;
    uint32_t mMaxVertexInputBindings;
    uint32_t mMaxRootSignatureDWORDS;
    uint32_t mWaveLaneCount;
    GPUVendorPreset mGpuVendorPreset;
    bool mMultiDrawIndirect;
    bool mROVsSupported;
    bool mPartialUpdateConstantBufferSupported;
#ifdef METAL
    uint32_t mArgumentBufferMaxTextures;
#endif
} GPUSettings;

/// Top-level renderer object: owns the device, GPU enumeration state, default
/// resources and API-specific allocators.
typedef struct Renderer
{
    char* pName;
    RendererDesc mSettings;
    uint32_t mNumOfGPUs;
    GPUSettings* pActiveGpuSettings;
    GPUSettings mGpuSettings[MAX_GPUS];
    uint32_t mLinkedNodeCount;
#if defined(DIRECT3D12)
    // Default NULL Descriptors for binding at empty descriptor slots to make sure all descriptors are bound at submit
    D3D12_CPU_DESCRIPTOR_HANDLE mNullTextureSRV[TEXTURE_DIM_COUNT];
    D3D12_CPU_DESCRIPTOR_HANDLE mNullTextureUAV[TEXTURE_DIM_COUNT];
    D3D12_CPU_DESCRIPTOR_HANDLE mNullBufferSRV;
    D3D12_CPU_DESCRIPTOR_HANDLE mNullBufferUAV;
    D3D12_CPU_DESCRIPTOR_HANDLE mNullBufferCBV;
    D3D12_CPU_DESCRIPTOR_HANDLE mNullSampler;
    // API specific descriptor heap and memory allocator
    struct DescriptorHeap* pCPUDescriptorHeaps[D3D12_DESCRIPTOR_HEAP_TYPE_NUM_TYPES];
    struct DescriptorHeap*
    pCbvSrvUavHeaps[MAX_GPUS];
    struct DescriptorHeap* pSamplerHeaps[MAX_GPUS];
    struct ResourceAllocator* pResourceAllocator;
#if defined(_DEBUG) || defined(PROFILE)
    ID3D12Debug* pDXDebug;
#endif
#endif
#if defined(_DURANGO)
    IDXGIFactory2* pDXGIFactory;
    IDXGIAdapter* pDxGPUs[MAX_GPUS];
    IDXGIAdapter* pDxActiveGPU;
    ID3D12Device* pDxDevice;
    EsramManager* pESRAMManager;
#elif defined(DIRECT3D12)
    IDXGIFactory5* pDXGIFactory;
    IDXGIAdapter3* pDxGPUs[MAX_GPUS];
    IDXGIAdapter3* pDxActiveGPU;
    ID3D12Device* pDxDevice;
#endif
#if defined(DIRECT3D11)
    IDXGIFactory1* pDXGIFactory;
    IDXGIAdapter1* pDxGPUs[MAX_GPUS];
    IDXGIAdapter1* pDxActiveGPU;
    ID3D11Device* pDxDevice;
    ID3D11DeviceContext* pDxContext;
#endif
#if defined(VULKAN)
    VkInstance pVkInstance;
    // Enumerated physical devices and the one currently in use
    VkPhysicalDevice pVkGPUs[MAX_GPUS];
    VkPhysicalDevice pVkActiveGPU;
    VkPhysicalDeviceProperties2 mVkGpuProperties[MAX_GPUS];
#ifdef VK_NV_RAY_TRACING_SPEC_VERSION
    VkPhysicalDeviceRayTracingPropertiesNV mVkRaytracingProperties[MAX_GPUS];
#endif
    VkPhysicalDeviceMemoryProperties mVkGpuMemoryProperties[MAX_GPUS];
    VkPhysicalDeviceFeatures2KHR mVkGpuFeatures[MAX_GPUS];
    uint32_t mVkQueueFamilyPropertyCount[MAX_GPUS];
    VkQueueFamilyProperties* mVkQueueFamilyProperties[MAX_GPUS];
    uint32_t mActiveGPUIndex;
    // Convenience pointers into the arrays above for the active GPU
    VkPhysicalDeviceMemoryProperties* pVkActiveGpuMemoryProperties;
    VkPhysicalDeviceFeatures2KHR* pVkActiveGpuFeatures;
#ifdef VK_NV_RAY_TRACING_SPEC_VERSION
    VkPhysicalDeviceRayTracingPropertiesNV* pVkActiveCPURaytracingProperties;
#endif
    VkPhysicalDeviceProperties2* pVkActiveGPUProperties;
    VkDevice pVkDevice;
#ifdef USE_DEBUG_UTILS_EXTENSION
    VkDebugUtilsMessengerEXT pVkDebugUtilsMessenger;
#else
    VkDebugReportCallbackEXT pVkDebugReport;
#endif
    const char** ppInstanceLayers;
    uint32_t mInstanceLayerCount;
    uint32_t mVkUsedQueueCount[MAX_GPUS][16];
    // Default resources bound when user leaves a slot empty, per GPU node
    Texture* pDefaultTextureSRV[MAX_GPUS][TEXTURE_DIM_COUNT];
    Texture* pDefaultTextureUAV[MAX_GPUS][TEXTURE_DIM_COUNT];
    Buffer* pDefaultBufferSRV[MAX_GPUS];
    Buffer* pDefaultBufferUAV[MAX_GPUS];
    Sampler*
    pDefaultSampler;
    struct DescriptorPool* pDescriptorPool;
    struct VmaAllocator_T* pVmaAllocator;
    // These are the extensions that we have loaded
    const char* gVkInstanceExtensions[MAX_INSTANCE_EXTENSIONS];
    // These are the extensions that we have loaded
    const char* gVkDeviceExtensions[MAX_DEVICE_EXTENSIONS];
#endif
#if defined(METAL)
    id<MTLDevice> pDevice;
    struct ResourceAllocator* pResourceAllocator;
#endif
#if defined(ORBIS)
    Texture* pDefaultTextureSRV[TEXTURE_DIM_COUNT];
    Texture* pDefaultTextureUAV[TEXTURE_DIM_COUNT];
    Buffer* pDefaultBufferSRV;
    Buffer* pDefaultBufferUAV;
    Buffer* pDefaultBufferSRVRaw;
    Buffer* pDefaultBufferUAVRaw;
    Sampler* pDefaultSampler;
#endif
    uint32_t mCurrentFrameIdx;
    // Default states used if user does not specify them in pipeline creation
    BlendState* pDefaultBlendState;
    DepthState* pDefaultDepthState;
    RasterizerState* pDefaultRasterizerState;
    ShaderMacro* pBuiltinShaderDefines;
    uint32_t mBuiltinShaderDefinesCount;
    GPUCapBits capBits;
} Renderer;

// Indirect command structure define
typedef struct IndirectArgumentDescriptor
{
    IndirectArgumentType mType;
    const char* pName;
    uint32_t mIndex;
    uint32_t mCount;
    uint32_t mDivisor;
} IndirectArgumentDescriptor;

/// Creation parameters for an indirect command signature.
typedef struct CommandSignatureDesc
{
    CmdPool* pCmdPool;
    RootSignature* pRootSignature;
    uint32_t mIndirectArgCount;
    IndirectArgumentDescriptor* pArgDescs;
} CommandSignatureDesc;

/// Command signature used by cmdExecuteIndirect.
typedef struct CommandSignature
{
    CommandSignatureDesc mDesc;
    uint32_t mIndirectArgDescCounts;
    uint32_t mDrawCommandStride;
#if defined(DIRECT3D12)
    // NOTE: member name contains a historical typo ("Signautre") — kept, it is public API
    ID3D12CommandSignature* pDxCommandSignautre;
#endif
#if defined(VULKAN)
    IndirectArgumentType mDrawType;
#endif
#if defined(METAL)
    IndirectArgumentType mDrawType;
#endif
#if defined(ORBIS)
    IndirectArgumentType mDrawType;
#endif
} CommandSignature;

/// Creation parameters for a descriptor set.
typedef struct DescriptorSetDesc
{
    RootSignature* pRootSignature;
    DescriptorUpdateFrequency mUpdateFrequency;
    uint32_t mMaxSets;
    uint32_t mNodeIndex;
    // const wchar_t* pDebugName;
} DescriptorSetDesc;

/// Parameters for a single queueSubmit call.
typedef struct QueueSubmitDesc
{
    uint32_t mCmdCount;
    Cmd** ppCmds;
    // Fence signaled when this submission completes (may be NULL)
    Fence* pSignalFence;
    uint32_t mWaitSemaphoreCount;
    Semaphore** ppWaitSemaphores;
    uint32_t mSignalSemaphoreCount;
    Semaphore** ppSignalSemaphores;
    bool mSubmitDone;
} QueueSubmitDesc;

/// Parameters for a single queuePresent call.
typedef struct QueuePresentDesc
{
    SwapChain* pSwapChain;
    uint32_t mWaitSemaphoreCount;
    Semaphore** ppWaitSemaphores;
    // Swapchain back buffer index to present
    uint8_t mIndex;
    bool mSubmitDone;
} QueuePresentDesc;

#define API_INTERFACE

// clang-format off
// API functions
// allocates memory and initializes the renderer -> returns pRenderer
//
API_INTERFACE void FORGE_CALLCONV initRenderer(const char* app_name, const RendererDesc* p_settings, Renderer** ppRenderer);
API_INTERFACE void FORGE_CALLCONV removeRenderer(Renderer* pRenderer);

API_INTERFACE void FORGE_CALLCONV addFence(Renderer* pRenderer, Fence** pp_fence);
API_INTERFACE void FORGE_CALLCONV removeFence(Renderer* pRenderer, Fence* p_fence);
API_INTERFACE void FORGE_CALLCONV addSemaphore(Renderer* pRenderer, Semaphore** pp_semaphore);
API_INTERFACE void FORGE_CALLCONV removeSemaphore(Renderer* pRenderer, Semaphore* p_semaphore);
API_INTERFACE void FORGE_CALLCONV addQueue(Renderer* pRenderer, QueueDesc* pQDesc, Queue** ppQueue);
API_INTERFACE void FORGE_CALLCONV removeQueue(Renderer* pRenderer, Queue* pQueue);
API_INTERFACE void FORGE_CALLCONV addSwapChain(Renderer* pRenderer, const SwapChainDesc* p_desc, SwapChain** pp_swap_chain);
API_INTERFACE void FORGE_CALLCONV removeSwapChain(Renderer* pRenderer, SwapChain* p_swap_chain);

// command pool functions
API_INTERFACE void FORGE_CALLCONV addCmdPool(Renderer* pRenderer, const CmdPoolDesc* p_desc, CmdPool** pp_CmdPool);
API_INTERFACE void FORGE_CALLCONV removeCmdPool(Renderer* pRenderer, CmdPool* p_CmdPool);
API_INTERFACE void FORGE_CALLCONV addCmd(Renderer* pRenderer, const CmdDesc* p_desc, Cmd** pp_cmd);
API_INTERFACE void FORGE_CALLCONV removeCmd(Renderer* p_CmdPool, Cmd* pCmd);
API_INTERFACE void FORGE_CALLCONV addCmd_n(Renderer* pRenderer, const CmdDesc* p_desc, uint32_t cmd_count,
Cmd*** ppp_cmd);
API_INTERFACE void FORGE_CALLCONV removeCmd_n(Renderer* pRenderer, uint32_t cmd_count, Cmd** pp_cmd);

//
// All buffer, texture loading handled by resource system -> IResourceLoader.*
//
API_INTERFACE void FORGE_CALLCONV addRenderTarget(Renderer* pRenderer, const RenderTargetDesc* p_desc, RenderTarget** pp_render_target);
API_INTERFACE void FORGE_CALLCONV removeRenderTarget(Renderer* pRenderer, RenderTarget* p_render_target);
API_INTERFACE void FORGE_CALLCONV addSampler(Renderer* pRenderer, const SamplerDesc* pDesc, Sampler** pp_sampler);
API_INTERFACE void FORGE_CALLCONV removeSampler(Renderer* pRenderer, Sampler* p_sampler);

// shader functions
#if defined(TARGET_IOS)
API_INTERFACE void FORGE_CALLCONV addShader(Renderer* pRenderer, const ShaderDesc* p_desc, Shader** p_shader_program);
#endif
API_INTERFACE void FORGE_CALLCONV addShaderBinary(Renderer* pRenderer, const BinaryShaderDesc* p_desc, Shader** p_shader_program);
API_INTERFACE void FORGE_CALLCONV removeShader(Renderer* pRenderer, Shader* p_shader_program);

API_INTERFACE void FORGE_CALLCONV addRootSignature(Renderer* pRenderer, const RootSignatureDesc* pRootDesc, RootSignature** pp_root_signature);
API_INTERFACE void FORGE_CALLCONV removeRootSignature(Renderer* pRenderer, RootSignature* pRootSignature);

// pipeline functions
API_INTERFACE void FORGE_CALLCONV addPipeline(Renderer* pRenderer, const PipelineDesc* p_pipeline_settings, Pipeline** pp_pipeline);
API_INTERFACE void FORGE_CALLCONV removePipeline(Renderer* pRenderer, Pipeline* p_pipeline);

// Descriptor Set functions
API_INTERFACE void FORGE_CALLCONV addDescriptorSet(Renderer* pRenderer, const DescriptorSetDesc* pDesc, DescriptorSet** ppDescriptorSet);
API_INTERFACE void FORGE_CALLCONV removeDescriptorSet(Renderer* pRenderer, DescriptorSet* pDescriptorSet);
API_INTERFACE void FORGE_CALLCONV updateDescriptorSet(Renderer* pRenderer, uint32_t index, DescriptorSet* pDescriptorSet, uint32_t count, const DescriptorData* pParams);

/// Pipeline State Functions
API_INTERFACE void FORGE_CALLCONV addBlendState(Renderer* pRenderer, const BlendStateDesc* pDesc, BlendState** ppBlendState);
API_INTERFACE void FORGE_CALLCONV removeBlendState(BlendState* pBlendState);
API_INTERFACE void FORGE_CALLCONV addDepthState(Renderer* pRenderer, const DepthStateDesc* pDesc, DepthState** ppDepthState);
API_INTERFACE void FORGE_CALLCONV removeDepthState(DepthState* pDepthState);
API_INTERFACE void FORGE_CALLCONV addRasterizerState(Renderer* pRenderer, const RasterizerStateDesc* pDesc, RasterizerState** ppRasterizerState);
API_INTERFACE void FORGE_CALLCONV removeRasterizerState(RasterizerState* pRasterizerState);

// command buffer functions
API_INTERFACE void FORGE_CALLCONV beginCmd(Cmd* p_cmd);
API_INTERFACE void FORGE_CALLCONV endCmd(Cmd* p_cmd);
API_INTERFACE void FORGE_CALLCONV cmdBindRenderTargets(Cmd* p_cmd, uint32_t render_target_count, RenderTarget** pp_render_targets, RenderTarget* p_depth_stencil, const LoadActionsDesc* loadActions, uint32_t* pColorArraySlices, uint32_t* pColorMipSlices, uint32_t depthArraySlice, uint32_t depthMipSlice);
API_INTERFACE void FORGE_CALLCONV cmdSetViewport(Cmd* p_cmd, float x, float y, float width, float height, float min_depth, float max_depth);
API_INTERFACE void FORGE_CALLCONV cmdSetScissor(Cmd* p_cmd, uint32_t x, uint32_t y, uint32_t width, uint32_t height);
API_INTERFACE void FORGE_CALLCONV cmdBindPipeline(Cmd* p_cmd, Pipeline* p_pipeline);
API_INTERFACE void FORGE_CALLCONV cmdBindDescriptorSet(Cmd* pCmd, uint32_t index, DescriptorSet* pDescriptorSet);
API_INTERFACE void FORGE_CALLCONV cmdBindPushConstants(Cmd* pCmd, RootSignature* pRootSignature, const char* pName, const void* pConstants);
API_INTERFACE void FORGE_CALLCONV cmdBindPushConstantsByIndex(Cmd* pCmd, RootSignature* pRootSignature, uint32_t paramIndex, const void* pConstants);
API_INTERFACE void FORGE_CALLCONV cmdBindIndexBuffer(Cmd* p_cmd, Buffer* p_buffer, uint64_t offset);
API_INTERFACE void FORGE_CALLCONV
cmdBindVertexBuffer(Cmd* p_cmd, uint32_t buffer_count, Buffer** pp_buffers, uint64_t* pOffsets);
API_INTERFACE void FORGE_CALLCONV cmdDraw(Cmd* p_cmd, uint32_t vertex_count, uint32_t first_vertex);
API_INTERFACE void FORGE_CALLCONV cmdDrawInstanced(Cmd* pCmd, uint32_t vertexCount, uint32_t firstVertex, uint32_t instanceCount, uint32_t firstInstance);
API_INTERFACE void FORGE_CALLCONV cmdDrawIndexed(Cmd* p_cmd, uint32_t index_count, uint32_t first_index, uint32_t first_vertex);
API_INTERFACE void FORGE_CALLCONV cmdDrawIndexedInstanced(Cmd* pCmd, uint32_t indexCount, uint32_t firstIndex, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
API_INTERFACE void FORGE_CALLCONV cmdDispatch(Cmd* p_cmd, uint32_t group_count_x, uint32_t group_count_y, uint32_t group_count_z);

// Transition Commands
API_INTERFACE void FORGE_CALLCONV cmdResourceBarrier(Cmd* p_cmd, uint32_t buffer_barrier_count, BufferBarrier* p_buffer_barriers, uint32_t texture_barrier_count, TextureBarrier* p_texture_barriers, uint32_t rt_barrier_count, RenderTargetBarrier* p_rt_barriers);
// Virtual Textures
API_INTERFACE void FORGE_CALLCONV cmdUpdateVirtualTexture(Cmd* pCmd, Texture* pTexture);

//
// All buffer, texture update handled by resource system -> IResourceLoader.*
//

// queue/fence/swapchain functions
API_INTERFACE void FORGE_CALLCONV acquireNextImage(Renderer* pRenderer, SwapChain* p_swap_chain, Semaphore* p_signal_semaphore, Fence* p_fence, uint32_t* p_image_index);
API_INTERFACE void FORGE_CALLCONV queueSubmit(Queue* p_queue, const QueueSubmitDesc* p_desc);
API_INTERFACE void FORGE_CALLCONV queuePresent(Queue* p_queue, const QueuePresentDesc* p_desc);
API_INTERFACE void FORGE_CALLCONV waitQueueIdle(Queue* p_queue);
API_INTERFACE void FORGE_CALLCONV getFenceStatus(Renderer* pRenderer, Fence* p_fence, FenceStatus* p_fence_status);
API_INTERFACE void FORGE_CALLCONV waitForFences(Renderer* pRenderer, uint32_t fence_count, Fence** pp_fences);
API_INTERFACE void FORGE_CALLCONV
toggleVSync(Renderer* pRenderer, SwapChain** ppSwapchain); //Returns the recommended format for the swapchain. //If true is passed for the hintHDR parameter, it will return an HDR format IF the platform supports it //If false is passed or the platform does not support HDR a non HDR format is returned. API_INTERFACE TinyImageFormat FORGE_CALLCONV getRecommendedSwapchainFormat(bool hintHDR); //indirect Draw functions API_INTERFACE void FORGE_CALLCONV addIndirectCommandSignature(Renderer* pRenderer, const CommandSignatureDesc* p_desc, CommandSignature** ppCommandSignature); API_INTERFACE void FORGE_CALLCONV removeIndirectCommandSignature(Renderer* pRenderer, CommandSignature* pCommandSignature); API_INTERFACE void FORGE_CALLCONV cmdExecuteIndirect(Cmd* pCmd, CommandSignature* pCommandSignature, uint maxCommandCount, Buffer* pIndirectBuffer, uint64_t bufferOffset, Buffer* pCounterBuffer, uint64_t counterBufferOffset); /************************************************************************/ // GPU Query Interface /************************************************************************/ API_INTERFACE void FORGE_CALLCONV getTimestampFrequency(Queue* pQueue, double* pFrequency); API_INTERFACE void FORGE_CALLCONV addQueryPool(Renderer* pRenderer, const QueryPoolDesc* pDesc, QueryPool** ppQueryPool); API_INTERFACE void FORGE_CALLCONV removeQueryPool(Renderer* pRenderer, QueryPool* pQueryPool); API_INTERFACE void FORGE_CALLCONV cmdResetQueryPool(Cmd* pCmd, QueryPool* pQueryPool, uint32_t startQuery, uint32_t queryCount); API_INTERFACE void FORGE_CALLCONV cmdBeginQuery(Cmd* pCmd, QueryPool* pQueryPool, QueryDesc* pQuery); API_INTERFACE void FORGE_CALLCONV cmdEndQuery(Cmd* pCmd, QueryPool* pQueryPool, QueryDesc* pQuery); API_INTERFACE void FORGE_CALLCONV cmdResolveQuery(Cmd* pCmd, QueryPool* pQueryPool, Buffer* pReadbackBuffer, uint32_t startQuery, uint32_t queryCount); /************************************************************************/ // Stats Info Interface 
/************************************************************************/ API_INTERFACE void FORGE_CALLCONV calculateMemoryStats(Renderer* pRenderer, char** stats); API_INTERFACE void FORGE_CALLCONV freeMemoryStats(Renderer* pRenderer, char* stats); /************************************************************************/ // Debug Marker Interface /************************************************************************/ API_INTERFACE void FORGE_CALLCONV cmdBeginDebugMarker(Cmd* pCmd, float r, float g, float b, const char* pName); API_INTERFACE void FORGE_CALLCONV cmdEndDebugMarker(Cmd* pCmd); API_INTERFACE void FORGE_CALLCONV cmdAddDebugMarker(Cmd* pCmd, float r, float g, float b, const char* pName); /************************************************************************/ // Resource Debug Naming Interface /************************************************************************/ API_INTERFACE void FORGE_CALLCONV setBufferName(Renderer* pRenderer, Buffer* pBuffer, const char* pName); API_INTERFACE void FORGE_CALLCONV setTextureName(Renderer* pRenderer, Texture* pTexture, const char* pName); /************************************************************************/ /************************************************************************/ // clang-format on
Giperion/The-Forge
Common_3/Renderer/IResourceLoader.h
<reponame>Giperion/The-Forge /* * Copyright (c) 2018-2020 The Forge Interactive Inc. * * This file is part of The-Forge * (see https://github.com/ConfettiFX/The-Forge). * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ // *************************************************** // NOTE: // "IRenderer.h" MUST be included before this header! // *************************************************** #pragma once #include "../Renderer/IRenderer.h" #include "../OS/Core/Atomics.h" #include "../OS/Interfaces/IFileSystem.h" #include "../ThirdParty/OpenSource/tinyimageformat/tinyimageformat_base.h" #include "../Renderer/ResourceLoaderInternalTypes.h" typedef struct BufferUpdateDesc BufferUpdateDesc; typedef struct BufferLoadDesc BufferLoadDesc; typedef struct RenderMesh RenderMesh; // MARK: - Resource Loading typedef enum LoadPriority { // This load priority is only used for updates that // have their data stored in GPU memory (e.g. from an // updateResource call). LOAD_PRIORITY_UPDATE = 0, // LoadPriorities High, Normal, and Low are for loads // where the data is not already stored in GPU memory. 
LOAD_PRIORITY_HIGH, LOAD_PRIORITY_NORMAL, LOAD_PRIORITY_LOW, LOAD_PRIORITY_COUNT } LoadPriority; typedef struct BufferLoadDesc { Buffer** ppBuffer; const void* pData; BufferDesc mDesc; /// Force Reset buffer to NULL bool mForceReset; /// Whether to skip uploading any data to the buffer. /// Automatically set to true if using addResource (rather than begin/endAddResource) /// with pData = NULL and mForceReset = false bool mSkipUpload; BufferUpdateInternalData mInternalData; } BufferLoadDesc; typedef struct RawImageData { uint8_t* pRawData; TinyImageFormat mFormat; uint32_t mWidth, mHeight, mDepth, mArraySize, mMipLevels; bool mMipsAfterSlices; // The stride between subsequent rows. // If using a beginUpdateResource/endUpdateResource pair, // copies to pRawData should use this stride. // A stride of 0 means the data is tightly packed. uint32_t mRowStride; } RawImageData; typedef struct BinaryImageData { void* pBinaryData; size_t mSize; const char* pExtension; } BinaryImageData; typedef struct TextureLoadDesc { Texture** ppTexture; /// Load empty texture TextureDesc* pDesc; /// Load texture from disk const Path* pFilePath; uint32_t mNodeIndex; /// Load texture from raw data RawImageData* pRawImageData = NULL; /// Load texture from binary data (with header) BinaryImageData* pBinaryImageData = NULL; // Following is ignored if pDesc != NULL. pDesc->mFlags will be considered instead. 
TextureCreationFlags mCreationFlag; struct { MappedMemoryRange mMappedRange; } mInternalData; } TextureLoadDesc; typedef struct Geometry { struct Hair { uint32_t mVertexCountPerStrand; uint32_t mGuideCountPerStrand; }; struct ShadowData { void* pIndices; void* pAttributes[MAX_VERTEX_ATTRIBS]; }; /// Index buffer to bind when drawing this geometry Buffer* pIndexBuffer; /// The array of vertex buffers to bind when drawing this geometry Buffer* pVertexBuffers[MAX_VERTEX_BINDINGS]; /// The array of traditional draw arguments to draw each subset in this geometry IndirectDrawIndexArguments* pDrawArgs; /// Shadow copy of the geometry vertex and index data if requested through the load flags ShadowData* pShadow; /// The array of joint inverse bind-pose matrices ( object-space ) mat4* pInverseBindPoses; /// The array of data to remap skin batch local joint ids to global joint ids uint32_t* pJointRemaps; /// Hair data Hair mHair; /// Number of vertex buffers in this geometry uint32_t mVertexBufferCount : 8; /// Index type (32 or 16 bit) uint32_t mIndexType : 2; /// Number of joints in the skinned geometry uint32_t mJointCount : 16; /// Number of draw args in the geometry uint32_t mDrawArgCount; /// Number of indices in the geometry uint32_t mIndexCount; /// Number of vertices in the geometry uint32_t mVertexCount; uint32_t mPadA; uint32_t mPadB; } Geometry; static_assert(sizeof(Geometry) % 16 == 0, "GLTFContainer size must be a multiple of 16"); typedef enum GeometryLoadFlags { /// Keep shadow copy of indices and vertices for CPU GEOMETRY_LOAD_FLAG_SHADOWED = 0x1, /// Use structured buffers instead of raw buffers GEOMETRY_LOAD_FLAG_STRUCTURED_BUFFERS = 0x2, } GeometryLoadFlags; MAKE_ENUM_FLAG(uint32_t, GeometryLoadFlags) typedef struct GeometryLoadDesc { /// Output geometry Geometry** ppGeometry; /// Path to geometry container const Path* pFilePath; /// Loading flags GeometryLoadFlags mFlags; /// Linked gpu node uint32_t mNodeIndex; /// Specifies how to arrange the vertex 
data loaded from the file into GPU memory VertexLayout* pVertexLayout; } GeometryLoadDesc; typedef struct VirtualTexturePageInfo { uint pageAlive; uint TexID; uint mipLevel; uint padding1; } VirtualTexturePageInfo; typedef struct BufferUpdateDesc { Buffer* pBuffer; uint64_t mDstOffset; uint64_t mSize; /// To be filled by the caller /// Example: /// BufferUpdateDesc update = { pBuffer, bufferDstOffset }; /// beginUpdateResource(&update); /// ParticleVertex* vertices = (ParticleVertex*)update.pMappedData; /// for (uint32_t i = 0; i < particleCount; ++i) /// vertices[i] = { rand() }; /// endUpdateResource(&update, &token); void* pMappedData; // Internal BufferUpdateInternalData mInternalData; } BufferUpdateDesc; typedef struct TextureUpdateDesc { Texture* pTexture; RawImageData* pRawImageData = NULL; void* pMappedData; struct { MappedMemoryRange mMappedRange; } mInternalData; } TextureUpdateDesc; typedef struct ShaderStageLoadDesc { const char* pFileName; ShaderMacro* pMacros; uint32_t mMacroCount; ResourceDirectory mRoot; const char* pEntryPointName; } ShaderStageLoadDesc; typedef struct ShaderLoadDesc { ShaderStageLoadDesc mStages[SHADER_STAGE_COUNT]; ShaderTarget mTarget; } ShaderLoadDesc; typedef struct SyncToken { uint64_t mWaitIndex[LOAD_PRIORITY_COUNT]; } SyncToken; typedef struct ResourceLoaderDesc { uint64_t mBufferSize; uint32_t mBufferCount; } ResourceLoaderDesc; extern ResourceLoaderDesc gDefaultResourceLoaderDesc; // MARK: - Resource Loader Functions void initResourceLoaderInterface(Renderer* pRenderer, ResourceLoaderDesc* pDesc = nullptr); void exitResourceLoaderInterface(Renderer* pRenderer); // MARK: addResource and updateResource /// Adding and updating resources can be done using a addResource or /// beginUpdateResource/endUpdateResource pair. /// if addResource(BufferLoadDesc) is called with a data size larger than the ResourceLoader's staging buffer, the ResourceLoader /// will perform multiple copies/flushes rather than failing the copy. 
/// If token is NULL, the resource will be available when allResourceLoadsCompleted() returns true. /// If token is non NULL, the resource will be available after isTokenCompleted(token) returns true. void addResource(BufferLoadDesc* pBufferDesc, SyncToken* token, LoadPriority priority); void addResource(TextureLoadDesc* pTextureDesc, SyncToken* token, LoadPriority priority); void addResource(GeometryLoadDesc* pGeomDesc, SyncToken* token, LoadPriority priority); void beginUpdateResource(BufferUpdateDesc* pBufferDesc); void beginUpdateResource(TextureUpdateDesc* pTextureDesc); void endUpdateResource(BufferUpdateDesc* pBuffer, SyncToken* token); void endUpdateResource(TextureUpdateDesc* pTexture, SyncToken* token); // MARK: removeResource void removeResource(Buffer* pBuffer); void removeResource(Texture* pTexture); void removeResource(Geometry* pGeom); // MARK: Waiting for Loads /// Returns whether all submitted resource loads and updates have been completed. bool allResourceLoadsCompleted(); /// Blocks the calling thread until allResourceLoadsCompleted() returns true. /// Note that if more resource loads or updates are submitted from a different thread while /// while the calling thread is blocked, those loads or updates are not guaranteed to have /// completed when this function returns. void waitForAllResourceLoads(); /// A SyncToken is an array of monotonically increasing integers. /// getLastTokenCompleted() returns the last value for which /// isTokenCompleted(token) is guaranteed to return true. SyncToken getLastTokenCompleted(); bool isTokenCompleted(const SyncToken* token); void waitForToken(const SyncToken* token); /// Either loads the cached shader bytecode or compiles the shader to create new bytecode depending on whether source is newer than binary void addShader(Renderer* pRenderer, const ShaderLoadDesc* pDesc, Shader** ppShader);
Kiandr/dataStructure
interviews/LifeLabs/IsPowerOfTow.c
#include<stdio.h>
#include<stdbool.h>

/* Returns true iff n is a positive power of two.
 * A power of two has exactly one bit set, so n & (n - 1) clears that bit
 * and yields 0.  This is O(1) instead of the original O(log n) division
 * loop, and handles zero and negative inputs the same way the loop did
 * (both are rejected). */
bool isPowerOfTwo(int n)
{
    return n > 0 && (n & (n - 1)) == 0;
}

/* Driver program to test above function */
int main()
{
    isPowerOfTwo(31) ? printf("Yes\n") : printf("No\n");
    isPowerOfTwo(64) ? printf("Yes\n") : printf("No\n");
    return 0;
}
Kiandr/dataStructure
crackingCodingInterview/C++/Tree/LowestCommonAncestor/interation.c
/* Iteratively locate the lowest common ancestor of values a and b in a BST.
 * Walks down from the root: while both targets lie strictly on one side and
 * that child exists, descend; otherwise the current node is the answer.
 * Returns NULL for an empty tree. */
Node *lca(Node *root, int a, int b)
{
    if (root == NULL)
        return NULL;

    Node *cur = root;
    while (cur != NULL)
    {
        if (cur->data > a && cur->data > b && cur->left != NULL)
            cur = cur->left;          /* both keys are in the left subtree  */
        else if (cur->data < a && cur->data < b && cur->right != NULL)
            cur = cur->right;         /* both keys are in the right subtree */
        else
            return cur;               /* split point (or missing child)     */
    }
    return root;                      /* unreachable; kept from original    */
}
Kiandr/dataStructure
crackingCodingInterview/C++/Mathematics/Fibonacci/Recursive.c
#include<stdio.h>
#include <stdbool.h>

/* Naive recursive Fibonacci: Fib(0)=0, Fib(1)=1, Fib(n)=Fib(n-1)+Fib(n-2).
 * Exponential time, acceptable for the small demo range used by main(). */
int Fib(int val)
{
    if (val <= 1)
        return val;
    return Fib(val - 1) + Fib(val - 2);
}

int main()
{
    int FibArray[20] = {0,1,1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,1597,2584,4181,6765};
    for (int i = 0; i < 20; i++)
    {
        /* Compute once instead of three times: the recursion is exponential,
         * so the repeated calls inside printf dominated the runtime. */
        int result = Fib(i);
        printf("Fibonacci [%d] is valid: [%d], Resulat Is %d and Expected resulat was %d\n",
               i, result == FibArray[i] ? 1 : 0, result, FibArray[i]);
    }
    return 0;
}
Kiandr/dataStructure
hackerRank/30DaysOfCodeChallenges/Day6LetsReview/main.c
#include "stdio.h" #include "string.h" void foo(char *head, int len) { // debugging // printf("[%d\n",&, len); char *prtH = head; int lenEv =0; int lenOd =0; { while (prtH<=(head+len) && *prtH != '\n') { if (*prtH%2 == 0) lenEv++; else lenOd++; prtH++; } // printf("sizeof odd = %d\n", lenOd); // printf("sizeof even = %d\n", lenEv); char arrayEv[lenEv]; char arrayOd[lenOd]; char *prtArrayEv = arrayEv; char *prtAarrayOd = arrayOd; prtH = head; while (prtH<=(head+len) && *prtH != '\n') { if (*prtH%2 == 0 &&*prtH!=0){ // printf("Even letter is %d -> %c\n",*prtH, *prtH); *prtArrayEv++ = *prtH++; } else { // printf("Odd letter is %d -> %c\n",*prtH, *prtH); *prtAarrayOd++ = *prtH++; } } // printf("START\n"); for (int i=0;i<lenEv;i++) printf("%c",arrayEv[i]); printf(" "); for (int i=0;i<lenOd;i++) printf("%c",arrayOd[i]); printf("\n"); } } int main() { int loop=0; char array[100]; scanf("%d",&loop); for (int i=0;i<loop;i++){ scanf("%s", array); int len = strlen (array); char newArray [len]; strcpy (newArray, array); // printf("[%lu->%s]\n", strlen(newArray), newArray); foo(newArray, sizeof(newArray)/sizeof(char)); } return 0; }
Kiandr/dataStructure
hackerRank/30DaysOfCodeChallenges/Round/main.c
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>

/* Prints the total meal cost rounded to the nearest dollar.
 * head[0] = meal cost, head[1] = tip percent, head[2] = tax percent.
 * `len` is accepted for interface compatibility; exactly three values
 * are read. */
void calculateTotal (float *head, int len){
    (void)len;
    float total = head[0] + (head[0] * head[1] / 100) + (head[0] * head[2] / 100);
    int totalRounded = round(total);
    printf ("The total meal cost is %d dollars.", totalRounded);
}

int main()
{
    /* Read three floats from STDIN and report the rounded total.
     * The original kept an unused countdown variable `x` and stored the
     * element count in a float; both cleaned up here. */
    float array [3];
    int len = sizeof(array) / sizeof(float);
    for (int i = 0; i < len; i++)
        scanf("%f", &array[i]);
    calculateTotal (array, len);
    return 0;
}
// 10.25
// 17
// 5
// expected 13
Kiandr/dataStructure
crackingCodingInterview/C++/Tree/LowestCommonAncestor/recursive.c
/* Recursive BST lowest-common-ancestor of values a and b.
 * Fixes two defects in the original:
 *  - the NULL-root guard was commented out, so an empty tree dereferenced
 *    root->data and crashed;
 *  - the result of the first recursion was re-tested against the second
 *    condition, so a left-subtree answer could trigger a bogus right descent.
 * Returns NULL for an empty tree, otherwise the split node. */
Node *lca(Node *root, int a, int b)
{
    if (root == NULL)
        return NULL;
    /* Both targets lie in the left subtree. */
    if (root->data > a && root->data > b && root->left != NULL)
        return lca(root->left, a, b);
    /* Both targets lie in the right subtree. */
    if (root->data < a && root->data < b && root->right != NULL)
        return lca(root->right, a, b);
    /* This node separates a and b (or a required child was missing). */
    return root;
}
Kiandr/dataStructure
hackerRank/30DaysOfCodeChallenges/Day8DictionariesandMaps/main.c
#include "stdio.h" #include "string.h" #include "stdlib.h" #include <stdio.h> #include <string.h> typedef struct node { char *key; int data[8]; }node; void addNewNode(char* inputString){ char *prtH = inputString; char *key; int val [8]; int i=0; printf("%s--\n",(prtH)); while (*prtH !=' '&& *prtH !='\n'){ printf("%c",*prtH++); i++; } key = (char*)malloc(i); prtH = inputString; char *prtVal = (inputString+i+1); char *pp = prtVal; while (*prtH !=' '&& *prtH !='\n'){ printf("%c",*prtH); *key++= *prtH++; } i=0; printf("\n"); int k=0; while (*prtH >'0'&& *prtH <'0' &&*prtH !='\n'){ printf("--%c---\n",*prtH); *prtVal = *prtH++; printf("--%c---\n",*prtVal++); } for (int k=0;k<8;k++) { printf("%c",*pp+k); } printf("\n END"); } int main() { /* Enter your code here. Read input from STDIN. Print output to STDOUT */ /* int loop = 0; scanf("%d",&loop); char *placeHolder[loop]; for (int i=0;i<loop;i++){ placeHolder[i] = (char*)malloc(100); scanf("%s",placeHolder[i]); } for (int i=0;i<loop;i++){ printf("here is i %d -> %s\n",i,placeHolder[i]); } */ char test [] = "TEST 12345678"; addNewNode(test); return 0; }
Kiandr/dataStructure
crackingCodingInterview/C++/Mathematics/Fibonacci/DynamicProgramming .c
#include <stdio.h>

/* Bottom-up (dynamic programming) Fibonacci.
 * Fixes an out-of-bounds write in the original: for val == 0 the VLA held a
 * single element yet Fib[1] = 1 was still assigned.  The guard also makes
 * the base cases explicit. */
int Fib(int val)
{
    if (val <= 1)
        return val;
    /* Table of size val+1 so Fib[val] is addressable. */
    int Fib[val + 1];
    Fib[0] = 0;
    Fib[1] = 1;
    for (int i = 2; i <= val; i++)
        Fib[i] = Fib[i - 1] + Fib[i - 2];
    return Fib[val];
}

int main()
{
    int FibArray[20] = {0,1,1,2,3,5,8,13,21,34,55,89,144,233,377,610,987,1597,2584,4181,6765};
    for (int i = 0; i < 20; i++)
    {
        int result = Fib(i);  /* compute once, not three times */
        printf("Fibonacci [%d] is valid: [%d], Resulat Is %d and Expected resulat was %d\n",
               i, result == FibArray[i] ? 1 : 0, result, FibArray[i]);
    }
    return 0;
}
Kiandr/dataStructure
hackerRank/30DaysOfCodeChallenges/Factorial/main.c
#include "stdio.h"

/* Recursive n!: multiplies down to 1; any n below 1 yields 1. */
unsigned int factorial(long int n)
{
    return (n >= 1) ? n * factorial(n - 1) : 1;
}

/* Iterative n!: accumulates the product 1*2*...*n.
 * The accumulator is unsigned long long, but the value is narrowed back to
 * int on return, matching the declared interface. */
int factorialFor(int n)
{
    unsigned long long acc = 1;
    for (int step = 1; step <= n; ++step)
        acc *= step;
    return acc;
}

/* Reads a count n and prints 0! .. (n-1)! computed both ways. */
int main()
{
    int n;
    scanf("%d", &n);
    for (int i = 0; i < n; i++)
    {
        printf("factoria of %d is %u\n", i, factorial(i));
        printf("factoria of %d is %u\n", i, factorialFor(i));
    }
    return 0;
}
Kiandr/dataStructure
hackerRank/30DaysOfCodeChallenges/Day9Recursion/main.c
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/* n! by recursion; any n <= 1 is the base case and yields 1. */
int Factorial(int n)
{
    return (n <= 1) ? 1 : n * Factorial(n - 1);
}

/* Reads n and prints n! when n is within the problem's 2..12 range
 * (12! is the largest factorial that fits in a 32-bit int). */
int main()
{
    int n;
    scanf("%i", &n);
    if (n > 1 && n < 13)
    {
        int result = Factorial(n);
        printf("%d\n", result);
    }
    return 0;
}
Kiandr/dataStructure
crackingCodingInterview/C++/Mathematics/IsPowerOfTwo/main.c
#include<stdio.h>

/* Returns 1 if val is a power of two, 0 otherwise.
 * Zero is rejected up front; any remaining odd factor found while halving
 * means val is not a power of two. */
int IsPowerOfTwo(int val)
{
    if (val == 0)
        return 0;
    for (; val != 1; val /= 2)
    {
        if (val % 2 != 0)
            return 0;
    }
    return 1;
}

/* Demo driver: prints the result for 8 (yes), 0 (no) and 7 (no). */
int main()
{
    printf("[%d]\n", IsPowerOfTwo(8));
    printf("[%d]\n", IsPowerOfTwo(0));
    printf("[%d]\n", IsPowerOfTwo(7));
    return 0;
}
Kiandr/dataStructure
hackerRank/30DaysOfCodeChallenges/Day1DataTypes/main.c
#include <stdio.h> #include <string.h> #include <math.h> #include <stdlib.h> int main() { int i = 4; double d = 4.0; char s[] = "HackerRank"; // Declare second integer, double, and String variables. int ii =0; float f =0; char ss[50]; // Read and save an integer, double, and String to your variables. scanf("%d\n",&ii); scanf("%f\n",&f); // scanf("%s\n",ss); fgets(ss, 50, stdin); // Print the sum of both integer variables on a new line. printf("%d\n",i+ii); // Print the sum of the double variables on a new line. printf("%.1f\n",f+d); // Concatenate and print the String variables on a new line printf("%s%s\n",s,ss); // The 's' variable above should be printed first. return 0; }
Kiandr/dataStructure
crackingCodingInterview/C++/Mathematics/ThePowerSum/main.c
#include<stdio.h>

/*https://www.geeksforgeeks.org/write-a-c-program-to-calculate-powxn/*/
/* Computes x raised to the power y by repeated squaring.
 * The half power is computed ONCE and reused; the original evaluated
 * power(x, y/2) twice per level, degrading the O(log y) scheme to O(y)
 * multiplications. */
int power(int x, unsigned int y)
{
    if (y == 0)
        return 1;
    int half = power(x, y / 2);
    if (y % 2 == 0)
        return half * half;
    return x * half * half;
}

/* Program to test function power */
int main()
{
    int x = 2;
    unsigned int y = 3;
    printf("%d", power(x, y));
    return 0;
}
Kiandr/dataStructure
hackerRank/30DaysOfCodeChallenges/OddNumber/main.c
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/* Prints "Weird" for odd n and "Not Weird" for even n. */
void isOdd (int n )
{
    const char *verdict = (n % 2 == 0) ? "Not Weird\n" : "Weird\n";
    printf("%s", verdict);
}

/* Demo driver: classifies 0..49. */
int main()
{
    for (int i = 0; i < 50; i++)
    {
        printf("i=%d", i);
        isOdd(i);
    }
    return 0;
}
Kiandr/dataStructure
crackingCodingInterview/C++/Mathematics/IsPrimeNumber/main.c
// A school method based C program to
// check if a number is prime
#include <stdio.h>
#include <stdbool.h>

// function check whether a number
// is prime or not
/* Trial division: returns true iff n is prime.
 * n <= 1 is rejected up front; divisors are tested only up to sqrt(n)
 * (i <= n / i avoids the i * i overflow for large n). */
bool isPrime(int n)
{
    // Corner case
    if (n <= 1)
        return false;

    for (int i = 2; i <= n / i; i++)
        if (n % i == 0)
            return false;

    return true;
}

// Driver Program
/* The original .c file used std::cout and `using namespace std;` without
 * <iostream>, so it compiled neither as C nor as C++.  printf produces the
 * identical output. */
int main()
{
    printf(isPrime(11) ? " true\n" : " false\n");
    return 0;
}
Kiandr/dataStructure
crackingCodingInterview/C++/Mathematics/Factorial/FactorialIterative.c
#include <stdio.h>

/* Iterative n!: 0! and 1! both yield 1; otherwise accumulate 2*3*...*val. */
int Factorial( int val )
{
    if (val == 0)
        return 1;
    int acc = 1;
    for (int i = 2; i <= val; i++)
        acc = acc * i;
    return acc;
}

int main ()
{
    /* Expected values for 0! .. 9!.  The original table started at 1!
     * ({1,2,6,...}) while the loop checks Factorial(i) for i = 0..9, so
     * every row after the first was reported as invalid. */
    int FactorialArray[10] = { 1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880 };
    for (int i = 0; i < 10; i++)
    {
        int result = Factorial (i);  /* compute once, not three times */
        printf ("Factorial [%d] is valid: [%d], Resulat Is %d and Expected resulat was %d\n",
                i, result == FactorialArray[i] ? 1 : 0, result, FactorialArray[i]);
    }
    return 0;
}
Kiandr/dataStructure
hackerRank/30DaysOfCodeChallenges/Day20Sorting/main.c
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/* HackerRank Day 20: read n integers, bubble-sort them while counting the
 * swaps performed, then print the swap count and the first/last elements. */
int main()
{
    int n;
    scanf("%d", &n);

    /* Cast keeps this valid C++ as well as C; the original leaked this
     * buffer — freed below. */
    int *a = (int *) malloc(sizeof(int) * n);
    for (int i = 0; i < n; i++)
        scanf("%d", a + i);

    /* Classic bubble sort; after pass i the last i elements are in place,
     * so the inner bound shrinks each pass. */
    int swaps = 0;
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n - i - 1; j++)
        {
            if (a[j] > a[j + 1])
            {
                swaps++;
                int temp = a[j];
                a[j] = a[j + 1];
                a[j + 1] = temp;
            }
        }
    }

    printf("Array is sorted in %d swaps.\n", swaps);
    printf("First Element: %d \n", *a);
    printf("Last Element: %d \n", a[n - 1]);

    free(a);
    return 0;
}
y1024/SensorsAnalyticsSDK
SensorsAnalyticsSDK/SensorsAnalyticsSDK/UIWindow+SASnapshotImage/UIWindow+SASnapshotImage.h
//
// UIWindow+SnapshotImage.h
// SensorsAnalyticsSDK
//
// Created by 杜晓星 on 2017/11/29.
// Copyright © 2017年 SensorsData. All rights reserved.
//

#import <UIKit/UIKit.h>

/// Category on UIWindow that exposes a snapshot helper for the SDK.
@interface UIWindow (SASnapshotImage)

/// Captures the window's current contents as an image.
/// @return the snapshot image, or nil (declared nullable) if it cannot
///         be produced.
- (nullable UIImage *)snapshotImage;

@end
aloknnikhil/folly
folly/experimental/QuotientMultiSet.h
<reponame>aloknnikhil/folly<gh_stars>1-10 /* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <deque> #include <utility> #include <folly/Portability.h> #include <folly/Range.h> #include <folly/experimental/Instructions.h> #include <folly/io/IOBuf.h> #include <folly/io/IOBufQueue.h> // A 128-bit integer type is needed for fast division. #define FOLLY_QUOTIENT_MULTI_SET_SUPPORTED FOLLY_HAVE_INT128_T #if FOLLY_QUOTIENT_MULTI_SET_SUPPORTED namespace folly { namespace qms_detail { using UInt64InverseType = __uint128_t; } // namespace qms_detail /** * A space-efficient static data structure to store a non-decreasing sequence of * b-bit integers. If the integers are uniformly distributed lookup is O(1)-time * and performs a single random memory lookup with high probability. * * Space for n keys is bounded by (5 + b - log(n / loadFactor)) / loadFactor * bits per element, which makes it particularly efficient for very dense * sets. Note that 1 bit is taken up by the user-provided block payloads, and 1 * depends on how close the table size is to a power of 2. Experimentally, * performance is good up to load factor 95%. * * Lookup returns a range of positions in the table. The intended use case is to * store hashes, as the first layer of a multi-layer hash table. 
If b is sized * to floor(log(n)) + k, the probability of a false positive (a non-empty range * is returned for a non-existent key) is approximately 2^-k, which makes it * competitive with a Bloom filter for low FP probabilities, with the additional * benefit that it also returns a range of positions to restrict the search in * subsequent layers. * * The data structure is inspired by the Rank-Select Quotient Filter * introduced in * * <NAME>, <NAME>, <NAME> and <NAME>, * A General-Purpose Counting Filter: Making Every Bit Count, SIGMOD, 2017 * * Besides being static, QuotientMultiSet differs from the data structure from * the paper in the following ways: * * - The table size can be arbitrary, rather than just powers-of-2. This can * waste up to a bit for each residual, but it prevents 2x overhead when the * desired table size is slightly larger than a power of 2. * * - Within each block all the holes are moved at the end. This enables * efficient iteration, and makes the returned positions a contiguous range * for each block, which allows to use them to index into a secondary data * structure. An arbitrary 64-bit payload can be attached to each block; for * example, this can be used to store the number of elements up to that block, * so that positions can be translated to the element rank. Alternatively, the * payload can be used to address blocks in the secondary data structure. * * - Correctness does not depend on the keys being uniformly distributed. * However, performance does, as for arbitrary keys the worst-case lookup time * can be linear. * * Implemented by Matt Ma based on a prototype by <NAME> and * <NAME>. * * Data layout: * ------------------------------------------------------------------------ * | Block | Block | Block | Block | ... 
| Block | * ------------------------------------------------------------------------ * / | * ------------------------------------------------------------------------ * | Payload | Occupieds | Offset | Runends | Remainders * 64 | * ------------------------------------------------------------------------ * * Each block contains 64 slots. Keys mapping to the same slot are stored * contiguously in a run. The occupieds and runends bitvectors are the * concatenation of the corresponding words in each block. * * - Occupieds bit indicates whether there is a key mapping to this quotient. * * - Offset stores the position of the runend of the first run in this block. * * - Runends bit indicates whether the slot is the end of some run. 1s in * occupieds and runends bits are in 1-1 correspondence: the i-th 1 in the * runends vector marks the run end of the i-th 1 in the occupieds. */ template <class Instructions = compression::instructions::Default> class QuotientMultiSet final { public: explicit QuotientMultiSet(StringPiece data); // Each block contains 64 elements. static constexpr size_t kBlockSize = 64; // Position range of given key. End is not included. Range can be empty if the // key is not found, in which case the values of begin and end are // unspecified. struct SlotRange { size_t begin = 0; size_t end = 0; explicit operator bool() const { DCHECK_LE(begin, end); return begin < end; } }; class Iterator; // Get the position range for the given key. SlotRange equalRange(uint64_t key) const; // Get payload of given block. uint64_t getBlockPayload(uint64_t blockIndex) const; friend class QuotientMultiSetBuilder; private: // Metadata to describe a quotient table. struct Metadata; // Block contains payload, occupieds, runends, offsets and 64 remainders. 
struct Block; using BlockPtr = std::unique_ptr<Block, decltype(free)*>; const Block* getBlock(size_t blockIndex) const { return Block::get(data_ + blockIndex * blockSize_); } FOLLY_ALWAYS_INLINE std::pair<uint64_t, const Block*> findRunend( uint64_t occupiedRank, uint64_t startPos) const; const Metadata* metadata_; const char* data_; // Total number of blocks. size_t numBlocks_; size_t numSlots_; // Number of bytes per block. size_t blockSize_; // Divisor for mapping from keys to slots. uint64_t divisor_; // fraction_ = 1 / divisor_. qms_detail::UInt64InverseType fraction_; // Number of key bits. size_t keyBits_; uint64_t maxKey_; // Number of remainder bits. size_t remainderBits_; uint64_t remainderMask_; }; template <class Instructions> class QuotientMultiSet<Instructions>::Iterator { public: explicit Iterator(QuotientMultiSet<Instructions>* qms); // Advance to the next key. bool next(); // Skip forward to the first key >= the given key. bool skipTo(uint64_t key); bool done() const { return pos_ == qms_->numSlots_; } // Return current key. uint64_t key() const { return key_; } // Return current position in quotient multiset. size_t pos() const { return pos_; } private: // Position the iterator at the end and return false. // Shortcut for use when implementing doNext, etc: return setEnd(); bool setEnd() { pos_ = qms_->numSlots_; return false; } // Move to next occupied. bool nextOccupied(); QuotientMultiSet<Instructions>* qms_; uint64_t key_; // State members for the quotient occupied position. // Block index of key_'s occupied slot. size_t occBlockIndex_; // Block offset of key_'s occupied slot. uint64_t occOffsetInBlock_; // Occupied words of the occupiedBlock_ after quotientBlockOffset_. uint64_t occWord_; // Block of the current occupied slot. const Block* occBlock_; // State member for the actual key position. // Position of the current key_. size_t pos_; }; /* * Class to build a QuotientMultiSet. 
* * The builder requires inserting elements in non-decreasing order. * Example usage: * QuotientMultiSetBuilder builder(...); * while (...) { * if (builder.insert(key)) { * builder.setBlockPayload(payload); * } * if (builder.numReadyBlocks() > N) { * buff = builder.flush(); * write(buff); * } * } * buff = builder.close(); * write(buff) */ class QuotientMultiSetBuilder final { public: QuotientMultiSetBuilder( size_t keyBits, size_t expectedElements, double loadFactor = kDefaultMaxLoadFactor); ~QuotientMultiSetBuilder(); using Metadata = QuotientMultiSet<>::Metadata; using Block = QuotientMultiSet<>::Block; // Keeps load factor <= 0.95. constexpr static double kDefaultMaxLoadFactor = 0.95; constexpr static size_t kBlockSize = QuotientMultiSet<>::kBlockSize; // Returns whether the key's slot is in a newly created block. // Only allows insert keys in nondecreasing order. bool insert(uint64_t key); // Set payload of the latest created block. // Can only be called immediately after an add() that returns true. void setBlockPayload(uint64_t payload); // Return all ready blocks till now. The ownership of these blocks will be // transferred to the caller. void flush(IOBufQueue& buff); // Return all remaining blocks since last flush call and the final quotient // table metadata. The ownership of these blocks will be transferred to the // caller. void close(folly::IOBufQueue& buff); size_t numReadyBlocks() { return readyBlocks_; } private: using BlockPtr = QuotientMultiSet<>::BlockPtr; struct BlockWithState { BlockWithState(BlockPtr ptr, size_t idx) : block(std::move(ptr)), index(idx), ready(false) {} BlockPtr block; size_t index; bool ready; }; // Allocate space for blocks until limitIndex (included). bool maybeAllocateBlocks(size_t limitIndex); // Close the previous run. void closePreviousRun(); // Move ready blocks to given IOBufQueue. void moveReadyBlocks(IOBufQueue& buff); // Get block for given block index. 
BlockWithState& getBlock(uint64_t blockIndex) { CHECK_GE(blockIndex, blocks_.front().index); return blocks_[blockIndex - blocks_.front().index]; } // Number of key bits. const size_t keyBits_; const uint64_t maxKey_; // Total number of blocks. size_t numBlocks_ = 0; // Number of bytes per block. size_t blockSize_ = 0; // Divisor for mapping from keys to slots. uint64_t divisor_; // fraction_ = 1 / divisor_. qms_detail::UInt64InverseType fraction_; // Number of remainder bits. uint64_t remainderBits_; size_t numKeys_ = 0; size_t numRuns_ = 0; uint64_t prevKey_ = 0; // Next slot to be used. size_t nextSlot_ = 0; // The actual start of previous run. size_t prevRunStart_ = 0; // The quotient of previous run. size_t prevOccupiedQuotient_ = 0; // Number of ready blocks in deque. size_t readyBlocks_ = 0; // Contains blocks since last flush call. std::deque<BlockWithState> blocks_; IOBufQueue buff_; }; } // namespace folly #include <folly/experimental/QuotientMultiSet-inl.h> #endif // FOLLY_QUOTIENT_MULTI_SET_SUPPORTED
aloknnikhil/folly
folly/python/iobuf.h
<gh_stars>1-10 /* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <Python.h> #include <folly/Executor.h> #include <folly/Function.h> #include <folly/ScopeGuard.h> #include <folly/io/IOBuf.h> #if PY_VERSION_HEX < 0x03040000 #define PyGILState_Check() (true) #endif namespace thrift { namespace py3 { struct PyBufferData { folly::Executor* executor; PyObject* py_object; }; std::unique_ptr<folly::IOBuf> iobuf_from_python( folly::Executor* executor, PyObject* py_object, void* buf, uint64_t length) { Py_INCREF(py_object); auto* userData = new PyBufferData(); userData->executor = executor; userData->py_object = py_object; return folly::IOBuf::takeOwnership( buf, length, [](void* buf, void* userData) { auto* py_data = (PyBufferData*)userData; auto* py_object = py_data->py_object; if (PyGILState_Check()) { Py_DECREF(py_object); } else if (py_data->executor) { py_data->executor->add( [py_object]() mutable { Py_DECREF(py_object); }); } else { /* This is the last ditch effort. We don't have the GIL and we have no asyncio executor. In this case we will attempt to use the pendingCall interface to cpython. This is likely to fail under heavy load due to lock contention. 
*/ int ret = Py_AddPendingCall( [](void* userData) { Py_DECREF((PyObject*)userData); return 0; }, (void*)py_object); if (ret != 0) { LOG(ERROR) << "an IOBuf was created from a non-asyncio thread, and all attempts " << "to free the underlying buffer has failed, memory has leaked!"; } else { LOG(WARNING) << "an IOBuf was created from a non-asyncio thread, and we successful " << "handled cleanup but this is not a reliable interface, it will fail " << "under heavy load, do not create IOBufs from non-asyncio threads. "; } } delete py_data; }, userData); } bool check_iobuf_equal(const folly::IOBuf* a, const folly::IOBuf* b) { return folly::IOBufEqualTo{}(a, b); } bool check_iobuf_less(const folly::IOBuf* a, const folly::IOBuf* b) { return folly::IOBufLess{}(a, b); } } // namespace py3 } // namespace thrift
aloknnikhil/folly
folly/experimental/observer/Observer-inl.h
<filename>folly/experimental/observer/Observer-inl.h /* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <folly/experimental/observer/detail/ObserverManager.h> namespace folly { namespace observer { template <typename T> Snapshot<T> Observer<T>::getSnapshot() const { auto data = core_->getData(); return Snapshot<T>( *core_, std::static_pointer_cast<const T>(std::move(data.data)), data.version); } template <typename T> Observer<T>::Observer(observer_detail::Core::Ptr core) : core_(std::move(core)) {} template <typename F> Observer<observer_detail::ResultOfUnwrapSharedPtr<F>> makeObserver( F&& creator) { auto core = observer_detail::Core::create( [creator = std::forward<F>(creator)]() mutable { return std::static_pointer_cast<const void>(creator()); }); observer_detail::ObserverManager::initCore(core); return Observer<observer_detail::ResultOfUnwrapSharedPtr<F>>(core); } template <typename F> Observer<observer_detail::ResultOf<F>> makeObserver(F&& creator) { return makeObserver([creator = std::forward<F>(creator)]() mutable { return std::make_shared<observer_detail::ResultOf<F>>(creator()); }); } template <typename T> TLObserver<T>::TLObserver(Observer<T> observer) : observer_(observer), snapshot_([&] { return new Snapshot<T>(observer_.getSnapshot()); }) {} template <typename T> TLObserver<T>::TLObserver(const TLObserver<T>& other) : TLObserver(other.observer_) {} template <typename T> const Snapshot<T>& 
TLObserver<T>::getSnapshotRef() const { auto& snapshot = *snapshot_; if (observer_.needRefresh(snapshot) || observer_detail::ObserverManager::inManagerThread()) { snapshot = observer_.getSnapshot(); } return snapshot; } struct CallbackHandle::Context { Optional<Observer<folly::Unit>> observer; Synchronized<bool> canceled{false}; }; inline CallbackHandle::CallbackHandle() {} template <typename T> CallbackHandle::CallbackHandle( Observer<T> observer, folly::Function<void(Snapshot<T>)> callback) { context_ = std::make_shared<Context>(); context_->observer = makeObserver([observer = std::move(observer), callback = std::move(callback), context = context_]() mutable { auto rCanceled = context->canceled.rlock(); if (*rCanceled) { return folly::unit; } callback(*observer); return folly::unit; }); } inline CallbackHandle::~CallbackHandle() { cancel(); } inline void CallbackHandle::cancel() { if (!context_) { return; } context_->observer.reset(); context_->canceled = true; context_.reset(); } template <typename T> CallbackHandle Observer<T>::addCallback( folly::Function<void(Snapshot<T>)> callback) const { return CallbackHandle(*this, std::move(callback)); } } // namespace observer } // namespace folly
aloknnikhil/folly
folly/container/detail/F14Policy.h
<filename>folly/container/detail/F14Policy.h /* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <memory> #include <type_traits> #include <utility> #include <folly/Memory.h> #include <folly/Portability.h> #include <folly/Traits.h> #include <folly/Unit.h> #include <folly/container/HeterogeneousAccess.h> #include <folly/container/detail/F14Table.h> #include <folly/hash/Hash.h> #include <folly/lang/Align.h> #include <folly/lang/SafeAssert.h> #include <folly/memory/Malloc.h> #if FOLLY_F14_VECTOR_INTRINSICS_AVAILABLE namespace folly { namespace f14 { namespace detail { template <typename Ptr> using NonConstPtr = typename std::pointer_traits<Ptr>::template rebind< std::remove_const_t<typename std::pointer_traits<Ptr>::element_type>>; template <typename KeyType, typename MappedType> using MapValueType = std::pair<KeyType const, MappedType>; template <typename KeyType, typename MappedTypeOrVoid> using SetOrMapValueType = std::conditional_t< std::is_same<MappedTypeOrVoid, void>::value, KeyType, MapValueType<KeyType, MappedTypeOrVoid>>; // Used to enable EBO for Hasher, KeyEqual, and Alloc. std::tuple of // all empty objects is empty in libstdc++ but not libc++. template < char Tag, typename T, bool Inherit = std::is_empty<T>::value && !std::is_final<T>::value> struct ObjectHolder { T value_; template <typename... Args> ObjectHolder(Args&&... 
args) : value_{std::forward<Args>(args)...} {} T& operator*() { return value_; } T const& operator*() const { return value_; } }; template <char Tag, typename T> struct ObjectHolder<Tag, T, true> : private T { template <typename... Args> ObjectHolder(Args&&... args) : T{std::forward<Args>(args)...} {} T& operator*() { return *this; } T const& operator*() const { return *this; } }; // Policy provides the functionality of hasher, key_equal, and // allocator_type. In addition, it can add indirection to the values // contained in the base table by defining a non-trivial value() method. // // To facilitate stateful implementations it is guaranteed that there // will be a 1:1 relationship between BaseTable and Policy instance: // policies will only be copied when their owning table is copied, and // they will only be moved when their owning table is moved. // // Key equality will have the user-supplied search key as its first // argument and the table contents as its second. Heterogeneous lookup // should be handled on the first argument. // // Item is the data stored inline in the hash table's chunks. The policy // controls how this is mapped to the corresponding Value. // // The policies defined in this file work for either set or map types. // Most of the functionality is identical. A few methods detect the // collection type by checking to see if MappedType is void, and then use // SFINAE to select the appropriate implementation. 
template < typename KeyType, typename MappedTypeOrVoid, typename HasherOrVoid, typename KeyEqualOrVoid, typename AllocOrVoid, typename ItemType> struct BasePolicy : private ObjectHolder< 'H', Defaulted<HasherOrVoid, DefaultHasher<KeyType>>>, private ObjectHolder< 'E', Defaulted<KeyEqualOrVoid, DefaultKeyEqual<KeyType>>>, private ObjectHolder< 'A', Defaulted< AllocOrVoid, DefaultAlloc<SetOrMapValueType<KeyType, MappedTypeOrVoid>>>> { //////// user-supplied types using Key = KeyType; using Mapped = MappedTypeOrVoid; using Value = SetOrMapValueType<Key, Mapped>; using Item = ItemType; using Hasher = Defaulted<HasherOrVoid, DefaultHasher<Key>>; using KeyEqual = Defaulted<KeyEqualOrVoid, DefaultKeyEqual<Key>>; using Alloc = Defaulted<AllocOrVoid, DefaultAlloc<Value>>; using AllocTraits = std::allocator_traits<Alloc>; using ByteAlloc = typename AllocTraits::template rebind_alloc<uint8_t>; using ByteAllocTraits = typename std::allocator_traits<ByteAlloc>; using BytePtr = typename ByteAllocTraits::pointer; //////// info about user-supplied types static_assert( std::is_same<typename AllocTraits::value_type, Value>::value, "wrong allocator value_type"); private: using HasherHolder = ObjectHolder<'H', Hasher>; using KeyEqualHolder = ObjectHolder<'E', KeyEqual>; using AllocHolder = ObjectHolder<'A', Alloc>; // emulate c++17's std::allocator_traits<A>::is_always_equal template <typename A, typename = void> struct AllocIsAlwaysEqual : std::is_empty<A> {}; template <typename A> struct AllocIsAlwaysEqual<A, typename A::is_always_equal> : A::is_always_equal {}; public: static constexpr bool kAllocIsAlwaysEqual = AllocIsAlwaysEqual<Alloc>::value; static constexpr bool kDefaultConstructIsNoexcept = std::is_nothrow_default_constructible<Hasher>::value && std::is_nothrow_default_constructible<KeyEqual>::value && std::is_nothrow_default_constructible<Alloc>::value; static constexpr bool kSwapIsNoexcept = kAllocIsAlwaysEqual && IsNothrowSwappable<Hasher>{} && 
IsNothrowSwappable<KeyEqual>{}; static constexpr bool isAvalanchingHasher() { return IsAvalanchingHasher<Hasher, Key>::value; } //////// internal types and constants using InternalSizeType = std::size_t; // if false, F14Table will be smaller but F14Table::begin() won't work static constexpr bool kEnableItemIteration = true; using Chunk = F14Chunk<Item>; using ChunkPtr = typename std::pointer_traits< typename AllocTraits::pointer>::template rebind<Chunk>; using ItemIter = F14ItemIter<ChunkPtr>; static constexpr bool kIsMap = !std::is_same<Key, Value>::value; static_assert( kIsMap == !std::is_void<MappedTypeOrVoid>::value, "Assumption for the kIsMap check violated."); using MappedOrBool = std::conditional_t<kIsMap, Mapped, bool>; // if true, bucket_count() after reserve(n) will be as close as possible // to n for multi-chunk tables static constexpr bool kContinuousCapacity = false; //////// methods BasePolicy(Hasher const& hasher, KeyEqual const& keyEqual, Alloc const& alloc) : HasherHolder{hasher}, KeyEqualHolder{keyEqual}, AllocHolder{alloc} {} BasePolicy(BasePolicy const& rhs) : HasherHolder{rhs.hasher()}, KeyEqualHolder{rhs.keyEqual()}, AllocHolder{ AllocTraits::select_on_container_copy_construction(rhs.alloc())} {} BasePolicy(BasePolicy const& rhs, Alloc const& alloc) : HasherHolder{rhs.hasher()}, KeyEqualHolder{rhs.keyEqual()}, AllocHolder{alloc} {} BasePolicy(BasePolicy&& rhs) noexcept : HasherHolder{std::move(rhs.hasher())}, KeyEqualHolder{std::move(rhs.keyEqual())}, AllocHolder{std::move(rhs.alloc())} {} BasePolicy(BasePolicy&& rhs, Alloc const& alloc) noexcept : HasherHolder{std::move(rhs.hasher())}, KeyEqualHolder{std::move(rhs.keyEqual())}, AllocHolder{alloc} {} private: template <typename Src> void maybeAssignAlloc(std::true_type, Src&& src) { alloc() = std::forward<Src>(src); } template <typename Src> void maybeAssignAlloc(std::false_type, Src&&) {} template <typename A> void maybeSwapAlloc(std::true_type, A& rhs) { using std::swap; swap(alloc(), rhs); 
} template <typename A> void maybeSwapAlloc(std::false_type, A&) {} public: BasePolicy& operator=(BasePolicy const& rhs) { hasher() = rhs.hasher(); keyEqual() = rhs.keyEqual(); maybeAssignAlloc( typename AllocTraits::propagate_on_container_copy_assignment{}, rhs.alloc()); return *this; } BasePolicy& operator=(BasePolicy&& rhs) noexcept { hasher() = std::move(rhs.hasher()); keyEqual() = std::move(rhs.keyEqual()); maybeAssignAlloc( typename AllocTraits::propagate_on_container_move_assignment{}, std::move(rhs.alloc())); return *this; } void swapBasePolicy(BasePolicy& rhs) { using std::swap; swap(hasher(), rhs.hasher()); swap(keyEqual(), rhs.keyEqual()); maybeSwapAlloc( typename AllocTraits::propagate_on_container_swap{}, rhs.alloc()); } Hasher& hasher() { return *static_cast<HasherHolder&>(*this); } Hasher const& hasher() const { return *static_cast<HasherHolder const&>(*this); } KeyEqual& keyEqual() { return *static_cast<KeyEqualHolder&>(*this); } KeyEqual const& keyEqual() const { return *static_cast<KeyEqualHolder const&>(*this); } Alloc& alloc() { return *static_cast<AllocHolder&>(*this); } Alloc const& alloc() const { return *static_cast<AllocHolder const&>(*this); } template <typename K> std::size_t computeKeyHash(K const& key) const { static_assert( isAvalanchingHasher() == IsAvalanchingHasher<Hasher, K>::value, ""); static_assert( !isAvalanchingHasher() || sizeof(decltype(hasher()(key))) >= sizeof(std::size_t), "hasher is not avalanching if it doesn't return enough bits"); return hasher()(key); } Key const& keyForValue(Key const& v) const { return v; } Key const& keyForValue(std::pair<Key const, MappedOrBool> const& p) const { return p.first; } Key const& keyForValue(std::pair<Key&&, MappedOrBool&&> const& p) const { return p.first; } // map's choice of pair<K const, T> as value_type is unfortunate, // because it means we either need a proxy iterator, a pointless key // copy when moving items during rehash, or some sort of UB hack. 
// // This code implements the hack. Use moveValue(v) instead of // std::move(v) as the source of a move construction. enable_if_t is // used so that this works for maps while being a no-op for sets. template <typename Dummy = int> static std::pair<Key&&, MappedOrBool&&> moveValue( std::pair<Key const, MappedOrBool>& value, std::enable_if_t<kIsMap, Dummy> = 0) { return {std::move(const_cast<Key&>(value.first)), std::move(value.second)}; } template <typename Dummy = int> static Value&& moveValue(Value& value, std::enable_if_t<!kIsMap, Dummy> = 0) { return std::move(value); } template <typename P> bool beforeBuild(std::size_t /*size*/, std::size_t /*capacity*/, P&& /*rhs*/) { return false; } template <typename P> void afterBuild( bool /*undoState*/, bool /*success*/, std::size_t /*size*/, std::size_t /*capacity*/, P&& /*rhs*/) {} std::size_t alignedAllocSize(std::size_t n) const { if (kRequiredVectorAlignment <= alignof(max_align_t) || std::is_same<ByteAlloc, std::allocator<uint8_t>>::value) { return n; } else { return n + kRequiredVectorAlignment; } } bool beforeRehash( std::size_t /*size*/, std::size_t /*oldCapacity*/, std::size_t /*newCapacity*/, std::size_t chunkAllocSize, BytePtr& outChunkAllocation) { outChunkAllocation = allocateOverAligned<ByteAlloc, kRequiredVectorAlignment>( ByteAlloc{alloc()}, chunkAllocSize); return false; } void afterRehash( bool /*undoState*/, bool /*success*/, std::size_t /*size*/, std::size_t /*oldCapacity*/, std::size_t /*newCapacity*/, BytePtr chunkAllocation, std::size_t chunkAllocSize) { // on success, this will be the old allocation, on failure the new one if (chunkAllocation != nullptr) { deallocateOverAligned<ByteAlloc, kRequiredVectorAlignment>( ByteAlloc{alloc()}, chunkAllocation, chunkAllocSize); } } void beforeClear(std::size_t /*size*/, std::size_t /*capacity*/) {} void afterClear(std::size_t /*size*/, std::size_t /*capacity*/) {} void beforeReset(std::size_t /*size*/, std::size_t /*capacity*/) {} void afterReset( 
std::size_t /*size*/, std::size_t /*capacity*/, BytePtr chunkAllocation, std::size_t chunkAllocSize) { deallocateOverAligned<ByteAlloc, kRequiredVectorAlignment>( ByteAlloc{alloc()}, chunkAllocation, chunkAllocSize); } void prefetchValue(Item const&) const { // Subclass should disable with prefetchBeforeRehash(), // prefetchBeforeCopy(), and prefetchBeforeDestroy(). if they don't // override this method, because neither gcc nor clang can figure // out that DenseMaskIter with an empty body can be elided. FOLLY_SAFE_DCHECK(false, "should be disabled"); } void afterDestroyWithoutDeallocate(Value* addr, std::size_t n) { if (kIsLibrarySanitizeAddress) { memset(static_cast<void*>(addr), 0x66, sizeof(Value) * n); } } }; // BaseIter is a convenience for concrete set and map implementations template <typename ValuePtr, typename Item> class BaseIter : public std::iterator< std::forward_iterator_tag, std::remove_const_t< typename std::pointer_traits<ValuePtr>::element_type>, std::ptrdiff_t, ValuePtr, decltype(*std::declval<ValuePtr>())> { protected: using Chunk = F14Chunk<Item>; using ChunkPtr = typename std::pointer_traits<ValuePtr>::template rebind<Chunk>; using ItemIter = F14ItemIter<ChunkPtr>; using ValueConstPtr = typename std::pointer_traits<ValuePtr>::template rebind< std::add_const_t<typename std::pointer_traits<ValuePtr>::element_type>>; }; //////// ValueContainer template < typename Key, typename Mapped, typename HasherOrVoid, typename KeyEqualOrVoid, typename AllocOrVoid> class ValueContainerPolicy; template <typename ValuePtr> using ValueContainerIteratorBase = BaseIter< ValuePtr, std::remove_const_t<typename std::pointer_traits<ValuePtr>::element_type>>; template <typename ValuePtr> class ValueContainerIterator : public ValueContainerIteratorBase<ValuePtr> { using Super = ValueContainerIteratorBase<ValuePtr>; using ItemIter = typename Super::ItemIter; using ValueConstPtr = typename Super::ValueConstPtr; public: using pointer = typename Super::pointer; using 
reference = typename Super::reference; using value_type = typename Super::value_type; ValueContainerIterator() = default; ValueContainerIterator(ValueContainerIterator const&) = default; ValueContainerIterator(ValueContainerIterator&&) = default; ValueContainerIterator& operator=(ValueContainerIterator const&) = default; ValueContainerIterator& operator=(ValueContainerIterator&&) = default; ~ValueContainerIterator() = default; /*implicit*/ operator ValueContainerIterator<ValueConstPtr>() const { return ValueContainerIterator<ValueConstPtr>{underlying_}; } reference operator*() const { return underlying_.item(); } pointer operator->() const { return std::pointer_traits<pointer>::pointer_to(**this); } ValueContainerIterator& operator++() { underlying_.advance(); return *this; } ValueContainerIterator operator++(int) { auto cur = *this; ++*this; return cur; } bool operator==(ValueContainerIterator<ValueConstPtr> const& rhs) const { return underlying_ == rhs.underlying_; } bool operator!=(ValueContainerIterator<ValueConstPtr> const& rhs) const { return !(*this == rhs); } private: ItemIter underlying_; explicit ValueContainerIterator(ItemIter const& underlying) : underlying_{underlying} {} template <typename K, typename M, typename H, typename E, typename A> friend class ValueContainerPolicy; template <typename P> friend class ValueContainerIterator; }; template < typename Key, typename MappedTypeOrVoid, typename HasherOrVoid, typename KeyEqualOrVoid, typename AllocOrVoid> class ValueContainerPolicy : public BasePolicy< Key, MappedTypeOrVoid, HasherOrVoid, KeyEqualOrVoid, AllocOrVoid, SetOrMapValueType<Key, MappedTypeOrVoid>> { public: using Super = BasePolicy< Key, MappedTypeOrVoid, HasherOrVoid, KeyEqualOrVoid, AllocOrVoid, SetOrMapValueType<Key, MappedTypeOrVoid>>; using Alloc = typename Super::Alloc; using AllocTraits = typename Super::AllocTraits; using Item = typename Super::Item; using ItemIter = typename Super::ItemIter; using Value = typename Super::Value; 
private: using ByteAlloc = typename Super::ByteAlloc; using Super::kIsMap; public: using ConstIter = ValueContainerIterator<typename AllocTraits::const_pointer>; using Iter = std::conditional_t< kIsMap, ValueContainerIterator<typename AllocTraits::pointer>, ConstIter>; //////// F14Table policy static constexpr bool prefetchBeforeRehash() { return false; } static constexpr bool prefetchBeforeCopy() { return false; } static constexpr bool prefetchBeforeDestroy() { return false; } static constexpr bool destroyItemOnClear() { return !std::is_trivially_destructible<Item>::value || !AllocatorHasDefaultObjectDestroy<Alloc, Item>::value; } // inherit constructors using Super::Super; void swapPolicy(ValueContainerPolicy& rhs) { this->swapBasePolicy(rhs); } using Super::keyForValue; static_assert( std::is_same<Item, Value>::value, "Item and Value should be the same type for ValueContainerPolicy."); std::size_t computeItemHash(Item const& item) const { return this->computeKeyHash(keyForValue(item)); } template <typename K> bool keyMatchesItem(K const& key, Item const& item) const { return this->keyEqual()(key, keyForValue(item)); } Value const& buildArgForItem(Item const& item) const& { return item; } // buildArgForItem(Item&)&& is used when moving between unequal allocators decltype(auto) buildArgForItem(Item& item) && { return Super::moveValue(item); } Value const& valueAtItem(Item const& item) const { return item; } Value&& valueAtItemForExtract(Item& item) { return std::move(item); } template <typename Table, typename... Args> void constructValueAtItem(Table&&, Item* itemAddr, Args&&... args) { Alloc& a = this->alloc(); // GCC < 6 doesn't use the fact that itemAddr came from a reference // to avoid a null-check in the placement new. folly::assume-ing it // here gets rid of that branch. The branch is very predictable, // but spoils some further optimizations. All clang versions that // compile folly seem to be okay. 
// // TODO(T31574848): clean up assume-s used to optimize placement new assume(itemAddr != nullptr); AllocTraits::construct(a, itemAddr, std::forward<Args>(args)...); } template <typename T> std::enable_if_t<std::is_nothrow_move_constructible<T>::value> complainUnlessNothrowMove() {} template <typename T> [[deprecated( "use F14NodeMap/Set or mark key and mapped type move constructor nothrow")]] std:: enable_if_t<!std::is_nothrow_move_constructible<T>::value> complainUnlessNothrowMove() {} void moveItemDuringRehash(Item* itemAddr, Item& src) { complainUnlessNothrowMove<Key>(); complainUnlessNothrowMove<lift_unit_t<MappedTypeOrVoid>>(); constructValueAtItem(0, itemAddr, Super::moveValue(src)); if (destroyItemOnClear()) { if (kIsMap) { // Laundering in the standard is only described as a solution // for changes to const fields due to the creation of a new // object lifetime (destroy and then placement new in the same // location), but it seems highly likely that it will also cause // the compiler to drop such assumptions that are violated due // to our UB const_cast in moveValue. 
destroyItem(*launder(std::addressof(src))); } else { destroyItem(src); } } } void destroyItem(Item& item) { Alloc& a = this->alloc(); auto ptr = std::addressof(item); AllocTraits::destroy(a, ptr); this->afterDestroyWithoutDeallocate(ptr, 1); } template <typename V> void visitPolicyAllocationClasses( std::size_t chunkAllocSize, std::size_t /*size*/, std::size_t /*capacity*/, V&& visitor) const { if (chunkAllocSize > 0) { visitor( allocationBytesForOverAligned<ByteAlloc, kRequiredVectorAlignment>( chunkAllocSize), 1); } } //////// F14BasicMap/Set policy FOLLY_ALWAYS_INLINE Iter makeIter(ItemIter const& underlying) const { return Iter{underlying}; } ConstIter makeConstIter(ItemIter const& underlying) const { return ConstIter{underlying}; } ItemIter const& unwrapIter(ConstIter const& iter) const { return iter.underlying_; } }; //////// NodeContainer template < typename Key, typename Mapped, typename HasherOrVoid, typename KeyEqualOrVoid, typename AllocOrVoid> class NodeContainerPolicy; template <typename ValuePtr> class NodeContainerIterator : public BaseIter<ValuePtr, NonConstPtr<ValuePtr>> { using Super = BaseIter<ValuePtr, NonConstPtr<ValuePtr>>; using ItemIter = typename Super::ItemIter; using ValueConstPtr = typename Super::ValueConstPtr; public: using pointer = typename Super::pointer; using reference = typename Super::reference; using value_type = typename Super::value_type; NodeContainerIterator() = default; NodeContainerIterator(NodeContainerIterator const&) = default; NodeContainerIterator(NodeContainerIterator&&) = default; NodeContainerIterator& operator=(NodeContainerIterator const&) = default; NodeContainerIterator& operator=(NodeContainerIterator&&) = default; ~NodeContainerIterator() = default; /*implicit*/ operator NodeContainerIterator<ValueConstPtr>() const { return NodeContainerIterator<ValueConstPtr>{underlying_}; } reference operator*() const { return *underlying_.item(); } pointer operator->() const { return 
// NOTE(review): the first statement below is the tail of a
// NodeContainerIterator member function whose opening lines precede this
// chunk of the file; it is preserved unchanged.
std::pointer_traits<pointer>::pointer_to(**this); }

  // Pre-increment: advance the underlying table iterator to the next
  // occupied item.
  NodeContainerIterator& operator++() {
    underlying_.advance();
    return *this;
  }

  // Post-increment: returns a copy of the pre-advance position.
  NodeContainerIterator operator++(int) {
    auto cur = *this;
    ++*this;
    return cur;
  }

  // Comparison accepts the const-pointer flavor so mutable and const
  // iterators are mutually comparable.
  bool operator==(NodeContainerIterator<ValueConstPtr> const& rhs) const {
    return underlying_ == rhs.underlying_;
  }
  bool operator!=(NodeContainerIterator<ValueConstPtr> const& rhs) const {
    return !(*this == rhs);
  }

 private:
  // Position inside the F14 table.
  ItemIter underlying_;

  // Only the owning policy may construct a positioned iterator.
  explicit NodeContainerIterator(ItemIter const& underlying)
      : underlying_{underlying} {}

  template <typename K, typename M, typename H, typename E, typename A>
  friend class NodeContainerPolicy;
  template <typename P>
  friend class NodeContainerIterator;
};

// Policy for node-based F14 containers: each value lives in its own
// allocator-provided node and the table's Item is a (possibly fancy)
// pointer to that node, so values never move during rehash.
template <
    typename Key,
    typename MappedTypeOrVoid,
    typename HasherOrVoid,
    typename KeyEqualOrVoid,
    typename AllocOrVoid>
class NodeContainerPolicy
    : public BasePolicy<
          Key,
          MappedTypeOrVoid,
          HasherOrVoid,
          KeyEqualOrVoid,
          AllocOrVoid,
          typename std::allocator_traits<Defaulted<
              AllocOrVoid,
              DefaultAlloc<std::conditional_t<
                  std::is_void<MappedTypeOrVoid>::value,
                  Key,
                  MapValueType<Key, MappedTypeOrVoid>>>>>::pointer> {
 public:
  using Super = BasePolicy<
      Key,
      MappedTypeOrVoid,
      HasherOrVoid,
      KeyEqualOrVoid,
      AllocOrVoid,
      typename std::allocator_traits<Defaulted<
          AllocOrVoid,
          DefaultAlloc<std::conditional_t<
              std::is_void<MappedTypeOrVoid>::value,
              Key,
              MapValueType<Key, MappedTypeOrVoid>>>>>::pointer>;
  using Alloc = typename Super::Alloc;
  using AllocTraits = typename Super::AllocTraits;
  using Item = typename Super::Item;
  using ItemIter = typename Super::ItemIter;
  using Value = typename Super::Value;

 private:
  using ByteAlloc = typename Super::ByteAlloc;

  using Super::kIsMap;

 public:
  using ConstIter = NodeContainerIterator<typename AllocTraits::const_pointer>;
  // Sets only ever hand out const iterators; maps also get a mutable one.
  using Iter = std::conditional_t<
      kIsMap,
      NodeContainerIterator<typename AllocTraits::pointer>,
      ConstIter>;

  //////// F14Table policy

  // Items are pointers, so prefetching the pointed-to node hides the
  // dependent load while rehashing or copying.
  static constexpr bool prefetchBeforeRehash() { return true; }

  static constexpr bool prefetchBeforeCopy() { return true; }

  // Prefetch for destruction only pays off when the value's destructor
  // actually has to run.
  static constexpr bool prefetchBeforeDestroy() {
    return !std::is_trivially_destructible<Value>::value;
  }

  static constexpr bool destroyItemOnClear() { return true; }

  // inherit constructors
  using Super::Super;

  void swapPolicy(NodeContainerPolicy& rhs) { this->swapBasePolicy(rhs); }

  using Super::keyForValue;

  // An item's hash is the hash of the key stored in its node.
  std::size_t computeItemHash(Item const& item) const {
    return this->computeKeyHash(keyForValue(*item));
  }

  template <typename K>
  bool keyMatchesItem(K const& key, Item const& item) const {
    return this->keyEqual()(key, keyForValue(*item));
  }

  Value const& buildArgForItem(Item const& item) const& { return *item; }

  // buildArgForItem(Item&)&& is used when moving between unequal allocators
  decltype(auto) buildArgForItem(Item& item) && {
    return Super::moveValue(*item);
  }

  Value const& valueAtItem(Item const& item) const { return *item; }

  Value&& valueAtItemForExtract(Item& item) { return std::move(*item); }

  // Allocates a node, records its pointer in the item slot, then
  // constructs the value in the node.  The scope guard deallocates the
  // node if value construction throws, so failure leaks nothing.
  template <typename Table, typename... Args>
  void constructValueAtItem(Table&&, Item* itemAddr, Args&&... args) {
    Alloc& a = this->alloc();
    // TODO(T31574848): clean up assume-s used to optimize placement new
    assume(itemAddr != nullptr);
    new (itemAddr) Item{AllocTraits::allocate(a, 1)};
    auto p = std::addressof(**itemAddr);
    // TODO(T31574848): clean up assume-s used to optimize placement new
    assume(p != nullptr);
    auto rollback = makeGuard([&] { AllocTraits::deallocate(a, p, 1); });
    AllocTraits::construct(a, p, std::forward<Args>(args)...);
    rollback.dismiss();
  }

  void moveItemDuringRehash(Item* itemAddr, Item& src) {
    // This is basically *itemAddr = src; src = nullptr, but allowing
    // for fancy pointers.
    // TODO(T31574848): clean up assume-s used to optimize placement new
    assume(itemAddr != nullptr);
    new (itemAddr) Item{std::move(src)};
    src = nullptr;
    src.~Item();
  }

  void prefetchValue(Item const& item) const {
    prefetchAddr(std::addressof(*item));
  }

  // Destroys the pointed-to value and frees the node, then ends the
  // lifetime of the item itself (a fancy pointer type may have a
  // non-trivial destructor, so ~Item() must run even when null).
  void destroyItem(Item& item) {
    if (item != nullptr) {
      Alloc& a = this->alloc();
      AllocTraits::destroy(a, std::addressof(*item));
      AllocTraits::deallocate(a, item, 1);
    }
    item.~Item();
  }

  // Reports allocation classes: one over-aligned chunk block plus one
  // node-sized allocation per stored element.
  template <typename V>
  void visitPolicyAllocationClasses(
      std::size_t chunkAllocSize,
      std::size_t size,
      std::size_t /*capacity*/,
      V&& visitor) const {
    if (chunkAllocSize > 0) {
      visitor(
          allocationBytesForOverAligned<ByteAlloc, kRequiredVectorAlignment>(
              chunkAllocSize),
          1);
    }
    if (size > 0) {
      visitor(sizeof(Value), size);
    }
  }

  //////// F14BasicMap/Set policy

  FOLLY_ALWAYS_INLINE Iter makeIter(ItemIter const& underlying) const {
    return Iter{underlying};
  }
  ConstIter makeConstIter(ItemIter const& underlying) const {
    return Iter{underlying};
  }
  ItemIter const& unwrapIter(ConstIter const& iter) const {
    return iter.underlying_;
  }
};

//////// VectorContainer

template <
    typename Key,
    typename MappedTypeOrVoid,
    typename HasherOrVoid,
    typename KeyEqualOrVoid,
    typename AllocOrVoid,
    typename EligibleForPerturbedInsertionOrder>
class VectorContainerPolicy;

// Iterator for vector-based F14 containers: walks the policy's
// contiguous value array rather than the hash table itself.
template <typename ValuePtr>
class VectorContainerIterator : public BaseIter<ValuePtr, uint32_t> {
  using Super = BaseIter<ValuePtr, uint32_t>;
  using ValueConstPtr = typename Super::ValueConstPtr;

 public:
  using pointer = typename Super::pointer;
  using reference = typename Super::reference;
  using value_type = typename Super::value_type;

  VectorContainerIterator() = default;
  VectorContainerIterator(VectorContainerIterator const&) = default;
  VectorContainerIterator(VectorContainerIterator&&) = default;
  VectorContainerIterator& operator=(VectorContainerIterator const&) = default;
  VectorContainerIterator& operator=(VectorContainerIterator&&) = default;
  ~VectorContainerIterator() = default;

  // Implicit conversion from the mutable to the const iterator flavor;
  // the body of this operator continues on the following original line.
  /*implicit*/ operator
VectorContainerIterator<ValueConstPtr>() const {
    return VectorContainerIterator<ValueConstPtr>{current_, lowest_};
  }

  reference operator*() const { return *current_; }

  pointer operator->() const { return current_; }

  // Iteration runs from the highest-indexed (most recently inserted)
  // value down toward lowest_; stepping past lowest_ yields the
  // nullptr end position.
  VectorContainerIterator& operator++() {
    if (UNLIKELY(current_ == lowest_)) {
      current_ = nullptr;
    } else {
      --current_;
    }
    return *this;
  }

  VectorContainerIterator operator++(int) {
    auto cur = *this;
    ++*this;
    return cur;
  }

  bool operator==(VectorContainerIterator<ValueConstPtr> const& rhs) const {
    return current_ == rhs.current_;
  }
  bool operator!=(VectorContainerIterator<ValueConstPtr> const& rhs) const {
    return !(*this == rhs);
  }

 private:
  // Current value slot; nullptr means end().
  ValuePtr current_;
  // First slot of the value array; the stopping point for iteration.
  ValuePtr lowest_;

  explicit VectorContainerIterator(ValuePtr current, ValuePtr lowest)
      : current_(current), lowest_(lowest) {}

  // Index of the current value within the array.
  std::size_t index() const { return current_ - lowest_; }

  template <
      typename K,
      typename M,
      typename H,
      typename E,
      typename A,
      typename P>
  friend class VectorContainerPolicy;
  template <typename P>
  friend class VectorContainerIterator;
};

// Search key used internally to find an item by its value-array index
// instead of by its key.
struct VectorContainerIndexSearch {
  uint32_t index_;
};

// Policy for F14VectorMap/F14VectorSet: values are stored contiguously
// in a separate array and the table's Item is a 32-bit index into it.
template <
    typename Key,
    typename MappedTypeOrVoid,
    typename HasherOrVoid,
    typename KeyEqualOrVoid,
    typename AllocOrVoid,
    typename EligibleForPerturbedInsertionOrder>
class VectorContainerPolicy : public BasePolicy<
                                  Key,
                                  MappedTypeOrVoid,
                                  HasherOrVoid,
                                  KeyEqualOrVoid,
                                  AllocOrVoid,
                                  uint32_t> {
 public:
  using Super = BasePolicy<
      Key,
      MappedTypeOrVoid,
      HasherOrVoid,
      KeyEqualOrVoid,
      AllocOrVoid,
      uint32_t>;
  using Alloc = typename Super::Alloc;
  using AllocTraits = typename Super::AllocTraits;
  using ByteAlloc = typename Super::ByteAlloc;
  using ByteAllocTraits = typename Super::ByteAllocTraits;
  using BytePtr = typename Super::BytePtr;
  using Hasher = typename Super::Hasher;
  using Item = typename Super::Item;
  using ItemIter = typename Super::ItemIter;
  using KeyEqual = typename Super::KeyEqual;
  using Value = typename Super::Value;

  using Super::kAllocIsAlwaysEqual;

 private:
  using Super::kIsMap;

 public:
  // Items can't be enumerated through the table; iteration goes over
  // the value array instead.
  static constexpr bool kEnableItemIteration = false;

  static constexpr bool kContinuousCapacity = true;

  using InternalSizeType = Item;

  using ConstIter =
      VectorContainerIterator<typename AllocTraits::const_pointer>;
  using Iter = std::conditional_t<
      kIsMap,
      VectorContainerIterator<typename AllocTraits::pointer>,
      ConstIter>;
  // Reverse iteration (insertion order) is just a raw pointer walk over
  // the contiguous value array.
  using ConstReverseIter = typename AllocTraits::const_pointer;
  using ReverseIter = std::
      conditional_t<kIsMap, typename AllocTraits::pointer, ConstReverseIter>;

  using ValuePtr = typename AllocTraits::pointer;

  //////// F14Table policy

  static constexpr bool prefetchBeforeRehash() { return true; }

  static constexpr bool prefetchBeforeCopy() { return false; }

  static constexpr bool prefetchBeforeDestroy() { return false; }

  static constexpr bool destroyItemOnClear() { return false; }

 private:
  // True when values can be relocated with memcpy, i.e. the allocator
  // doesn't customize construct/destroy and the type is trivially
  // copyable.
  static constexpr bool valueIsTriviallyCopyable() {
    return AllocatorHasDefaultObjectConstruct<Alloc, Value, Value>::value &&
        AllocatorHasDefaultObjectDestroy<Alloc, Value>::value &&
        is_trivially_copyable<Value>::value;
  }

 public:
  VectorContainerPolicy(
      Hasher const& hasher, KeyEqual const& keyEqual, Alloc const& alloc)
      : Super{hasher, keyEqual, alloc} {}

  VectorContainerPolicy(VectorContainerPolicy const& rhs) : Super{rhs} {
    // values_ will get allocated later to do the copy
  }

  VectorContainerPolicy(VectorContainerPolicy const& rhs, Alloc const& alloc)
      : Super{rhs, alloc} {
    // values_ will get allocated later to do the copy
  }

  // Move construction steals the value array outright.
  VectorContainerPolicy(VectorContainerPolicy&& rhs) noexcept
      : Super{std::move(rhs)}, values_{rhs.values_} {
    rhs.values_ = nullptr;
  }

  VectorContainerPolicy(
      VectorContainerPolicy&& rhs, Alloc const& alloc) noexcept
      : Super{std::move(rhs), alloc} {
    if (kAllocIsAlwaysEqual || this->alloc() == rhs.alloc()) {
      // common case
      values_ = rhs.values_;
      rhs.values_ = nullptr;
    } else {
      // table must be constructed in new memory
      values_ = nullptr;
    }
  }

  // Copy assignment; the DCHECK (which continues on the following
  // original line) asserts that the destination's storage has already
  // been released by the caller.
  VectorContainerPolicy& operator=(VectorContainerPolicy const& rhs) {
    if (this != &rhs) {
      FOLLY_SAFE_DCHECK(values_
== nullptr, "");
      Super::operator=(rhs);
    }
    return *this;
  }

  // Move assignment: the value array transfers only when the allocator
  // propagates or compares equal; otherwise the caller must rebuild.
  VectorContainerPolicy& operator=(VectorContainerPolicy&& rhs) noexcept {
    if (this != &rhs) {
      FOLLY_SAFE_DCHECK(values_ == nullptr, "");
      bool transfer =
          AllocTraits::propagate_on_container_move_assignment::value ||
          kAllocIsAlwaysEqual || this->alloc() == rhs.alloc();
      Super::operator=(std::move(rhs));
      if (transfer) {
        values_ = rhs.values_;
        rhs.values_ = nullptr;
      }
    }
    return *this;
  }

  void swapPolicy(VectorContainerPolicy& rhs) {
    using std::swap;
    this->swapBasePolicy(rhs);
    swap(values_, rhs.values_);
  }

  template <typename K>
  std::size_t computeKeyHash(K const& key) const {
    static_assert(
        Super::isAvalanchingHasher() == IsAvalanchingHasher<Hasher, K>::value,
        "");
    return this->hasher()(key);
  }

  // Index-based lookup hashes the value currently stored at that index.
  std::size_t computeKeyHash(VectorContainerIndexSearch const& key) const {
    return computeItemHash(key.index_);
  }

  using Super::keyForValue;

  std::size_t computeItemHash(Item const& item) const {
    return this->computeKeyHash(keyForValue(values_[item]));
  }

  bool keyMatchesItem(VectorContainerIndexSearch const& key, Item const& item)
      const {
    return key.index_ == item;
  }

  template <typename K>
  bool keyMatchesItem(K const& key, Item const& item) const {
    return this->keyEqual()(key, keyForValue(values_[item]));
  }

  Key const& keyForValue(VectorContainerIndexSearch const& arg) const {
    return keyForValue(values_[arg.index_]);
  }

  VectorContainerIndexSearch buildArgForItem(Item const& item) const {
    return {item};
  }

  Value const& valueAtItem(Item const& item) const { return values_[item]; }

  Value&& valueAtItemForExtract(Item& item) {
    return std::move(values_[item]);
  }

  // Index-search insert just records the existing index; no value is
  // constructed.
  template <typename Table>
  void constructValueAtItem(
      Table&&, Item* itemAddr, VectorContainerIndexSearch arg) {
    *itemAddr = arg.index_;
  }

  // Appends a new value at the end of the array and stores its index in
  // the item.  May then swap it with a randomly chosen earlier value to
  // perturb iteration order (guards against code depending on it).
  template <typename Table, typename... Args>
  void constructValueAtItem(Table&& table, Item* itemAddr, Args&&... args) {
    Alloc& a = this->alloc();
    std::size_t size = table.size();
    FOLLY_SAFE_DCHECK(
        size < std::numeric_limits<InternalSizeType>::max(), "");
    *itemAddr = static_cast<InternalSizeType>(size);
    auto dst = std::addressof(values_[size]);
    // TODO(T31574848): clean up assume-s used to optimize placement new
    assume(dst != nullptr);
    AllocTraits::construct(a, dst, std::forward<Args>(args)...);

    constexpr bool perturb = FOLLY_F14_PERTURB_INSERTION_ORDER;
    if (EligibleForPerturbedInsertionOrder::value && perturb &&
        !tlsPendingSafeInserts()) {
      // Pick a random victim. We have to do this post-construction
      // because the item and tag are already set in the table before
      // calling constructValueAtItem, so if there is a tag collision
      // find may evaluate values_[size] during the search.
      auto i = tlsMinstdRand(size + 1);
      if (i != size) {
        auto& lhsItem = *itemAddr;
        auto rhsIter = table.find(
            VectorContainerIndexSearch{static_cast<InternalSizeType>(i)});
        FOLLY_SAFE_DCHECK(!rhsIter.atEnd(), "");
        auto& rhsItem = rhsIter.item();
        FOLLY_SAFE_DCHECK(lhsItem == size, "");
        FOLLY_SAFE_DCHECK(rhsItem == i, "");

        // Three-way swap of the two values through temporary storage,
        // then swap the two items' indices to match.
        aligned_storage_for_t<Value> tmp;
        Value* tmpValue = static_cast<Value*>(static_cast<void*>(&tmp));
        transfer(a, std::addressof(values_[i]), tmpValue, 1);
        transfer(
            a, std::addressof(values_[size]), std::addressof(values_[i]), 1);
        transfer(a, tmpValue, std::addressof(values_[size]), 1);
        lhsItem = i;
        rhsItem = size;
      }
    }
  }

  // Items are plain indices: a simple copy suffices during rehash.
  void moveItemDuringRehash(Item* itemAddr, Item& src) { *itemAddr = src; }

  void prefetchValue(Item const& item) const {
    prefetchAddr(std::addressof(values_[item]));
  }

  // Values are destroyed in bulk (beforeClear/beforeReset), not per item.
  void destroyItem(Item&) {}

  template <typename T>
  std::enable_if_t<std::is_nothrow_move_constructible<T>::value>
  complainUnlessNothrowMove() {}

  // Emits a deprecation warning at compile time when the stored types
  // have throwing moves (relocation in transfer() can't be exception
  // safe in that case).
  template <typename T>
  [[deprecated(
      "use F14NodeMap/Set or mark key and mapped type move constructor nothrow")]] std::
      enable_if_t<!std::is_nothrow_move_constructible<T>::value>
      complainUnlessNothrowMove() {}

  // Relocates n values from src to dst (body begins on the following
  // original line).
  void transfer(Alloc& a, Value* src, Value* dst, std::size_t n)
{
    complainUnlessNothrowMove<Key>();
    complainUnlessNothrowMove<lift_unit_t<MappedTypeOrVoid>>();

    auto origSrc = src;
    if (valueIsTriviallyCopyable()) {
      // Trivially-copyable values can be relocated wholesale.
      std::memcpy(
          static_cast<void*>(dst),
          static_cast<void const*>(src),
          n * sizeof(Value));
    } else {
      // Move-construct each value into place and destroy the source.
      for (std::size_t i = 0; i < n; ++i, ++src, ++dst) {
        // TODO(T31574848): clean up assume-s used to optimize placement new
        assume(dst != nullptr);
        AllocTraits::construct(a, dst, Super::moveValue(*src));
        if (kIsMap) {
          // Map values hold a const key; launder before destroying the
          // moved-from storage.
          AllocTraits::destroy(a, launder(src));
        } else {
          AllocTraits::destroy(a, src);
        }
      }
    }
    this->afterDestroyWithoutDeallocate(origSrc, n);
  }

  // Copies or moves `size` values out of rhs into this policy's already
  // allocated value array, with strong exception safety: on throw, the
  // values constructed so far are destroyed before rethrowing.
  template <typename P, typename V>
  bool beforeBuildImpl(std::size_t size, P&& rhs, V const& constructorArgFor) {
    Alloc& a = this->alloc();

    FOLLY_SAFE_DCHECK(values_ != nullptr, "");

    auto src = std::addressof(rhs.values_[0]);
    Value* dst = std::addressof(values_[0]);

    if (valueIsTriviallyCopyable()) {
      std::memcpy(
          static_cast<void*>(dst),
          static_cast<void const*>(src),
          size * sizeof(Value));
    } else {
      for (std::size_t i = 0; i < size; ++i, ++src, ++dst) {
        try {
          // TODO(T31574848): clean up assume-s used to optimize placement new
          assume(dst != nullptr);
          AllocTraits::construct(a, dst, constructorArgFor(*src));
        } catch (...) {
          // Roll back: destroy everything built so far, then rethrow.
          for (Value* cleanup = std::addressof(values_[0]); cleanup != dst;
               ++cleanup) {
            AllocTraits::destroy(a, cleanup);
          }
          throw;
        }
      }
    }
    return true;
  }

  // Copy flavor: values are copy-constructed from rhs.
  bool beforeBuild(
      std::size_t size,
      std::size_t /*capacity*/,
      VectorContainerPolicy const& rhs) {
    return beforeBuildImpl(size, rhs, [](Value const& v) { return v; });
  }

  // Move flavor: values are move-constructed out of rhs.
  bool beforeBuild(
      std::size_t size,
      std::size_t /*capacity*/,
      VectorContainerPolicy&& rhs) {
    return beforeBuildImpl(
        size, rhs, [](Value& v) { return Super::moveValue(v); });
  }

  template <typename P>
  void afterBuild(
      bool /*undoState*/,
      bool success,
      std::size_t /*size*/,
      std::size_t /*capacity*/,
      P&& /*rhs*/) {
    // buildArgForItem can be used to construct a new item trivially,
    // so no failure between beforeBuild and afterBuild should be possible
    FOLLY_SAFE_DCHECK(success, "");
  }

 private:
  // Returns the byte offset of the first Value in a unified allocation
  // that first holds prefixBytes of data, where prefixBytes comes from
  // Chunk storage and may be only 4-byte aligned due to sub-chunk
  // allocation.
  static std::size_t valuesOffset(std::size_t prefixBytes) {
    FOLLY_SAFE_DCHECK((prefixBytes % alignof(Item)) == 0, "");
    if (alignof(Value) > alignof(Item)) {
      // Round prefixBytes up to the next multiple of alignof(Value).
      prefixBytes = -(-prefixBytes & ~(alignof(Value) - 1));
    }
    FOLLY_SAFE_DCHECK((prefixBytes % alignof(Value)) == 0, "");
    return prefixBytes;
  }

  // Returns the total number of bytes that should be allocated to store
  // prefixBytes of Chunks and valueCapacity values.
static std::size_t allocSize(
      std::size_t prefixBytes, std::size_t valueCapacity) {
    return valuesOffset(prefixBytes) + sizeof(Value) * valueCapacity;
  }

 public:
  // Allocates the new unified (chunks + values) block, relocates the
  // existing values into it, and returns the old value array so it can
  // be restored if the rehash later fails.
  ValuePtr beforeRehash(
      std::size_t size,
      std::size_t oldCapacity,
      std::size_t newCapacity,
      std::size_t chunkAllocSize,
      BytePtr& outChunkAllocation) {
    FOLLY_SAFE_DCHECK(
        size <= oldCapacity && ((values_ == nullptr) == (oldCapacity == 0)) &&
            newCapacity > 0 &&
            newCapacity <= (std::numeric_limits<Item>::max)(),
        "");

    outChunkAllocation =
        allocateOverAligned<ByteAlloc, kRequiredVectorAlignment>(
            ByteAlloc{Super::alloc()}, allocSize(chunkAllocSize, newCapacity));

    ValuePtr before = values_;
    // The value array lives in the same allocation, right after the
    // chunk prefix.
    ValuePtr after = std::pointer_traits<ValuePtr>::pointer_to(
        *static_cast<Value*>(static_cast<void*>(
            &*outChunkAllocation + valuesOffset(chunkAllocSize))));

    if (size > 0) {
      Alloc& a = this->alloc();
      transfer(a, std::addressof(before[0]), std::addressof(after[0]), size);
    }

    values_ = after;
    return before;
  }

  // Moves the values back into the old storage after a failed rehash.
  FOLLY_NOINLINE void afterFailedRehash(ValuePtr state, std::size_t size) {
    // state holds the old storage
    Alloc& a = this->alloc();
    if (size > 0) {
      transfer(a, std::addressof(values_[0]), std::addressof(state[0]), size);
    }
    values_ = state;
  }

  void afterRehash(
      ValuePtr state,
      bool success,
      std::size_t size,
      std::size_t oldCapacity,
      std::size_t newCapacity,
      BytePtr chunkAllocation,
      std::size_t chunkAllocSize) {
    if (!success) {
      afterFailedRehash(state, size);
    }

    // on success, chunkAllocation is the old allocation, on failure it is the
    // new one
    if (chunkAllocation != nullptr) {
      deallocateOverAligned<ByteAlloc, kRequiredVectorAlignment>(
          ByteAlloc{Super::alloc()},
          chunkAllocation,
          allocSize(chunkAllocSize, (success ? oldCapacity : newCapacity)));
    }
  }

  // Destroys all stored values (items are bare indices and need no
  // per-item destruction here).
  void beforeClear(std::size_t size, std::size_t capacity) {
    FOLLY_SAFE_DCHECK(
        size <= capacity && ((values_ == nullptr) == (capacity == 0)), "");
    Alloc& a = this->alloc();
    for (std::size_t i = 0; i < size; ++i) {
      AllocTraits::destroy(a, std::addressof(values_[i]));
    }
  }

  void beforeReset(std::size_t size, std::size_t capacity) {
    beforeClear(size, capacity);
  }

  // Releases the unified allocation after a reset.
  void afterReset(
      std::size_t /*size*/,
      std::size_t capacity,
      BytePtr chunkAllocation,
      std::size_t chunkAllocSize) {
    if (chunkAllocation != nullptr) {
      deallocateOverAligned<ByteAlloc, kRequiredVectorAlignment>(
          ByteAlloc{Super::alloc()},
          chunkAllocation,
          allocSize(chunkAllocSize, capacity));
      values_ = nullptr;
    }
  }

  // Chunks and values share one allocation, so exactly one allocation
  // class is reported.
  template <typename V>
  void visitPolicyAllocationClasses(
      std::size_t chunkAllocSize,
      std::size_t /*size*/,
      std::size_t capacity,
      V&& visitor) const {
    FOLLY_SAFE_DCHECK((chunkAllocSize == 0) == (capacity == 0), "");
    if (chunkAllocSize > 0) {
      visitor(
          allocationBytesForOverAligned<ByteAlloc, kRequiredVectorAlignment>(
              allocSize(chunkAllocSize, capacity)),
          1);
    }
  }

  // Iterator stuff

  // begin() of forward iteration: the highest-indexed live value, or
  // end() for an empty table.
  Iter linearBegin(std::size_t size) const {
    return Iter{(size > 0 ? values_ + size - 1 : nullptr), values_};
  }

  Iter linearEnd() const { return Iter{nullptr, nullptr}; }

  //////// F14BasicMap/Set policy

  Iter makeIter(ItemIter const& underlying) const {
    if (underlying.atEnd()) {
      return linearEnd();
    } else {
      assume(values_ + underlying.item() != nullptr);
      assume(values_ != nullptr);
      return Iter{values_ + underlying.item(), values_};
    }
  }

  ConstIter makeConstIter(ItemIter const& underlying) const {
    return makeIter(underlying);
  }

  Item iterToIndex(ConstIter const& iter) const {
    auto n = iter.index();
    assume(n <= std::numeric_limits<Item>::max());
    return static_cast<Item>(n);
  }

  Iter indexToIter(Item index) const {
    return Iter{values_ + index, values_};
  }

  // Conversions between forward iterators and the raw-pointer reverse
  // (insertion-order) iterators.
  Iter iter(ReverseIter it) { return Iter{it, values_}; }

  ConstIter iter(ConstReverseIter it) const { return ConstIter{it, values_}; }

  ReverseIter riter(Iter it) { return it.current_; }

  ConstReverseIter riter(ConstIter it) const { return it.current_; }

  // Contiguous value array; nullptr when capacity is zero.
  ValuePtr values_{nullptr};
};

// Fills in the default hasher / key-equal / allocator for a map policy.
template <
    template <typename, typename, typename, typename, typename, typename...>
    class Policy,
    typename Key,
    typename Mapped,
    typename Hasher,
    typename KeyEqual,
    typename Alloc,
    typename... Args>
using MapPolicyWithDefaults = Policy<
    Key,
    Mapped,
    VoidDefault<Hasher, DefaultHasher<Key>>,
    VoidDefault<KeyEqual, DefaultKeyEqual<Key>>,
    VoidDefault<Alloc, DefaultAlloc<std::pair<Key const, Mapped>>>,
    Args...>;

// Fills in the default hasher / key-equal / allocator for a set policy
// (MappedTypeOrVoid is void for sets).
template <
    template <typename, typename, typename, typename, typename, typename...>
    class Policy,
    typename Key,
    typename Hasher,
    typename KeyEqual,
    typename Alloc,
    typename... Args>
using SetPolicyWithDefaults = Policy<
    Key,
    void,
    VoidDefault<Hasher, DefaultHasher<Key>>,
    VoidDefault<KeyEqual, DefaultKeyEqual<Key>>,
    VoidDefault<Alloc, DefaultAlloc<Key>>,
    Args...>;

} // namespace detail
} // namespace f14
} // namespace folly

#endif // FOLLY_F14_VECTOR_INTRINSICS_AVAILABLE
aloknnikhil/folly
folly/logging/LogName.h
<reponame>aloknnikhil/folly
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <folly/Range.h>

namespace folly {

/**
 * The LogName class contains utility functions for processing log category
 * names.  It primarily handles canonicalization of names.
 *
 * For instance, "foo.bar", "foo/bar", "foo..bar", and ".foo.bar..." all refer
 * to the same log category.
 */
class LogName {
 public:
  /**
   * Return a canonicalized version of the log name.
   *
   * '/' and '\\' characters are converted to '.', then leading and trailing
   * '.' characters are removed, and all sequences of consecutive '.'
   * characters are replaced with a single '.'
   */
  static std::string canonicalize(folly::StringPiece name);

  /**
   * Hash a log name.
   *
   * The log name does not need to be pre-canonicalized.
   * The hash for equivalent log names will always be equal.
   */
  static size_t hash(folly::StringPiece name);

  /**
   * Compare two log names.
   *
   * The log name does not need to be pre-canonicalized.
   * Returns 0 if and only if the two names refer to the same log category.
   * Otherwise, returns -1 if the canonical version of nameA is less than the
   * canonical version of nameB (and a positive value when it is greater).
   */
  static int cmp(folly::StringPiece nameA, folly::StringPiece nameB);

  /**
   * Get the name of the parent log category.
   *
   * Returns a StringPiece pointing into the input data.
   * As a result, the parent log name may not be canonical if the input log
   * name is not already canonical.
   *
   * If the input log name refers to the root log category, an empty
   * StringPiece will be returned.
   */
  static folly::StringPiece getParent(folly::StringPiece name);

  /**
   * Hash functor that can be used with standard library containers.
   * Delegates to LogName::hash(), so equivalent (non-canonical) names
   * hash identically.
   */
  struct Hash {
    size_t operator()(folly::StringPiece key) const {
      return LogName::hash(key);
    }
  };

  /**
   * Equality functor that can be used with standard library containers.
   * Delegates to LogName::cmp(), so equivalent (non-canonical) names
   * compare equal.
   */
  struct Equals {
    bool operator()(folly::StringPiece a, folly::StringPiece b) const {
      return LogName::cmp(a, b) == 0;
    }
  };
};

} // namespace folly
aloknnikhil/folly
folly/logging/LogHandlerConfig.h
<gh_stars>1-10
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <string>
#include <unordered_map>

#include <folly/Optional.h>
#include <folly/Range.h>

namespace folly {

/**
 * Configuration for a LogHandler
 */
class LogHandlerConfig {
 public:
  // Free-form handler settings as name -> value string pairs.
  using Options = std::unordered_map<std::string, std::string>;

  LogHandlerConfig();
  explicit LogHandlerConfig(StringPiece type);
  explicit LogHandlerConfig(Optional<StringPiece> type);
  LogHandlerConfig(StringPiece type, Options options);
  LogHandlerConfig(Optional<StringPiece> type, Options options);

  /**
   * Update this LogHandlerConfig object by merging in settings from another
   * LogConfig.
   *
   * The other LogHandlerConfig must not have a type set.
   */
  void update(const LogHandlerConfig& other);

  // Two configs are equal when both their type and options match.
  bool operator==(const LogHandlerConfig& other) const;
  bool operator!=(const LogHandlerConfig& other) const;

  /**
   * The handler type name.
   *
   * If this field is unset than this configuration object is intended to be
   * used to update an existing LogHandler object.  This field must always
   * be set in the configuration for all existing LogHandler objects.
   */
  Optional<std::string> type;

  Options options;
};

} // namespace folly
aloknnikhil/folly
folly/futures/SharedPromise.h
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <folly/Portability.h>
#include <folly/executors/InlineExecutor.h>
#include <folly/futures/Promise.h>
#include <folly/lang/Exception.h>

namespace folly {

/*
 * SharedPromise provides the same interface as Promise, but you can extract
 * multiple Futures from it, i.e. you can call getFuture() as many times as
 * you'd like.  When the SharedPromise is fulfilled, all of the Futures are
 * completed.  Calls to getFuture() after the SharedPromise is fulfilled return
 * a completed Future.  If you find yourself constructing collections of
 * Promises and fulfilling them simultaneously with the same value, consider
 * this utility instead.  Likewise, if you find yourself in need of setting
 * multiple callbacks on the same Future (which is indefinitely unsupported),
 * consider refactoring to use SharedPromise to "split" the Future.
 *
 * The SharedPromise must be kept alive manually.  Consider FutureSplitter for
 * automatic lifetime management.
 */
template <class T>
class SharedPromise {
 public:
  /**
   * Return a Future tied to the shared core state.  Unlike
   * Promise::getFuture, this can be called an unlimited number of times per
   * SharedPromise.
   */
  SemiFuture<T> getSemiFuture();

  /**
   * Return a Future tied to the shared core state.  Unlike
   * Promise::getFuture, this can be called an unlimited number of times per
   * SharedPromise.
   * NOTE: This function is deprecated.  Please use getSemiFuture and pass the
   *       appropriate executor to .via on the returned SemiFuture to get a
   *       valid Future where necessary.
   */
  Future<T> getFuture();

  /** Return the number of Futures associated with this SharedPromise */
  size_t size();

  /** Fulfill the SharedPromise with an exception_wrapper */
  void setException(exception_wrapper ew);

  /** Fulfill the SharedPromise with an exception type E, which can be passed
      to make_exception_wrapper().  Useful for originating exceptions.  If you
      caught an exception the exception_wrapper form is more appropriate. */
  template <class E>
  typename std::enable_if<std::is_base_of<std::exception, E>::value>::type
  setException(E const&);

  /// Set an interrupt handler to handle interrupts.  See the documentation
  /// for Future::raise().  Your handler can do whatever it wants, but if you
  /// bother to set one then you probably will want to fulfill the
  /// SharedPromise with an exception (or special value) indicating how the
  /// interrupt was handled.
  void setInterruptHandler(std::function<void(exception_wrapper const&)>);

  /// Sugar to fulfill this SharedPromise<Unit>
  template <class B = T>
  typename std::enable_if<std::is_same<Unit, B>::value, void>::type
  setValue() {
    setTry(Try<T>(T()));
  }

  /** Set the value (use perfect forwarding for both move and copy) */
  template <class M>
  void setValue(M&& value);

  void setTry(Try<T>&& t);

  /** Fulfill this SharedPromise with the result of a function that takes no
      arguments and returns something implicitly convertible to T.
      Captures exceptions.  e.g.
      p.setWith([] { do something that may throw; return a T; });
  */
  template <class F>
  void setWith(F&& func);

  bool isFulfilled();

 private:
  // this allows SharedPromise move-ctor/move-assign to be defaulted
  // (std::mutex itself is not movable; moving simply leaves both mutexes
  // in their default state)
  struct Mutex : std::mutex {
    Mutex() = default;
    Mutex(Mutex&&) noexcept {}
    Mutex& operator=(Mutex&&) noexcept {
      return *this;
    }
  };

  // Wrapper whose move operations reset the moved-from value back to a
  // default-constructed V, so a moved-from SharedPromise is left empty
  // rather than in an unspecified state.
  template <typename V>
  struct Defaulted {
    using Noexcept = StrictConjunction<
        std::is_nothrow_default_constructible<V>,
        std::is_nothrow_move_constructible<V>,
        std::is_nothrow_move_assignable<V>>;

    V value{V()};

    Defaulted() = default;
    Defaulted(Defaulted&& that) noexcept(Noexcept::value)
        : value(std::exchange(that.value, V())) {}
    Defaulted& operator=(Defaulted&& that) noexcept(Noexcept::value) {
      value = std::exchange(that.value, V());
      return *this;
    }
  };

  // True once the shared result (value or exception) has been set.
  bool hasResult() {
    return try_.value.hasValue() || try_.value.hasException();
  }

  // Guards all of the state below.
  Mutex mutex_;
  // Number of Futures handed out so far.
  Defaulted<size_t> size_;
  // The shared result, once fulfilled.
  Defaulted<Try<T>> try_;
  // Promises for Futures handed out before fulfillment.
  std::vector<Promise<T>> promises_;
  std::function<void(exception_wrapper const&)> interruptHandler_;
};

} // namespace folly

#include <folly/futures/Future.h>
#include <folly/futures/SharedPromise-inl.h>
aloknnikhil/folly
folly/logging/CustomLogFormatter.h
<filename>folly/logging/CustomLogFormatter.h
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <folly/Range.h>
#include <folly/logging/LogFormatter.h>
#include <string>

namespace folly {

/**
 * A LogFormatter implementation that produces messages in a format specified
 * using a config.
 *
 * The glog message format is:
 *
 *   {L}{m:02d}{D:02d} {H:2d}:{M:02d}:{S:02d}.{USECS:06d} {THREAD:5d}
 *   {FILE}:{LINE}]
 *
 *  L:  A 1-character code describing the log level (e.g., E, W, I, V)
 *  m:  month
 *  D:  day
 *  H:  hour, 24-hour format
 *  M:  minute
 *  S:  second
 *  USECS:  microseconds
 *  THREAD:  Thread ID
 *  FILE:  Filename (just the last component)
 *  FUN:  The function that logged the message
 *  LINE:  Line number
 *
 * TODO: enable support for the following 2:
 *  - THREADNAME: the thread name.
 *  - THREADCTX: thread-local log context data, if it has been set.  (This is
 *    a Facebook-specific modification)
 */
class CustomLogFormatter : public LogFormatter {
 public:
  explicit CustomLogFormatter(StringPiece format, bool colored);

  std::string formatMessage(
      const LogMessage& message, const LogCategory* handlerCategory) override;

 private:
  // Parses the user-supplied format string into the precomputed state below.
  void parseFormatString(StringPiece format);

  // Format string used for single-line messages.
  std::string logFormat_;
  // Format string used per line of a multi-line message.
  std::string singleLineLogFormat_;
  // Estimated fixed-width portion of a formatted line, used for sizing.
  std::size_t staticEstimatedWidth_{0};
  // Number of {FILE} / {FUN} placeholders in the format.
  std::size_t fileNameCount_{0};
  std::size_t functionNameCount_{0};
  // Whether ANSI color escapes are emitted.
  const bool colored_;
};
} // namespace folly
aloknnikhil/folly
folly/test/SynchronizedTestLib-inl.h
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <folly/Random.h>
#include <folly/Synchronized.h>
#include <folly/container/Foreach.h>
#include <folly/portability/GTest.h>
#include <glog/logging.h>
#include <algorithm>
#include <condition_variable>
#include <functional>
#include <map>
#include <random>
#include <thread>
#include <vector>

namespace folly {
namespace sync_tests {

// Process-wide RNG shared by the test helpers; seeded once on first use.
inline std::mt19937& getRNG() {
  static const auto seed = folly::randomNumberSeed();
  static std::mt19937 rng(seed);
  return rng;
}

// Sleeps the calling thread for a random duration in [min, max].
void randomSleep(std::chrono::milliseconds min, std::chrono::milliseconds max) {
  std::uniform_int_distribution<> range(min.count(), max.count());
  std::chrono::milliseconds duration(range(getRNG()));
  /* sleep override */
  std::this_thread::sleep_for(duration);
}

/*
 * Run a function simultaneously in a number of different threads.
 *
 * The function will be passed the index number of the thread it is running in.
 * This function makes an attempt to synchronize the start of the threads as
 * best as possible.  It waits for all threads to be allocated and started
 * before invoking the function.
 */
template <class Function>
void runParallel(size_t numThreads, const Function& function) {
  std::vector<std::thread> threads;
  threads.reserve(numThreads);

  // Variables used to synchronize all threads to try and start them
  // as close to the same time as possible
  folly::Synchronized<size_t, std::mutex> threadsReady(0);
  std::condition_variable readyCV;
  folly::Synchronized<bool, std::mutex> go(false);
  std::condition_variable goCV;

  auto worker = [&](size_t threadIndex) {
    // Signal that we are ready
    ++(*threadsReady.lock());
    readyCV.notify_one();

    // Wait until we are given the signal to start
    // The purpose of this is to try and make sure all threads start
    // as close to the same time as possible.
    {
      auto lockedGo = go.lock();
      goCV.wait(lockedGo.getUniqueLock(), [&] { return *lockedGo; });
    }

    function(threadIndex);
  };

  // Start all of the threads
  for (size_t threadIndex = 0; threadIndex < numThreads; ++threadIndex) {
    threads.emplace_back([threadIndex, &worker]() { worker(threadIndex); });
  }

  // Wait for all threads to become ready
  {
    auto readyLocked = threadsReady.lock();
    readyCV.wait(readyLocked.getUniqueLock(), [&] {
      return *readyLocked == numThreads;
    });
  }
  // Now signal the threads that they can go
  go = true;
  goCV.notify_all();

  // Wait for all threads to finish
  for (auto& thread : threads) {
    thread.join();
  }
}

// testBasic() version for shared lock types
template <class Mutex>
typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
testBasicImpl() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  obj.wlock()->resize(1000);

  folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.wlock()};
  EXPECT_EQ(1000, obj2.rlock()->size());

  {
    auto lockedObj = obj.wlock();
    lockedObj->push_back(10);
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    EXPECT_EQ(1000, obj2.wlock()->size());
    EXPECT_EQ(1000, obj2.rlock()->size());

    {
      // Temporarily dropping the write lock must allow other lockers in.
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.wlock()->size());
    }
  }

  {
    auto lockedObj = obj.rlock();
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(1001, obj.rlock()->size());
    {
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.wlock()->size());
    }
  }

  obj.wlock()->front() = 2;

  {
    // contextualLock() on a const reference should grab a shared lock
    auto lockedObj = constObj.contextualLock();
    EXPECT_EQ(2, lockedObj->front());
    EXPECT_EQ(2, constObj.rlock()->front());
    EXPECT_EQ(2, obj.rlock()->front());
  }

  EXPECT_EQ(1001, obj.rlock()->size());
  EXPECT_EQ(2, obj.rlock()->front());
  EXPECT_EQ(10, obj.rlock()->back());
  EXPECT_EQ(1000, obj2.rlock()->size());
}

// testBasic() version for non-shared lock types
template <class Mutex>
typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type
testBasicImpl() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  obj.lock()->resize(1000);

  folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.lock()};
  EXPECT_EQ(1000, obj2.lock()->size());

  {
    auto lockedObj = obj.lock();
    lockedObj->push_back(10);
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    EXPECT_EQ(1000, obj2.lock()->size());

    {
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.lock()->size());
    }
  }

  {
    // lock() on a const reference still works for exclusive-only mutexes.
    auto lockedObj = constObj.lock();
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    EXPECT_EQ(1000, obj2.lock()->size());
  }

  obj.lock()->front() = 2;

  EXPECT_EQ(1001, obj.lock()->size());
  EXPECT_EQ(2, obj.lock()->front());
  EXPECT_EQ(2, obj.contextualLock()->front());
  EXPECT_EQ(10, obj.lock()->back());
  EXPECT_EQ(1000, obj2.lock()->size());
}

// Dispatches to the shared or non-shared implementation above based on
// the mutex's LockTraits.
template <class Mutex>
void testBasic() {
  testBasicImpl<Mutex>();
}

// testWithLock() version for shared lock types
// NOTE(review): this function continues past the end of this chunk of
// the file; the remainder is not visible here.
template <class Mutex>
typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
testWithLock() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  // Test withWLock() and withRLock()
  obj.withWLock([](std::vector<int>& lockedObj) {
    lockedObj.resize(1000);
    lockedObj.push_back(10);
    lockedObj.push_back(11);
  });
  obj.withWLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });
  constObj.withWLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
    EXPECT_EQ(11, lockedObj.back());
  });
  obj.withRLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
    EXPECT_EQ(11, lockedObj.back());
  });
  constObj.withRLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });

#if __cpp_generic_lambdas >= 201304
  obj.withWLock([](auto& lockedObj) { lockedObj.push_back(12); });
  obj.withWLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  constObj.withWLock([](const auto& lockedObj) {
    EXPECT_EQ(1003, lockedObj.size());
    EXPECT_EQ(12, lockedObj.back());
  });
  obj.withRLock([](const auto& lockedObj) {
    EXPECT_EQ(1003, lockedObj.size());
    EXPECT_EQ(12, lockedObj.back());
  });
  constObj.withRLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  obj.withWLock([](auto& lockedObj) { lockedObj.pop_back(); });
#endif

  // Test withWLockPtr() and withRLockPtr()
  using SynchType = folly::Synchronized<std::vector<int>, Mutex>;
#if __cpp_generic_lambdas >= 201304
  obj.withWLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); });
  obj.withRLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  constObj.withRLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  obj.withWLockPtr([&](auto&& lockedObj) {
    lockedObj->push_back(14);
    {
      auto unlocker = lockedObj.scopedUnlock();
      obj.wlock()->push_back(15);
    }
    EXPECT_EQ(15, lockedObj->back());
  });
  constObj.withWLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1005, lockedObj->size());
    EXPECT_EQ(15, lockedObj->back());
  });
#else
  obj.withWLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(13);
    lockedObj->push_back(14);
    lockedObj->push_back(15);
  });
#endif

  obj.withWLockPtr([](typename
SynchType::LockedPtr&& lockedObj) { lockedObj->push_back(16); EXPECT_EQ(1006, lockedObj->size()); }); constObj.withWLockPtr([](typename SynchType::ConstWLockedPtr&& lockedObj) { EXPECT_EQ(1006, lockedObj->size()); EXPECT_EQ(16, lockedObj->back()); }); obj.withRLockPtr([](typename SynchType::RLockedPtr&& lockedObj) { EXPECT_TRUE( (std::is_const<std::remove_reference_t<decltype(*lockedObj)>>{})); EXPECT_EQ(1006, lockedObj->size()); EXPECT_EQ(16, lockedObj->back()); }); constObj.withRLockPtr([](typename SynchType::ConstRLockedPtr&& lockedObj) { EXPECT_TRUE( (std::is_const<std::remove_reference_t<decltype(*lockedObj)>>{})); EXPECT_EQ(1006, lockedObj->size()); EXPECT_EQ(16, lockedObj->back()); }); } // testWithLock() version for non-shared lock types template <class Mutex> typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type testWithLock() { folly::Synchronized<std::vector<int>, Mutex> obj; // Test withLock() obj.withLock([](std::vector<int>& lockedObj) { lockedObj.resize(1000); lockedObj.push_back(10); lockedObj.push_back(11); }); obj.withLock([](const std::vector<int>& lockedObj) { EXPECT_EQ(1002, lockedObj.size()); }); #if __cpp_generic_lambdas >= 201304 obj.withLock([](auto& lockedObj) { lockedObj.push_back(12); }); obj.withLock( [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); }); obj.withLock([](auto& lockedObj) { lockedObj.pop_back(); }); #endif // Test withLockPtr() using SynchType = folly::Synchronized<std::vector<int>, Mutex>; #if __cpp_generic_lambdas >= 201304 obj.withLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); }); obj.withLockPtr([](auto&& lockedObj) { EXPECT_EQ(1003, lockedObj->size()); EXPECT_EQ(13, lockedObj->back()); }); obj.withLockPtr([&](auto&& lockedObj) { lockedObj->push_back(14); { auto unlocker = lockedObj.scopedUnlock(); obj.lock()->push_back(15); } EXPECT_EQ(1005, lockedObj->size()); EXPECT_EQ(15, lockedObj->back()); }); #else obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) { 
lockedObj->push_back(13); lockedObj->push_back(14); lockedObj->push_back(15); }); #endif obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) { lockedObj->push_back(16); EXPECT_EQ(1006, lockedObj->size()); }); const auto& constObj = obj; constObj.withLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) { EXPECT_EQ(1006, lockedObj->size()); EXPECT_EQ(16, lockedObj->back()); }); } template <class Mutex> void testUnlockCommon() { folly::Synchronized<int, Mutex> value{7}; const auto& cv = value; { auto lv = value.contextualLock(); EXPECT_EQ(7, *lv); *lv = 5; lv.unlock(); EXPECT_TRUE(lv.isNull()); EXPECT_FALSE(lv); auto rlv = cv.contextualLock(); EXPECT_EQ(5, *rlv); rlv.unlock(); EXPECT_TRUE(rlv.isNull()); EXPECT_FALSE(rlv); auto rlv2 = cv.contextualRLock(); EXPECT_EQ(5, *rlv2); rlv2.unlock(); lv = value.contextualLock(); EXPECT_EQ(5, *lv); *lv = 9; } EXPECT_EQ(9, *value.contextualRLock()); } // testUnlock() version for shared lock types template <class Mutex> typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type testUnlock() { folly::Synchronized<int, Mutex> value{10}; { auto lv = value.wlock(); EXPECT_EQ(10, *lv); *lv = 5; lv.unlock(); EXPECT_FALSE(lv); EXPECT_TRUE(lv.isNull()); auto rlv = value.rlock(); EXPECT_EQ(5, *rlv); rlv.unlock(); EXPECT_FALSE(rlv); EXPECT_TRUE(rlv.isNull()); auto lv2 = value.wlock(); EXPECT_EQ(5, *lv2); *lv2 = 7; lv = std::move(lv2); EXPECT_FALSE(lv2); EXPECT_TRUE(lv2.isNull()); EXPECT_FALSE(lv.isNull()); EXPECT_EQ(7, *lv); } testUnlockCommon<Mutex>(); } // testUnlock() version for non-shared lock types template <class Mutex> typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type testUnlock() { folly::Synchronized<int, Mutex> value{10}; { auto lv = value.lock(); EXPECT_EQ(10, *lv); *lv = 5; lv.unlock(); EXPECT_TRUE(lv.isNull()); EXPECT_FALSE(lv); auto lv2 = value.lock(); EXPECT_EQ(5, *lv2); *lv2 = 6; lv2.unlock(); EXPECT_TRUE(lv2.isNull()); EXPECT_FALSE(lv2); lv = value.lock(); EXPECT_EQ(6, *lv); *lv = 
7; lv2 = std::move(lv); EXPECT_TRUE(lv.isNull()); EXPECT_FALSE(lv); EXPECT_FALSE(lv2.isNull()); EXPECT_EQ(7, *lv2); } testUnlockCommon<Mutex>(); } // Testing the deprecated SYNCHRONIZED and SYNCHRONIZED_CONST APIs template <class Mutex> void testDeprecated() { folly::Synchronized<std::vector<int>, Mutex> obj; obj.contextualLock()->resize(1000); auto obj2 = obj; EXPECT_EQ(1000, obj2.contextualLock()->size()); SYNCHRONIZED(obj) { obj.push_back(10); EXPECT_EQ(1001, obj.size()); EXPECT_EQ(10, obj.back()); EXPECT_EQ(1000, obj2.contextualLock()->size()); } SYNCHRONIZED_CONST(obj) { EXPECT_EQ(1001, obj.size()); } SYNCHRONIZED(lockedObj, *&obj) { lockedObj.front() = 2; } EXPECT_EQ(1001, obj.contextualLock()->size()); EXPECT_EQ(10, obj.contextualLock()->back()); EXPECT_EQ(1000, obj2.contextualLock()->size()); EXPECT_EQ(FB_ARG_2_OR_1(1, 2), 2); EXPECT_EQ(FB_ARG_2_OR_1(1), 1); } template <class Mutex> void testConcurrency() { folly::Synchronized<std::vector<int>, Mutex> v; static const size_t numThreads = 100; // Note: I initially tried using itersPerThread = 1000, // which works fine for most lock types, but std::shared_timed_mutex // appears to be extraordinarily slow. It could take around 30 seconds // to run this test with 1000 iterations per thread using shared_timed_mutex. 
static const size_t itersPerThread = 100; auto pushNumbers = [&](size_t threadIdx) { // Test lock() for (size_t n = 0; n < itersPerThread; ++n) { v.contextualLock()->push_back((itersPerThread * threadIdx) + n); std::this_thread::yield(); } }; runParallel(numThreads, pushNumbers); std::vector<int> result; v.swap(result); EXPECT_EQ(numThreads * itersPerThread, result.size()); sort(result.begin(), result.end()); for (size_t i = 0; i < itersPerThread * numThreads; ++i) { EXPECT_EQ(i, result[i]); } } template <class Mutex> void testAcquireLocked() { folly::Synchronized<std::vector<int>, Mutex> v; folly::Synchronized<std::map<int, int>, Mutex> m; auto dualLockWorker = [&](size_t threadIdx) { // Note: this will be less awkward with C++ 17's structured // binding functionality, which will make it easier to use the returned // std::tuple. if (threadIdx & 1) { auto ret = acquireLocked(v, m); std::get<0>(ret)->push_back(threadIdx); (*std::get<1>(ret))[threadIdx] = threadIdx + 1; } else { auto ret = acquireLocked(m, v); std::get<1>(ret)->push_back(threadIdx); (*std::get<0>(ret))[threadIdx] = threadIdx + 1; } }; static const size_t numThreads = 100; runParallel(numThreads, dualLockWorker); std::vector<int> result; v.swap(result); EXPECT_EQ(numThreads, result.size()); sort(result.begin(), result.end()); for (size_t i = 0; i < numThreads; ++i) { EXPECT_EQ(i, result[i]); } } template <class Mutex> void testAcquireLockedWithConst() { folly::Synchronized<std::vector<int>, Mutex> v; folly::Synchronized<std::map<int, int>, Mutex> m; auto dualLockWorker = [&](size_t threadIdx) { const auto& cm = m; if (threadIdx & 1) { auto ret = acquireLocked(v, cm); (void)std::get<1>(ret)->size(); std::get<0>(ret)->push_back(threadIdx); } else { auto ret = acquireLocked(cm, v); (void)std::get<0>(ret)->size(); std::get<1>(ret)->push_back(threadIdx); } }; static const size_t numThreads = 100; runParallel(numThreads, dualLockWorker); std::vector<int> result; v.swap(result); EXPECT_EQ(numThreads, 
result.size()); sort(result.begin(), result.end()); for (size_t i = 0; i < numThreads; ++i) { EXPECT_EQ(i, result[i]); } } // Testing the deprecated SYNCHRONIZED_DUAL API template <class Mutex> void testDualLocking() { folly::Synchronized<std::vector<int>, Mutex> v; folly::Synchronized<std::map<int, int>, Mutex> m; auto dualLockWorker = [&](size_t threadIdx) { if (threadIdx & 1) { SYNCHRONIZED_DUAL(lv, v, lm, m) { lv.push_back(threadIdx); lm[threadIdx] = threadIdx + 1; } } else { SYNCHRONIZED_DUAL(lm, m, lv, v) { lv.push_back(threadIdx); lm[threadIdx] = threadIdx + 1; } } }; static const size_t numThreads = 100; runParallel(numThreads, dualLockWorker); std::vector<int> result; v.swap(result); EXPECT_EQ(numThreads, result.size()); sort(result.begin(), result.end()); for (size_t i = 0; i < numThreads; ++i) { EXPECT_EQ(i, result[i]); } } // Testing the deprecated SYNCHRONIZED_DUAL API template <class Mutex> void testDualLockingWithConst() { folly::Synchronized<std::vector<int>, Mutex> v; folly::Synchronized<std::map<int, int>, Mutex> m; auto dualLockWorker = [&](size_t threadIdx) { const auto& cm = m; if (threadIdx & 1) { SYNCHRONIZED_DUAL(lv, v, lm, cm) { (void)lm.size(); lv.push_back(threadIdx); } } else { SYNCHRONIZED_DUAL(lm, cm, lv, v) { (void)lm.size(); lv.push_back(threadIdx); } } }; static const size_t numThreads = 100; runParallel(numThreads, dualLockWorker); std::vector<int> result; v.swap(result); EXPECT_EQ(numThreads, result.size()); sort(result.begin(), result.end()); for (size_t i = 0; i < numThreads; ++i) { EXPECT_EQ(i, result[i]); } } template <class Mutex> void testTimed() { folly::Synchronized<std::vector<int>, Mutex> v; folly::Synchronized<uint64_t, Mutex> numTimeouts; auto worker = [&](size_t threadIdx) { // Test directly using operator-> on the lock result v.contextualLock()->push_back(2 * threadIdx); // Test using lock with a timeout for (;;) { auto lv = v.contextualLock(std::chrono::milliseconds(5)); if (!lv) { ++(*numTimeouts.contextualLock()); 
continue; } // Sleep for a random time to ensure we trigger timeouts // in other threads randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15)); lv->push_back(2 * threadIdx + 1); break; } }; static const size_t numThreads = 100; runParallel(numThreads, worker); std::vector<int> result; v.swap(result); EXPECT_EQ(2 * numThreads, result.size()); sort(result.begin(), result.end()); for (size_t i = 0; i < 2 * numThreads; ++i) { EXPECT_EQ(i, result[i]); } // We generally expect a large number of number timeouts here. // I'm not adding a check for it since it's theoretically possible that // we might get 0 timeouts depending on the CPU scheduling if our threads // don't get to run very often. LOG(INFO) << "testTimed: " << *numTimeouts.contextualRLock() << " timeouts"; // Make sure we can lock with various timeout duration units { auto lv = v.contextualLock(std::chrono::milliseconds(5)); EXPECT_TRUE(bool(lv)); EXPECT_FALSE(lv.isNull()); auto lv2 = v.contextualLock(std::chrono::microseconds(5)); // We may or may not acquire lv2 successfully, depending on whether // or not this is a recursive mutex type. } { auto lv = v.contextualLock(std::chrono::seconds(1)); EXPECT_TRUE(bool(lv)); } } template <class Mutex> void testTimedShared() { folly::Synchronized<std::vector<int>, Mutex> v; folly::Synchronized<uint64_t, Mutex> numTimeouts; auto worker = [&](size_t threadIdx) { // Test directly using operator-> on the lock result v.wlock()->push_back(threadIdx); // Test lock() with a timeout for (;;) { auto lv = v.rlock(std::chrono::milliseconds(10)); if (!lv) { ++(*numTimeouts.contextualLock()); continue; } // Sleep while holding the lock. // // This will block other threads from acquiring the write lock to add // their thread index to v, but it won't block threads that have entered // the for loop and are trying to acquire a read lock. 
// // For lock types that give preference to readers rather than writers, // this will tend to serialize all threads on the wlock() above. randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15)); auto found = std::find(lv->begin(), lv->end(), threadIdx); CHECK(found != lv->end()); break; } }; static const size_t numThreads = 100; runParallel(numThreads, worker); std::vector<int> result; v.swap(result); EXPECT_EQ(numThreads, result.size()); sort(result.begin(), result.end()); for (size_t i = 0; i < numThreads; ++i) { EXPECT_EQ(i, result[i]); } // We generally expect a small number of timeouts here. // For locks that give readers preference over writers this should usually // be 0. With locks that give writers preference we do see a small-ish // number of read timeouts. LOG(INFO) << "testTimedShared: " << *numTimeouts.contextualRLock() << " timeouts"; } // Testing the deprecated TIMED_SYNCHRONIZED API template <class Mutex> void testTimedSynchronized() { folly::Synchronized<std::vector<int>, Mutex> v; folly::Synchronized<uint64_t, Mutex> numTimeouts; auto worker = [&](size_t threadIdx) { // Test contextualLock() v.contextualLock()->push_back(2 * threadIdx); // Aaand test the TIMED_SYNCHRONIZED macro for (;;) { TIMED_SYNCHRONIZED(5, lv, v) { if (lv) { // Sleep for a random time to ensure we trigger timeouts // in other threads randomSleep( std::chrono::milliseconds(5), std::chrono::milliseconds(15)); lv->push_back(2 * threadIdx + 1); return; } ++(*numTimeouts.contextualLock()); } } }; static const size_t numThreads = 100; runParallel(numThreads, worker); std::vector<int> result; v.swap(result); EXPECT_EQ(2 * numThreads, result.size()); sort(result.begin(), result.end()); for (size_t i = 0; i < 2 * numThreads; ++i) { EXPECT_EQ(i, result[i]); } // We generally expect a large number of number timeouts here. 
// I'm not adding a check for it since it's theoretically possible that // we might get 0 timeouts depending on the CPU scheduling if our threads // don't get to run very often. LOG(INFO) << "testTimedSynchronized: " << *numTimeouts.contextualRLock() << " timeouts"; } // Testing the deprecated TIMED_SYNCHRONIZED_CONST API template <class Mutex> void testTimedSynchronizedWithConst() { folly::Synchronized<std::vector<int>, Mutex> v; folly::Synchronized<uint64_t, Mutex> numTimeouts; auto worker = [&](size_t threadIdx) { // Test contextualLock() v.contextualLock()->push_back(threadIdx); // Test TIMED_SYNCHRONIZED_CONST for (;;) { TIMED_SYNCHRONIZED_CONST(10, lv, v) { if (lv) { // Sleep while holding the lock. // // This will block other threads from acquiring the write lock to add // their thread index to v, but it won't block threads that have // entered the for loop and are trying to acquire a read lock. // // For lock types that give preference to readers rather than writers, // this will tend to serialize all threads on the wlock() above. randomSleep( std::chrono::milliseconds(5), std::chrono::milliseconds(15)); auto found = std::find(lv->begin(), lv->end(), threadIdx); CHECK(found != lv->end()); return; } else { ++(*numTimeouts.contextualLock()); } } } }; static const size_t numThreads = 100; runParallel(numThreads, worker); std::vector<int> result; v.swap(result); EXPECT_EQ(numThreads, result.size()); sort(result.begin(), result.end()); for (size_t i = 0; i < numThreads; ++i) { EXPECT_EQ(i, result[i]); } // We generally expect a small number of timeouts here. // For locks that give readers preference over writers this should usually // be 0. With locks that give writers preference we do see a small-ish // number of read timeouts. 
LOG(INFO) << "testTimedSynchronizedWithConst: " << *numTimeouts.contextualRLock() << " timeouts"; } template <class Mutex> void testConstCopy() { std::vector<int> input = {1, 2, 3}; const folly::Synchronized<std::vector<int>, Mutex> v(input); std::vector<int> result; v.copyInto(result); EXPECT_EQ(input, result); result = v.copy(); EXPECT_EQ(input, result); } struct NotCopiableNotMovable { NotCopiableNotMovable(int, const char*) {} NotCopiableNotMovable(const NotCopiableNotMovable&) = delete; NotCopiableNotMovable& operator=(const NotCopiableNotMovable&) = delete; NotCopiableNotMovable(NotCopiableNotMovable&&) = delete; NotCopiableNotMovable& operator=(NotCopiableNotMovable&&) = delete; }; template <class Mutex> void testInPlaceConstruction() { // This won't compile without in_place folly::Synchronized<NotCopiableNotMovable> a(folly::in_place, 5, "a"); } template <class Mutex> void testExchange() { std::vector<int> input = {1, 2, 3}; folly::Synchronized<std::vector<int>, Mutex> v(input); std::vector<int> next = {4, 5, 6}; auto prev = v.exchange(std::move(next)); EXPECT_EQ((std::vector<int>{{1, 2, 3}}), prev); EXPECT_EQ((std::vector<int>{{4, 5, 6}}), v.copy()); } } // namespace sync_tests } // namespace folly
aloknnikhil/folly
folly/experimental/symbolizer/ElfCache.h
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <climits> // for PATH_MAX
#include <cstring>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

#include <boost/container/flat_map.hpp>
#include <boost/intrusive/list.hpp>
#include <boost/operators.hpp>
#include <glog/logging.h>

#include <folly/Range.h>
#include <folly/experimental/symbolizer/Elf.h>
#include <folly/hash/Hash.h>

namespace folly {
namespace symbolizer {

/**
 * Number of ELF files loaded by the dynamic loader.
 */
size_t countLoadedElfFiles();

/**
 * Abstract interface for an ELF-file cache: maps a file path to a
 * shared ElfFile handle.
 */
class ElfCacheBase {
 public:
  // Return the (possibly cached) ElfFile for `path`.
  // NOTE(review): implementations appear to return nullptr on failure
  // (see SignalSafeElfCache docs below) — confirm against the .cpp.
  virtual std::shared_ptr<ElfFile> getFile(StringPiece path) = 0;
  virtual ~ElfCacheBase() {}
};

/**
 * Cache ELF files. Async-signal-safe: does memory allocation upfront.
 *
 * Will not grow; once the capacity is reached, lookups for files that
 * aren't already in the cache will fail (return nullptr).
 *
 * Not MT-safe. May not be used concurrently from multiple threads.
 *
 * NOTE that async-signal-safety is preserved only as long as the
 * SignalSafeElfCache object exists; after the SignalSafeElfCache object
 * is destroyed, destroying returned shared_ptr<ElfFile> objects may
 * cause ElfFile objects to be destroyed, and that's not async-signal-safe.
 */
class SignalSafeElfCache : public ElfCacheBase {
 public:
  explicit SignalSafeElfCache(size_t capacity);

  std::shared_ptr<ElfFile> getFile(StringPiece path) override;

 private:
  // We can't use std::string (allocating memory is bad!) so we roll our
  // own wrapper around a fixed-size, null-terminated string.
  class Path : private boost::totally_ordered<Path> {
   public:
    // Default-constructs as the empty path.
    Path() {
      assign(folly::StringPiece());
    }

    explicit Path(StringPiece s) {
      assign(s);
    }

    // Copy `s` into the fixed buffer and null-terminate.
    // NOTE(review): the length is only checked by DCHECK; in an opt build
    // a path longer than kMaxSize would overflow data_ — confirm that all
    // callers guarantee s.size() <= kMaxSize.
    void assign(StringPiece s) {
      DCHECK_LE(s.size(), kMaxSize);
      if (!s.empty()) {
        memcpy(data_, s.data(), s.size());
      }
      data_[s.size()] = '\0';
    }

    bool operator<(const Path& other) const {
      return strcmp(data_, other.data_) < 0;
    }

    bool operator==(const Path& other) const {
      return strcmp(data_, other.data_) == 0;
    }

    const char* data() const {
      return data_;
    }

    // Maximum path length that fits, excluding the trailing NUL.
    static constexpr size_t kMaxSize = PATH_MAX - 1;

   private:
    char data_[kMaxSize + 1];
  };

  Path scratchpad_; // Preallocated key for map_ lookups.

  // Maps a path to an index into slots_.
  boost::container::flat_map<Path, int> map_;
  // Preallocated ElfFile slots, referenced by map_.
  std::vector<std::shared_ptr<ElfFile>> slots_;
};

/**
 * General-purpose ELF file cache.
 *
 * LRU of given capacity. MT-safe (uses locking). Not async-signal-safe.
 */
class ElfCache : public ElfCacheBase {
 public:
  explicit ElfCache(size_t capacity);

  std::shared_ptr<ElfFile> getFile(StringPiece path) override;

 private:
  std::mutex mutex_;

  typedef boost::intrusive::list_member_hook<> LruLink;

  struct Entry {
    std::string path;
    ElfFile file;
    LruLink lruLink; // Hook linking this entry into lruList_.
  };

  // Alias a cached Entry's ElfFile as a shared_ptr for callers.
  static std::shared_ptr<ElfFile> filePtr(const std::shared_ptr<Entry>& e);

  size_t capacity_;
  // NOTE(review): the StringPiece keys presumably point into the
  // corresponding Entry::path so they stay valid while the entry lives —
  // verify against the .cpp before relying on this.
  std::unordered_map<StringPiece, std::shared_ptr<Entry>, Hash> files_;

  typedef boost::intrusive::list<
      Entry,
      boost::intrusive::member_hook<Entry, LruLink, &Entry::lruLink>,
      boost::intrusive::constant_time_size<false>>
      LruList;
  LruList lruList_;
};

} // namespace symbolizer
} // namespace folly
aloknnikhil/folly
folly/container/test/F14TestUtil.h
/* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstddef> #include <ostream> #include <vector> #include <folly/container/detail/F14Policy.h> #include <folly/container/detail/F14Table.h> namespace folly { namespace f14 { struct Histo { std::vector<std::size_t> const& data; }; inline std::ostream& operator<<(std::ostream& xo, Histo const& histo) { xo << "["; size_t sum = 0; for (auto v : histo.data) { sum += v; } size_t partial = 0; for (size_t i = 0; i < histo.data.size(); ++i) { if (i > 0) { xo << ", "; } partial += histo.data[i]; if (histo.data[i] > 0) { xo << i << ": " << histo.data[i] << " (" << (static_cast<double>(partial) * 100.0 / sum) << "%)"; } } xo << "]"; return xo; } inline double expectedProbe(std::vector<std::size_t> const& probeLengths) { std::size_t sum = 0; std::size_t count = 0; for (std::size_t i = 1; i < probeLengths.size(); ++i) { sum += i * probeLengths[i]; count += probeLengths[i]; } return static_cast<double>(sum) / static_cast<double>(count); } // Returns i such that probeLengths elements 0 to i (inclusive) account // for at least 99% of the samples. 
inline std::size_t p99Probe(std::vector<std::size_t> const& probeLengths) { std::size_t count = 0; for (std::size_t i = 1; i < probeLengths.size(); ++i) { count += probeLengths[i]; } std::size_t rv = probeLengths.size(); std::size_t suffix = 0; while ((suffix + probeLengths[rv - 1]) * 100 <= count) { --rv; } return rv; } inline std::ostream& operator<<(std::ostream& xo, F14TableStats const& stats) { xo << "{ " << std::endl; xo << " policy: " << stats.policy << std::endl; xo << " size: " << stats.size << std::endl; xo << " valueSize: " << stats.valueSize << std::endl; xo << " bucketCount: " << stats.bucketCount << std::endl; xo << " chunkCount: " << stats.chunkCount << std::endl; xo << " chunkOccupancyHisto" << Histo{stats.chunkOccupancyHisto} << std::endl; xo << " chunkOutboundOverflowHisto" << Histo{stats.chunkOutboundOverflowHisto} << std::endl; xo << " chunkHostedOverflowHisto" << Histo{stats.chunkHostedOverflowHisto} << std::endl; xo << " keyProbeLengthHisto" << Histo{stats.keyProbeLengthHisto} << std::endl; xo << " missProbeLengthHisto" << Histo{stats.missProbeLengthHisto} << std::endl; xo << " totalBytes: " << stats.totalBytes << std::endl; xo << " valueBytes: " << (stats.size * stats.valueSize) << std::endl; xo << " overheadBytes: " << stats.overheadBytes << std::endl; if (stats.size > 0) { xo << " overheadBytesPerKey: " << (static_cast<double>(stats.overheadBytes) / static_cast<double>(stats.size)) << std::endl; } xo << "}"; return xo; } } // namespace f14 } // namespace folly
AndyM129/AMKPromptView
Example/AMKPromptView/AMKPlaceholderView+Factory.h
//
//  AMKPlaceholderView+Factory.h
//  AMKPromptView_Example
//
//  Created by 孟昕欣 on 2019/7/3.
//  Copyright © 2019 https://github.com/AndyM129. All rights reserved.
//

#import <AMKPromptView/AMKPlaceholderView.h>
#import <YYCategories/YYCategories.h>
#import <Masonry/Masonry.h>

/// Placeholder view: factory methods for prebuilt placeholder styles.
@interface AMKPlaceholderView (Factory)

+ (instancetype)restrictedView; //!< Permission / not-authorized prompt
+ (instancetype)errorView; //!< Error prompt
+ (instancetype)emptyViewForNoColorsLoaded; //!< Empty-data prompt
+ (instancetype)loadingViewWithActivityIndicator; //!< Loading prompt (activity indicator)
+ (instancetype)loadingViewWithGif; //!< Loading prompt (animated GIF)
+ (instancetype)emptyViewFor500PX;
+ (instancetype)loadingViewFor500PX;

#pragma mark -

/// Show or hide the receiver in the given superview, optionally animated.
- (void)show:(BOOL)willShow inView:(UIView *)superview animated:(BOOL)animated;

@end
AndyM129/AMKPromptView
AMKPromptView/Classes/AMKPlaceholderView/AMKPlaceholderView.h
<gh_stars>0
//
//  AMKPlaceholderView.h
//  Pods
//
//  Created by Andy on 2017/11/4.
//

#import <UIKit/UIKit.h>

/// Placeholder view
@interface AMKPlaceholderView : UIView

@property(nonatomic, strong, nullable) UIView *contentView; //!< Content view (setting its height/width pins that dimension; 0 means auto layout)
@property(nonatomic, assign) UIOffset contentViewOffset UI_APPEARANCE_SELECTOR; //!< Offset of the content view from center
@property(nonatomic, assign) UIEdgeInsets contentViewEdgeInsets UI_APPEARANCE_SELECTOR; //!< Content view padding (insets)

@property(nonatomic, strong, nullable) UIView *indicatorView; //!< Indicator view (frame.size must be set)
@property(nonatomic, assign) CGFloat indicatorViewMarginBottom; //!< Bottom margin below the indicator view

@property(nonatomic, strong, nullable) UILabel *titleLabel; //!< Title label
@property(nonatomic, assign) CGFloat titleLabelMarginBottom; //!< Bottom margin below the title
@property(nonatomic, strong, nullable) UIFont *titleLabelTextFont UI_APPEARANCE_SELECTOR; //!< Title font
@property(nonatomic, strong, nullable) UIColor *titleLabelTextColor UI_APPEARANCE_SELECTOR; //!< Title text color

@property(nonatomic, strong, nullable) UILabel *subTitleLabel; //!< Subtitle label
@property(nonatomic, assign) CGFloat subTitleLabelMarginBottom; //!< Bottom margin below the subtitle
@property(nonatomic, strong, nullable) UIFont *subTitleLabelTextFont UI_APPEARANCE_SELECTOR; //!< Subtitle font
@property(nonatomic, strong, nullable) UIColor *subTitleLabelTextColor UI_APPEARANCE_SELECTOR; //!< Subtitle text color

@property(nonatomic, strong, nullable) UIButton *button; //!< Action button
@property(nonatomic, strong, nullable) UITapGestureRecognizer *tapGestureRecognizer; //!< Tap gesture recognizer

@end

#pragma mark -

/// Placeholder view: subclassing hooks for customization
@interface AMKPlaceholderView (UISubclassingHooks)

/// Re-layout subviews according to the constraints; override this method
/// if a custom layout is needed.
- (void)relayoutSubviews;

@end

#pragma mark -

/// Animation protocol for placeholder indicator views
@protocol AMKPlaceholderIndicatorAnimationProtocol <NSObject>
@required
- (void)startAnimating;
- (void)stopAnimating;
- (BOOL)isAnimating;
@end

#pragma mark -

/// UIImageView conforms to AMKPlaceholderIndicatorAnimationProtocol by default
@interface UIImageView (AMKPlaceholderView) <AMKPlaceholderIndicatorAnimationProtocol>
@end

#pragma mark -

/// UIActivityIndicatorView conforms to AMKPlaceholderIndicatorAnimationProtocol by default
@interface UIActivityIndicatorView (AMKPlaceholderView) <AMKPlaceholderIndicatorAnimationProtocol>
@end

#pragma mark -

@interface UIImage (AMKPlaceholderView)

/// Create an image with the given color, size, and corner radius
+ (UIImage *_Nullable)amkpv_imageWithColor:(UIColor *_Nullable)color size:(CGSize)size cornerRadius:(CGFloat)cornerRadius;

/// Load the array of images at the given path (i.e. an animated GIF)
+ (nullable NSArray<UIImage *> *)amkpv_imagesWithContentsOfFile:(nullable NSString *)path;

@end
AndyM129/AMKPromptView
Example/AMKPromptView/AMKDemoViewController.h
<filename>Example/AMKPromptView/AMKDemoViewController.h
//
//  AMKDemoViewController.h
//  AMKPromptView_Example
//
//  Created by 孟昕欣 on 2019/7/4.
//  Copyright © 2019 https://github.com/AndyM129. All rights reserved.
//

#import <UIKit/UIKit.h>
#import "AMKPromptView+Factory.h"
#import <AMKPromptView/UIViewController+AMKPromptView.h>

/// Demo view controller for the AMKPromptView example app.
@interface AMKDemoViewController : UIViewController

@end
AndyM129/AMKPromptView
Example/AMKPromptView/AMKAppDelegate.h
<reponame>AndyM129/AMKPromptView
//
//  AMKAppDelegate.h
//  AMKPromptView
//
//  Created by AndyM129 on 07/03/2019.
//  Copyright (c) 2019 https://github.com/AndyM129 . All rights reserved.
//

@import UIKit;

/// Application delegate for the AMKPromptView example app.
@interface AMKAppDelegate : UIResponder <UIApplicationDelegate>

@property (strong, nonatomic) UIWindow *window;

@end
AndyM129/AMKPromptView
AMKPromptView/Classes/UIViewControllerExtensionMethods/UIViewController+AMKPromptView.h
//
//  UIViewController+AMKPromptView.h
//  AMKPromptView
//
//  Created by 孟昕欣 on 2019/7/4.
//

#import <UIKit/UIKit.h>
#import "AMKPromptView.h"

/// Define the AMKPV_SHORTHAND_GLOBALS macro before importing this header to
/// call the APIs below without the amk_ prefix.
#ifdef AMKPV_SHORTHAND_GLOBALS
#define amk_promptView promptView
#define setAmk_promptView setPromptView
#define amk_placeholderViewWithPromptStatus placeholderViewWithPromptStatus
#endif

/// View controller category that attaches a prompt view.
@interface UIViewController (AMKPromptView) <AMKPromptViewDelegate>

/// The prompt view attached to this view controller.
@property(nonatomic, strong, nullable) AMKPromptView *amk_promptView;

#pragma mark - Subclassing Hooks

/// Override in subclasses to provide a custom placeholder view for the given prompt status.
- (AMKPlaceholderView *_Nullable)amk_placeholderViewWithPromptStatus:(AMKPromptStatus)status;

@end
AndyM129/AMKPromptView
AMKPromptView/Classes/AMKPromptView/AMKPromptView.h
<reponame>AndyM129/AMKPromptView
//
//  AMKPromptView.h
//  Pods
//
//  Created by Andy on 2017/11/4.
//

#import <UIKit/UIKit.h>
#import "AMKPlaceholderView.h"

@class AMKPromptView;

/// Prompt status.
typedef NS_ENUM(NSInteger, AMKPromptStatus) {
    AMKPromptStatusHidden = 0, //!< No prompt view is shown
    AMKPromptStatusRestricted, //!< Unauthorized prompt (e.g. not logged in)
    AMKPromptStatusLoading,    //!< Loading prompt
    AMKPromptStatusError,      //!< Error prompt
    AMKPromptStatusEmpty       //!< Empty-content prompt
};

/// Completion block invoked after a prompt view transition finishes.
typedef void(^AMKPromptViewCompletionBlock)(AMKPromptView *_Nullable promptView, BOOL finished);

/// Prompt view delegate.
@protocol AMKPromptViewDelegate <NSObject>
@optional
- (void)promptView:(AMKPromptView *_Nullable)promptView didClickPlaceholderView:(AMKPlaceholderView *_Nullable)placeholderView inStatus:(AMKPromptStatus)status;
- (void)promptView:(AMKPromptView *_Nullable)promptView didTapPlaceholderView:(AMKPlaceholderView *_Nullable)placeholderView inStatus:(AMKPromptStatus)status;
@end

/// Prompt view: hosts one placeholder view per status and animates between them.
@interface AMKPromptView : UIView

@property(nonatomic, strong, nullable) AMKPlaceholderView *restrictedView; //!< Unauthorized placeholder view
@property(nonatomic, strong, nullable) AMKPlaceholderView *loadingView; //!< Loading placeholder view
@property(nonatomic, strong, nullable) AMKPlaceholderView *errorView; //!< Error placeholder view
@property(nonatomic, strong, nullable) AMKPlaceholderView *emptyView; //!< Empty-content placeholder view
@property(nonatomic, assign) AMKPromptStatus status; //!< Current prompt status
@property(nonatomic, assign) NSTimeInterval animationDuration UI_APPEARANCE_SELECTOR; //!< Transition animation duration, 0.3s by default
@property(nonatomic, weak, nullable) id<AMKPromptViewDelegate> delegate; //!< Delegate

+ (instancetype _Nullable)promptViewWithDelegate:(id<AMKPromptViewDelegate> _Nullable)delegate;
- (instancetype _Nullable)initWithDelegate:(id<AMKPromptViewDelegate> _Nullable)delegate;
- (void)setStatus:(AMKPromptStatus)status animated:(BOOL)animated completion:(AMKPromptViewCompletionBlock _Nullable)completion; //!< Changes the prompt status

@end

#pragma mark -

#ifdef DEBUG
/// Debug helpers.
@interface AMKPromptView (Debug)
@property(nonatomic, readonly) AMKPromptStatus nextStatus; //!< The next valid prompt status
@end
#endif
AndyM129/AMKPromptView
Example/AMKPromptView/AMKPromptView+Factory.h
//
//  AMKPromptView+Factory.h
//  AMKPromptView_Example
//
//  Created by 孟昕欣 on 2019/7/3.
//  Copyright © 2019 https://github.com/AndyM129. All rights reserved.
//

#import <AMKPromptView/AMKPromptView.h>
#import "AMKPlaceholderView+Factory.h"

/// Prompt demo types listed by the example app.
typedef NS_ENUM(NSInteger, AMKPromptType) {
    AMKPromptTypeEmptyViewForNoColorsLoaded = 0, //!< Show the placeholder view on self.view
    AMKPromptTypeEmptyViewForNoColorsLoadedInWindow, //!< Show the placeholder view on the window
    AMKPromptTypeLoadingViewWithActivityIndicator, //!<
    AMKPromptTypeLoadingViewWithGif, //!<
    AMKPromptTypeCountInSectionOne, //!< Count of entries in section one
    AMKPromptTypeDemo, //!<
    AMKPromptTypeDemoViewController, //!<
    AMKPromptTypeCountInSectionTwo, //!< Count of entries in section two
    AMKPromptType500PX, //!<
};

/// Prompt view: factory category.
@interface AMKPromptView (Factory)

/// Creates a prompt view preconfigured for the demo.
+ (instancetype)promptViewForDemo;

@end
AndyM129/AMKPromptView
Example/AMKPromptView/AMKCustomViewController.h
<gh_stars>0
//
//  AMKCustomViewController.h
//  AMKPromptView_Example
//
//  Created by 孟昕欣 on 2019/7/4.
//  Copyright © 2019 https://github.com/AndyM129. All rights reserved.
//

#import <UIKit/UIKit.h>
#import "AMKPromptView+Factory.h"

/// Custom page for previewing prompt views of different styles.
@interface AMKCustomViewController : UIViewController

/// Creates the controller for the given prompt demo type.
- (instancetype)initWithPromptType:(AMKPromptType)promptType;

@end
AndyM129/AMKPromptView
Example/AMKPromptView/AMKViewController.h
<gh_stars>0
//
//  AMKViewController.h
//  AMKPromptView
//
//  Created by AndyM129 on 07/03/2019.
//  Copyright (c) 2019 https://github.com/AndyM129 . All rights reserved.
//

#import <UIKit/UIKit.h>

/// The example app's root view controller.
@interface AMKViewController : UIViewController

@end
danesh-d/do-sort
src/cocktail_sort.h
#ifndef COCKTAIL_SORT_H #define COCKTAIL_SORT_H #include "do_sort.h" using namespace std; namespace do_sort { // --- Cocktail sort. template <class T> class cocktail_sort : public sort<T> { protected: void specific_do_sort() { LL n = sort<T>::size(); if (n <= 1) { return; } LL upper_bound = n - 1; LL lower_bound = 0; bool swapped = true; if (sort<T>::asc) { // Sort the data in ascending order. while (swapped) { swapped = false; LL tmp = 0; for (LL i = 0; i < upper_bound; ++i) { if (sort<T>::v[i] > sort<T>::v[i + 1]) { swap(sort<T>::v[i], sort<T>::v[i + 1]); swapped = true; // Save the place of last swap. The process in the next round // will continue up to this point since the rest of the list // will be remain sorted if no swap has happened. tmp = i + 1; } } // Shrink the range of the sorting since it is needed to continue // the process up to the last non-sorted element in the list. upper_bound = tmp - 1; if (!swapped) { // No swap has been done, so no reverse check is needed and the // loop will be terminated. break; } for (LL i = upper_bound; i >= 0; --i) { if (sort<T>::v[i] > sort<T>::v[i + 1]) { swap(sort<T>::v[i], sort<T>::v[i + 1]); swapped = true; // Save the place of last swap. The process in the next round // will start from this point since the rest of the list will be // remain sorted if no swap has happened. tmp = i + 1; } } lower_bound = tmp; } // End for the main while. } else { // Sort the data in descending order. while (swapped) { swapped = false; LL tmp = 0; for (LL i = 0; i < upper_bound; ++i) { if (sort<T>::v[i] < sort<T>::v[i + 1]) { swap(sort<T>::v[i], sort<T>::v[i + 1]); swapped = true; // Save the place of last swap. The process in the next round // will continue up to this point since the rest of the list // will be remain sorted if no swap has happened. tmp = i + 1; } } // Shrink the range of the sorting since it is needed to continue // the process up to the last non-sorted element in the list. 
upper_bound = tmp - 1; if (!swapped) { // No swap has been done, so no reverse check is needed and the // loop will be terminated. break; } for (LL i = upper_bound; i >= 0; --i) { if (sort<T>::v[i] < sort<T>::v[i + 1]) { swap(sort<T>::v[i], sort<T>::v[i + 1]); swapped = true; // Save the place of last swap. The process in the next round // will start from this point since the rest of the list will be // remain sorted if no swap has happened. tmp = i + 1; } } lower_bound = tmp; } // End for the main while. } } public: cocktail_sort() { } virtual ~cocktail_sort() { } }; } #endif /* COCKTAIL_SORT_H */
danesh-d/do-sort
src/heap_sort.h
<reponame>danesh-d/do-sort<filename>src/heap_sort.h #ifndef HEAP_SORT_H #define HEAP_SORT_H #include "do_sort.h" using namespace std; namespace do_sort { // --- Heap sort. template <class T> class heap_sort : public sort<T> { protected: void specific_do_sort() { LL n = sort<T>::size(); if (n <= 1) { return; } // Build the heap from the input data. The heap will be built in place // by re-arranging the input data. for (LL i = n / 2 - 1; i >= 0; --i) { sort<T>::heapify(sort<T>::v, n, i, sort<T>::asc); } // Extract each element from the heap and set as local root. Then move // the local root to the end of the heap and the heapy the reduce heap // starting from the local root. for (LL i = n - 1; i >= 0; --i) { swap(sort<T>::v[0], sort<T>::v[i]); sort<T>::heapify(sort<T>::v, i, 0, sort<T>::asc); } } public: heap_sort() { } virtual ~heap_sort() { } }; } #endif /* HEAP_SORT_H */
danesh-d/do-sort
src/insertion_sort.h
#ifndef INSERTION_SORT_H #define INSERTION_SORT_H #include "do_sort.h" using namespace std; namespace do_sort { // --- Insertion sort. template <class T> class insertion_sort : public sort<T> { protected: void specific_do_sort() { LL n = sort<T>::size(); if (n <= 1) { return; } if (sort<T>::asc) { // Sort the data in ascending order. for (LL i = 1; i < n; i++) { T key = sort<T>::v[i]; LL j = i - 1; while ((j >= 0) && (sort<T>::v[j] > key)) { sort<T>::v[j + 1] = sort<T>::v[j]; --j; } sort<T>::v[j + 1] = key; } } else { // Sort the data in descending order. for (LL i = 1; i < n; i++) { T key = sort<T>::v[i]; LL j = i - 1; while ((j >= 0) && (sort<T>::v[j] < key)) { sort<T>::v[j + 1] = sort<T>::v[j]; --j; } sort<T>::v[j + 1] = key; } } } public: insertion_sort() { } virtual ~insertion_sort() { } }; } #endif /* INSERTION_SORT_H */
danesh-d/do-sort
src/tree_sort.h
#ifndef TREE_SORT_H
#define TREE_SORT_H

#include "do_sort.h"
#include "BST.h"

using namespace std;

namespace do_sort {
  // --- Tree sort: insert all elements into a binary search tree, then read
  // them back via an in-order (or reverse in-order) traversal.
  template <class T>
  class tree_sort : public sort<T>, private BST<T> {
    private:
      // Construct the BST from the input data.
      // NOTE(review): insert() receives pointers *into* `v`, and read_tree()
      // later writes the sorted order back into the same vector. This is only
      // safe if BST copies the pointed-to values at insert time — confirm
      // against BST.h.
      void create_tree(vector<T>& v, LL n) {
        for (LL i = 0; i < n; i++) {
          BST<T>::insert(&v[i]);
        }
      }

      // Dump the tree elements into v, either in ascending or descending order.
      void read_tree(vector<T>& v, bool asc) {
        BST<T>::dump(v, asc);
      }

      // Destroy the BST that was built from the input data.
      void destroy_tree() {
        BST<T>::destroy();
      }

    protected:
      // Sort this->v by building a BST and traversing it.
      void specific_do_sort() {
        LL n = sort<T>::size();
        if (n <= 1) {
          return;
        }

        // Create a sorted tree.
        create_tree(sort<T>::v, n);

        // Read back the tree elements in a sorted fashion.
        read_tree(sort<T>::v, sort<T>::asc);

        // Destroy the tree from its root.
        destroy_tree();
      }

    public:
      tree_sort() {
      }

      virtual ~tree_sort() {
      }
  };
}

#endif /* TREE_SORT_H */
danesh-d/do-sort
src/quick_sort.h
#ifndef QUICK_SORT_H #define QUICK_SORT_H #include "do_sort.h" using namespace std; namespace do_sort { // --- Quick sort implementation. template <class T> class quick_sort : public sort<T> { private: LL partition(vector<T>& v, LL left, LL right, bool asc) { T piv = v[right]; // Initial guess of the pivot is the last element. LL ind = left - 1; if (asc) { for (LL i = left; i <= right - 1; ++i) { if (v[i] <= piv) { // Swap all elements smaller than pivot from left to right and // then swap the pivot itself when processing of all elements is // done. swap(sort<T>::v[++ind], sort<T>::v[i]); } } } else { for (LL i = left; i <= right - 1; ++i) { if (v[i] >= piv) { // Swap all elements smaller than pivot from left to right and // then swap the pivot itself when processing of all elements is // done. swap(sort<T>::v[++ind], sort<T>::v[i]); } } } // Adjust the place of the pivot and put it in the correct place in the // final sorted list. swap(sort<T>::v[++ind], sort<T>::v[right]); // Return the correct place of the pivot, so the sorting will continue // for the two halfs, partitioned by the pivot. return ind; } void qs_partition(vector<T>& v, LL left, LL right) { if (left < right) { // Partition the array based on the two given bounds. LL piv_ind = partition(v, left, right, sort<T>::asc); // Sort two separated partitions based on the pivot. qs_partition(v, left, piv_ind - 1); qs_partition(v, piv_ind + 1, right); } } protected: void specific_do_sort() { if (sort<T>::size() <= 1) { return; } qs_partition(sort<T>::v, 0, sort<T>::size() - 1); } public: quick_sort() { } virtual ~quick_sort() { } }; } #endif /* QUICK_SORT_H */
danesh-d/do-sort
src/do_sort.h
#ifndef DO_SORT_H
#define DO_SORT_H

#include <vector>
#include <list>
#include <string>
#include <iostream>
#include <algorithm>
#include <utility>
#include <ctime>
#include <string.h>
#include <stdlib.h>

typedef long L;
typedef long long LL;
typedef unsigned long UL;
typedef unsigned long long ULL;
typedef long double LD;

using namespace std;

namespace do_sort {
  // Abstract base class for all sorting algorithms. Owns the data being
  // sorted, provides shared helpers (merge, heapify) for derived algorithms,
  // and times the sort through the public do_sort() entry point.
  template <class T>
  class sort {
    protected:
      vector<T> v; // A sequential array-based data structure holding the data.
      list<T> l;   // A two-way linked list data structure (unused by the base class).
      bool asc;    // True to sort in ascending order, false for descending.

      // Each sorting algorithm implements this function, which is never
      // called directly but only through the public do_sort() function.
      virtual void specific_do_sort() = 0;

      // Merge the two adjacent sorted runs v[low..border-1] and
      // v[border..high] through the scratch buffer aux, then copy the merged
      // result back into v.
      void merge(vector<T>& v, vector<T>& aux, LL low, LL border, LL high,
                 bool asc) {
        LL num = high - low + 1;
        LL ptr = low;
        LL low_end = border - 1;

        // Merge the two runs until one of them is exhausted.
        if (asc) {
          // Merge the two runs while maintaining the ascending order.
          while (low <= low_end && border <= high) {
            aux[ptr++] = (v[low] < v[border]) ? v[low++] : v[border++];
          }
        } else {
          // Merge the two runs while maintaining the descending order.
          while (low <= low_end && border <= high) {
            aux[ptr++] = (v[low] > v[border]) ? v[low++] : v[border++];
          }
        }

        // Copy whatever remains of the left run.
        while (low <= low_end) {
          aux[ptr++] = v[low++];
        }

        // Copy whatever remains of the right run.
        while (border <= high) {
          aux[ptr++] = v[border++];
        }

        // Copy the merged range back into the original vector.
        copy(aux.begin() + (high - num + 1), aux.begin() + (high + 1),
             v.begin() + (high - num + 1));
      }

      // Sift the element at index i down into place within the binary heap
      // stored in v[0..n-1]. For ascending sorts a max-heap is maintained,
      // for descending sorts a min-heap. Passing i == 0 heapifies from the
      // root.
      void heapify(vector<T>& v, LL n, LL i, bool asc) {
        LL hit_ind = i;
        LL l = 2 * i + 1;
        LL r = 2 * i + 2;

        // Pick the "hit" node (largest for ascending, smallest for
        // descending) among the root and its two children.
        if (asc) {
          hit_ind = (l < n && v[l] > v[hit_ind]) ? l : hit_ind;
          hit_ind = (r < n && v[r] > v[hit_ind]) ? r : hit_ind;
        } else {
          hit_ind = (l < n && v[l] < v[hit_ind]) ? l : hit_ind;
          hit_ind = (r < n && v[r] < v[hit_ind]) ? r : hit_ind;
        }

        if (hit_ind != i) {
          // The hit node was not the root: swap them and continue sifting
          // down from the child's position. The recursion forwards the
          // *parameter* asc; the original passed the member sort<T>::asc,
          // which silently ignored the caller's argument.
          swap(v[i], v[hit_ind]);
          heapify(v, n, hit_ind, asc);
        }
      }

    public:
      // Default to ascending so the flag is never read uninitialized.
      sort() : asc(true) {
      }

      virtual ~sort() {
        v.clear();
      }

      // Copy the input data into the internal buffer; returns the element
      // count. Note that while the vector object itself lives wherever the
      // sorter does, the elements are heap-allocated and managed by the
      // container.
      virtual size_t set_data(vector<T>& arr) {
        v = arr;
        return v.size();
      }

      // Return the element at the given index.
      virtual T get_data_at(size_t i) {
        return v[i];
      }

      // Discard the held data.
      virtual void clear_data() {
        v.clear();
      }

      // Print every element to stdout, preceded by a title line.
      void dump(string title) {
        cout << "---------- " << title << " ----------" << endl;
        for (LL i = 0; i < size(); ++i) {
          cout << "v[" << i << "]: " << v[i] << endl;
        }
      }

      // Sort the held data in the requested order. When elapsed_time is
      // true, returns the wall-clock seconds spent sorting; otherwise
      // returns 0.0.
      virtual LD do_sort(bool elapsed_time, bool asc = true) {
        LD t = 0.0;
        this->asc = asc;
        if (!elapsed_time) {
          specific_do_sort();
        } else {
          clock_t begin = clock();
          specific_do_sort();
          clock_t end = clock();
          t = (LD)(end - begin) / CLOCKS_PER_SEC;
        }
        return t;
      }

      // Return the size of the held data. The type is signed on purpose:
      // several algorithms (e.g. quick sort) rely on index arithmetic that
      // can go negative as a termination condition.
      LL size() {
        return (LL)v.size();
      }

      // Return true if no data is held.
      bool empty() {
        return v.empty();
      }

      // Get the whole sorted data at once. Copies; may be costly for large
      // data sets.
      vector<T> getResult() {
        return v;
      }

      // Access the sorted data by index (returns a copy of the element).
      T operator[](size_t i) {
        return v[i];
      }
  };
}

#endif /* DO_SORT_H */
danesh-d/do-sort
src/selection_sort.h
#ifndef SELECTION_SORT_H #define SELECTION_SORT_H #include "do_sort.h" using namespace std; namespace do_sort { // --- Selection sort. template <class T> class selection_sort : public sort<T> { protected: void specific_do_sort() { LL n = sort<T>::size(); if (n <= 1) { return; } if (sort<T>::asc) { // The data will be sorted in ascending order. for (LL i = 0; i < n; ++i) { T key = sort<T>::v[i]; LL ind = i; // From the current element to the end of the list, find the minimum // value and swap it with the current element. for (LL j = i + 1; j < n; ++j) { if (sort<T>::v[j] < key) { key = sort<T>::v[j]; ind = j; } } if (ind != i) { swap(sort<T>::v[i], sort<T>::v[ind]); } } } else { // The data will be sorted in descending order. for (LL i = 0; i < n; ++i) { T key = sort<T>::v[i]; LL ind = i; // From the current element to the end of the list, find the minimum // value and swap it with the current element. for (LL j = i + 1; j < n; ++j) { if (sort<T>::v[j] > key) { key = sort<T>::v[j]; ind = j; } } if (ind != i) { swap(sort<T>::v[i], sort<T>::v[ind]); } } } } public: selection_sort() { } virtual ~selection_sort() { } }; } #endif /* SELECTION_SORT_H */
danesh-d/do-sort
src/shell_sort.h
<filename>src/shell_sort.h #ifndef SHELL_SORT_H #define SHELL_SORT_H #include "do_sort.h" #define MAX_GAPS 9 using namespace std; namespace do_sort { // --- Shell sort implementation. template <class T> class shell_sort : public sort<T> { private: vector<LL> gaps; // This function generates the sequence of gaps. The original // implementation is baed on the origtinal sequence used in the Shell's // algorithm. However, other sequence of gaps such as Knuth's can be used. // In that case, the user needs to re-implement the gap generator. void generate_gaps(LL n) { LL gap = n; while ((gap = gap >> 1) > 0) { gaps.push_back(gap); } } protected: void specific_do_sort() { LL n = sort<T>::size(); if (n <= 1) { return; } // Generate sequence of gaps where the sorting (far elements first) will // be based on this sequence. generate_gaps(n); // Different gap sequences can be used. The gap sequence which is used // here is the original sequence which is [n / 2 ^ k]. if (sort<T>::asc) { for (vector<LL>::iterator it = gaps.begin(); it != gaps.end(); ++it) { LL gap = *it; for (LL i = gap; i < n; ++i) { for (LL j = i - gap; j >= 0 && sort<T>::v[j] > sort<T>::v[j + gap]; j -= gap) { swap(sort<T>::v[j + gap], sort<T>::v[j]); } } } } else { for (vector<LL>::iterator it = gaps.begin(); it != gaps.end(); ++it) { LL gap = *it; for (LL i = gap; i < n; ++i) { for (LL j = i - gap; j >= 0 && sort<T>::v[j] < sort<T>::v[j + gap]; j -= gap) { swap(sort<T>::v[j + gap], sort<T>::v[j]); } } } } } public: shell_sort() { gaps.clear(); } virtual ~shell_sort() { gaps.clear(); } }; } #endif /* SHELL_SORT_H */